diff --git "a/1684.jsonl" "b/1684.jsonl" new file mode 100644--- /dev/null +++ "b/1684.jsonl" @@ -0,0 +1,707 @@ +{"seq_id":"27327291948","text":"#! /usr/bin/python\n#-*- coding: utf-8 -*-\n\n# Ce programme applique la feuille de style contenu dans le fichier \n# \"outils2html.xsl\"\n# au document XML contenu dans le fichier \"outils_xml.xml\"\n\nimport libxml2\nimport libxslt\n\n# \"Parsing\" du document XML\ndoc = libxml2.parseFile(\"outils_xml.xml\");\n\n# Parsing de la feuille de style\ndoc_xslt = libxml2.parseFile(\"outils2html.xsl\");\n\n# Instanciation de la transformation (à faire 1x)\nxslt = libxslt.parseStylesheetDoc(doc_xslt)\n\n# application de la transformation\nresult = xslt.applyStylesheet(doc,None)\n\nresult.htmlSaveFile(\"result.html\")\n# si le résultat était en XML, il faudrait utiliser :\n# result.saveFormatFile(\"result.xml\",1)\n\nresult.freeDoc()\ndoc.freeDoc()\n","repo_name":"master-defi-xml/courses","sub_path":"pages/020.cours/05.programmation_xml/02.programmation_python/exemple.py","file_name":"exemple.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12893176903","text":"\"\"\"\nFile: titanic_batch_gradient_descent.py\nName: Johnson\n-----------------------------\nThis file demonstrates how to use batch\ngradient descent to update weights by numpy \narray. The training process should be way\nfaster than stochastic gradient descent\n(You should see a smooth training curve as well)\n-----------------------------\nw1.shape = (n, 1)\nX.shape = (n, m)\nH.shape = (1, m)\nY.shape = (1, m)\n\"\"\"\n\n\nimport time\nimport numpy as np\n\n\nTRAIN = 'titanic_data/train.csv'\nNUM_EPOCHS = 60000\nALPHA = 0.05\n\n\ndef main():\n\tstart = time.time()\n\tX_train, Y = data_preprocessing()\n\tprint('Y.shape', Y.shape)\n\tprint('X.shape', X_train.shape)\n\t# ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\n\tn, m = X_train.shape\n\tX = normalize(X_train)\n\tW, b = batch_gradient_descent(X, Y)\n\tK = W.T.dot(X)+b\n\tpredictions = np.where(K > 0, 1, 0)\n\tacc = np.equal(predictions, Y)\n\tnum_acc = np.sum(acc)\n\tprint(num_acc/m)\n\tend = time.time()\n\tprint('Total run time (Batch-GD):', end-start)\n\n\ndef normalize(X):\n\t\"\"\"\n\t:param X: numpy_array, the dimension is (n, m)\n\t:return: numpy_array, the values are normalized, where the dimension is still (n, m)\n\t\"\"\"\n\tmin_arr = np.amin(X, axis=1, keepdims=True)\n\tmax_arr = np.amax(X, axis=1, keepdims=True)\n\treturn (X-min_arr)/(max_arr-min_arr)\n\n\ndef batch_gradient_descent(X, Y):\n\t\"\"\"\n\t:param X: numpy_array, the array holding all the training data\n\t:param Y: numpy_array, the array holding all the ture labels in X\n\t:return: numpy_array, the trained weights with dimension (n, m)\n\t\"\"\"\n\tnp.random.seed(0)\n\t# Initialize w and b\n\tn, m = X.shape\n\tW = np.random.rand(n, 1) - 0.5\n\tb = np.random.rand(1, 1) - 0.5\n\n\t# Start Training\n\tprint_every = 1000\n\tfor epoch in range(NUM_EPOCHS):\n\t\tK = W.T.dot(X) + b\n\t\tH = 1/(1+np.exp(-K))\n\t\tL = -(Y*np.log(H)+(1-Y)*np.log(1-H))\n\t\tJ = (1/m)*np.sum(L)\n\t\tif epoch % print_every == 0:\n\t\t\tprint('Cost: ', J)\n\t\tW = W - ALPHA * ((1/m)*np.sum(X.dot((H-Y).T), axis=1, keepdims=True))\n\t\tb = b - ALPHA * ((1/m)*np.sum(H-Y))\n\treturn W, b\n\n\ndef data_preprocessing(mode='train'):\n\t\"\"\"\n\t:param mode: str, indicating if it's training mode or testing mode\n\t:return: Tuple(numpy_array, numpy_array), the first one is X, the other one is 
Y\n\t\"\"\"\n\tdata_lst = []\n\tlabel_lst = []\n\tfirst_data = True\n\tif mode == 'train':\n\t\twith open(TRAIN, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tdata = line.split(',')\n\t\t\t\t# ['0PassengerId', '1Survived', '2Pclass', '3Last Name', '4First Name', '5Sex', '6Age', '7SibSp', '8Parch', '9Ticket', '10Fare', '11Cabin', '12Embarked']\n\t\t\t\tif first_data:\n\t\t\t\t\tfirst_data = False\n\t\t\t\t\tcontinue\n\t\t\t\tif not data[6]:\n\t\t\t\t\tcontinue\n\t\t\t\tlabel = [int(data[1])]\n\t\t\t\tif data[5] == 'male':\n\t\t\t\t\tsex = 1\n\t\t\t\telse:\n\t\t\t\t\tsex = 0\n\t\t\t\t# ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\n\t\t\t\tpassenger_lst = [int(data[2]), sex, float(data[6]), int(data[7]), int(data[8]), float(data[10])]\n\t\t\t\tdata_lst.append(passenger_lst)\n\t\t\t\tlabel_lst.append(label)\n\telse:\n\t\tpass\n\treturn np.array(data_lst).T, np.array(label_lst).T\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"johnson70630/titanic-data-machine-learning","sub_path":"titanic_deep_learning/titanic_batch_gradient_descent.py","file_name":"titanic_batch_gradient_descent.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34677759925","text":"import pytest\nfrom constants import ROOT_PATH\nfrom tests.lesson_23_homework_db.company_repo import CompanyRepo\n\n\n@pytest.fixture(scope='module')\ndef company_repo():\n return CompanyRepo(f\"{ROOT_PATH}\\\\tests\\\\lesson_23_homework_db\\\\db\\\\my.db\")\n\n\n@pytest.fixture()\ndef fake_product(faker):\n data = {\n \"name\": faker.first_name(), # Didn't find fake product\n \"quantity\": faker.pyint(0, 60),\n \"description\": faker.text(50),\n \"price\": float(faker.pyint(0, 60000))\n }\n return data\n","repo_name":"AndriiBazdukov/PyTest","sub_path":"tests/lesson_23_homework_db/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12649293501","text":"import docker\n\n\nif __name__ == '__main__':\n update_args = {\n 'mem_limit': '4G',\n 'mem_reservation': '2G',\n 'memswap_limit': '8G',\n }\n client = docker.from_env()\n containers = client.containers.list()\n\n for container in containers:\n container.update(**update_args)\n\n\n\n\n","repo_name":"SHTUPLUS/DockerMonitor","sub_path":"scripts/update_container.py","file_name":"update_container.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40494578327","text":"import openpyxl as xl\nimport math\n\nclass Ingredient(object):\n def __init__(self, item_columns, headers, row=0):\n self.row = row + 1\n self.item_info = item_columns\n self.headers = headers\n self.category = [\"Frozen / Other\", \"\"]\n for i, header in enumerate(headers):\n if header == \"Ingredient\":\n self.name = [item_columns[i], header]\n if header == \"Category\":\n self.category = [item_columns[i], header]\n if header == \"Amt\":\n self.amount = [item_columns[i], header]\n if header == \"Unit\" or header == \"Cooking Unit\":\n self.unit = [item_columns[i], header]\n if header == \"Price\":\n self.price = [item_columns[i], header]\n if header == \"Store\":\n self.store = [item_columns[i], header]\n\n\n\nclass Recipe(object):\n def __init__(self, name, ingredients):\n self.row = 0\n self.name = name\n self.ingredients = ingredients\n\n\ndef get_headers(worksheet):\n column 
= worksheet.max_column\n    headers = []\n    for i in range(column):\n        header = worksheet.cell(row=1, column=i+1).value\n        headers.append(header)\n\n    return headers\n\n\ndef get_ingredients(worksheet):\n\n    row = worksheet.max_row\n    column = worksheet.max_column\n    headers = get_headers(worksheet)\n    ingredients = []\n\n    for i in range(row):\n        ing_columns = []\n        for j in range(column):\n            ing_columns.append(worksheet.cell(row=i+1, column=j+1).value)\n\n        ingredient = Ingredient(ing_columns, headers, i)\n        ingredients.append(ingredient)\n\n    return ingredients\n\n\ndef get_recipes(worksheet):\n    recipes = []\n    row = worksheet.max_row\n    for i in range(1, row+1):\n        if worksheet.cell(row=i+1, column=1).value is not None:\n            recipes.append(worksheet.cell(row=i+1, column=1).value)\n\n    return recipes\n\n\ndef get_extra(worksheet, header):\n    column = worksheet.max_column\n    col = 0\n    extra_info = []\n    for i in range(column):\n        if header == worksheet.cell(row=1, column=i+1).value:\n            col = i+1\n            break\n    if col != 0:\n        for i in range(worksheet.max_row-1):\n            value = worksheet.cell(row=i+2, column=col).value\n            if value is None:\n                continue\n            extra_info.append(value)\n    else:\n        print(\"Error: Could not find column header '%s'\" % header)\n        return False\n    return extra_info\n\n\ndef conversion(rec_ing, store_ing, worksheet=None):\n    # from recipe_ingredients to store_ingredients\n    # print(\"Store ingredient price is \" + str(store_ing.price[0]))\n    # print(\"Recipe amount is \" + str(rec_ing.amount[0]))\n    amount = float(rec_ing.amount[0])\n    store_ing.price[0] = float(store_ing.price[0])\n    if rec_ing.unit[0] == \"can\" and store_ing.unit[0] == \"can\":\n        return amount, amount * store_ing.price[0], store_ing.unit[0]\n    elif rec_ing.unit[0] == \"whole\" and store_ing.unit[0] == \"whole\":\n        return amount, amount * store_ing.price[0], store_ing.unit[0]\n    elif rec_ing.unit[0] == \"cup\" and store_ing.unit[0] == \"can\":\n        amount = math.ceil(amount * 237/400)  # 237 mL per cup, ~400 mL per can\n        return amount, amount * store_ing.price[0], store_ing.unit[0]\n    elif rec_ing.unit[0] == \"whole\" and store_ing.unit[0] == \"lbs\":\n        amount, price, unit = convert_to_lbs(worksheet, rec_ing, store_ing.price[0])\n        return amount, price, unit\n    elif rec_ing.unit[0] == \"lbs\" and store_ing.unit[0] == \"lbs\":\n        return amount, amount * store_ing.price[0], store_ing.unit[0]\n    else:\n        return rec_ing.amount[0], store_ing.price[0], rec_ing.unit[0]\n\n# 237 mL/cup\n# 15 mL/tbsp\n# 5 mL/tsp\n# 3750 mL/gal\n\ndef convert_to_lbs(worksheet, rec_ing, price):\n    ws1 = worksheet\n    row = ws1.max_row\n    for i in range(1, ws1.max_row+1):\n        if rec_ing.name[0] == ws1.cell(row=i, column=1).value:\n            row = i\n            break\n    amount = float(rec_ing.amount[0]) * ws1.cell(row=row, column=3).value\n    price = amount * price\n    # return order matches the (amount, price, unit) unpacking in conversion()\n    return amount, price, \"lbs\"\n\n\n","repo_name":"lonesome-polecat/meal_planner","sub_path":"meal_classes.py","file_name":"meal_classes.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2558083928","text":"import datetime\n\n\nfrom database.database_connect import conn\n\nasync def query_score_sql(id: int):\n    # create the cursor outside the try block so the finally clause cannot\n    # reference an unbound name if conn.cursor() itself raises\n    cursor = conn.cursor()\n    try:\n        cursor.execute('SELECT sum(currency) FROM nanami.daliy where qqnum={}'.format(id))\n        conn.commit()\n        desc = cursor.description\n        data_dict = [dict(zip([col[0] for col in desc], row)) for row in cursor.fetchall()]\n        return data_dict[0][\"sum(currency)\"]\n    finally:\n        cursor.close()\n\n\nasync def shopping_sql(id: int, date: 
datetime.date, spend: int):\n    cursor = conn.cursor()\n    try:\n        cursor.execute('insert into daliy(`qqnum`,`date`,`currency`)values({},\'{}\',{})'.format(id, date, spend))\n        conn.commit()\n    finally:\n        cursor.close()\n","repo_name":"coderrbk/NanamiBot","sub_path":"plugins/shopping/data_source.py","file_name":"data_source.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"28798291616","text":"import pandas as pd\ndata=pd.read_csv('./Cinderella.csv')\ndata\n\nimport spacy\nfrom spacy import displacy\nnlp=spacy.load('en_core_web_sm')\ncontents=data['Cinderella']\nstr_format = \"{:>25}\"*4\n\nfor content in contents:\n    nouns=[]\n    if type(content)==float:continue\n    doc = nlp(content)\n    #displacy.render(doc, style=\"dep\",jupyter=True,options={'distance':90})\n    #displacy.render(doc,style='ent',jupyter=True,options={'distance':90})\n    #for token in doc:\n    #    print(token.text, token.pos_, token.tag_, token.ent_type_) \n    for chunk in doc.noun_chunks:\n        nouns.append(chunk.text)\n        #print(str_format.format(chunk.text, chunk.root.text, chunk.root.dep_, chunk.root.head.text))\n    print(nouns)\n\n# If the sentences were simplified, wouldn't noun extraction become more accurate?\n\n# If we scored word similarity over these words and grouped them into clusters, couldn't we clearly separate people from objects?\ndoc = nlp('time girl Cinderella she stepsisters who they dress chores')\nfor t1 in doc:\n    for t2 in doc:\n        print(t1,t2,':',t1.similarity(t2))\n","repo_name":"seawavve/PeekABook","sub_path":"AIcontents/dataProcessing/spaCy/spaCy_noun.py","file_name":"spaCy_noun.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"28001774413","text":"from django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n    path('', views.index, name=\"index\"),\r\n    path('setting', views.setting, name=\"setting\"),\r\n    path('upload', views.upload, name=\"upload\"),\r\n    path('follow', views.follow, name=\"follow\"),\r\n    path('profile/', views.profile, name=\"profile\"),\r\n    path('signup', views.signup, name=\"signup\"),\r\n    path('signin', views.signin, name=\"signin\"),\r\n    path('signout', views.signout, name=\"signout\"),\r\n    path('wiki/', views.wiki, name=\"wiki\"),\r\n    path('make_book', views.make_book, name=\"make_book\"),\r\n    path('book_content/', views.book_content, name=\"book_content\"),\r\n    path('like_post', views.like_post, name=\"like_post\"),\r\n    path('content_read/', views.content_read, name=\"content_read\"),\r\n    path('make_content/', views.make_content, name=\"make_content\")\r\n]","repo_name":"daffadeveloper23/blow","sub_path":"social_pedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26045469294","text":"from otree.api import Currency as c, currency_range\nfrom . 
import models\nfrom ._builtin import Page, WaitPage\nfrom .models import Constants, Message, PrivateMessage\nfrom random import random, randint\nimport json\nfrom django.db.models import Count\n\n\nclass AssignAvatar(Page):\n def is_displayed(self):\n return (not self.player.node.bot) and self.participant.vars['consent']\n \n def before_next_page(self):\n if self.timeout_happened:\n players_change = [player for player in self.player.participant.get_players() if 'node' in player._meta.get_all_field_names()]\n for player in players_change:\n player.node.bot = True\n player.node.save()\n\n\nclass Discuss(Page):\n\n timeout_seconds = 60*10 \n\n def before_next_page(self):\n if self.timeout_happened:\n players_change = [player for player in self.player.participant.get_players() if 'node' in player._meta.get_all_field_names()]\n for player in players_change:\n player.node.bot = True\n player.node.save()\n \n def is_displayed(self):\n return (not self.player.node.bot) and (self.subsession.session.config['condition_messaging'] != 'none') and self.participant.vars['consent'] \n \n def vars_for_template(self):\n group_players = self.group.get_players()\n message_round = group_players[0].participant.vars['message_round']\n \n if self.session.config['instant_messaging'] == 'True':\n message_round = -1\n \n if self.session.config['condition_network_knowledge'] == 'global':\n nodes = self.group.network.getNodes()\n edges = self.group.network.getEdges()\n networkDisplay = 'The'\n elif self.session.config['condition_network_knowledge'] == 'local':\n nodes = self.group.network.get_nodes_from_player(self.player)\n edges = self.group.network.get_edges_from_player(self.player)\n networkDisplay = 'Your'\n else:\n nodes = []\n edges = []\n\n for node in nodes:\n string = node['label'].split('\\n')\n if string[0] == self.player.get_user_name():\n string[0] += ' (You)'\n node['label'] = '\\n'.join(string) \n \n if self.session.config['show_network_threshold'] == 'False':\n for node in nodes:\n node['label'] = node['label'].split('\\n')[0]\n \n group_dict = dict([(node.avatar.get_name(), node.avatar.src) for node in self.group.network.node_set.all()])\n group_dict[self.player.get_user_name() + ' (you)'] = group_dict[self.player.get_user_name()]\n del group_dict[self.player.get_user_name()]\n \n player_node = self.player.node\n print(player_node) \n posted_wall_messages = Message.objects.filter(messageRound = message_round)\n posted_wall_messages = posted_wall_messages.exclude(deleted = True)\n print(posted_wall_messages)\n posted_wall_messages = posted_wall_messages.filter(createdBy = player_node)\n\n wall_counts = posted_wall_messages.values('wall__node').annotate(count=Count('wall__node'))\n wall_counts = {r['wall__node']:r['count'] for r in wall_counts} \n\n posted_pm_messages = PrivateMessage.objects.filter(messageRound = message_round)\n posted_pm_messages = posted_pm_messages.exclude(deleted = True)\n posted_pm_messages = posted_pm_messages.filter(createdBy = player_node)\n \n pm_counts = posted_pm_messages.values('wall__node').annotate(count=Count('wall__node'))\n pm_counts = {r['wall__node']:r['count'] for r in pm_counts} \n\n comm_type = self.session.config['condition_messaging']\n network_type = self.session.config['condition_network_knowledge']\n screenImage = 'instructions/screenshot-{}-{}.png' \n\n initEntryTable = 'none'\n if comm_type == 'bilateral':\n initEntryTable = self.player.get_private_entry_table()\n if comm_type == 'wall':\n initEntryTable = self.player.get_entry_table()\n \n \n return {\n 
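# template context for Discuss.html: this player's identity, the visible network graph, and message-board state\n            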
'avatar': self.player.get_avatar(),\n 'user_name': self.player.get_user_name(),\n 'avatars': dict([(node.id, node.avatar.src) for node in self.group.network.node_set.all()]),\n 'thresholds': dict([(node.id, node.threshold_text) for node in self.group.network.node_set.all()]),\n 'user_names': dict([(node.id, node.avatar.get_name()) for node in self.group.network.node_set.all()]),\n 'neighbor_net': dict(zip([node.id for node in self.player.get_neighbors()],[[node.id for node in P.get_neighbors()] for P in self.player.get_neighbors()])),\n 'neighbors': [node.id for node in self.player.get_neighbors()], \n 'messages': Constants.messages,\n 'wall': json.dumps(self.player.get_messages()),\n 'privateMessages': json.dumps(self.player.get_private_messages()), \n 'nodes': json.dumps(nodes),\n 'edges': json.dumps(edges),\n 'messageRound': message_round,\n 'lastRound': message_round == Constants.num_messaging_rounds,\n 'networkDisplay': networkDisplay,\n 'group': group_dict,\n 'wall_sent_to': wall_counts,\n 'pm_sent_to': pm_counts,\n 'screenImage': screenImage.format(comm_type,network_type),\n 'initEntryTable': json.dumps(initEntryTable),\n }\n\nclass BeginWaitPage(WaitPage):\n\n template_name = 'main/wait_page.html'\n\n def after_all_players_arrive(self):\n pass\n \n\nclass IntermediateWaitPage(WaitPage):\n\n template_name = 'main/wait_page.html'\n\n def after_all_players_arrive(self):\n group_players = self.group.get_players()\n\n### SIMPLE CODE FOR AUTOMATED MESSAGES\n message_round = group_players[0].participant.vars['message_round'] \n# for node in self.group.network.node_set.all():\n# if node.bot:\n# neighbors = node.get_neighbors()\n# for neighbor in neighbors:\n# if int(round(random())) and (self.session.config['condition_messaging'] in ['wall','both']):\n# wall = neighbor.wall_set.first()\n# wall.message_set.add(\n# Message(createdBy=node,\n# messageRound = message_round, \n# message=Constants.messages[randint(1,2)]\n# )\n# )\n# \n# if int(round(random())) and (self.session.config['condition_messaging'] in ['bilateral','both']): \n# privateMessageBoard = neighbor.privatemessageboard_set.first()\n# privateMessageBoard.privatemessage_set.add(\n# PrivateMessage(createdBy=node,\n# messageRound = message_round, \n# message=Constants.messages[randint(1,2)]\n# )\n# ) \n \n group_players[0].participant.vars['message_round'] += 1\n \n for gPlayer in group_players:\n gPlayer.participant.vars['discuss_participate'] = False\n\n\nclass EndWaitPage(WaitPage):\n\n template_name = 'main/wait_page.html'\n\n def after_all_players_arrive(self):\n\n for node in self.group.network.node_set.all():\n if node.bot:\n node.participate = 0 #randint(0,1) #Non random = always no participate.\n else:\n node.participate = node.player_set.first().participate \n \n group_players = self.group.get_players()\n group_players[0].participant.vars['message_round'] -= 1\n self.group.set_payoffs()\n group_players[0].participant.vars['message_round'] = 1 \n \n pass\n\nclass Decide(Discuss):\n \n template_name = 'main/Decide.html'\n \n form_model = models.Player\n form_fields = [\n 'participate'\n ]\n \n def before_next_page(self):\n if self.timeout_happened:\n players_change = [player for player in self.player.participant.get_players() if 'node' in player._meta.get_all_field_names()]\n for player in players_change:\n player.node.bot = True\n player.node.save()\n \n def is_displayed(self):\n return (not self.player.node.bot) and self.participant.vars['consent']\n\n\nclass Intro(Page):\n template_name = 'main/intro.html'\n \n def 
is_displayed(self):\n return (self.subsession.round_number == 1) and self.participant.vars['consent']\n\n\nmessaging_apps = [x for i in range(Constants.num_messaging_rounds) for x in [Discuss, IntermediateWaitPage]]\nseq = [Intro, AssignAvatar, BeginWaitPage]\nseq.extend(messaging_apps)\nseq.extend([Decide, EndWaitPage])\n\npage_sequence = seq\n","repo_name":"SDALMinerva/coordination-game","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18869603774","text":"import scrapy\nfrom datetime import time, datetime\nimport json\n\n\nclass vi_Nhandan_Spider(scrapy.Spider):\n name = 'vi_nhandan'\n allowed_domains = ['nhandan.vn']\n custom_settings = {\"DOWNLOAD_TIMEOUT\": 60, \"DOWNLOAD_DELAY\": 0.5}\n\n start_urls = ['https://nhandan.vn/api/morenews-latest-0-0.html?phrase=']\n\n def __init__(self, start_date, end_date):\n self.start_date = start_date\n self.end_date = end_date\n self.start_time = datetime.combine(start_date, time())\n self.end_time = datetime.combine(end_date, time())\n self.i=0\n\n def parse(self, response):\n \n articles = json.loads(response.body)['data']['articles']\n sel = scrapy.Selector(text=articles)\n articles = sel.xpath('//article')\n \n for article in articles:\n\n date_time_str = article.xpath('.//a[@class=\"text text2\"]/text()')[-1].get().strip()\n date_time = datetime.strptime(date_time_str, \"%d/%m/%Y %H:%M\")\n \n if date_time < self.start_time:\n return\n elif date_time >= self.end_time:\n continue\n\n url = article.xpath('./h3/a/@href').get()\n date = str(date_time.date())\n title = article.xpath('./h3/a/@title').get()\n \n yield scrapy.Request(url=url,\n callback=self.parse_article,\n cb_kwargs={\"date\": date, \"title\": title})\n\n self.i += 1\n if self.i < 100:\n yield scrapy.Request('https://nhandan.vn/api/morenews-latest-0-{}.html?phrase='.format(str(self.i)), callback=self.parse)\n\n\n def parse_article(self, response, *args, **kwargs):\n \n date = kwargs[\"date\"]\n title = kwargs[\"title\"]\n \n text_nodes = response.xpath('//div[@class=\"article__body cms-body\"]/p')\n texts=[''.join(text_node.xpath(\".//text()\").getall()).replace('\\n', \" \") for text_node in text_nodes if not text_node.xpath('.//script')]\n text = \"\\n\".join([t.strip() for t in texts if t.strip()]).replace(u'\\xa0', \" \").replace(u'\\u3000', \" \")\n \n if text and title:\n yield {\"date\": date,\n \"source\": self.name,\n \"title\": title.strip(),\n \"text\": text.strip()}\n\n def warn_on_generator_with_return_value_stub(spider, callable):\n pass\n\n scrapy.utils.misc.warn_on_generator_with_return_value = warn_on_generator_with_return_value_stub\n scrapy.core.scraper.warn_on_generator_with_return_value = warn_on_generator_with_return_value_stub\n","repo_name":"zouxunlong/web_crawl","sub_path":"web_crawl/spiders/vi_Nhandan_Spider.py","file_name":"vi_Nhandan_Spider.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42450875391","text":"\"\"\"\r\nRevit Python Wrapper\r\ngithub.com/gtalarico/revitpythonwrapper\r\nrevitpythonwrapper.readthedocs.io\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights to\r\nuse, copy, modify, merge, 
publish, distribute, sublicense, and/or sell copies\r\nof the Software, and to permit persons to whom the Software is furnished to do\r\nso, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in\r\nall copies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\r\nTHE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\r\nOTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\r\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\r\nOTHER DEALINGS IN THE SOFTWARE.\r\n\r\nCopyright 2017 Gui Talarico\r\n\r\n\"\"\"\r\n\r\n__title__ = 'revitpythonwrapper'\r\n__version__ = '1.0.0'\r\n__maintainer__ = ['Gui Talarico', 'Ehsan Iran-Nejad']\r\n__license__ = 'MIT'\r\n__contact__ = 'github.com/gtalarico/revitpythonwrapper'\r\n\r\n\r\n# Basic services for sub-modules -----------------------------------------------\r\n\r\n# determining if rpw is running under document generator (sphinx)\r\ntry:\r\n # noinspection PyUnresolvedReferences\r\n __sphinx__\r\n DOC_MODE = True\r\nexcept:\r\n DOC_MODE = False\r\n\r\n\r\n# determining if rpw is running under revitpythonshell(RPS)\r\ntry:\r\n # noinspection PyUnresolvedReferences\r\n global __message__\r\n RPS_MODE = True\r\nexcept:\r\n RPS_MODE = False\r\n\r\n\r\n# determining if rpw is running under pyRevit,\r\n# in that case, collect command name and path\r\ntry:\r\n # noinspection PyUnresolvedReferences\r\n __ipyengine__\r\n PYREVIT_MODE = True\r\n # noinspection PyUnresolvedReferences\r\n PYREVIT_CMDNAME = __commandname__\r\n # noinspection PyUnresolvedReferences\r\n PYREVIT_CMDPATH = __commandpath__\r\nexcept:\r\n PYREVIT_MODE = False\r\n PYREVIT_CMDNAME = PYREVIT_CMDPATH = None\r\n\r\n\r\nfrom rpw.exceptions import *\r\n\r\n# rpw configurations\r\n# noinspection PyUnresolvedReferences\r\nimport rpw.utils.config as config\r\n\r\n# logging facilities\r\n# noinspection PyUnresolvedReferences\r\nfrom rpw.utils.logger import get_logger\r\n\r\n# Interface to host api, app and document access -------------------------------\r\n# noinspection PyUnresolvedReferences\r\nfrom rpw.hostapp import HOST_APP, ASSEMBLY_FILE_TYPE\r\n# noinspection PyUnresolvedReferences\r\nfrom rpw.hostapp import DB, UI, HOST_API_NAMESPACE\r\n# noinspection PyUnresolvedReferences\r\nfrom rpw.hostapp import doc, uidoc, all_docs\r\n\r\n# base wrappers\r\n# noinspection PyUnresolvedReferences\r\nfrom rpw.base import BaseObject, BaseObjectWrapper, BaseEnumWrapper\r\n\r\n# And now exposing rpw api to outside ------------------------------------------\r\n# noinspection PyUnresolvedReferences\r\nfrom rpw.db import *\r\n\r\n\r\nif RPS_MODE:\r\n # noinspection PyUnresolvedReferences\r\n import rpw.geom as geom\r\n\r\n\r\n # cleanups of internal imports for a clean user interface\r\n del(exceptions)\r\n del(hostapp, base, db, DOC_MODE)\r\n del(BaseObject, BaseObjectWrapper, BaseEnumWrapper)\r\n del(HOST_APP, HOST_API_NAMESPACE)\r\n del(PYREVIT_CMDNAME, PYREVIT_CMDPATH)\r\n","repo_name":"gtalarico/revitpythonwrapper","sub_path":"misc/merge-pending/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"61"} +{"seq_id":"29592454937","text":"from typing 
import Literal\nfrom vsa import VSA\nimport itertools\nimport torch\n\ndef get_vsa(dir, mode, algo, codebooks=None, dim=2048, max_num_objects=3, num_colors=7, num_pos_x=3, num_pos_y=3, fold_dim=256, ehd_bits=9, sim_bits=13, seed=None, device=None):\n if algo == \"algo1\":\n vsa = MultiConceptMNISTVSA1(dir, mode=mode, codebooks=codebooks, dim=dim, max_num_objects=max_num_objects, num_colors=num_colors, num_pos_x=num_pos_x, num_pos_y=num_pos_y, fold_dim=fold_dim, ehd_bits=ehd_bits, sim_bits=sim_bits, seed=seed, device=device)\n elif algo == \"algo2\":\n vsa = MultiConceptMNISTVSA2(dir, mode=mode, codebooks=codebooks, dim=dim, max_num_objects=max_num_objects, num_colors=num_colors, num_pos_x=num_pos_x, num_pos_y=num_pos_y, fold_dim=fold_dim, ehd_bits=ehd_bits, sim_bits=sim_bits, seed=seed, device=device)\n return vsa\n\nclass MultiConceptMNISTVSA1(VSA):\n\n def __init__(\n self,\n root: str,\n mode: Literal['SOFTWARE', 'HARDWARE'],\n codebooks = None,\n dim: int = 2048,\n max_num_objects = 3,\n num_pos_x = 3,\n num_pos_y = 3,\n num_colors = 7,\n fold_dim = 256,\n ehd_bits = 8,\n sim_bits = 13,\n seed: None or int = None, # random seed\n device = \"cpu\"):\n\n super().__init__(root, mode, dim, codebooks=codebooks, num_factors = 4, num_codevectors = (num_pos_x, num_pos_y, num_colors, 10), fold_dim = fold_dim, ehd_bits = ehd_bits, sim_bits = sim_bits, seed = seed, device = device)\n\n self.num_pos_x = num_pos_x\n self.num_pos_y = num_pos_y\n self.num_colors = num_colors\n\n def lookup(self, label: list, device = None):\n return self.get_vector(label, quantize=False, device=device)\n\n\nclass MultiConceptMNISTVSA2(VSA):\n\n def __init__(\n self,\n root: str,\n mode: Literal['SOFTWARE', 'HARDWARE'],\n codebooks = None,\n dim: int = 2048,\n max_num_objects = 3,\n num_pos_x = 3,\n num_pos_y = 3,\n num_colors = 7,\n fold_dim = 256,\n ehd_bits = 8,\n sim_bits = 13,\n seed: None or int = None, # random seed\n device = \"cpu\"):\n\n # Assign an ID to each object in the scene\n self.num_id = max_num_objects\n self.num_pos_x = num_pos_x\n self.num_pos_y = num_pos_y\n self.num_colors = num_colors\n super().__init__(root, mode, dim, codebooks=codebooks, num_factors = 5, num_codevectors = (num_pos_x, num_pos_y, num_colors, 10, self.num_id), fold_dim = fold_dim, ehd_bits = ehd_bits, sim_bits = sim_bits, seed = seed, device = device)\n\n self.id_codebook = self.codebooks[-1]\n self.x_codebook = self.codebooks[0]\n self.y_codebook = self.codebooks[1]\n\n # Construct priority list for sorting based on (x, y) locations\n self.rule = [x for x in itertools.product(range(len(self.x_codebook)), range(len(self.y_codebook)))]\n\n def lookup(self, label: list, bundled: bool = True, device = None):\n '''\n `label` doesn't include ID\n We reorder the label list based on the (x, y) locations of the objects in the scene, then bind the corresponding\n ID to the compositional vector\n '''\n \n # Get all objects (excluding ID)\n objects = [self.get_vector(label[j], quantize=True) for j in range(len(label))]\n\n # Reorder the positions of the objects in each label in the ascending order of (x, y), the first two elements in the label\n _, objects = list(zip(*sorted(zip(label, objects), key=lambda k: self.rule.index(k[0][0:2]))))\n # Remember the original indice of the codebooks for reordering later\n indices = sorted(range(len(label)), key=lambda k: self.rule.index(label[k][0:2]))\n # Bind the vector with ID determined by the position in the list\n objects = [self.bind(objects[j], self.id_codebook[j]) for j in 
range(len(objects))]\n # Return to the original order (for similarity check)\n objects = [objects[i] for i in indices]\n if bundled:\n objects = self.multiset(torch.stack(objects))\n\n return objects.to(device)\n\n","repo_name":"allpan3/multi-concept-MNIST","sub_path":"models/vsa.py","file_name":"vsa.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2767088988","text":"import numpy as np\nimport cv2\nimport os\n\n# Paths to dataset and face detection model(s)\ndataset_input_path = 'dataset_input'\ndataset_output_path = 'dataset_output'\n\n# resolution for cutted faces, \n# to keep all images in final dataset of same size\nout_resolution = (350, 350)\n\n# os.path.sep.join() used here instead of simply 'folder/filename' \n# because of different path separation in Linux vs Windows systems\n# Linux uses 'folder/filename'\n# Windows uses 'folder\\\\filename' \nprotoPath = os.path.sep.join([\"face_detection_model\", \"deploy.prototxt\"])\nmodelPath = os.path.sep.join([\"face_detection_model\",\n\t\"res10_300x300_ssd_iter_140000.caffemodel\"])\n\n# minimal confidence. Parameter used by face detection \nminConfidence = 0.95\n\n\n\n################################################################################\n# Function Definitions \n################################################################################\n\n# ----------- Extract Faces from image -------------------------\ndef extract_faces(emotion):\n # Get list of all images in 'emotion' folder\n path = os.path.sep.join([dataset_input_path, emotion])\n # get files in 'emotion' folder and sort them alphabetically\n dirpath, dirnames, filenames = next(os.walk(path))\n files = []\n for f in filenames:\n # files.append(os.path.sep.join([dataset_input_path, emotion, f]))\n files.append(os.path.sep.join([emotion, f]))\n files.sort()\n print('\\n\\nfiles = %s' %files)\n # filenumber = 0\n\n counter = 1\n for f in files:\n print('folder %s: %s of %s' %(emotion, counter, len(files)))\n counter += 1\n\n # Open image\n frame = cv2.imread(os.path.sep.join([dataset_input_path, f]))\n # frame = cv2.imread(f)\n\n if frame is None:\n print('--(!)Error: Failed reading image ', f)\n continue\n\n # # find faces in the frame\n # faces = get_faces(frame, detector)\n # # print(\"found faces: \", faces)\n\n # a list of detected faces \n detectedFaces = []\n\n # grab the image dimensions\n (h, w) = frame.shape[:2]\n\n # construct a blob from the image\n imageBlob = cv2.dnn.blobFromImage(\n cv2.resize(frame, (300, 300)), 1.0, (300, 300),\n (104.0, 177.0, 123.0), swapRB=False, crop=False)\n \n # apply OpenCV's deep learning-based face detector to localize\n # faces in the input image\n detector.setInput(imageBlob)\n detections = detector.forward()\n\n # loop over the detections\n for i in range(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated with\n # the prediction\n confidence = detections[0, 0, i, 2]\n \n # filter out weak detections\n if confidence > minConfidence:\n # compute the (x, y)-coordinates of the bounding box for\n # the face\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # extract the face ROI\n face = frame[startY:endY, startX:endX]\n (fH, fW) = face.shape[:2]\n\n # ensure the face width and height are sufficiently large\n if fW < 20 or fH < 20:\n continue\n\n # save face into separate files\n try:\n # Make face black'n'white\n bwFace = 
cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)\n # Resize face so all images have same size and save new file\n out_image = cv2.resize(bwFace, out_resolution)\n out_path = os.path.join(dataset_output_path, f)\n # if counter > 0: path += \"_\" + counter\n # out_path += \"_\" + counter\n\n # if such filename exists add \"_\" at the end\n while (os.path.isfile(out_path)):\n path_parts = os.path.splitext(out_path)\n out_path = os.path.sep.join(path_parts[:-1]) + '_' + path_parts[-1]\n \n # save new file\n cv2.imwrite(out_path, out_image)\n except:\n print('--(!)Error: Failed saving image ', out_path)\n pass #If error, pass file\n\n\n################################################################################\n# Program Execution \n################################################################################\n\n# Copy folder names from dataset_input to assign categories ('happy', 'sad', 'neutral', etc)\ndirpath, dirnames, filenames = next(os.walk(dataset_input_path))\ncategories = dirnames\nprint('categories = ', categories)\n\n# Create folders in dataset_output \n# cv2.imwrite() won't be able to save file in non existing folder\nfor categorie in categories:\n dir_path = os.path.join(dataset_output_path, categorie)\n if not os.path.isdir(dir_path):\n os.mkdir(dir_path)\n\n# Load our serialized face detector from disk\nprint(\"[INFO] loading face detector...\")\ndetector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)\n\n# Detect faces from dataset_input and extract them to dataset_output\nfor emotion in categories:\n extract_faces(emotion)\n","repo_name":"antonpolevyy/emotion-copycap_partial-solutions","sub_path":"extract-faces-caffemodel/extract-faces.py","file_name":"extract-faces.py","file_ext":"py","file_size_in_byte":5294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44461938217","text":"import os\nimport logging\nfrom inaugurator import sh\n\n\nUSER_SETTINGS_DIR = \"etc/default\"\nUSER_SETTINGS_FILENAME = \"grub\"\n\n\ndef changeGrubConfiguration(destination, data, parameter=None):\n destUserSettingsDir = os.path.join(destination, USER_SETTINGS_DIR)\n existingConfiguration = \"\"\n if os.path.isfile(destUserSettingsDir):\n logging.warning(\"It seems that there's a file instead of a directory in GRUB2's user settings path \"\n \" (%(path)s). Removing it...\", dict(path=destUserSettingsDir))\n os.unlink(destUserSettingsDir)\n if not os.path.exists(destUserSettingsDir):\n os.makedirs(destUserSettingsDir)\n destUserSettingsFilename = os.path.join(destUserSettingsDir, USER_SETTINGS_FILENAME)\n if os.path.isfile(destUserSettingsFilename):\n logging.info(\"GRUB2's user settings file already exists. Reading it...\")\n with open(destUserSettingsFilename, \"r\") as grubDefaultConfig:\n existingConfiguration = grubDefaultConfig.read()\n elif os.path.exists(destUserSettingsFilename):\n logging.warning(\"It seems that there is a non-file in GRUB2's user settings path: %(path)s. 
Will not\"\n \"modify GRUB2 settings.\", dict(path=destUserSettingsDir))\n return\n wasGrubCmdlineLinuxParameterWritten = False\n logging.info(\"Modifying GRUB2 user settings file...\")\n if parameter:\n newParameterConfiguration = \"%s=%s\" % (parameter, data)\n with open(destUserSettingsFilename, \"wb\") as userSettingsFile:\n for line in existingConfiguration.splitlines():\n line = line.strip()\n if line.startswith(\"GRUB_CMDLINE_LINUX=\"):\n wasGrubCmdlineLinuxParameterWritten = True\n maxSplit = 1\n cmdline = line.split(\"=\", maxSplit)[1].strip(\" \\\"\")\n if parameter:\n logging.info(\"Grub configuration: Overriding %s parameter with %s\", parameter, data)\n argsWithoutParameter = [arg for arg in cmdline.split(\" \")\n if not arg.startswith(\"%s=\" % parameter)]\n confNoParam = \" \".join(argsWithoutParameter)\n if data:\n line = \"GRUB_CMDLINE_LINUX=\\\"%(confNoParam)s %(parameterconfiguration)s\\\"\" % \\\n dict(confNoParam=confNoParam,\n parameterconfiguration=newParameterConfiguration)\n else:\n line = \"GRUB_CMDLINE_LINUX=\\\"%(confNoParam)s\\\"\" % \\\n dict(confNoParam=confNoParam)\n else:\n line = \"GRUB_CMDLINE_LINUX=\\\"%(newConfiguration)s %(oldConfiguration)s\\\"\" % \\\n dict(newConfiguration=data,\n oldConfiguration=cmdline)\n logging.info(\"Grub configuration line is %s\", line)\n\n userSettingsFile.write(line)\n userSettingsFile.write(os.linesep)\n if not wasGrubCmdlineLinuxParameterWritten:\n userSettingsFile.write(\"# Generated by Inaugurator\\n\")\n userSettingsFile.write(\"GRUB_CMDLINE_LINUX=\\\"%s\\\"\\n\" % (consoleConfiguration,))\n\n\ndef install(targetDevice, destination):\n try:\n chrootScript = 'grub2-install %s && grub2-mkconfig > /boot/grub2/grub.cfg' % targetDevice\n sh.run(\"/usr/sbin/busybox chroot %s sh -c '%s'\" % (destination, chrootScript))\n return '/boot/grub2/grub.cfg'\n except:\n logging.exception(\"Failed to run grub2-install or grub2-mkconfig. Is the dest rootfs a debian-like?\")\n logging.warning(\"Trying to run grub-install and grub-mkconfig instead\")\n chrootScript = 'grub-install %s && grub-mkconfig > /boot/grub/grub.cfg' % targetDevice\n sh.run(\"/usr/sbin/busybox chroot %s sh -c '%s'\" % (destination, chrootScript))\n return '/boot/grub/grub.cfg'\n","repo_name":"Stratoscale/inaugurator","sub_path":"inaugurator/grub.py","file_name":"grub.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35753856766","text":"# Two Pointers\n\nclass Solution:\n def minSubArrayLen(self, s, nums):\n \"\"\"\n :type s: int\n :type nums: List[int]\n :rtype: int\n \"\"\"\n res = float(\"inf\")\n left = 0\n right = 0\n rangeSum = 0\n while right <= len(nums):\n if rangeSum >= s:\n res = min(res, right-left)\n rangeSum -= nums[left]\n left += 1\n elif right < len(nums):\n rangeSum += nums[right]\n right += 1\n else:\n right += 1\n if res == float(\"inf\"):\n return 0\n else:\n return res","repo_name":"xiaosuper163/leetcode","sub_path":"algo/209. Minimum Size Subarray Sum.py","file_name":"209. 
Minimum Size Subarray Sum.py","file_name":"209. Minimum Size Subarray Sum.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23148807412","text":"import math\n\n\ndef main():\n    N = int(input())\n    x = []\n    y = []\n    total = 0\n    \n    for i in range(N):\n        a, b = map(int, input().split())\n        x.append(a)\n        y.append(b)\n    for i in range(N):\n        now_x = x[i]\n        now_y = y[i]\n        for j in range(N):\n            if i == j:\n                continue\n            total += math.sqrt(abs(now_x - x[j]) ** 2 + abs(now_y - y[j]) ** 2) * math.factorial(N - 1)\n    print(\"{0:.7f}\".format(total / math.factorial(N)))\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Tomoki-Kikuta/atcoder","sub_path":"abc145/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15007317848","text":"n, k = map(int, input().split())\r\nwv = []\r\nfor _ in range(n):\r\n    wv.append(tuple(map(int, input().split())))\r\nbag = [[0] * (k + 1) for _ in range(n)]\r\ncheck = {0}\r\nfor i in range(n):\r\n    new_check = set()\r\n    for now in check:\r\n        bag[i][now] = bag[i - 1][now]\r\n    for now in check:\r\n        if now + wv[i][0] <= k and bag[i - 1][now] + wv[i][1] > bag[i][now+wv[i][0]]:\r\n            bag[i][now + wv[i][0]] = bag[i - 1][now] + wv[i][1]\r\n            new_check.add(now + wv[i][0])\r\n    check |= new_check\r\nprint(max(bag[n - 1]))\r\n","repo_name":"CodingOnionFarmer/JNH-BOJ-SWEA","sub_path":"백준/Gold/12865. 평범한 배낭/평범한 배낭.py","file_name":"평범한 배낭.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2767088988","text":"import typing\nfrom typing import Union, List\n\nNumber = Union[int]\n\n\nclass NumberIterator(object):\n\n    def __init__(self, container: List[Number]):\n        self.__container = container\n        self.position = 0\n\n    def __is_even(self, number):\n        return number % 2 == 0\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        try:\n            value = self.__container[self.position]\n            self.position += 1\n            # skip forward until the next even number\n            while not self.__is_even(value):\n                value = self.__container[self.position]\n                self.position += 1\n        except IndexError:\n            raise StopIteration()\n        return value\n\n\nif __name__ == \"__main__\":\n    a = [1, 2, 3, 4, 6, 5]\n\n    ni = NumberIterator(a)\n\n    for number in ni:\n        print(number)\n","repo_name":"wprazuch/PythonPlayground","sub_path":"DesignPatterns/GoF/Behavioral/Iterator.py","file_name":"Iterator.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2664199052","text":"import sqlite3\ndef add(name, type, age, price, pfdo, geo):\n    # read the counter first so it is always bound for the rewrite below\n    with open('2.txt', 'r') as f:\n        c = f.read()\n    try:\n        con = sqlite3.connect('mydatabase2.db')\n        cur = con.cursor()\n        a = [c, name, type, age, price, pfdo, geo]\n        cur.execute('INSERT INTO cources VALUES (?, ?, ?, ?, ?, ?, ?)', a)\n        con.commit()\n    except Exception as e:\n        print(e)\n    with open('2.txt', 'w') as f:\n        f.write(str(int(c) + 1))\n","repo_name":"cyberaea/project2","sub_path":"circles39/circles39/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3980464533","text":"import re\nfrom typing import get_args\nfrom flask import Flask, url_for, render_template, redirect, request, session, make_response, jsonify\nfrom markupsafe import escape\nimport json 
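\n# NOTE: Simulator.run (imported below) is expected to perform the solve and write plot images into static/; see run_sim and results below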
\nimport os\n\nfrom numpy import imag\nimport Simulator as sim\n\n# from werkzeug.utils import redirect\n# $env:FLASK_APP='main.py'\n# $env:FLASK_ENV=development\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'f43t56gyeutr'\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/results', methods=['GET'])\ndef results():\n    input_data = session['input']\n    input_data = json.loads(input_data)\n    # keep only the PNG plot files; filtering into a new list avoids mutating the list while iterating over it\n    image_names = [i for i in os.listdir('static') if i.endswith('.png')]\n    return render_template('results.html', result_data=image_names, input_data=input_data)\n\n\n@app.route('/run_sim', methods=['POST'])\ndef run_sim():\n    # Save for use in listing input in results page\n    input_data = request.get_data()\n    session['input'] = input_data.decode()\n\n    # Convert to list before passing to simulator\n    mylist = json.loads(input_data.decode())\n    retval = sim.run(mylist, no_plot=False)\n    if retval == -1:\n        ret = ['FEM only works for constant right hand side equal to zero and domain [0,1 x 0,1]
']\n        template_context = jsonify(ret)\n        return make_response(template_context, 400)\n\n    # keep only the PNG result images\n    image_names = os.listdir('static')\n    retval = []\n    for i in image_names:\n        if i.endswith('.png'):\n            retval.append('/results')\n\n    template_context = jsonify(retval)\n    return make_response(template_context)\n\n@app.route('/test', methods=['GET'])\ndef test():\n    print(\"Serving test request\")\n    image_names = os.listdir('static')\n\n    print(request.headers)\n\n    return jsonify(image_names)\n\n@app.route('/test2', methods=['GET', 'POST'])\ndef test2():\n\n    print(\"Serving test 2 request is \" + request.method)\n\n    if request.method == 'GET':\n        retval = {}\n        retval['first'] = 'myfirst'\n        retval['second'] = 'mysecond'\n    else:\n        # json.loads returns a dict, so print it as a separate argument rather than concatenating to a str\n        print(\"POST request, got\", json.loads(request.data.decode()))\n        retval = request.data.decode()\n\n\n    print(request.headers)\n\n    return jsonify(retval)\n\n\nif __name__ == \"__main__\":\n    app.run() ","repo_name":"otto67/PoissonSolverFlask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3208074132","text":"from typing import List\n\n\nclass Solution:\n    def productQueries(self, n: int, queries: List[List[int]]) -> List[int]:\n        x = n.bit_length() - 1  # exponent of the highest set bit; avoids float log rounding\n        powers = []\n        while x >= 0:\n            a, b = divmod(n, 2 ** x)\n            if a != 0:\n                powers.append(2 ** x)\n            n = b\n            x -= 1\n        powers = list(reversed(powers))\n        ans = []\n        for q in queries:\n            t = 1\n            for i in range(q[0], q[1] + 1):\n                t *= powers[i]\n            ans.append(t % (10 ** 9 + 7))  # integer modulus; the float 1e9 + 7 loses precision for large t\n        return ans\n\n\nif __name__ == '__main__':\n    n = 919\n    queries = [[0, 6]]\n    solution = Solution()\n    res = solution.productQueries(n, queries)\n    print(res)\n","repo_name":"foreverxujiahuan/algorithm","sub_path":"数组/lc6209.py","file_name":"lc6209.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"37510908405","text":"import fqe\nimport numpy as np\nimport scipy as sp\nfrom scipy.sparse.coo import coo_matrix\n\nimport openfermion as of\n# from openfermion.hamiltonians.richardson_gaudin import RichardsonGaudin\nfrom openfermion.chem.molecular_data import spinorb_from_spatial\n\nfrom fqe.openfermion_utils import integrals_to_fqe_restricted\n\nfrom itertools import chain, product\n\n# from qcpanop.cc.lambda_ccd import (kernel, ccsd_d1, ccsd_d2, ccsd_energy,\n#                                    lagrangian_energy)\nfrom lambda_ccd import (kernel, ccsd_d1, ccsd_d2, ccsd_energy,\n                        lagrangian_energy)\nfrom lambda_ccd import singles_residual\nfrom lambda_ccd import doubles_residual\n\nfrom lambda_ccd_2 import kernel as kernel2\nfrom lambda_ccd_2 import ccsd_energy as ccsd_energy2\nfrom lambda_ccd_2 import lagrangian_energy as lagrangian_energy2\nfrom lambda_ccd_2 import singles_residual as singles_residual2\nfrom lambda_ccd_2 import doubles_residual as doubles_residual2\n\nimport matplotlib.pyplot as plt\n\nsplus = lambda xx: of.QubitOperator((xx, 'X'), coefficient=0.5) + of.QubitOperator((xx, 'Y'), coefficient=-0.5j)\nsminus = lambda xx: of.QubitOperator((xx, 'X'), coefficient=0.5) + of.QubitOperator((xx, 'Y'), coefficient=0.5j)\nsz = lambda xx: of.QubitOperator((xx, 'Z'), coefficient=1)\n\n\ndef drop_identity(qubit_ham):\n    new_ham = of.QubitOperator()\n    for term in qubit_ham:\n        if not of.is_identity(term):\n            new_ham += term\n    return new_ham\n\n\ndef get_num_projector(n_qubits, n_target):\n    n_row_idx = []\n    for ii in 
range(2**n_qubits):\n ket = [int(xx) for xx in np.binary_repr(ii, width=n_qubits)]\n if sum(ket) == n_target:\n n_row_idx.append(ii)\n n_projector = coo_matrix(([1] * len(n_row_idx), (n_row_idx, n_row_idx)),\n shape=(2 ** n_qubits, 2 ** n_qubits))\n return n_projector\n\ndef get_sz_projector(n_qubits, n_target):\n n_row_idx = []\n for ii in range(2**n_qubits):\n ket = np.array([int(xx) for xx in np.binary_repr(ii, width=n_qubits)])\n keta = ket[::2]\n ketb = ket[1::2]\n if np.isclose(sum(keta) - sum(ketb), n_target):\n n_row_idx.append(ii)\n n_projector = coo_matrix(([1] * len(n_row_idx), (n_row_idx, n_row_idx)),\n shape=(2 ** n_qubits, 2 ** n_qubits))\n return n_projector\n\ndef get_doci_projector(n_qubits, n_target):\n \"\"\"Get the projector on to the doubly occupied space\n\n :param int n_qubits: number of qubits (or fermionic modes)\n :param int n_target: number of electrons total\n :return: coo_matrix that is 1 along diagonal elements that correspond\n to doci terms.\n \"\"\"\n n_row_idx = []\n for ii in range(2**n_qubits):\n ket = np.binary_repr(ii, width=n_qubits)\n keta = ket[::2]\n ketb = ket[1::2]\n res = int(keta, 2) ^ int(ketb, 2)\n if np.isclose(res, 0) and ket.count('1') == n_target:\n n_row_idx.append(ii)\n n_projector = coo_matrix(([1] * len(n_row_idx), (n_row_idx, n_row_idx)),\n shape=(2 ** n_qubits, 2 ** n_qubits))\n return n_projector, n_row_idx\n\n\ndef get_gs(ham, projectors=None, sector_op=None, sector_n=None):\n dense_ham = of.get_sparse_operator(ham).real\n for proj in projectors:\n dense_ham = proj.T @ dense_ham @ proj\n\n\n w, v = np.linalg.eigh(dense_ham.toarray())\n\n if sector_op is None and sector_n is None:\n return w[0], v[:, [0]]\n else:\n for ii in range(len(w)):\n n_val = v[:, [ii]].conj().T @ sector_op @ v[:, [ii]]\n # print(\"Sector selector \", n_val)\n if np.isclose(n_val, sector_n):\n return w[ii], v[:, [ii]]\n else:\n raise ValueError(\"Didn't find desired sector\")\n\n\ndef print_wf(wf):\n n_qubits = int(np.log2(wf.shape[0]))\n for ii in range(2**n_qubits):\n if not np.isclose(np.abs(wf[ii]), 0):\n print(ii, np.binary_repr(ii, width=n_qubits), wf[ii])\n\n\ndef get_fermion_operator(rg_ham):\n oei, tei = rg_ham.get_projected_integrals()\n ele_ham = integrals_to_fqe_restricted(oei, tei)\n soei, stei = spinorb_from_spatial(oei, tei)\n astei = np.einsum('ijkl', stei) - np.einsum('ijlk', stei)\n io_ham = of.InteractionOperator(0, soei, 0.25 * astei)\n return io_ham, ele_ham\n\n\ndef qubit_wf_to_fqe(wf):\n \"\"\"Convert to full space wf then to fqe\"\"\"\n n_qubits = int(np.log2(wf.shape[0]))\n full_space_wf = np.zeros((2**(2 * n_qubits)))\n for ii in range(2**n_qubits):\n doci_space_ket = [int(xx) for xx in np.binary_repr(ii, width=n_qubits)]\n full_space_ket = list(chain(*zip(doci_space_ket, doci_space_ket)))\n full_space_idx = int(\"\".join([str(xx) for xx in full_space_ket]), 2)\n full_space_wf[full_space_idx] = wf[ii]\n # fqe_wf = fqe.from_cirq(full_space_wf, 1.0E-12)\n fqe_wf = None\n return fqe_wf, full_space_wf\n\n\ndef get_rg_fham(g, spatial_orbs):\n h1 = np.diag(np.arange(spatial_orbs) + 1)\n h1 = np.kron(h1, np.eye(2))\n h2 = np.zeros((2 * spatial_orbs,) * 4)\n fham = of.FermionOperator()\n for pp in range(spatial_orbs):\n fham += of.FermionOperator(((2 * pp, 1), (2 * pp, 0)), coefficient=float(h1[2 * pp, 2 * pp]))\n fham += of.FermionOperator(((2 * pp + 1, 1), (2 * pp + 1, 0)), coefficient=float(h1[2 * pp + 1, 2 * pp + 1]))\n for p, q in product(range(spatial_orbs), repeat=2):\n if p != q:\n h2[2 * p, 2 * p + 1, 2 * q + 1, 2 * q] = g / 2\n 
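# same pair-hopping amplitude with the alpha/beta order swapped\n            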
h2[2 * p + 1, 2 * p, 2 * q, 2 * q + 1] = g / 2\n ab = ((2 * p, 1), (2 * p + 1, 1), (2 * q + 1, 0), (2 * q, 0))\n ba = ((2 * p + 1, 1), (2 * p, 1), (2 * q, 0), (2 * q + 1, 0))\n fham += of.FermionOperator(ab, coefficient=g/2)\n fham += of.FermionOperator(ba, coefficient=g/2)\n return h1, h2, fham\n\n\ndef get_rg_qham(g, spatial_orbs):\n \"\"\"\n sum_{p}e_{p}~N_{p} + g sum_{pq} P_{p}^P_{q}\n :param g:\n :param spatial_orbs:\n :return:\n \"\"\"\n qham = of.QubitOperator()\n h1 = np.diag(np.arange(spatial_orbs) + 1 )\n # constant term\n constant = sum(np.diagonal(h1))\n for pp in range(spatial_orbs):\n qham += of.QubitOperator((pp, 'Z'), coefficient=float(-h1[pp, pp]))\n for qq in range(spatial_orbs):\n if pp != qq:\n qham += of.QubitOperator(((pp, 'X'), (qq, 'X')), coefficient=g/4)\n qham += of.QubitOperator(((pp, 'Y'), (qq, 'Y')), coefficient=g/4)\n return qham, float(constant)\n\n\ndef solve_cc_equations2(soei, astei):\n \"\"\"Lambda-CCD equations are solved. Return amplitudes and RDMs\"\"\"\n nso = soei.shape[0]\n nsocc = nso // 2\n nsvirt = nso - nsocc\n n = np.newaxis\n o = slice(None, nsocc)\n v = slice(nsocc, None)\n\n fock = soei + np.einsum('piiq->pq', 4 * astei[:, o, o, :]) # the CC equations generated in ccd_2 don't involve the 1/4 term\n hf_energy = 0.5 * np.einsum('ii', (fock + soei)[o, o])\n\n eps = np.diagonal(fock)\n e_abij = 1 / (-eps[v, n, n, n] - eps[n, v, n, n] + eps[n, n, o, n] + eps[\n n, n, n, o])\n e_ai = 1 / (-eps[v, n] + eps[n, o])\n g = astei.transpose(0, 1, 3, 2)\n t1z, t2z = np.zeros((nsvirt, nsocc)), np.zeros((nsvirt, nsvirt, nsocc, nsocc))\n t1f, t2f, l1f, l2f = kernel2(t1z, t2z, soei, g, o, v, e_ai, e_abij,\n stopping_eps=1.0E-6, max_iter=300, damping=0.5)\n assert np.isclose(np.linalg.norm(t1f), 0)\n assert np.isclose(np.linalg.norm(l1f), 0)\n print(\"{: 5.15f} HF Energy \".format(hf_energy))\n final_cc_energy = ccsd_energy2(t1f, t2f, soei, g, o, v) - hf_energy\n print(\"{: 5.15f} Final Correlation Energy\".format(final_cc_energy))\n final_lagrangian_energy = lagrangian_energy2(t1f, t2f, l1f, l2f, soei, g, o, v) - hf_energy\n print(\"{: 5.15f} Lagrangian Energy - HF\".format(final_lagrangian_energy))\n\n kd = np.eye(nso)\n opdm = ccsd_d1(t1f, t2f, l1f, l2f, kd, o, v)\n tpdm = ccsd_d2(t1f, t2f, l1f, l2f, kd, o, v)\n tpdm = tpdm.transpose(0, 1, 3, 2) # openfermion ordering\n\n return final_cc_energy + hf_energy, final_lagrangian_energy + hf_energy, opdm, tpdm, t2f, l2f\n\n\ndef solve_cc_equations(soei, astei):\n \"\"\"Lambda-CCD equations are solved. 
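The solver is called with zero T1/L1 guesses and their norms are asserted to stay zero, so this is effectively CCD. 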
Return amplitudes and RDMs\"\"\"\n nso = soei.shape[0]\n nsocc = nso // 2\n nsvirt = nso - nsocc\n n = np.newaxis\n o = slice(None, nsocc)\n v = slice(nsocc, None)\n\n fock = soei + np.einsum('piiq->pq', astei[:, o, o, :])\n hf_energy = 0.5 * np.einsum('ii', (fock + soei)[o, o])\n eps = np.diag(fock)\n e_abij = 1 / (-eps[v, n, n, n] - eps[n, v, n, n] + eps[n, n, o, n] + eps[\n n, n, n, o])\n e_ai = 1 / (-eps[v, n] + eps[n, o])\n g = astei.transpose(0, 1, 3, 2)\n t1z, t2z = np.zeros((nsvirt, nsocc)), np.zeros((nsvirt, nsvirt, nsocc, nsocc))\n t1f, t2f, l1f, l2f = kernel(t1z, t2z, fock, g, o, v, e_ai, e_abij,\n stopping_eps=1.0E-6, damping=0.5, max_iter=300)\n assert np.isclose(np.linalg.norm(t1f), 0)\n assert np.isclose(np.linalg.norm(l1f), 0)\n print(\"{: 5.15f} HF Energy \".format(hf_energy))\n final_cc_energy = ccsd_energy(t1f, t2f, fock, g, o, v) - hf_energy\n print(\"{: 5.15f} Final Correlation Energy\".format(final_cc_energy))\n final_lagrangian_energy = lagrangian_energy(t1f, t2f, l1f, l2f, fock, g, o, v) - hf_energy\n print(\"{: 5.15f} Lagrangian Energy - HF\".format(final_lagrangian_energy))\n\n kd = np.eye(nso)\n opdm = ccsd_d1(t1f, t2f, l1f, l2f, kd, o, v)\n tpdm = ccsd_d2(t1f, t2f, l1f, l2f, kd, o, v)\n tpdm = tpdm.transpose(0, 1, 3, 2) # openfermion ordering\n\n return final_cc_energy + hf_energy, final_lagrangian_energy + hf_energy, opdm, tpdm, t1f, t2f, l1f, l2f\n\n\ndef doci_vec_to_full_space_pyscf(doci_vec, doci_index, n_qubits):\n \"\"\"This function is very slow because of from_cirq. This will\n be sped up in the future.\"\"\"\n wf = np.zeros((2**n_qubits), dtype=np.complex128)\n for idx, val in enumerate(doci_index):\n wf[val] = doci_vec[idx]\n\n fqe_wf = fqe.from_cirq(wf, thresh=1.0E-12)\n return fqe_wf\n\n\ndef main():\n # set up couplings and simulation parameters\n couplings = np.linspace(-0.5, 1.5, 20)\n n_qubits = 6# this is for the DOCI space so equivalent to spatial orbs\n nmo = n_qubits\n\n # results storage\n ccd_energies = []\n ccd_energies2 = []\n doci_energies = []\n true_sc_order_parameter = []\n ccd_sc_order_parameter = []\n\n doci_projector, s0_basis = get_doci_projector(2 * n_qubits, n_qubits)\n # run through everything\n for g in couplings:\n print(\"Coupling parameter \", g)\n ncr_h1, ncr_h2, ncr_fham = get_rg_fham(g, n_qubits)\n print(ncr_fham)\n\n # construct antisymmetric coefficient matrix\n # such that the operator commutes with antisymmeterizer\n ncr_h2_antisymm = ncr_h2 - np.einsum('ijlk', ncr_h2)\n for p, q, r, s in product(range(2 * n_qubits), repeat=4):\n if not np.isclose(ncr_h2_antisymm[p, q, r, s], 0):\n # print((p, q, r, s), ncr_h2_antisymm[p, q, r, s])\n assert np.isclose(ncr_h2_antisymm[p, q, r, s], -ncr_h2_antisymm[p, q, s, r])\n assert np.isclose(ncr_h2_antisymm[p, q, r, s], -ncr_h2_antisymm[q, p, r, s])\n assert np.isclose(ncr_h2_antisymm[p, q, r, s], ncr_h2_antisymm[q, p, s, r])\n\n # get interaction operator\n # normally a factor of 1/4 is associated with antisymmetric 2-electron\n # integrals. 
Here it is 1/2 (because we only consider alpha-beta space)\n        ncr_antisymm_fham = of.InteractionOperator(0, ncr_h1.astype(float),\n                                                   0.5 * ncr_h2_antisymm.astype(\n                                                       float))\n        test1 = of.normal_ordered(of.get_fermion_operator(ncr_antisymm_fham))\n        test2 = of.normal_ordered(ncr_fham)\n        assert test1 == test2  # check if equivalent to the original Hamiltonian\n\n        # diagonalize in DOCI space.\n        afham = of.get_sparse_operator(ncr_antisymm_fham).toarray().real\n        afham = afham[:, doci_projector.row]\n        afham = afham[doci_projector.row, :]\n        ncr_afham_eigs, ncr_afham_vecs = np.linalg.eigh(afham)\n        doci_energies.append(ncr_afham_eigs[0])\n        print(ncr_afham_eigs[:10])\n\n        afham = of.get_sparse_operator(ncr_fham).toarray().real\n        afham = afham[:, doci_projector.row]\n        afham = afham[doci_projector.row, :]\n        ncr_afham_eigs, ncr_afham_vecs = np.linalg.eigh(afham)\n        # cross-check only: appending a second time here would leave doci_energies\n        # twice as long as couplings and break the plots below\n        assert np.isclose(ncr_afham_eigs[0], doci_energies[-1])\n        print(ncr_afham_eigs[:10])\n\n\n        full_space_wf = np.zeros((4 ** nmo), dtype=np.complex128)\n        for idx in range(len(s0_basis)):\n            full_space_wf[s0_basis[idx]] = ncr_afham_vecs[idx, 0]\n        # print wf\n        fqe_doci = fqe.from_cirq(full_space_wf.flatten(), thresh=1.0E-12)\n        fqe_doci.print_wfn()\n        # check S0 wf gives the same expectation value\n        fqe_fham = fqe.get_hamiltonian_from_openfermion(ncr_fham)\n        doci_energy = fqe_doci.expectationValue(fqe_fham)\n        assert np.isclose(doci_energy, ncr_afham_eigs[0])\n\n        # get FCI RDMs\n        fqe_wf = doci_vec_to_full_space_pyscf(ncr_afham_vecs[:, [0]].flatten(),\n                                              doci_projector.row, 2 * n_qubits)\n        fqe_wf.print_wfn()\n\n        fqe_opdm, fqe_tpdm = fqe_wf.sector((n_qubits, 0)).get_openfermion_rdms()\n        true_num_fluctation = (2 / n_qubits) * np.sum(np.sqrt(np.diagonal(fqe_opdm) - np.diagonal(fqe_opdm)**2))\n        true_sc_order_parameter.append(true_num_fluctation)\n\n\n        # print()\n        # solve CCD equations\n        cc_energy, lagrangian_energy, cc_opdm, cc_tpdm, cc_t1f, cc_t2f, cc_l1f, cc_l2f = solve_cc_equations(\n            ncr_h1.astype(float), 4 * 0.5 * ncr_h2_antisymm.astype(float))\n        ccd_energies.append(cc_energy)\n        ccd_num_fluctation = (2 / n_qubits) * np.sum(np.sqrt(np.diagonal(cc_opdm) - np.diagonal(cc_opdm)**2))\n        ccd_sc_order_parameter.append(ccd_num_fluctation)\n\n        # Solving ccsd_v2\n        nso = ncr_h1.shape[0]\n        nsocc = nso // 2\n        nsvirt = nso - nsocc\n        n = np.newaxis\n        o = slice(None, nsocc)\n        v = slice(nsocc, None)\n        fock = ncr_h1 + np.einsum('piiq->pq', 2 * ncr_h2_antisymm[:, o, o, :])\n        print(\"CC-S\",\n              np.linalg.norm(singles_residual(cc_t1f, cc_t2f, fock, 2 * ncr_h2_antisymm.transpose(0, 1, 3, 2), o, v)))\n        print(\"CC-D\",\n              np.linalg.norm(doubles_residual(cc_t1f, cc_t2f, fock, 2 * ncr_h2_antisymm.transpose(0, 1, 3, 2), o, v)))\n\n        print(\"CC-S\",\n              np.linalg.norm(singles_residual2(cc_t1f, cc_t2f, ncr_h1, 0.5 * ncr_h2_antisymm.transpose(0, 1, 3, 2), o, v)))\n        print(\"CC-D\",\n              np.linalg.norm(doubles_residual2(cc_t1f, cc_t2f, ncr_h1, 0.5 * ncr_h2_antisymm.transpose(0, 1, 3, 2), o, v)))\n\n        cc_energy, lagrangian_energy, cc_opdm, cc_tpdm, cc_t2f, cc_l2f = solve_cc_equations2(\n            ncr_h1.astype(float), 0.5 * ncr_h2_antisymm.astype(float))\n        ccd_energies2.append(cc_energy)\n\n\n\n    np.save(\"RG_n_{}_gvalues.npy\".format(n_qubits), couplings)\n    np.save(\"RG_n_{}_doci_energy.npy\".format(n_qubits), np.array(doci_energies))\n    np.save(\"RG_n_{}_ccd_energies.npy\".format(n_qubits), np.array(ccd_energies))\n    np.save(\"RG_n_{}_ccd_energies2.npy\".format(n_qubits), np.array(ccd_energies2))\n\n    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6.4, 4.8))\n    ax.plot(couplings, doci_energies, 'k', linestyle='-', 
label='DOCI')\n ax.plot(couplings, ccd_energies, marker='s', linestyle='None', label='CCSD-f-v', mfc='C0', mec='None')\n ax.plot(couplings, ccd_energies2, marker='o', linestyle='None', label='CCSD-h-g', mfc='C1', mec='None')\n\n ax.tick_params(which='both', labelsize=18, direction='in')\n ax.set_xlabel(\"g\", fontsize=18)\n ax.set_ylabel(r\"E\", fontsize=18)\n ax.legend(loc='center', fontsize=13, ncol=1, frameon=False)\n plt.gcf().subplots_adjust(bottom=0.15, left=0.2)\n plt.savefig(\"RG_n_{}_absolute_ccd_doci_energies.png\".format(n_qubits), format='PNG', dpi=300)\n plt.show()\n\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6.4, 4.8))\n ax.plot(couplings, np.array(ccd_energies).flatten() - doci_energies, 'C0o-', label='CCSD-f-v')\n ax.plot(couplings, np.array(ccd_energies2).flatten() - doci_energies, 'C1o-', label='CCSD-h-g')\n ax.tick_params(which='both', labelsize=18, direction='in')\n ax.set_xlabel(\"g\", fontsize=18)\n ax.set_ylabel(r\"$E - E_{\\mathrm{exact}}$\", fontsize=18)\n plt.axhline(0, color='k')\n ax.legend(loc='upper right', fontsize=13, ncol=2)\n # plt.gcf().subplots_adjust(bottom=0.15, left=0.2)\n plt.tight_layout()\n plt.savefig(\"RG_n_{}_relative_ccd_doci_energies.png\".format(n_qubits), format='PNG', dpi=300)\n plt.show()\n\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6.4, 4.8))\n ax.plot(couplings, ccd_sc_order_parameter, 'C0o-', label='CCD')\n ax.plot(couplings, true_sc_order_parameter, 'k-', label='FCI')\n ax.tick_params(which='both', labelsize=18, direction='in')\n ax.set_xlabel(\"g\", fontsize=18)\n ax.set_ylabel(r\"$\\Delta_{b}$\", fontsize=18)\n plt.axhline(0, color='k')\n ax.legend(loc='upper right', fontsize=13, ncol=2)\n plt.gcf().subplots_adjust(bottom=0.15, left=0.2)\n plt.savefig(\"RG_n_{}_superconducting_correlation_l4.png\".format(n_qubits), format='PNG', dpi=300)\n plt.show()\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"ncrubin/qcpanop","sub_path":"examples/richardson_gaudin/rg_pccd_sim.py","file_name":"rg_pccd_sim.py","file_ext":"py","file_size_in_byte":17420,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"39311906445","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport sys\nimport pandas as pd\n\ndef histogram(file, get_head=False):\n sns.set(style=\"ticks\", color_codes=True)\n\n house_column = 'Hogwarts House'\n hist_col = 'Arithmancy'\n\n points = pd.read_csv(file).dropna()\n only_int = pd.DataFrame(points.select_dtypes(exclude=['object']))\n\n if (get_head is True):\n for col in only_int:\n cur_col = pd.DataFrame([points[house_column], only_int[col]]).T\n sns.catplot(x=house_column, y=col, kind='bar', data=cur_col)\n plt.show()\n return\n cur_col = pd.DataFrame([points[house_column], only_int[hist_col]]).T\n d = {}\n for truc in cur_col.groupby([points[house_column]]):\n d[truc[0]] = truc[1][hist_col]\n df = pd.DataFrame(d)\n df.plot.hist(alpha=0.5)\n plt.show()\n\nif __name__ == '__main__':\n if (len(sys.argv) > 1):\n try:\n histogram(sys.argv[1], len(sys.argv) > 2 and sys.argv[2] == \"-a\")\n except:\n print(\"error\")\n","repo_name":"ygarrot/dslr_v2","sub_path":"histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74896105473","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom .views import CourseViewSet, BranchViewSet, 
ContactViewSet\n\nrouter=routers.DefaultRouter()\nrouter.register('courses', CourseViewSet)\nrouter.register('branches', BranchViewSet)\nrouter.register('contacts', ContactViewSet)\n\nurlpatterns=[\n path('', include(router.urls))\n]","repo_name":"Fattijenishbek/Coursespro","sub_path":"courseapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44071648871","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contacts', '0011_auto_20150923_1641'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='savedsearch',\n options={'verbose_name_plural': 'recherches sauvegardées', 'verbose_name': 'recherche sauvegardée', 'ordering': ['name'], 'permissions': (('view_savedsearch', 'Can view a saved search'),)},\n ),\n migrations.AddField(\n model_name='alert',\n name='creation_date',\n field=models.DateTimeField(verbose_name='date de création', auto_now_add=True, default=datetime.datetime(2015, 9, 24, 9, 41, 30, 944159, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='alert',\n name='update_date',\n field=models.DateTimeField(verbose_name='date de mise à jour', default=datetime.datetime(2015, 9, 24, 9, 41, 36, 135, tzinfo=utc), auto_now=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='meeting',\n name='creation_date',\n field=models.DateTimeField(verbose_name='date de création', auto_now_add=True, default=datetime.datetime(2015, 9, 24, 9, 41, 40, 687772, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='meeting',\n name='update_date',\n field=models.DateTimeField(verbose_name='date de mise à jour', default=datetime.datetime(2015, 9, 24, 9, 41, 45, 59091, tzinfo=utc), auto_now=True),\n preserve_default=False,\n ),\n ]\n","repo_name":"vegaelle/pyru","sub_path":"contacts/migrations/0012_auto_20150924_1141.py","file_name":"0012_auto_20150924_1141.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72058606914","text":"from datetime import timedelta,datetime\n\n\ndef validate_bill(data):\n json_data = []\n for num in range(1, len(data)):\n if any( '-' in str(item) or item == '' for item in data[num]):\n continue\n #convert date\n data[num][4] = datetime.fromordinal(datetime(1900, 1, 1)\\\n .toordinal() + data[num][4] - 2).date()\n #validate sum\n data[num][3]= float(str(data[num][3]).replace(',','.'))\n json_data.append(dict(zip(data[0],data[num])))\n return json_data","repo_name":"denis200/drf_bill_handler","sub_path":"bills/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74355811713","text":"DOC_TREND_TYPE_CHOICES = (\n (1, 'Trend'),\n (2, 'Megatrend'),\n)\n\nDOC_UNCERTAINTIES_TYPE_CHOICES = (\n (1, 'Rationale'),\n (2, 'Data'),\n (3, 'Methodology (related to the model)'),\n)\n\nRELATION_TYPE_CHOICES = (\n (1, 'Cause-effect relationship'),\n (2, 'Neutral relationship'),\n)\n\nCANONICAL_ROLES = (\n 'Authenticated',\n 'Administrator',\n 'Contributor',\n 'Viewer',\n 
'Reviewer',\n)\n","repo_name":"eea/flis.horizon-scanning-tool","sub_path":"hstool/definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11486979408","text":"# https://programmers.co.kr/learn/courses/30/lessons/42627\n# 디스크 컨트롤러\n\n\nimport heapq as hq\n\ndef solution(jobs):\n total_len = len(jobs)\n\n jobs.sort(key=lambda x: (x[0], x[1]), reverse=True)\n readyQ = [(0,jobs.pop())] # [(priority, job), ]\n \n ans = 0\n curT = 0\n while readyQ:\n _, (insertT, processT) = hq.heappop(readyQ)\n if curT < insertT:\n curT = insertT + processT\n else:\n curT += processT\n ans += (curT - insertT) # (curT - insertT + processT)\n \n # insert tasks into readyQ (in order of processT)\n while jobs:\n insertT, processT = jobs[-1]\n if insertT <= curT:\n hq.heappush(readyQ, (processT, jobs.pop()))\n else:\n break\n \n if (not readyQ) and jobs:\n readyQ.append((0, jobs.pop()))\n \n return ans//total_len","repo_name":"treejw/python-for-coding-test","sub_path":"heap/disk_controllers.py","file_name":"disk_controllers.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29400378106","text":"# Program to detect faces in images\n# Modify original image at line 7\nimport cv2\n\n\n# Video source to use - cv2.VideoCapture(0) for default webcam\n# To use a camera: - set use_camera to True (line 11)\n# - set cv2.VideoCapture() to the right value (default camera is 0) (line 12)\n# To use a video : - set use_camera to False (line 11)\n# - set video_to_use to the right video filename (line 13)\nuse_camera = False\nsource_to_use = cv2.VideoCapture(0)\nvideo_to_use = cv2.VideoCapture(\"videos/normal_clip.mp4\")\nmin_neighbors = 30\n\n# Load pre-trained data on frontal faces from OpenCV (Haar Cascade algorithm)\ntrained_face_data = cv2.CascadeClassifier(\n \"algos/haarcascade_frontalface_default.xml\")\n\n# Iterate through the frames\nwhile True:\n # Read the current frame\n successful_frame, frame = source_to_use.read() if use_camera else video_to_use.read()\n\n # Make sure that the frame was captured\n if not successful_frame:\n if use_camera:\n print(\"Error: could not capture frame\")\n break\n\n # Convert the image to a grayscale\n grayscale_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Detect faces\n face_coords = trained_face_data.detectMultiScale(\n grayscale_img, minNeighbors=min_neighbors)\n\n # Draw a rectangle around each detected face\n for x, y, w, h in face_coords:\n #cv2.rectangle({IMAGE}, (x, y), (width, heigth), BGR(), THICKNESS)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # Display the images\n frame = cv2.resize(frame, (960, 540))\n cv2.imshow(\"Face Detector (Escape to exit)\", frame)\n\n # Wait for keyboard input before closing the image for a millisecond only\n key = cv2.waitKey(1)\n\n # Check if user pressed on the escape key, or q/Q (quit program)\n if key == 27 or key == 81 or key == 113:\n print(\"Exiting program...\")\n break\n\n# Release the camera if required\nif use_camera:\n source_to_use.release()\n\n# Confirm program is up and running\nprint(\"Program successfully exited...\")\n","repo_name":"z1chh/Face-Detection-App","sub_path":"face_detector.py","file_name":"face_detector.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
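The webcam/video loop above also generalizes to a one-shot, single-image variant; a minimal sketch (the image path `photo.jpg` and the output name are placeholders, and it assumes the same Haar-cascade XML used in the file):

```python
import cv2

# Placeholder paths -- substitute your own cascade file and input image.
cascade = cv2.CascadeClassifier("algos/haarcascade_frontalface_default.xml")
img = cv2.imread("photo.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# A lower minNeighbors finds more (and noisier) detections than the video's 30.
faces = cascade.detectMultiScale(gray, minNeighbors=5)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite("photo_with_faces.jpg", img)
```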
+{"seq_id":"13694732472","text":"from typing import Optional\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def flatten(self, root: Optional[TreeNode]) -> None:\n if not root:\n return\n\n nodes = []\n def dfs(prev, node):\n nonlocal nodes\n if not node:\n return\n\n nodes.append(node)\n prev.left = None\n dfs(node, node.left)\n dfs(node, node.right)\n\n def flatten_tree(i, node):\n if i == len(nodes):\n return\n node.right = nodes[i]\n flatten_tree(i+1, node.right)\n\n dfs(root, root.left)\n dfs(root, root.right)\n flatten_tree(0, root)","repo_name":"pdkz/leetcode","sub_path":"0114_Flatten_Binary_Tree_to_Linked_List/0114_Flatten_Binary_Tree_to_Linked_List.py","file_name":"0114_Flatten_Binary_Tree_to_Linked_List.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72985049473","text":"#!/usr/bin/env python3\n# Like makeblocks.py but for the overworld.\nfrom nts2shared import *\n\n# Globals\ndefault_palette = 0\ndefault_base = 0\nblock = None\npriority = False\nall_blocks = []\n\n# Read and process the file\nwith open(\"tools/overworldblocks.txt\") as f:\n text = [s.rstrip() for s in f.readlines()]\n\ndef saveBlock():\n\tif block == None:\n\t\treturn\n\tall_blocks.append(block)\n\n# Nametable format:\n# Bit 0-9 - Character Number (000h-3FFh)\n# Bit 10-12 - Palette Number (0-7)\n# Bit 13 - BG Priority (0=Lower, 1=Higher)\n# Bit 14 - X-Flip (0=Normal, 1=Mirror horizontally)\n# Bit 15 - Y-Flip (0=Normal, 1=Mirror vertically)\n\nfor line in text:\n\tif not len(line):\n\t\tcontinue\n\tif line.startswith(\"#\"): # comment\n\t\tcontinue\n\tif line.startswith(\"+\"): # new block\n\t\tsaveBlock()\n\t\t# Reset to prepare for the new block\n\t\tpriority = False\n\t\tblock = {\"name\": line[1:], \"tiles\": []}\n\t\tcontinue\n\tword, arg = separateFirstWord(line)\n\t# Miscellaneous directives\n\tif word == \"alias\":\n\t\tname, value = separateFirstWord(arg)\n\t\taliases[name] = value\n\n\t# Tile info shared with several blocks\n\telif word == \"base\":\n\t\tdefault_base = parseNumber(arg)\n\telif word == \"palette\":\n\t\tdefault_palette = parseNumber(arg)\n\n\t# Specifying tiles and tile attributes\n\telif word == \"priority\":\n\t\tpriority = True\n\telif word == \"no_priority\":\n\t\tpriority = False\n\telif word == \"t\": # add tiles\n\t\tsplit = arg.split(\" \")\n\t\tfor tile in split:\n\t\t\tblock[\"tiles\"].append(parseMetatileTile(tile, default_palette, default_base, priority))\n\telif word == \"q\": # add four tiles at once\n\t\ttile = parseMetatileTile(arg, default_palette, default_base, priority)\n\t\tblock[\"tiles\"] = [tile, tile+1, tile+16, tile+17]\n\n# Save the last one\nsaveBlock()\n\n# Generate the output that's actually usable in the game\noutfile = open(\"src/overworldblockdata.s\", \"w\")\n\noutfile.write('; This is automatically generated. 
Edit \"overworldblocks.txt\" instead\\n')\noutfile.write('.export OWBlockTopLeft, OWBlockTopRight, OWBlockBottomLeft, OWBlockBottomRight\\n')\noutfile.write('\\n.segment \"C_Overworld\"\\n\\n')\n\n# Block appearance information\ncorners = [\"TopLeft\", \"TopRight\", \"BottomLeft\", \"BottomRight\"]\nfor corner, cornername in enumerate(corners):\n\toutfile.write(\".proc OWBlock%s\\n\" % cornername)\n\tfor b in all_blocks:\n\t\toutfile.write(' .word $%.4x ; %s\\n' % (b['tiles'][corner], b['name']))\n\toutfile.write(\".endproc\\n\\n\")\noutfile.close()\n\n# Generate the enum in a separate file\noutfile = open(\"src/overworldblockenum.s\", \"w\")\noutfile.write('; This is automatically generated. Edit \"overworldblocks.txt\" instead\\n')\noutfile.write('.enum OverworldBlock\\n')\nfor b in all_blocks:\n\toutfile.write(' %s\\n' % b['name'])\noutfile.write('.endenum\\n\\n')\n","repo_name":"NovaSquirrel/NovaTheSquirrel2","sub_path":"tools/makeoverworldblocks.py","file_name":"makeoverworldblocks.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"61"} +{"seq_id":"35804022643","text":"from image import Image\nimport numpy\nfrom scipy.ndimage.morphology import binary_dilation\n\nclass Dilation(Image):\n\n def dilate(self, structure):\n img = self.get()\n result = binary_dilation(img, structure=structure)\n self.set(result)\n self.astype(img.dtype)\n\nif __name__ == \"__main__\":\n import sys\n i = Dilation(sys.argv[1])\n i.binary_set()\n structure = numpy.ones((int(sys.argv[2]), int(sys.argv[3])))\n i.dilate(structure)\n i.binary_normalize()\n print(\"IMG: \", i.get().max(), i.get())\n i.save_to_file(\"dilation.pbm\")\n","repo_name":"willgoncruz/unicamp","sub_path":"mc-920/t4/dilation.py","file_name":"dilation.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33822380274","text":"# You are given an array of prices where prices[i] is the price of a given stock on an ith day.\n# You want to maximize your profit by choosing a single day to buy one stock and choosing a different day in the future to sell that stock. Return the maximum profit you can achieve from this transaction. 
If you cannot achieve any profit, return 0.\n\ndef maxProfit(prices):\n down = prices[0]\n up = down\n bestSell = up - down\n for i in range(len(prices)):\n if prices[i] < down:\n down = prices[i]\n up = down\n if prices[i] > up:\n up = prices[i]\n if up - down > bestSell:\n bestSell = up - down\n return bestSell\n\n\nif __name__ == '__main__':\n print(maxProfit([7,6,4,3,1]))","repo_name":"Samarth-8836/algorithms-challenge","sub_path":"Arrays Part-I/Stock Buy and Sell.py","file_name":"Stock Buy and Sell.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41088758983","text":"import matplotlib.pyplot as plt\nimport os\nimport cv2\n\n\n# visualize the image size distribution of the specific image set\ndef visualize_distribution(images_root):\n pointX = []\n pointY = []\n folders = os.listdir(images_root)\n for folder in folders:\n if not os.path.isdir(images_root + '/' + folder):\n continue\n imgs = os.listdir(images_root + '/' + folder)\n for img in imgs:\n height, width = cv2.imread(images_root + '/' + folder + '/' + img).shape[:2]\n pointX.append(width)\n pointY.append(height)\n plt.scatter(pointX, pointY, s = 30, c = 'red')\n plt.xlabel('width')\n plt.ylabel('height')\n plt.savefig(images_root + '/face_train_size_distribution.jpg')\n plt.show()\n\n\n\n\n\ndef visualize_curve(log_root):\n log_file = open(log_root, \"r\")\n result_root = log_root[:log_root.rfind('/') + 1 ] + 'train.jpg'\n ac = []\n loss = []\n for line in log_file.readlines():\n line = line.strip().split()\n if len(line) < 8:\n continue\n if 'accuracy' in line[6]:\n string = line[6]\n ac.append(float(string.split('=')[1]))\n #if 'top_k_accuracy_5' in line[8]:\n #string = line[8]\n #ac_top5.append(float(string.split('=')[1]))\n if 'cross-entropy' in line[7]:\n string = line[7]\n loss.append(float(string.split('=')[1]))\n log_file.close()\n\n plt.figure('result')\n plt.subplot(211)\n plt.plot(ac)\n #plt.plot(ac_top5)\n plt.legend([\"accuracy\"],loc = 'upper right')\n plt.grid(True)\n plt.subplot(212)\n plt.plot(loss)\n plt.legend(['cross_entropy'],loc = 'upper right')\n plt.grid(True)\n plt.savefig(result_root)\n #plt.show()\n\nif __name__=='__main__':\n imgs_root = '/mnt/data-1/data/qi01.zhang/COCO/data/face_anno/data_unprocessed/train'\n #visualize_distribution(imgs_root)\n visualize_curve('/mnt/data-1/data/qi01.zhang/COCO/model_data/softmax-lr-0.1-wd-0.001-gamma-0.8-batch_size-64-face-mirror-inception/train.log')\n","repo_name":"labyrinth7x/PIPA_baseline","sub_path":"models/head/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"16101959608","text":"print(\"Welcome to the secret auction program.\")\nauction = {}\n\ndef check_winner(auction):\n max_val = 0\n winner = \"\"\n for key in auction:\n if auction[key] > max_val:\n max_val = auction[key]\n winner = key\n print(f\"The winner is {winner} with a bid of ${max_val}!\")\n\n\ndef cls():\n print(\"\\n\" * 50)\n\n\nwhile True:\n name = input(\"Enter your name: \")\n bid = int(input(\"Enter your bid: $\"))\n auction[name] = bid\n sentinel = input(\"Are there any other bidders? 
Type 'yes' or 'no'.\").lower()\n if sentinel == 'no':\n break\n cls()\n\ncheck_winner(auction)","repo_name":"jcongmon/python100DOC","sub_path":"Day 9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18564224279","text":"import pybars\n\nfrom auth import is_authorized\nfrom template import templates\n\n\ndef render_index(req, packet):\n\n index = templates[\"index.html\"]({})\n\n req.send(b\"HTTP/1.1 200 OK\\r\\nServer: FASS\\r\\nContent-Length: \" + bytes(str(len(index)), \"utf-8\") + b\"\\r\\n\\r\\n\")\n req.send(bytes(index, \"utf-8\"))\n\n\ndef render_admin(req, packet, err, errmsg):\n index = templates[\"login.html\"]({\"error\": err, \"errormsg\": errmsg})\n\n if is_authorized(packet):\n\n req.send(b\"HTTP/1.1 302 Found\\r\\nLocation: /dashboard\\r\\n\\r\\n\")\n return\n\n else:\n req.send(b\"HTTP/1.1 200 OK\\r\\nServer: FASS\\r\\nContent-Length: \" + bytes(str(len(index)), \"utf-8\") + b\"\\r\\n\\r\\n\")\n req.send(bytes(index, \"utf-8\"))\n","repo_name":"Lucas-S-T/FASS_Server","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2775656505","text":"import random\n\nHANGMAN = (\n \"\"\"\n-----\n| |\n|\n|\n|\n|\n|\n|\n|\n--------\n\"\"\",\n \"\"\"\n-----\n| |\n| 0\n|\n|\n|\n|\n|\n|\n--------\n\"\"\",\n \"\"\"\n-----\n| |\n| 0\n| -+-\n|\n|\n|\n|\n|\n--------\n\"\"\",\n \"\"\"\n-----\n| |\n| 0\n| /-+-\n|\n|\n|\n|\n|\n--------\n\"\"\",\n \"\"\"\n-----\n| |\n| 0\n| /-+-\\ \n|\n|\n|\n|\n|\n--------\n\"\"\",\n \"\"\"\n-----\n| |\n| 0\n| /-+-\\ \n| | \n|\n|\n|\n|\n--------\n\"\"\",\n \"\"\"\n-----\n| |\n| 0\n| /-+-\\ \n| | \n| | \n|\n|\n|\n--------\n\"\"\",\n \"\"\"\n-----\n| |\n| 0\n| /-+-\\ \n| | \n| | \n| |\n|\n|\n--------\n\"\"\",\n \"\"\"\n-----\n| |\n| 0\n| /-+-\\ \n| | \n| | \n| | \n| | \n|\n--------\n\"\"\",\n \"\"\"\n-----\n| |\n| 0\n| /-+-\\ \n| | \n| | \n| | | \n| | \n|\n--------\n\"\"\",\n \"\"\"\n-----\n| |\n| 0\n| /-+-\\ \n| | \n| | \n| | | \n| | | \n|\n--------\n\"\"\",\n)\n\n# list of words to guess\nword_list = [\"python\", \"table\", \"chairs\", \"computer\"]\n\n# choose first word at random\nword = list(random.choice(word_list)) # ['p','y'...]\n\n# list of guessed letters or '_' if not guessed\ndisplay = []\n\n# list holding all guessed letters\nguessed_letters = []\n\n# hold same content as word\ndisplay.extend(word)\nguessed_letters.extend(display)\n\nfor i in range(len(display)):\n display[i] = \"_\"\n\nprint(\" \".join(display))\nprint()\nprint(\"Total letters: \", len(display))\nprint()\n\n# we are counting life from 0 - 10\nlife = len(HANGMAN)\n\nwhile list.count(display, \"_\") > 0 and life != 0:\n guess = input(\"Please guess a letter or full word: \")\n guess = guess.lower()\n\n if guess == \"\".join(word):\n display = list(guess)\n print(\"\\nYay! 
You scored all letters!\")\n print(\" \".join(display))\n break\n\n for i in range(len(word)):\n if word[i] == guess and guess in guessed_letters:\n display[i] = guess\n guessed_letters.remove(guess)\n\n if guess not in display:\n print(\"Sorry, wrong guess\")\n # decrease life by 1\n life -= 1\n\n # as life is lost, this will count from 1, 2, 3 etc\n print(HANGMAN[(len(HANGMAN) - 1) - life])\n print(\" \".join(display))\n print()\n\nprint(\"\\nGame Over!\")\n","repo_name":"kingsley-ijomah/python-project-ideas","sub_path":"05_hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6651873881","text":"\ndef step(rest, lastStock, stock):\n lastTwoMean = stock + ((lastStock - stock)/2)\n print(\"lastStock: \" + str(lastStock))\n print(\"stock: \" + str(stock))\n print(\"lastTwoMean: \" + str(lastTwoMean))\n if stock == 0:\n leftOverStock = 0\n else:\n leftOverStock = rest/stock\n newStockAttempt = round(lastTwoMean * (1 - ((leftOverStock)**(3/2)-0.3)))\n print(\"newStock: \" + str(newStockAttempt))\n print(\"-------------------\")\n rest = round(int(input(\"rest?: \")))\n print(\"-------------------\")\n step(rest, stock, newStockAttempt)\n\n\nstep(0, 5, 0)\n","repo_name":"Gustavo-01/Projeto_Lic_Fisica","sub_path":"testFactoryProductionFunction.py","file_name":"testFactoryProductionFunction.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23581125381","text":"\r\nf = open(\"C:\\\\Users\\\\TocarIP\\\\Google Drive\\\\Downloads\\\\A-large.in\")\r\nlines = f.readlines()\r\nnumcases = int(lines[0])\r\ni = 1\r\npos = 1\r\nwhile i <= numcases:\r\n d,n = [int(x) for x in lines[pos].split()]\r\n hrs = []\r\n op =pos\r\n while pos < op+ n:\r\n pos +=1\r\n hrs.append([float(x) for x in lines[pos].split()])\r\n hrs = sorted(hrs,key=lambda x: x[0])[::-1]\r\n ind = 0\r\n arr = 0.0\r\n c_h = []\r\n while ind < len(hrs):\r\n h = hrs[ind]\r\n c_arr = (d-h[0])/h[1]\r\n #print(c_arr)\r\n if c_arr > arr:\r\n arr = c_arr\r\n ind +=1\r\n sp = d/arr\r\n res = \"Case #\" + str(i) + \": \" + str(sp)\r\n print (res)\r\n i = i+ 1\r\n pos +=1","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/609.py","file_name":"609.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5066707986","text":"# Status Constant Definition\nS_BRAKING = 1\nS_ENG_LIM_ACC = 2\nS_TIRE_LIM_ACC = 3\nS_SUSTAINING = 4\nS_DRAG_LIM = 5\nS_SHIFTING = 6\nS_TOPPED_OUT = 7\n\n# Output Index Constant Definitions (Columns)\nO_TIME = 0\nO_DISTANCE = 1\nO_VELOCITY = 2\nO_NF = 3\nO_NF2 = 4\nO_NR = 5\nO_NR2 = 6\nO_SECTORS = 7\nO_STATUS = 8\nO_GEAR = 9\nO_LONG_ACC = 10\nO_LAT_ACC = 11\nO_YAW_ACC = 12\nO_YAW_VEL = 13\nO_FF_REMAINING = 14\nO_FF2_REMAINING = 15\nO_FR_REMAINING = 16\nO_FR2_REMAINING = 17\n\nO_CURVATURE = 18\nO_ENG_RPM = 19\n\nO_CO2 = 20\n\nO_AERO_MODE = 21\n\nO_MATRIX_COLS = 22\nO_NAMES = [\"Time\",\"Distance\",\"Velocity\",\"Front Normal Force\",\"Front Normal Force 2\",\"Rear Normal Force\",\"Rear Normal Force 2\",\n \"Sector\",\"Status\",\"Gear\",\"Longitudinal Acc.\",\"Lateral Acc.\",\"Yaw Acceleration\", \"Yaw Velocity\",\n \"Remaining Front Force\",\"Remaining Front Force 2\", \"Remaining Rear Force\", \"Remaining Rear Force 2\", \"Curvature\", \"Engine Speed\", 
\"CO2\", \"Aero Mode\"]\nO_UNITS = [\"s\",\"ft\",\"ft/s\",\"lb\",\"lb\",\"lb\",\"lb\",\"\",\"\",\"\",\"G's\",\"G's\",\"rad/s^2\",\"rad/s\",\"lb\",\"lb\",\"lb\",\"lb\",\"ft^-1\",\"RPM\",\"lbm\",\"\"]\n\n# Shifting status codes\nIN_PROGRESS = 0\nJUST_FINISHED = 1\nNOT_SHIFTING = 2\n\n\n# Decision constants for DP\nD_SHIFT_UP = 3\nD_ACCELERATE = 0\nD_SHIFT_DOWN = 4\nD_SUSTAIN = 2\nD_BRAKE = 1\n\n# State Tuple Items for DP\nG_ID = 0 # int\nG_INDEX = 1 # list of ints of length p\nG_STEP = 2 # int\nG_PARENT_ID = 3 # int\nG_DECISION = 4 # int\nG_COST = 5 # float\nG_VELOCITY = 6 # float\nG_GEAR_DATA = 7 # tuple(int, int, float)\n\nAERO_FULL = 0\nAERO_DRS = 1\nAERO_BRK = 2\n","repo_name":"RoseGPE/RoseLapWeb","sub_path":"py/RoseLapCore/sims/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"61"} +{"seq_id":"20204750296","text":"import random\n\ndef rsp(user):\n computer_kor = [\"가위\", \"바위\", \"보\"]\n computer_num = ['0', '1', '2']\n computer = random.choice(computer_kor) # 컴퓨터가 가위, 바위, 보 중 무작위로 택 1\n\n # 양식에 맞지 않는 입력을 한 경우\n if not user in computer_kor:\n if not user in computer_num:\n print(\"잘못 입력되었습니다. 양식에 맞게 다시 입력해주세요\")\n return\n\n # 가위, 바위, 보를 입력한 경우\n if user == \"가위\" or user == \"바위\" or user == \"보\":\n print(f'사용자: {user}')\n print(f'컴퓨터: {computer}')\n\n # 0, 1, 2를 입력한 경우\n if user == '0':\n user = '가위' # 0으로 입력한 경우 가위로 바꿔서 처리\n print(f'사용자: {user}')\n print(f'컴퓨터: {computer}')\n\n if user == '1':\n user = '바위' # 1로 입력한 경우 바위로 바꿔서 처리\n print(f'사용자: {user}')\n print(f'컴퓨터: {computer}')\n\n if user == '2':\n user = '보' # 2로 입력한 경우 보로 바꿔서 처리\n print(f'사용자: {user}')\n print(f'컴퓨터: {computer}')\n\n\n # 가위, 바위, 보 수행\n if user == computer: # 비긴경우\n print(\"비겼습니다\")\n\n if (user == \"가위\" and computer == \"보\") or (user == \"바위\" and computer == \"가위\") or (user == \"보\" and computer == \"바위\"): # 사용자가 이기는 경우\n print(\"사용자가 이겼습니다\")\n\n if (computer == \"가위\" and user == \"보\") or (computer == \"바위\" and user == \"가위\") or (computer == \"보\" and user == \"바위\"): # 컴퓨터가 이기는 경우\n print(\"컴퓨터가 이겼습니다\")\n\n\nuserrsp = input(\"가위(0), 바위(1), 보(2) 중 하나를 선택하세요: \")\nrsp(userrsp)","repo_name":"kimdaeyeobbb/Programming-Language","sub_path":"Python/Study/PY4E 2022/Week 02/rockpaperscissors.py","file_name":"rockpaperscissors.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2027793464","text":"'''\nProf. 
Luiz Henrique\nThe if conditional works with comparisons:\nif one number is greater than another, then do this; OTHERWISE do that.\nHow do we express this in an algorithm?\nRelational operators\ngreater than >\ngreater than or equal >=\nless than <\nless than or equal <=\nequal ==\nnot equal !=\nLogical operators\nor\nand\nnot or\nnot and\ntruth table\n\np q or and\nT T T T\nT F T F\nF T T F\nF F F F\n\n'''\na = 15\nb = 14\nprint(a < b)\nif (a > b):\n    print('a is greater than b')\nelse:\n    print('a is less than b')\n# another example\n'''\nMathematical operators\naddition +\nsubtraction -\ndivision /\nmultiplication *\nremainder of the division %\nfloor-division quotient //\nexponentiation math.pow(base, exponent)\nsquare root: same process.\n'''\n# Performing basic operations\nnumero1 = int(10)\nnumero2 = int(5)\nnumero3 = 16\n# computing a sum\nsoma = int(numero1 + numero2)\nprint(f'The sum of {numero1} + {numero2} = {soma}')\n# computing a subtraction\nsubtracao = int(numero1 - numero2)\nprint(f'The subtraction of {numero1} - {numero2} = {subtracao}')\n# computing the division\ndivisao = float(numero1 / numero2)\nprint(f'The division of {numero1} / {numero2} = {divisao:.2f}')\n# computing the multiplication\nmultiplicacao = int(numero1 * numero2)\nprint(f'The multiplication of {numero1} * {numero2} = {multiplicacao}')\n# computing the precedence example\nprecedencia = float(numero1 * numero2 / numero1-numero1)\nprint(f'The precedence of {numero1} by {numero2} = {precedencia:.2f}')\n# why did it give this result?\n# computing the exponentiation\nexponenciacao = pow(numero1,2)\nprint(f'The exponentiation of {numero1}² = {exponenciacao}')\n# for the square root, just raise the base to 0.5 or 1/2\nraiz = pow(numero3,1/2)\n# computing the cube root\nraiz1 = pow(numero3,1/3)\nprint(f'The root of {numero3} = {raiz1}')\n# computing the remainder of the division\nresto = int(numero1%2)\nresto1 = int(numero1%3)\n# 10 % 2: what will the remainder be? it should be 0; \\n -> starts a new line\nprint(f'The remainder of {numero1} % 2 = {resto}\\n'\n      f'And the remainder of {numero1} % 3 = {resto1}')\n# computing the quotient\nquociente = int(numero1 // numero2)\nprint(f'The quotient of {numero1} // {numero2} = {quociente}')\n\n\n\n","repo_name":"luizhenriquefernandes/python","sub_path":"4operadores_logicos_relacionais/1operadores_logicos_relacionais.py","file_name":"1operadores_logicos_relacionais.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41797498661","text":"# Imports\nimport simpy\nfrom scipy.stats import expon\nimport random\nimport numpy as np\nimport pandas as pd\n\n# Global lists / DataFrames\nstations_list = []\nfinished_orders = 0\nearly_orders = 0\nearliness_list = []\ntardy_orders = 0\ntardiness_list = []\n\n# Routing of the product types\nrouting = {1: [1, 2, 3], 2: [2, 3, 1], 3: [3, 2, 1], 4: [3, 1], 5: [2, 3]}\n\n# Track order information\norder_number = 0\n\n# Simulation Parameters\nperiod_length = 1440\nperiod = 1\nnew_order_time = 80\nSIM_TIME = 1000000\nenv = simpy.Environment()\n\n# Order Pool\norder_pool = []\norder_pool_dict = dict()\n\n# Order tracking\norder_tracking_dict = dict()\norder_tracking_df = pd.DataFrame()\norder_features_df = pd.DataFrame()\n\n\n# Tracking\ndef order_track_creation(order, environment):\n    \"\"\"\n    Tracks the information of each order at the time it was created.\n    :param order: The newly created order.\n    :param environment: The orders' environment.\n    :return: Appends the information to the ta.order_tracking_dict.\n    \"\"\"\n    global order_tracking_dict\n    global period\n\n    order_tracking_dict[order.order_id] = dict()\n    
order_tracking_dict[order.order_id]['product_type'] = order.product_type\n    order_tracking_dict[order.order_id]['due_date'] = order.due_date\n    order_tracking_dict[order.order_id]['time_created'] = environment.now\n    order_tracking_dict[order.order_id]['period_created'] = period\n\n\ndef order_track_release(order, environment):\n    \"\"\"\n    Tracks the time the order was released.\n    :param order: The released order.\n    :param environment: The orders' environment.\n    :return: Appends the information to the ta.order_tracking_dict.\n    \"\"\"\n    global order_tracking_dict\n\n    if order.order_id in order_tracking_dict.keys():\n        order_tracking_dict[order.order_id]['time_released'] = environment.now\n\n\ndef order_track_finished(order_id, product_type, environment, station):\n    \"\"\"\n    This function first checks if the order visited its last station (== the order is finished).\n    If that is the case, the finishing time and the SFTT of the order are calculated.\n    :param order_id: The orders ID.\n    :param product_type: The orders' product type.\n    :param environment: The orders' environment.\n    :param station: The station the order visited.\n    :return: Appends the information to the ta.order_tracking_dict.\n    \"\"\"\n    global order_tracking_dict\n    global order_tracking_df\n\n    if order_id in order_tracking_dict.keys():\n        if station.number == routing.get(product_type)[-1]:\n            order_tracking_dict[order_id]['time_finished'] = environment.now\n\n            # Calculate SFTT\n            time_released = order_tracking_dict[order_id]['time_released']\n            order_tracking_dict[order_id]['sftt'] = environment.now - time_released\n\n            # Store information in ta.order_tracking_df\n            new_dict = order_tracking_dict.pop(order_id)\n            new_dict['order_id'] = order_id\n            new_df = pd.DataFrame(new_dict, index=['order_id'])\n            new_df.index.names = ['order_id']\n            order_tracking_df = pd.concat([order_tracking_df, new_df])\n\n\n# Track features\ndef get_wip():\n    \"\"\"\n    Calculates the WIP.\n    :return: Returns the wip.\n    \"\"\"\n    global stations_list\n\n    wip = 0\n    for station in stations_list:\n        wip += len(station.machine.queue)\n\n    wip += 3  # plus the orders assumed to be in service (one per capacity-1 station)\n    return wip\n\n\ndef nb_orders_queue_routing(product_type):\n    \"\"\"\n    Calculates the number of orders waiting in front of the stations the order needs to visit.\n    :param product_type: The orders' product type.\n    :return: Returns the number of orders waiting in front of the stations on the orders' routing.\n    \"\"\"\n    global stations_list\n    global routing\n\n    nb_queue_routing = 0\n\n    for station_nr in routing.get(product_type):\n        nb_queue_routing += len(stations_list[station_nr - 1].machine.queue)\n\n    return nb_queue_routing\n\n\ndef last_sftt(product_type):\n    \"\"\"\n    Takes the most recently finished sftt of the order with the same product type.\n    :param product_type: The orders' product type.\n    :return: Returns the sftt of the most recently finished order of the same product type.\n    \"\"\"\n    global order_tracking_df\n    try:\n        last_sftt_1 = order_tracking_df['sftt'].loc[order_tracking_df['product_type'] == product_type].tail(1).item()\n    except:\n        last_sftt_1 = 0\n    return last_sftt_1\n\n\ndef last_sftt_5(product_type):\n    \"\"\"\n    Takes the 5 most recently finished sftt values of orders with the same product type.\n    :param product_type: The orders' product type.\n    :return: Returns the mean and median sftt of the 5 most recently finished orders of the same product type.\n    \"\"\"\n    global order_tracking_df\n    try:\n        last_5_sftt = order_tracking_df['sftt'].loc[order_tracking_df['product_type'] == product_type].tail(5)\n\n        mean_last_sftt_5 = 
np.mean(last_5_sftt)\n        median_last_sftt_5 = np.median(last_5_sftt)\n\n    except:\n        mean_last_sftt_5 = 0\n        median_last_sftt_5 = 0\n\n    return mean_last_sftt_5, median_last_sftt_5\n\n\ndef last_sftt_50(product_type):\n    \"\"\"\n    Takes the 50 most recently finished sftt values of orders with the same product type.\n    :param product_type: The orders' product type.\n    :return: Returns the mean and median sftt of the 50 most recently finished orders of the same product type.\n    \"\"\"\n    global order_tracking_df\n\n    try:\n        last_50_sftt = order_tracking_df['sftt'].loc[order_tracking_df['product_type'] == product_type].tail(\n            50)\n\n        mean_last_sftt_50 = np.mean(last_50_sftt)\n        median_last_sftt_50 = np.median(last_50_sftt)\n    except:\n        mean_last_sftt_50 = 0\n        median_last_sftt_50 = 0\n    return mean_last_sftt_50, median_last_sftt_50\n\n\ndef collect_features(order):\n    \"\"\"\n    This function calls all of the feature functions above to collect the order's features.\n    :param order: The new order.\n    :return: Appends the information to the order_features_df\n    \"\"\"\n    global order_features_df\n\n    wip = get_wip()\n    nb_orders_routing_queue = nb_orders_queue_routing(order.product_type)\n    sftt_1 = last_sftt(order.product_type)\n    sftt_5_mean, sftt_5_median = last_sftt_5(order.product_type)\n    sftt_50_mean, sftt_50_median = last_sftt_50(order.product_type)\n\n    new_df = pd.DataFrame({'order_id': order.order_id,\n                           'wip': wip,\n                           'nb_order_queue_routing': nb_orders_routing_queue,\n                           'last_sftt': sftt_1,\n                           'last_5_sftt_mean': sftt_5_mean,\n                           'last_5_sftt_median': sftt_5_median,\n                           'last_50_sftt_mean': sftt_50_mean,\n                           'last_50_sftt_median': sftt_50_median,\n                           }, index=['order_id'])\n\n    order_features_df = pd.concat([order_features_df, new_df])\n\n\n# Sorting definitions\ndef edd(order_pool):\n    \"\"\"\n    This function sorts the given order_pool by earliest due date.\n    :param order_pool: The current order pool.\n    :return: Returns the list sorted by the earliest due date.\n    \"\"\"\n    return sorted(order_pool, key=lambda x: x.due_date)\n\n\ndef earliest_prd(order_pool):\n    \"\"\"\n    This function sorts the given order_pool by earliest planned release date.\n    :param order_pool: The current order pool.\n    :return: Returns the list sorted by the earliest planned release date.\n    \"\"\"\n    return sorted(order_pool, key=lambda x: x.prd)\n\n\n# Predict SFTT\ndef expected_sftt(order):\n    \"\"\"\n    This function calculates the expected mean SFTT for each order and subtracts it in the second step from\n    the order's due date. 
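The `last_sftt_*` helpers above all reduce to one pandas idiom — filter by product type, take the tail, aggregate. A self-contained sketch with a made-up SFTT history (the values and the window `k` are illustrative only):

```python
import numpy as np
import pandas as pd

history = pd.DataFrame({
    "product_type": [1, 2, 1, 1, 2, 1],
    "sftt":         [310.0, 280.0, 295.0, 330.0, 260.0, 305.0],
})

def last_k_sftt(df, product_type, k):
    """Mean and median SFTT over the k most recently finished orders."""
    tail = df.loc[df["product_type"] == product_type, "sftt"].tail(k)
    if tail.empty:          # mirrors the except/0 fallback used above
        return 0.0, 0.0
    return float(np.mean(tail)), float(np.median(tail))

print(last_k_sftt(history, 1, 3))  # (310.0, 305.0) over 295, 330, 305
```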
To do this, the number of stations on the order's routing is multiplied by the mean\n    production time of each station (100).\n    :param order: The current order.\n    \"\"\"\n    global order_pool_dict\n\n    order.prd = order.due_date - (len(routing.get(order.product_type)) * 100)\n    release_period = int((order.prd) / 1440)\n\n    if release_period in order_pool_dict.keys():\n        order_pool_dict[release_period].append(order)\n    else:\n        order_pool_dict[release_period] = [order]\n\n\n# BIL Release\ndef bil(order_pool_dict):\n    \"\"\"\n    This function filters the order pool dict so that all orders whose planned release period has been reached are released.\n    :param order_pool_dict: The existing dict in which all created orders are stored.\n    :return: A list of orders to be released.\n    \"\"\"\n    global period\n    try:\n        release_list = []\n        for periode in range(1, period + 1):\n            if periode in order_pool_dict.keys():\n                release_list.extend(order_pool_dict.get(periode))\n                del order_pool_dict[periode]\n    except:\n        release_list = []\n\n    return release_list\n\n\n# IR Release\ndef ir(order_pool):\n    \"\"\"\n    This function takes the order_pool and clears it.\n    :param order_pool: The current order pool\n    :return: returns a list of orders to be released.\n    \"\"\"\n    release_list = []\n    release_list.extend(order_pool)\n    order_pool.clear()\n\n    return release_list\n\n\ndef track_order(due_date, product_type, station_number, time):\n    \"\"\"\n    This function first checks if the order visited its last station (this means the order is finished) and later\n    it checks whether the order missed its due date or was finished on time.\n    :param due_date: The orders' due date.\n    :param product_type: The orders' product type.\n    :param station_number: The current station the order visited.\n    :param time: The time the order left the station.\n    \"\"\"\n    global finished_orders\n    global early_orders\n    global earliness_list\n    global tardy_orders\n    global tardiness_list\n\n    if station_number == routing.get(product_type)[-1]:\n        finished_orders += 1\n\n        # If the order is finished before its due date it is an early order\n        if time < due_date:\n            early_orders += 1\n            earliness = due_date - time\n            earliness_list.append(earliness)\n        else:\n            tardy_orders += 1\n            tardiness = due_date - time\n            tardiness_list.append(tardiness)\n\n\nclass Order:\n    \"\"\"\n    In this class, orders are first generated and afterwards sent to the stations on their routing, where the order\n    is handled. Also, the tracking for each order is done here.\n    \"\"\"\n    global order_number\n    global period_length\n    global new_order_time\n    global stations_list\n    global order_pool\n    global period\n\n    def __init__(self, environment, order_id, product_type, due_date):\n        \"\"\"\n        Here the variables for the orders are defined.\n        :param environment: SimPy Environment()\n        \"\"\"\n        self.env = environment\n        self.order_id = order_id  # Identifier for each order\n        self.product_type = product_type\n        self.due_date = due_date\n        self.prd = 0\n\n    def handle_order(self, station):\n        \"\"\"\n        The order requests the station. 
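`bil` above drains every period bucket up to the current period; the mechanics are easier to see in isolation. A sketch with string stand-ins for order objects and the global `period` made an explicit argument (names and payloads are made up):

```python
def release_due_buckets(order_pool_dict, period):
    """Backward-infinite-loading style release: empty all buckets <= period."""
    release_list = []
    for p in range(1, period + 1):
        if p in order_pool_dict:
            release_list.extend(order_pool_dict.pop(p))
    return release_list

pool = {1: ["order_a"], 3: ["order_b", "order_c"], 5: ["order_d"]}
print(release_due_buckets(pool, 3))  # ['order_a', 'order_b', 'order_c']
print(pool)                          # {5: ['order_d']}
```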
Whether the order needs to wait or is immediately processed.\n :param station: The station for the order on the routing.\n \"\"\"\n\n # Order requests the station\n with station.machine.request() as request:\n print(f\"Order with order_id {self.order_id} arrives at station {station.number} at {self.env.now}\")\n yield request\n # Get Processing time\n processing_time = expon.rvs(scale=100).round()\n # Use the station\n print(f\"Order with order_id {self.order_id} is going to be processed at station {station.number} at \"\n f\"{self.env.now} with processing time {processing_time}\")\n yield self.env.timeout(processing_time)\n print(f\"Order with order_id {self.order_id} leaves the station {station.number} at {self.env.now}\")\n\n # Track orders, if finished\n track_order(self.due_date, self.product_type, station.number, self.env.now)\n order_track_finished(self.order_id, self.product_type, self.env, station)\n\n def get_station(self):\n \"\"\"\n The next station on the product types routing is selected.\n :return: Sending the order to the next station (handle_order() ).\n \"\"\"\n global stations_list\n\n # Get the orders stations\n stations = routing.get(self.product_type)\n\n # Iterate over each station\n for station in stations:\n # Send to the next station\n station = stations_list[station - 1]\n yield self.env.process(self.handle_order(station))\n\n def generate_orders(self):\n \"\"\"\n In this function new orders are created. each order gets an order_id, then a random product type\n and the orders due date is calculated. A new order is created after the specified time above.\n \"\"\"\n global order_number\n global period\n\n # Global order_id\n order_number += 1\n\n # Order attributes\n self.order_id = order_number\n self.product_type = random.randint(1, 5)\n self.due_date = self.env.now + (random.randint(2, 15) * period_length)\n\n # Create new Order\n order_new = Order(self.env, self.order_id, self.product_type, self.due_date)\n\n # Track order\n order_track_creation(order_new, self.env)\n collect_features(order_new)\n\n # Predict SFTT\n expected_sftt(order_new)\n\n # Append order to order_pool list\n order_pool.append(order_new)\n\n while True:\n yield self.env.timeout(new_order_time)\n\n # Increase order_id\n order_number += 1\n\n # Order attributes\n self.order_id = order_number\n self.product_type = random.randint(1, 5)\n self.due_date = self.env.now + (random.randint(2, 15) * period_length)\n\n # Create new order\n order_new = Order(self.env, self.order_id, self.product_type, self.due_date)\n\n # Track order\n order_track_creation(order_new, self.env)\n collect_features(order_new)\n\n # Predict SFTT\n expected_sftt(order_new)\n\n # Append order to order_pool list\n order_pool.append(order_new)\n\n if self.env.now >= period * period_length:\n # Increase period for periodic release\n period += 1\n\n for order_created in earliest_prd(bil(order_pool_dict)):\n # Send order to the first stations\n self.env.process(order_created.get_station())\n\n # Track order release\n order_track_release(order_created, self.env)\n\n\n# Initialize the station class\nclass Station:\n \"\"\"\n This class contains the stations used in the simulation.\n \"\"\"\n\n def __init__(self, number, environment):\n self.env = environment\n self.number = number\n self.machine = simpy.Resource(environment, 1)\n\n\n# Create 3 stations\nstation1 = Station(1, env)\nstation2 = Station(2, env)\nstation3 = Station(3, env)\n\n# Append stations to the 
stations_list\nstations_list.append(station1)\nstations_list.append(station2)\nstations_list.append(station3)\n\n# Create instance of class Order\norder = Order(env, 1, 1, 1)\n\nenv.process(order.generate_orders())\n\n# Simulation RunTime\nenv.run(until=SIM_TIME)\n\n# Print Performance\nscenario = 'IR_EDD'\nprint(f\"###{scenario}: In total {order_number} Orders were created.\")\nprint(f\"###{scenario}: {finished_orders} Orders were finished.\")\nprint(f\"###{scenario}: {early_orders} Orders were finished in time.\")\nprint(f\"###{scenario}: {tardy_orders} Orders were finished too late.\")\nmean_earliness = np.sum(earliness_list) / early_orders\nmean_tardiness = np.sum(tardiness_list) / tardy_orders\nprint(f\"###{scenario}: Mean earliness {mean_earliness}.\")\nprint(f\"###{scenario}: Mean tardiness {mean_tardiness}.\")\n\nl = order_tracking_df\nf = order_features_df\n\nfinal_df = order_tracking_df.merge(order_features_df, how='left', left_on=order_tracking_df['order_id'],\n right_on=order_features_df['order_id'])\n\nfinal_df.to_csv('name.csv')\n","repo_name":"GeorgKaltenbrunner/SimPy_Series","sub_path":"third_article_features.py","file_name":"third_article_features.py","file_ext":"py","file_size_in_byte":16152,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23860371598","text":"import math\n# region fastio\nimport os\nimport sys\nfrom io import BytesIO, IOBase\n\nBUFSIZE = 8192\n\n\nclass FastIO(IOBase):\n newlines = 0\n\n def __init__(self, file):\n self._fd = file.fileno()\n self.buffer = BytesIO()\n self.writable = \"x\" in file.mode or \"r\" not in file.mode\n self.write = self.buffer.write if self.writable else None\n\n def read(self):\n while True:\n b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))\n if not b:\n break\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines = 0\n return self.buffer.read()\n\n def readline(self):\n while self.newlines == 0:\n b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))\n self.newlines = b.count(b\"\\n\") + (not b)\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines -= 1\n return self.buffer.readline()\n\n def flush(self):\n if self.writable:\n os.write(self._fd, self.buffer.getvalue())\n self.buffer.truncate(0), self.buffer.seek(0)\n\n\nclass IOWrapper(IOBase):\n def __init__(self, file):\n self.buffer = FastIO(file)\n self.flush = self.buffer.flush\n self.writable = self.buffer.writable\n self.write = lambda s: self.buffer.write(s.encode(\"ascii\"))\n self.read = lambda: self.buffer.read().decode(\"ascii\")\n self.readline = lambda: self.buffer.readline().decode(\"ascii\")\n\n\nsys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)\ninput = lambda: sys.stdin.readline().rstrip(\"\\r\\n\")\n\n# endregion\nn = int(input())\narr = list(map(int, input().split()))\ndp = [[-math.inf for _ in range(n+1)] for __ in range(n+1)]\n#dp2 = [[0 for _ in range(n+1)] for __ in range(n+1)]\nans = 0\ndp[0][0] = 0\nfor i in range(1, n+1):\n dp[i][0] = 0\n for j in range(1, i+1):\n if dp[i-1][j-1] + arr[i-1] >= 0:\n dp[i][j] = max(dp[i-1][j-1] + arr[i-1], dp[i-1][j])\n ans = max(ans, j)\n elif dp[i-1][j] != -math.inf:\n dp[i][j] = dp[i-1][j]\nprint(ans)\nprint(dp)\n'''for _ in range(int(input())):\n s = input()\n c1 = 0\n c2 = 0\n if \"ab\" in s:\n c1 += s.count(\"ab\")\n if \"ba\" in s:\n c2 += s.count(\"ba\")\n if c1 == c2:\n print(s)\n 
continue\n else:\n arr = list(s)\n arr2 = list(s)[::-1]\n if c1 > c2:\n z = s.index(\"aa\")\n arr[z] = \"b\"\n print(\"\".join(arr))\n else:\n z = s[::-1].index(\"bb\")\n print(s[::-1])\n arr2[z] = \"b\"\n print(arr2)\n print(\"\".join(arr2[::-1]))'''\n\n'''try:\n s = input(\"Enter the expression you wat to simplify: \")\n print(eval(s))\nexcept:\n print(\"Give Expression cannot be Evaluated\")\n\ns = int(input(\"Enter distance in Kms: \"))\nprint(s * (0.621))'''\n\n'''import numpy\nn, m = list(map(int, input(\"Enter Dimensions of matrixes: \").split()))\nmatrix1 = []\nprint(\"Input Values for matrix1\")\nfor i in range(n):\n matrix1.append(list(map(int, input().split())))\nmatrix2 = []\nprint(\"Input Values for matrix2\")\nfor i in range(n):\n matrix2.append(list(map(int, input().split())))\nmatrix2 = numpy.array(matrix2)\nmatrix1 = numpy.array(matrix1)\nss = numpy.add(matrix1, matrix2)\nfor k in ss:\n for j in k:\n print(j, end=' ')\n print()'''\n\n'''n, m = list(map(int, input(\"Enter Dimensions of matrixes: \").split()))\nmatrix1 = []\nprint(\"Input Values for matrix1\")\nfor i in range(n):\n matrix1.append(list(map(int, input().split())))\nmatrix2 = []\nprint(\"Input Values for matrix2\")\nfor i in range(n):\n matrix2.append(list(map(int, input().split())))\n\nfor k, kk in zip(matrix1, matrix2):\n for j, jj in zip(k, kk):\n print(j + jj, end=' ')\n print()'''","repo_name":"35C4n0r/Codeforces-Py-","sub_path":"PycharmProjects/Codeforces/Potions (Easy Version).py","file_name":"Potions (Easy Version).py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10240438785","text":"# Copyright (C) 2021 Intel Corporation\r\n# SPDX-License-Identifier: BSD-3-Clause\r\n# See: https://spdx.org/licenses/\r\nimport typing as ty\r\n\r\nimport numpy.typing as npt\r\nfrom lava.lib.optimization.problems.problems import OptimizationProblem\r\nfrom lava.lib.optimization.solvers.generic.builder import SolverProcessBuilder\r\nfrom lava.lib.optimization.solvers.generic.hierarchical_processes import \\\r\n StochasticIntegrateAndFire\r\nfrom lava.lib.optimization.solvers.generic.sub_process_models import \\\r\n StochasticIntegrateAndFireModel\r\nfrom lava.magma.core.resources import AbstractComputeResource, CPU, \\\r\n Loihi2NeuroCore, NeuroCore\r\nfrom lava.magma.core.run_conditions import RunContinuous, RunSteps\r\nfrom lava.magma.core.run_configs import Loihi1SimCfg, Loihi2HwCfg\r\nfrom lava.magma.core.sync.protocol import AbstractSyncProtocol\r\nfrom lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol\r\nfrom lava.proc.dense.models import PyDenseModelFloat\r\nfrom lava.proc.dense.process import Dense\r\nfrom lava.proc.read_gate.models import ReadGatePyModel\r\nfrom lava.proc.read_gate.process import ReadGate\r\n\r\nBACKENDS = ty.Union[CPU, Loihi2NeuroCore, NeuroCore, str]\r\nCPUS = [CPU, \"CPU\"]\r\nNEUROCORES = [Loihi2NeuroCore, NeuroCore, \"Loihi2\"]\r\n\r\n\r\ndef solve(problem: OptimizationProblem,\r\n timeout: int,\r\n target_cost: int = None,\r\n backend: BACKENDS = Loihi2NeuroCore) -> \\\r\n npt.ArrayLike:\r\n \"\"\"Create solver from problem spec and run until target_cost or timeout.\r\n\r\n Parameters\r\n ----------\r\n problem: Optimization problem to be solved.\r\n timeout: Maximum number of iterations (timesteps) to be run. 
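For the Codeforces snippet above ("Potions (Easy Version)"): the O(n²) DP works, but the standard O(n log n) solution is a greedy with a min-heap — drink every potion, and whenever health would drop below zero, undo the single worst potion drunk so far. A sketch (it reproduces the sample answer 5 for [4, -4, 1, -3, 1, -3]):

```python
import heapq

def max_potions(arr):
    drunk = []   # min-heap of potions consumed so far
    health = 0
    for x in arr:
        heapq.heappush(drunk, x)
        health += x
        if health < 0:
            health -= heapq.heappop(drunk)  # undo the most negative potion
    return len(drunk)

print(max_potions([4, -4, 1, -3, 1, -3]))  # 5
```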
If set to\r\n    -1 then the solver will run continuously in non-blocking mode until a\r\n    solution is found.\r\n    target_cost: A cost value provided by the user as a target for the\r\n    solution to be found by the solver; when a solution with such cost is\r\n    found and read, execution ends.\r\n    backend: Specifies the backend where the main solver network will be\r\n    deployed.\r\n\r\n    Returns\r\n    ----------\r\n    solution: candidate solution to the input optimization problem.\r\n    \"\"\"\r\n    solver = OptimizationSolver(problem)\r\n    solution = solver.solve(timeout=timeout, target_cost=target_cost,\r\n                            backend=backend)\r\n    return solution\r\n\r\n\r\nclass OptimizationSolver:\r\n    \"\"\"Generic solver for constrained optimization problems defined by\r\n    variables, cost and constraints.\r\n\r\n    The problem should behave according to the OptimizationProblem's\r\n    interface so that the Lava solver can be built correctly.\r\n\r\n    A Lava OptimizationSolverProcess and a Lava OptimizationSolverModel will\r\n    be created from the problem specification. The dynamics of such a process\r\n    implement the algorithms that search for a solution to the problem and\r\n    report it to the user.\r\n\r\n    Parameters\r\n    ----------\r\n    problem: Optimization problem to be solved.\r\n    run_cfg: Run configuration for the OptimizationSolverProcess.\r\n\r\n    \"\"\"\r\n\r\n    def __init__(self,\r\n                 problem: OptimizationProblem,\r\n                 run_cfg=None):\r\n        self.problem = problem\r\n        self._run_cfg = run_cfg\r\n        self._process_builder = SolverProcessBuilder()\r\n        self.solver_process = None\r\n        self.solver_model = None\r\n\r\n    @property\r\n    def run_cfg(self):\r\n        \"\"\"Run configuration for process model selection.\"\"\"\r\n        return self._run_cfg\r\n\r\n    @run_cfg.setter\r\n    def run_cfg(self, value):\r\n        self._run_cfg = value\r\n\r\n    def solve(self,\r\n              timeout: int,\r\n              target_cost: int = 0,\r\n              backend: BACKENDS = CPU,\r\n              hyperparameters: ty.Dict[\r\n                  str, ty.Union[int, npt.ArrayLike]] = None) \\\r\n            -> npt.ArrayLike:\r\n        \"\"\"Create solver from problem spec and run until target_cost or timeout.\r\n\r\n        Parameters\r\n        ----------\r\n        timeout: Maximum number of iterations (timesteps) to be run. If set to\r\n        -1 then the solver will run continuously in non-blocking mode until a\r\n        solution is found.\r\n        target_cost: A cost value provided by the user as a target for the\r\n        solution to be found by the solver; when a solution with such cost is\r\n        found and read, execution ends.\r\n        backend: Specifies the backend where the main solver network will be\r\n        deployed.\r\n        hyperparameters: A dictionary specifying values for steps_to_fire,\r\n        noise_amplitude, step_size and init_value. 
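The `hyperparameters` argument described here is a plain dictionary; a sketch of what a caller might pass (the keys follow the docstring — steps_to_fire, noise_amplitude, step_size, init_value — but the values are illustrative, not tuned):

```python
import numpy as np

hyperparameters = {
    "steps_to_fire": 10,       # integer, per the docstring
    "noise_amplitude": 1,      # integer
    "step_size": 10,           # integer
    "init_value": np.zeros(4, dtype=int),  # one initial value per variable
}

# Hypothetical call, reusing the solve() signature shown above:
# solution = solver.solve(timeout=1000, target_cost=0, backend="CPU",
#                         hyperparameters=hyperparameters)
```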
All but the last are\r\n integers, the initial value is an array-like of initial values for the\r\n variables defining the problem.\r\n\r\n Returns\r\n ----------\r\n solution: candidate solution to the input optimization problem.\r\n\r\n \"\"\"\r\n run_cfg = None\r\n if not self.solver_process:\r\n self._create_solver_process(self.problem, target_cost, backend,\r\n hyperparameters)\r\n if backend in CPUS:\r\n pdict = {self.solver_process: self.solver_model,\r\n ReadGate: ReadGatePyModel,\r\n Dense: PyDenseModelFloat,\r\n StochasticIntegrateAndFire: StochasticIntegrateAndFireModel\r\n }\r\n run_cfg = Loihi1SimCfg(exception_proc_model_map=pdict,\r\n select_sub_proc_model=True)\r\n elif backend in NEUROCORES:\r\n raise NotImplementedError(\"Loihi backend will be supported in an \"\r\n \"upcomming release and requires the \"\r\n \"lava-on-loihi extension of Lava, \"\r\n \"verify you are running the latest \"\r\n \"release of this library.\")\r\n else:\r\n raise NotImplementedError(str(backend) + backend_msg)\r\n self.solver_process._log_config.level = 20\r\n self.solver_process.run(\r\n condition=RunContinuous()\r\n if timeout == -1\r\n else RunSteps(num_steps=timeout),\r\n run_cfg=run_cfg,\r\n )\r\n if timeout == -1:\r\n self.solver_process.wait()\r\n solution = self.solver_process.variable_assignment.aliased_var.get()\r\n self.solver_process.stop()\r\n return solution\r\n\r\n def _create_solver_process(self,\r\n problem: OptimizationProblem,\r\n target_cost: ty.Optional[int] = None,\r\n backend: BACKENDS = None,\r\n hyperparameters: ty.Dict[\r\n str, ty.Union[int, npt.ArrayLike]] = None):\r\n \"\"\"Create process and model class as solver for the given problem.\r\n\r\n Parameters\r\n ----------\r\n problem: Optimization problem defined by cost and constraints which\r\n will be used to build the process and its model.\r\n target_cost: A cost value provided by the user as a target for the\r\n solution to be found by the solver, when a solution with such cost is\r\n found and read, execution ends.\r\n backend: Specifies the backend where the main solver network will be\r\n deployed.\r\n \"\"\"\r\n requirements, protocol = self._get_requirements_and_protocol(backend)\r\n self._process_builder.create_solver_process(problem, hyperparameters\r\n or dict())\r\n self._process_builder.create_solver_model(target_cost,\r\n requirements,\r\n protocol)\r\n self.solver_process = self._process_builder.solver_process\r\n self.solver_model = self._process_builder.solver_model\r\n\r\n def _get_requirements_and_protocol(self,\r\n backend: BACKENDS) -> \\\r\n ty.Tuple[\r\n AbstractComputeResource, AbstractSyncProtocol]:\r\n \"\"\"Figure out requirements and protocol for a given backend.\r\n\r\n Parameters\r\n ----------\r\n backend: Specifies the backend for which requirements and protocol\r\n classes will be returned.\r\n\r\n \"\"\"\r\n protocol = LoihiProtocol\r\n if backend in CPUS:\r\n return [CPU], protocol\r\n elif backend in NEUROCORES:\r\n return [Loihi2NeuroCore], protocol\r\n else:\r\n raise NotImplementedError(str(backend) + backend_msg)\r\n\r\n\r\n# TODO throw an error if L2 is not present and the user tries to use it.\r\nbackend_msg = f\"\"\" was requested as backend. 
However,\r\nthe solver currently supports only Loihi 2 and CPU backends.\r\nThese can be specified by calling solve with any of the following:\r\n\r\n    backend = \"CPU\"\r\n    backend = \"Loihi2\"\r\n    backend = CPU\r\n    backend = Loihi2NeuroCore\r\n    backend = NeuroCore\r\n\r\nThe explicit resource classes can be imported from\r\nlava.magma.core.resources\r\n\"\"\"\r\n","repo_name":"phillipmacon/lava-optimization","sub_path":"src/lava/lib/optimization/solvers/generic/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":9022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71254710593","text":"from rest_framework import serializers\n\nfrom .models import Comment, Review\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n    author = serializers.SlugRelatedField(\n        read_only=True,\n        slug_field='username',\n    )\n\n    def validate(self, data):\n        reviews = Review.objects.filter(\n            title=self.context.get('view').kwargs.get('title_id'),\n            author=self.context.get('request').user\n        ).exists()\n\n        if reviews and self.context.get('request').method == 'POST':\n            raise serializers.ValidationError('Review already exists')\n        return data\n\n    class Meta:\n        fields = ('id', 'text', 'author', 'score', 'pub_date')\n        model = Review\n\n        extra_kwargs = {\n            'author': {'required': False},\n            'title': {'required': False},\n        }\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n    author = serializers.SlugRelatedField(\n        read_only=True,\n        slug_field='username',\n    )\n\n    class Meta:\n        fields = ('id', 'text', 'author', 'pub_date')\n        model = Comment\n\n        extra_kwargs = {\n            'pub_date': {'required': False},\n            'review': {'required': False},\n            'author': {'required': False},\n        }\n","repo_name":"zaleksandrne/infra_sp2","sub_path":"reviews/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18168544041","text":"\nimport nltk\nnltk.data.path.append('/home/guozl-s20/data/nltk_data')\n\nimport json\nimport os,sys \nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) \nimport argparse\nimport functools\nimport _jsonnet\nimport tqdm\nimport torch \nimport time \n\nfrom duorat.asdl.asdl_ast import AbstractSyntaxTree\nfrom duorat.types import RATPreprocItem\nfrom duorat.utils import registry, parallelizer\nfrom duorat.api import DuoratAPI, DuoratOnDatabase\nfrom duorat.utils.evaluation import find_any_config\nfrom duorat.preproc.utils import preprocess_schema_uncached\nfrom duorat.datasets.spider import SpiderDataset, SpiderItem\nfrom duorat.preproc.slml import pretty_format_slml\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        \"Infer queries for questions about a data section. The output format\"\\\n        \" is compatible with official Spider eval scripts.\")\n    parser.add_argument(\"--logdir\", required=True)\n    parser.add_argument(\"--config\",\n                        help=\"The configuration file. 
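The solver module that closes above accepts a backend either as a resource class or as its string alias, and dispatches by membership in mixed lists. A standalone sketch of that dispatch pattern follows; the stub classes here stand in for, and are not, the real lava resource classes.

```python
# Stub classes standing in for lava's CPU / Loihi2NeuroCore resources;
# this illustrates the pattern, not the real lava API.
class CPU: ...
class Loihi2NeuroCore: ...

# A mixed list lets `in` match either the class object or its alias string.
CPUS = [CPU, "CPU"]
NEUROCORES = [Loihi2NeuroCore, "Loihi2"]

def get_requirements(backend):
    if backend in CPUS:
        return [CPU]
    if backend in NEUROCORES:
        return [Loihi2NeuroCore]
    raise NotImplementedError(f"{backend} is not a supported backend.")

assert get_requirements("CPU") == [CPU]
assert get_requirements(Loihi2NeuroCore) == [Loihi2NeuroCore]
```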
By default, an arbitrary configuration from the logdir is loaded\")\n parser.add_argument(\"--data-config\",\n help=\"Dataset section configuration\",\n required=True)\n parser.add_argument(\n \"--questions\",\n help=\"The path to the questions in Spider format.\"\n \"By default, use questions specified by --data-config\",\n )\n parser.add_argument(\n \"--output-spider\",\n help=\"Path to save outputs in the Spider format\")\n parser.add_argument(\n \"--output-google\",\n help=\"Path to save output in the Google format\")\n\n parser.add_argument(\n \"--step\") \n parser.add_argument(\n \"--prefix\", default='\"data/\"'\n ) \n parser.add_argument(\n \"--nproc\", default=1\n )\n\n args = parser.parse_args()\n\n if args.output_spider is None and args.output_google is None:\n raise ValueError(\"specify output destination in either Google or Michigan format\")\n\n config_path = find_any_config(args.logdir) if args.config is None else args.config\n api = DuoratAPI(args.logdir, config_path, args.step)\n\n data_config = json.loads(_jsonnet.evaluate_file(\n args.data_config, tla_codes={'prefix': args.prefix}))\n if data_config['name'] != 'spider':\n raise ValueError()\n del data_config['name']\n if args.questions:\n data_config['paths'] = [args.questions]\n dataset = SpiderDataset(**data_config)\n\n sql_schemas = {}\n for db_id in dataset.schemas:\n spider_schema = dataset.schemas[db_id]\n sql_schemas[db_id] = preprocess_schema_uncached(\n schema=spider_schema,\n db_path=dataset.get_db_path(db_id),\n tokenize=api.preproc._schema_tokenize,\n )\n\n if args.output_spider and os.path.exists(args.output_spider):\n os.remove(args.output_spider)\n\n output_items = []\n\n orig_data = []\n preproc_data = []\n\n print('preprocessing data...')\n for item in tqdm.tqdm(dataset):\n db_id = item.spider_schema.db_id\n\n spider_item = SpiderItem(\n question=item.question,\n slml_question=None,\n query=\"\",\n spider_sql={},\n spider_schema=item.spider_schema,\n db_path=\"\",\n orig={'question_id':item.orig['question_id']},\n )\n preproc_item: RATPreprocItem = api.preproc.preprocess_item(\n spider_item,\n sql_schemas[db_id],\n AbstractSyntaxTree(production=None, fields=(), created_time=None),\n )\n orig_data.append(spider_item)\n preproc_data.append(preproc_item)\n\n print('start infer...')\n if torch.cuda.is_available():\n cp = parallelizer.CUDAParallelizer(int(args.nproc))\n else:\n cp = parallelizer.CPUParallelizer(int(args.nproc))\n inferred_lines = cp.parallel_map(\n [\n (\n functools.partial(\n DuoratAPI.parse_single2,\n api.model,\n beam_size=1,\n decode_max_time_step=500,\n ),\n list(enumerate(zip(orig_data, preproc_data))),\n )\n ]\n )\n inferred_lines = list(inferred_lines)\n\n with open(args.output_spider, \"w\") as output_dst:\n for line in inferred_lines:\n # print(line)\n output_dst.write(line)\n\n","repo_name":"hyc2026/text2sql","sub_path":"scripts/infer_questions_multi_proc.py","file_name":"infer_questions_multi_proc.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33740351541","text":"import cv2\nimport numpy as np\n\nfrom utils import general\nfrom utils.log import LogFactory\n\n\nclass PerPixelDetector:\n def __init__(self, model, grid_size, logger=LogFactory.get_default_logger()):\n self.model = model\n self.grid_size = grid_size\n self.logger = logger\n\n def detect(self, image):\n \"\"\"\n Gets the image skin region by building a window around each pixel.\n Result is smooth but processing 
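The inference script above pre-binds the model and decoding options with functools.partial before handing one callable to a parallel map. A minimal self-contained sketch of that idiom; parse_single is a stand-in function, not the DuoRAT API.

```python
import functools

def parse_single(model, item, beam_size=1):
    # Stand-in for a per-example inference call (e.g. something like
    # DuoratAPI.parse_single2 in the script above).
    return f"{model}:{item}:beam={beam_size}"

# Bind the fixed arguments once; map the resulting callable over work items.
worker = functools.partial(parse_single, "my-model", beam_size=5)
print(list(map(worker, ["q1", "q2", "q3"])))
# ['my-model:q1:beam=5', 'my-model:q2:beam=5', 'my-model:q3:beam=5']
```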
time is increased.\n \"\"\"\n return self.__detect_template(image)\n\n def detect_with_mask(self, image, mask):\n \"\"\"\n Gets the image skin region by building a window around each pixel.\n Uses mask to only consider pixels that have been marked.\n \"\"\"\n def mask_filter(x, y): return np.all(mask[x, y] == 0)\n return self.__detect_template(image, mask_filter)\n\n def __detect_template(self, image, mask=lambda x, y: True):\n new_image = general.generate_overlay_image(image)\n\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n rows = gray.shape[0]\n cols = gray.shape[1]\n\n radius = self.grid_size // 2\n for x_pixel in range(radius, rows - radius):\n for y_pixel in range(radius, cols - radius):\n self.logger.log_progress_pixel(x_pixel, y_pixel, rows, cols)\n\n if mask(x_pixel, y_pixel):\n r = x_pixel - radius\n c = y_pixel - radius\n grid_size = self.grid_size\n roi = gray[r:r + grid_size, c:c + grid_size]\n\n prediction = self.model.classify(roi)\n if prediction == self.model.skin_label:\n new_image[x_pixel, y_pixel] = [0, 0, 0]\n return new_image\n","repo_name":"StefanSebastian/SkinDetectionInImages","sub_path":"implementation/texture_analysis/detect/per_pixel_detector.py","file_name":"per_pixel_detector.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"35352140241","text":"from flask import Flask\nimport json\nfrom os import path\nfrom flask_cors import CORS\nfrom flask import request\nimport FetchAPIData\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n@app.route(\"/keyword_7days\")\ndef keyword_7days():\n # check if file exists\n if path.exists(\"keyword_7days.txt\"):\n # read file and put contents in variable 'data'\n with open('keyword_7days.txt', 'r') as file:\n data = file.read()\n json_data = json.loads(data)\n return json_data\n # if there is no file, there is no data\n return {}\n\n@app.route(\"/keyword_daysago\")\ndef keyword_daysago():\n json_1daysago = {}\n json_2daysago = {}\n json_3daysago = {}\n there_is_data = False\n\n for i in range(1,4):\n if path.exists(\"keyword_\" + str(i) + \"daysago.txt\"):\n there_is_data = True\n with open('keyword_' + str(i) + 'daysago.txt', 'r') as file:\n data = file.read()\n json_data = json.loads(data)\n if i==1:\n json_1daysago = json_data\n elif i==2:\n json_2daysago = json_data\n else:\n json_3daysago = json_data\n\n if there_is_data:\n return {0: json_1daysago, 1: json_2daysago, 2: json_3daysago}\n\n return {}\n\n@app.route(\"/sentiment\")\ndef sentiment():\n if path.exists(\"sentiment_post.txt\"):\n with open('sentiment_post.txt', 'r', encoding='utf8') as file:\n data = file.read()\n json_data = json.loads(data)\n return json_data\n return {}\n\n@app.route('/new_search', methods=['POST'])\ndef new_search():\n if request.method == 'POST':\n keyword = request.get_json()[\"keyword\"]\n language = request.get_json()[\"language\"]\n FetchAPIData.keyword_7days(keyword, language)\n for i in range(1, 4):\n FetchAPIData.keyword_days(keyword, language, i)\n return json.dumps({'success':True}), 200, {'ContentType':'application/json'}\n\n@app.route('/new_sentiment_search', methods=['POST'])\ndef new_sentiment_search():\n if request.method == 'POST':\n keyword = request.get_json()[\"keyword\"]\n language = request.get_json()[\"language\"]\n FetchAPIData.sentiment(keyword, language)\n return json.dumps({'success':True}), 200, 
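The per-pixel detector above issues one classifier call per pixel, which is where the processing time it warns about goes. If the model can score a batch (an assumed capability here), all grid_size windows can be gathered in one vectorized step; a sketch using NumPy's sliding_window_view (available since NumPy 1.20):

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def windows_for_all_pixels(gray, grid_size):
    # One (grid_size, grid_size) window per valid pixel, without Python loops.
    w = sliding_window_view(gray, (grid_size, grid_size))
    return w.reshape(-1, grid_size, grid_size)

gray = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)
batch = windows_for_all_pixels(gray, 5)
print(batch.shape)  # (3600, 5, 5): one window per interior pixel
```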
{'ContentType':'application/json'}","repo_name":"rospuye/APSEI_Project3","sub_path":"backend/MyAPI.py","file_name":"MyAPI.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35033525939","text":"a=input().split('-')\nL=[]\nfor i in a:\n count=0\n b=i.split('+')\n for j in b:\n count+=int(j)\n L.append(count)\nn=L[0]\nfor i in range(1,len(L)):\n n-=L[i]\nprint(n)","repo_name":"ahnjongin/BOJ","sub_path":"1541.py","file_name":"1541.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33523932912","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Pandas\n# \n# - Methods to deal with tabular data\n# - These methods are to replicate what `dplyr` in R is capable of\n# - The `statsmodels` can download R datasets from https://vincentarelbundock.github.io/Rdatasets/datasets.html\n\n# ## Libraries\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nimport matplotlib\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# ## Importing/Exporting Data\n\n# Importing:\n# \n# - `pd.read_csv(filename)`: From a CSV file\n# - `pd.read_table(filename)`: From a delimited text file (like TSV)\n# - `pd.read_excel(filename)`: From an Excel file\n# - `pd.read_sql(query, connection_object)`: Read from a SQL table/database\n# - `pd.read_json(json_string)`: Read from a JSON formatted string, URL or file.\n# - `pd.read_html(url)`: Parses an html URL, string or file and extracts tables to a list of dataframes\n# - `pd.read_clipboard()`: Takes the contents of your clipboard and passes it to read_table()\n# - `pd.DataFrame(dict)`: From a dict, keys for columns names, values for data as lists\n# - `pd.DataFrame(list of tuples)`: From a list, which includes the records of each row\n# \n# Exporting:\n# \n# - `df.to_csv(filename)` \n# - `df.to_excel(filename)` \n# - `df.to_sql(table_name, connection_object)` \n# - `df.to_json(filename)`\n\n# In[2]:\n\n\nDEMO_DATA_DIR = '../../../RepositoryData/data/titanic/'\niris = sm.datasets.get_rdataset('iris').data\ntitanic = pd.read_csv(DEMO_DATA_DIR+'train.csv')\n\n\n# In[3]:\n\n\nx= [(1,2,3,4),\n (5,6,7,8),\n (9,10,11,12)]\npd.DataFrame(x,columns=[\"A\",\"B\",\"C\",\"D\"])\n\n\n# In[4]:\n\n\nx = {\"A\":[1,2,3,4],\n \"B\":[5,6,7,8],\n \"C\":[9,10,11,12]}\npd.DataFrame(x)\n\n\n# ```{note}\n# When you have data of the **columns**, use **dict**; when you have the data of the **rows**, use **list** as the source data structures of a data frame.\n# ```\n\n# ## Inspecting Data Frame\n\n# - `df.head(n)`: First n rows of the DataFrame\n# - `df.tail(n)`: Last n rows of the DataFrame\n# - `df.shape`: Number of rows and columns\n# - `df.info()`: Index, Datatype and Memory information\n# - `df.describe()`: Summary statistics for numerical columns\n# - `s.value_counts(dropna=False)`: View unique values and counts\n# - `df.apply(pd.Series.value_counts)`: Unique values and counts for all columns\n# - `df.columns`\n# - `df.index`\n# - `df.dtypes`\n# - `df.set_index('column_name')`: Set a column as the index \n\n# In[5]:\n\n\niris.info()\n\n\n# In[6]:\n\n\niris.describe()\n\n\n# In[7]:\n\n\nprint(iris.shape)\niris.head(3)\n\n\n# In[8]:\n\n\ntitanic.tail(3)\n\n\n# In[9]:\n\n\niris['Species'].value_counts()\n\n\n# In[10]:\n\n\ntitanic.apply(pd.Series.value_counts)\n\n\n# In[11]:\n\n\nprint(iris.columns)\nprint(titanic.columns)\nprint(iris.index)\n\n\n# 
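The short BOJ 1541 solution above rests on a greedy argument the file never states: in an expression built only from '+' and '-', the minimum value is reached by summing every '+' run first and subtracting everything after the first '-'. A condensed version with a worked check:

```python
def min_value(expr: str) -> int:
    # Each '-' opens a parenthesized group; '+' terms inside a group are summed.
    groups = [sum(map(int, part.split('+'))) for part in expr.split('-')]
    return groups[0] - sum(groups[1:])

assert min_value("55-50+40") == -35   # 55 - (50 + 40)
```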
In[12]:\n\n\nprint(iris.dtypes)\nprint(titanic.dtypes)\n\n\n# ## Basic Functions\n\n# In[13]:\n\n\n## DataFrame attributes\niris.shape\niris.columns\niris.index\niris.info()\niris.describe()\niris.dtypes # check column data types\n\n\n# ## Subsetting Data Frame\n\n# - `df[col]`: Returns column with label col as Series\n# - `df[[col1, col2]]`: Returns columns as a new DataFrame\n# - `s.iloc[0]`: Selection by position\n# - `s.loc['index_one']`: Selection by index\n# - `df.iloc[0,:]`: First row\n# - `df.iloc[0,0]`: First element of first column\n\n# In[14]:\n\n\niris.loc[:5, 'Species'] # first six rows of 'Species' column\n\n\n# In[15]:\n\n\niris.iloc[:5, 4] # same as above\n\n\n# ## Exploration\n# \n\n# How to perform the key functions provided in R `dplyr`?\n# \n# - `dplyr` Key Verbs\n# - `filter()`\n# - `select()`\n# - `mutate()`\n# - `arrange()`\n# - `summarize()`\n# - `group_by()`\n\n# ### NA Values\n# \n\n# Functions to take care of `NA` values:\n# \n# - `df.isnull()`\n# - `df.notnull()`\n# - `df.dropna()`: Drop rows with null values\n# - `df.dropna(axis=1)`: Drop columns with null values\n# - `df.dropna(axis=1, thresh=n)`: Drop all columns have less than n non-values\n# - `df.fillna(x)`: Replaces all null values with `x`\n# - `s.fillna(s.mean())`: Replace the null values of a Series with its mean score\n\n# - Quick check of the null values in each column\n\n# In[16]:\n\n\ntitanic.isnull().sum()\n\n\n# In[17]:\n\n\ntitanic.dropna(axis=1, thresh=600)\n\n\n# In[18]:\n\n\ntitanic.notnull().sum()\n\n\n# ### Converting Data Types\n\n# - `s.astype(float)`: Convert a Series into a `float` type\n# \n\n# In[19]:\n\n\niris.dtypes\n\n\n# In[20]:\n\n\niris['Species']=iris['Species'].astype('category')\niris.dtypes\n#iris.value_counts(iris['Species']).plot.bar()\n\n\n# ### Pandas-supported Data Types\n# \n# ![pandas-dtypes](../images/pandas-dtypes.png)\n# \n# ([source](https://pbpython.com/pandas_dtypes.html))\n# \n\n# ### Transformation\n# \n\n# - `s.replace(X, Y)`\n\n# In[21]:\n\n\ntitanic.head()\ntitanic.value_counts(titanic['Survived']).plot.bar()\ntitanic.columns\ntitanic.groupby(['Sex','Pclass']).mean()\ntitanic[titanic['Age']<18].groupby(['Sex','Pclass']).mean()\n\n\n# ### `filter()`\n\n# In[22]:\n\n\n## filter\niris[iris['Sepal.Length']>5]\n\n\n# ```{note}\n# When there are more than one filtering condition, put the conditions in parentheses.\n# ```\n\n# In[23]:\n\n\niris[(iris['Sepal.Length']>4) & (iris['Sepal.Width']>5)]\n\n\n# In[24]:\n\n\niris.query('`Sepal.Length`>5')\n\n\n# In[25]:\n\n\niris[(iris['Sepal.Length']>5) & (iris['Sepal.Width']>4)]\n\n\n# ### `arrange()`\n\n# In[26]:\n\n\niris.sort_values(['Species','Sepal.Length'], ascending=[False,True])\n\n\n# ### `select()`\n\n# In[27]:\n\n\n## select\niris[['Sepal.Length', 'Species']]\n\n\n# In[28]:\n\n\n## deselect columns\niris.drop(['Sepal.Length'], axis=1).head()\n\n\n# In[29]:\n\n\niris.filter(['Species','Sepal.Length'])\n\n\n# In[30]:\n\n\niris[['Species','Sepal.Length']]\n\n\n# In[31]:\n\n\n## extract one particular column\nsepal_length = iris['Sepal.Length']\ntype(sepal_length)\n\n\n# ### `mutate()`\n\n# In[32]:\n\n\n## mutate\niris['Species_new'] = iris['Species'].apply(lambda x: len(x))\niris['Species_initial'] = iris['Species'].apply(lambda x: x[:2].upper())\niris\n\n\n# In[33]:\n\n\n## mutate alternative 2\niris.assign(Specias_initial2 = iris['Species'].apply(lambda x: x.upper()))\n\n\n# ### `apply()`, `mutate_if()`\n# \n# - `df.apply(np.mean)`: Apply a function to all columns\n# - `df.apply(np.max,axis=1)`: Apply a function 
to each row\n\n# ```{note}\n# When `apply()` functions to the data frame, the `axis=1` refers to row mutation and `axis=0` refers to column mutation. This is very counter-intuitive for R users.\n# ```\n\n# In[34]:\n\n\niris.head(10)\n\n\n# In[35]:\n\n\niris[['Sepal.Width','Petal.Width']].apply(np.sum, axis=1).head(10)\n\n\n# ### `group_by()` and `summarize()`\n\n# In[36]:\n\n\niris.groupby(by='Species').mean()\n\n\n# In[37]:\n\n\niris.filter(['Species','Sepal.Length']).groupby('Species').agg({'Sepal.Length':['mean','count','std']})\n\n\n# In[38]:\n\n\ntitanic.head()\n\n\n# In[39]:\n\n\ntitanic.groupby(['Pclass','Sex']).agg(np.sum)\n\n\n# In[40]:\n\n\ntitanic.pivot_table(index=['Pclass','Sex'], values=['Survived'], aggfunc=np.sum)\n\n\n# ### `rename()`\n# \n\n# In[41]:\n\n\niris\niris.columns\n\n\n# - Selective renaming column names\n\n# In[42]:\n\n\niris = iris.rename(columns={'Sepal.Length':'SLen'})\niris\n\n\n# - Massive renaming column names\n\n# In[43]:\n\n\niris.rename(columns=lambda x: 'XX'+x)\n\n\n# In[44]:\n\n\ntitanic.head(10)\n\n\n# In[45]:\n\n\ntitanic.set_index('Name').rename(index=lambda x:x.replace(' ',\"_\").upper())\n\n\n# ## Join/Combine Data Frames\n\n# - `df1.append(df2)`: Add the rows in df1 to the end of df2 (columns should be identical) (`rbind()` in R)\n# - `pd.concat([df1, df2],axis=1)`: Add the columns in df1 to the end of df2 (rows should be identical) (`cbind()` in R)\n# - `df1.join(df2,on=col1,how='inner')`: SQL-style join the columns in df1 with the columns on df2 where the rows for col have identical values. 'how' can be one of 'left', 'right', 'outer', 'inner'\n# \n# \n\n# ## Statistics\n\n# - `df.describe()`: Summary statistics for numerical columns\n# - `df.mean()`: Returns the mean of all columns\n# - `df.corr()`: Returns the correlation between columns in a DataFrame\n# - `df.count()`: Returns the number of non-null values in each DataFrame column\n# - `df.max()`: Returns the highest value in each column\n# - `df.min()`: Returns the lowest value in each column\n# - `df.median()`: Returns the median of each column\n# - `df.std()`: Returns the standard deviation of each column\n\n# In[46]:\n\n\ntitanic.count()\n\n\n# In[47]:\n\n\ntitanic.median()\n\n\n# ## Generic Functions\n\n# - `pandas.pivot_table()`\n# - `pandas.crosstab()`\n# - `pandas.cut()`\n# - `pandas.qcut()`\n# - `pandas.merge()`\n# - `pandas.get_dummies()`\n\n# ## References\n# \n# - [Python for Data Analysis](https://www.amazon.com/gp/product/1491957662/ref=as_li_tl_nodl?ie=UTF8&camp=1789&creative=9325&creativeASIN=1491957662&linkCode=as2&tag=ledoux-20&linkId=eff92247940c967299befaed855c580a)\n# - [Python for Data Analysis GitHub](https://github.com/wesm/pydata-book)\n# - [How to get sample datasets in Python](https://stackoverflow.com/questions/28417293/sample-datasets-in-pandas)\n# \n\n# ## Requirements\n\n# In[48]:\n\n\n# %load get_modules.py\nimport pkg_resources\nimport types\ndef get_imports():\n for name, val in globals().items():\n if isinstance(val, types.ModuleType):\n # Split ensures you get root package, \n # not just imported function\n name = val.__name__.split(\".\")[0]\n\n elif isinstance(val, type):\n name = val.__module__.split(\".\")[0]\n\n # Some packages are weird and have different\n # imported names vs. system/pip names. Unfortunately,\n # there is no systematic way to get pip names from\n # a package's imported name. 
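The groupby/agg calls in the notebook above run on the iris and titanic frames, which are not reproduced here; a tiny self-contained frame shows the same group_by-then-summarize pattern on invented values:

```python
import pandas as pd

df = pd.DataFrame({
    "Species": ["setosa", "setosa", "virginica", "virginica"],
    "SepalLength": [5.1, 4.9, 6.3, 5.8],
})
# dplyr-style: group_by(Species) then summarize mean and count.
print(df.groupby("Species").agg({"SepalLength": ["mean", "count"]}))
```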
You'll have to add\n # exceptions to this list manually!\n poorly_named_packages = {\n \"PIL\": \"Pillow\",\n \"sklearn\": \"scikit-learn\"\n }\n if name in poorly_named_packages.keys():\n name = poorly_named_packages[name]\n\n yield name\n \n \nimports = list(set(get_imports()))\n\n# The only way I found to get the version of the root package\n# from only the name of the package is to cross-check the names \n# of installed packages vs. imported packages\nrequirements = []\nfor m in pkg_resources.working_set:\n if m.project_name in imports and m.project_name!=\"pip\":\n requirements.append((m.project_name, m.version))\n\nfor r in requirements:\n print(\"{}=={}\".format(*r))\n\n","repo_name":"alvinntnu/python-notes","sub_path":"_build/jupyter_execute/python-basics/pandas.py","file_name":"pandas.py","file_ext":"py","file_size_in_byte":10131,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"16493502501","text":"from collections import deque\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def maxLevelSum(self, root: TreeNode) -> int:\n if root == None:\n return 0\n result = root.val\n l = []\n q = deque()\n q.append(root)\n level = 1\n while len(q) > 0:\n count = len(q)\n summ = 0\n while (count > 0):\n temp = q.popleft()\n summ += temp.val\n if (temp.left):\n q.append(temp.left)\n if (temp.right):\n q.append(temp.right)\n count -= 1\n print(summ, level)\n l.append(summ)\n level += 1\n return l.index(max(l)) + 1","repo_name":"ddaarrrryyll/leetcode","sub_path":"maximum-level-sum-of-a-binary-tree/maximum-level-sum-of-a-binary-tree.py","file_name":"maximum-level-sum-of-a-binary-tree.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27611807936","text":"import argparse\nimport re\n\nfrom torch.optim import Adam\n\nimport core.replay\nimport environments\nfrom core import logpath, policies as policies\nfrom core.manager import TrainingManager\nfrom models.linear_uncertainty import OnlineVariance, OnlineVarianceMulti\nfrom models.nn.losses import QLoss, SuccessorLosses, UBELoss\nfrom models.temporal_difference import QNetwork, SF, BootQNetwork\nfrom run.inner_loops import q_train_iter, sf_train_iter, ube_train_iter\n\n\nclass RunArgParser(argparse.ArgumentParser):\n def __init__(self):\n super().__init__()\n self.add_argument('algorithm', choices=('sf', 'ube', 'bdqn', 'boot'))\n self.add_argument('--env', default='grid')\n self.add_argument('--env-size', type=int, default=5)\n\n self.add_argument('--n-episodes', default=100, type=int)\n\n self.add_argument('--policy-arg', type=float, default=1.0)\n self.add_argument('--n-grad-steps', default=10, type=int)\n\n self.add_argument('--lr', default=1e-3, type=float)\n self.add_argument('--target-update-factor', default=1.0, type=float)\n self.add_argument('--discount-factor', default=1.0, type=float)\n\n self.add_argument('--batch-size', default=100, type=int)\n self.add_argument('--buffer-size', default=10000, type=int)\n\n self.add_argument('--feature-size', default=10, type=int)\n self.add_argument('--use-network', action='store_true')\n\n self.add_argument('--prior', default=1.0, type=float)\n self.add_argument('--p-bootstrap', default=0.5, type=float)\n\n self.add_argument('--print-frequency', default=1, type=int)\n\n self.add_argument('--name', 
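The level-sum solution above collects every level total in a list and takes the argmax at the end; an equivalent single-pass variant keeps only the running best. Node below is a minimal stand-in for the LeetCode TreeNode:

```python
from collections import deque

class Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def max_level_sum(root) -> int:
    best_level, best_sum, level = 0, float("-inf"), 1
    q = deque([root] if root else [])
    while q:
        level_total = 0
        for _ in range(len(q)):            # exactly one level per pass
            node = q.popleft()
            level_total += node.val
            if node.left:
                q.append(node.left)
            if node.right:
                q.append(node.right)
        if level_total > best_sum:
            best_level, best_sum = level, level_total
        level += 1
    return best_level

assert max_level_sum(Node(1, Node(7), Node(0))) == 2   # level 2 sums to 7
```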
default='unnamed', type=str)\n self.add_argument('--exit-on-done', action='store_true')\n self.add_argument('--verbose', action='store_true')\n\n self.add_argument('--debug', action='store_true')\n self.add_argument('--export-debug', action='store_true')\n self.add_argument('--test_mode', action='store_true')\n\n\n_re_parse_fcall = re.compile(r'([a-z\\-_\\d]+)(?:\\((.+)\\))?')\n\n\ndef config_from_args(args):\n _ns = {'n_episodes': args.n_episodes,\n 'n_grad_steps': args.n_grad_steps,\n 'target_update_factor': args.target_update_factor,\n 'print_frequency': args.print_frequency,\n \"buffer_size\": args.buffer_size,\n 'debug': args.debug,\n 'discount_factor': args.discount_factor,\n 'prior': args.prior}\n\n # set up manager\n manager = TrainingManager(logpath, args.name, args.test_mode).register_args(args)\n _ns['manager'] = manager\n\n try:\n # set up data set & env\n _ns.update(get_env(args.env, args.env_size, args.verbose))\n\n _ns.update(get_data(args.batch_size, _ns['buffer_size'], _ns['env'].state_size, args.algorithm, args.policy_arg,\n args.p_bootstrap))\n\n # set up model\n _ns.update(\n get_model(args.algorithm, _ns['env'], args.feature_size, args.use_network, policy_arg=args.policy_arg,\n prior=args.prior))\n manager.register_modules([_ns['model']])\n\n # set up policy & uncertainty\n _ns.update(get_uncertainty(args.algorithm, _ns['model'],\n _ns['env'].action_size, args.prior))\n _ns.update(get_policy(args.algorithm, args.policy_arg, _ns['env'].action_size,\n _ns['model'], _ns['uncertainty']))\n\n # set up loss & optimiser\n _ns.update(get_loss(args.algorithm, _ns['model'], _ns['policy'], _ns['discount_factor']))\n _ns.update(get_optim(set(_ns['model'].parameters()), args.lr))\n\n # set up training loop\n _ns.update(get_train_iter(_ns['algorithm']))\n\n except:\n manager.delete_log()\n raise\n\n return argparse.Namespace(**_ns)\n\n\ndef get_loss(algorithm, model, policy, discount_factor):\n if algorithm in ('bdqn', 'boot'):\n loss = QLoss(model, discount_factor)\n elif algorithm == 'sf':\n loss = SuccessorLosses(model, policy, discount_factor)\n elif algorithm == 'ube':\n loss = UBELoss(model, policy, discount_factor)\n else:\n raise ValueError(f'get_loss: algorithm {algorithm} not recognised.')\n return {\"loss\": loss}\n\n\ndef get_optim(parameters, lr):\n return {\"optimiser\": Adam(parameters, lr=lr)}\n\n\ndef get_data(batch_size, buffer_size, state_size, algorithm, n_ensembles, p_bootstrap):\n if algorithm == 'boot':\n replay_dataset = core.replay.EnsembleDataset(max_size=buffer_size, n_ensembles=n_ensembles, p=p_bootstrap)\n else:\n replay_dataset = core.replay.UniformDatasetOneHot(state_size, max_size=buffer_size)\n return {'dataset': replay_dataset,\n 'sample_fn': core.replay.make_sample_function(replay_dataset, batch_size, algorithm)}\n\n\ndef get_env(env_name, env_arg, verbose):\n if env_name == 'tree':\n env = environments.EnvironmentTree(env_arg)\n elif env_name == 'grid':\n env = environments.EnvironmentGrid(env_arg, verbose)\n else:\n raise ValueError(f'get_env: env {env_name} not recognised.')\n return {\"env\": env, **env.default_args()}\n\n\ndef get_model(algorithm, env, feature_size, use_network, policy_arg, prior):\n if algorithm in ('bdqn', 'ube'):\n model = QNetwork(env.state_size, env.action_size, feature_size)\n elif algorithm == 'sf':\n model = SF(env.state_size, env.action_size, feature_size, use_network)\n elif algorithm == 'boot':\n model = BootQNetwork(env.state_size, env.action_size, feature_size,\n prior_weight=prior, n_heads=int(policy_arg))\n 
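config_from_args above assembles its configuration by merging the dicts returned from the get_* helpers into a single argparse.Namespace. A stripped-down sketch of that assembly idiom; the helper bodies here are invented placeholders, not the repository's helpers:

```python
import argparse

def get_env(name):
    return {"env": f"<{name} env>", "state_size": 16}

def get_model(feature_size):
    return {"model": f"<model fs={feature_size}>"}

ns = {}
ns.update(get_env("grid"))    # each helper contributes named objects
ns.update(get_model(10))
config = argparse.Namespace(**ns)
print(config.env, config.model, config.state_size)
```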
else:\n raise ValueError(f'get_model: algorithm {algorithm} not recognised.')\n return {\"model\": model, 'algorithm': algorithm}\n\n\ndef get_policy(algorithm, policy_arg, action_size, model, uncertainty):\n if algorithm in ('sf', 'ube', 'bdqn'):\n policy = policies.ThompsonPolicy(action_size, model.q_fn, model.compute_q_fn_external, model.get_weights,\n uncertainty, policy_arg, model.ube_fn if algorithm == 'ube' else None)\n elif algorithm == 'boot':\n policy = policies.BootstrapPolicy(action_size, model, model.next_head)\n return {\"policy\": policy, \"test_policy\": policies.GreedyPolicy(action_size, model)}\n\n\ndef get_uncertainty(algorithm, model, action_size, variance_prior):\n if algorithm.startswith('sf'):\n uncertainty = OnlineVariance(\n model.feature_size, train_featuriser=model.local_features,\n test_featuriser=model.global_features, prior_variance=variance_prior, bias=False)\n model.register_weights(uncertainty.mean_vector)\n elif algorithm in ('ube', 'bdqn'):\n uncertainty = OnlineVarianceMulti(\n model.feature_size, action_size, model.local_features, variance_prior, bias=False)\n else:\n uncertainty = None\n return {\"uncertainty\": uncertainty}\n\n\ndef get_train_iter(algorithm):\n if algorithm in ('bdqn', 'boot'):\n train_iter = q_train_iter\n elif algorithm == 'sf':\n train_iter = sf_train_iter\n elif algorithm == 'ube':\n train_iter = ube_train_iter\n else:\n raise ValueError(f'get_train_iter: algorithm {algorithm} not recognised.')\n return {\"train_iter\": train_iter}\n\n\ndefault_config, _ = RunArgParser().parse_known_args(['bdqn'])\n","repo_name":"DavidJanz/successor_uncertainties_tabular","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7349,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"22034660525","text":"import os, sys\nfrom guthoms_helpers.common_helpers.ColorHelper import ColorHelper\nimport math\nimport numpy as np\nimport torch\n\nfrom guthoms_helpers.filesystem.DirectoryHelper import DirectoryHelper\n\n#path stuff\nhomedir = os.environ['HOME']\nprojectDir = os.path.dirname(os.path.realpath(__file__))\n\n# append third party stuff\nthirdpartyBaseLinePath = os.path.join(projectDir, \"thirdparty/humanpose/\")\nthirdpartyBaseLineLibLibPath = os.path.join(projectDir, \"thirdparty/human-pose-estimation.pytorch/lib\")\nthirdpartyYoloPath = os.path.join(projectDir, \"thirdparty/yolov3/\")\n\nif not DirectoryHelper.DirExists(thirdpartyBaseLinePath) or not DirectoryHelper.DirExists(thirdpartyYoloPath):\n raise Exception(\"Can't find thirdparty component(s) -> git submodule update --init --recursive\")\n\nsys.path.append(thirdpartyYoloPath)\nsys.path.append(thirdpartyBaseLinePath)\nsys.path.append(thirdpartyBaseLineLibLibPath)\nprint(sys.path)\n\npretrainedModelPath = os.path.join(projectDir, \"ropose/TrainedModels\")\n\nevalPath = os.path.join(projectDir, \"../evaluation/\")\nDirectoryHelper.CreateIfNotExist(evalPath)\n\nshowExamplePath = os.path.join(projectDir, \"../examples/\")\nDirectoryHelper.CreateIfNotExist(showExamplePath)\n\nweightsDir = os.path.join(projectDir, \"../models/\")\nDirectoryHelper.CreateIfNotExist(weightsDir)\n\noutputDir = os.path.join(projectDir, \"../output/\")\nDirectoryHelper.CreateIfNotExist(outputDir)\n\ntrainedModelBasePath = os.path.join(projectDir, \"../trained/\")\ndataPath = trainedModelBasePath\nDirectoryHelper.CreateIfNotExist(trainedModelBasePath)\n\nroposeDatasetDir = os.path.join(projectDir, 
\"../datasets/\")\nDirectoryHelper.CreateIfNotExist(roposeDatasetDir)\n\nrealDataPath = os.path.join(roposeDatasetDir, \"real_train/\")\nroposeEvalDataPath = os.path.join(roposeDatasetDir, \"colropose_eval/\")\nroposeTestDataPath = os.path.join(roposeDatasetDir, \"real_test/\")\n\nroPoseNetWeights = os.path.join(weightsDir, \"ropose_net.pt\")\nroPoseYoloWeights = os.path.join(weightsDir, \"ropose_yolo.pt\")\noriginalHumanPoseModelPath = os.path.join(weightsDir, \"pose_resnet_152_256x192.pth.tar\")\n\nroposeFineTuneDatasets = [os.path.join(roposeEvalDataPath, \"colropose_eval_006\")]\n\nsimDataPath = os.path.join(roposeDatasetDir, \"sim/\")\n\n#cocoStuff\ncocoPath = homedir + \"/coco/\"\ncocoDatasetTrain = [\"train2017\"]\ncocoDatasetEval = [\"val2017\"]\n\n# Pose model stuff\nlinkOrder = [\"base_link\",\n \"shoulder_link\",\n \"upper_arm_link\",\n \"forearm_link\",\n \"wrist_1_link\",\n \"wrist_2_link\",\n \"wrist_3_link\"]\n\nropPoseColors = ColorHelper.GetUniqueColors(8)\nropPoseColorsGT = [[0, 0, 255]] * 7\n\nhumanColors = ColorHelper.GetUniqueColors(17)\nhumanColorsGT = [[0, 255, 0]] * 17\n\nhumanPairMap = [{0: 1}, #nose - left_eye\n {1: 3}, #left_eye - left_ear\n {0: 2}, #nose - right_eye\n {2: 4}, #right_eye - right_ear\n {3: 5}, #left_ear - left_shoulder\n {4: 6}, #right_ear - right_shoulder\n\n {6: 8}, # right_shoulder - right_elbow\n {8: 10}, # right_elbow - right_wrist\n\n {5: 7}, # left_shoulder - left_elbow\n {7: 9}, # left_elbow - left_wrist\n\n {6: 12}, # right_shoulder - right_hip\n {5: 11}, # left_shoulder - left_hip\n\n {12: 14}, # right_hip - right_knee\n {14: 16}, # right_knee - right_ankle\n\n {11: 13}, # left_hip - left_knee\n {13: 15}, # left_knee - left_ankle\n\n {5: 6}, # left_shoulder - right_shoulder\n {11: 12}, # left_hip - right_hip\n ]\n\n#common net stuff\nuseHSVColorspace = False\nuseSpatialDistributionLoss = False\nspatialDistributionLossTH = 0.75\n# mseHeatmapLoss, spatialDistributionLoss\nlossWeights = (1.0, 1.0)\nsdlEpochs = 25\nadvancedFeatureStackSize = 128\nrawInputRes = [720, 1280]\ninputRes = [256, 192]\noutputRes = [64, 48]\nmaxDistance = math.sqrt(math.pow(outputRes[0], 2) + math.pow(outputRes[1], 2))\npaddingValue = 0.0\ndownsampleFactor = inputRes[0]/outputRes[0]\n\nincludeUpsampling = False\nif includeUpsampling:\n outputRes = inputRes\n\nloadGrayscale = False\nropose_detectionLayers = linkOrder.__len__() + 1 #XJoints + background\n#human pose detection stuff\nhuman_detectionLayers = 17 + 1 #XJoints + background\n\n#collropoe stuff\ncolropose_freezeFeatureModelEpoch = -1 #20\ncolropose_freezeRoposeModelEpoch = -1 #100\n\n#ropose CASE model stuff\ntrainFeatureModel = True\ntrainBootstrapStage = False\nuseJointConnectionModel = False\ntrainJointConnectionModel = True\njointConnectionFilterSize = 64\nrefineStageCount = 2\njointConnectionStageCount = 1\n\n# define the final comtypes float16 vs. 
float32 etc.\ncompType = torch.float16\ncompTypeNP = np.float16\n\n#training stuff\nepochs = 150\n\n#simulation vs real data\nmixWithZeroHumans = False\nmixWithZeroHuamnsFactor = 0.1\nmixRealWithSimulation = False\nmixSimulationFactor = 0.25\n\n#Random erasing\nuseRandomErasing = False\nrandomErasingProb = 0.10\nrandomErasingMaxObjectCount = 2\nrandomErasingMaxAreaCover = 0.10\n\nstartLearningRate = 1e-3\nchangeLearningRate = True\nlearningRateEpochDecay = 0.005\nbatchSize = 1\ntestBatchSize = batchSize\ngpuList = ['/gpu:0']\n\n#final prediction config\nroposeRejectionTH = 0.70\nhumanRejectionTH = 0.90\n\n#augmentation\nonTheFlyBackgroundAugmentation = False\nonTheFlyForegroundAugmentation = False\nforgroundAugmentationMaxCount = 5\nonTheFlyForegroundAugmentationProb = 0.15\nonTheFlyAugmentation = True\nonTheFlyAugmentationProbability = 0.7\n\n#ground truth config\nif includeUpsampling:\n gaussSigma = 7.0\nelse:\n gaussSigma = 2.0\n\npafWidth = 3\nheatmapMaxValue = 1.0\nheatmapMinValue = 0.0\n\n#input config\nkeepAspectRatio = True\ncropInputToBB = True\n\n#3d joint inputs\nuse3DJointInput = False\nrobotMaxWorkSpace = 2.0\n\n#yolo stuff\nyolo_BatchSize = 1\nyolo_Epochs = 100\nyolo_onTheFlyAugmentation = True\nyolo_InputSize = [416, 416]# has to be multiple of 32 (Grid size of the used yolo input)\n#yolo_InputSize = [320, 320]\n#yolo_InputSize = [224, 224] #TinyYolo\n#yolo_InputSize = [160, 160]\nyolo_StartLearningRate = 0.001\nyolo_IgnoreThreshold = 0.5 #conf_thres\nyolo_NonMaxSurThreshold = 0.1 #iou_thres\n\n#conf_thres=0.001\n#iou_thres=0.6, # for nms\n\nyolo_Anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]\n\ncoco_classes = {0: {'supercategory': 'unknown', 'id': 0, 'name': 'unknown'},\n 1: {'supercategory': 'person', 'id': 1, 'name': 'person'},\n 2: {'supercategory': 'vehicle', 'id': 2, 'name': 'bicycle'},\n 3: {'supercategory': 'vehicle', 'id': 3, 'name': 'car'},\n 4: {'supercategory': 'vehicle', 'id': 4, 'name': 'motorcycle'},\n 5: {'supercategory': 'vehicle', 'id': 5, 'name': 'airplane'},\n 6: {'supercategory': 'vehicle', 'id': 6, 'name': 'bus'},\n 7: {'supercategory': 'vehicle', 'id': 7, 'name': 'train'},\n 8: {'supercategory': 'vehicle', 'id': 8, 'name': 'truck'},\n 9: {'supercategory': 'vehicle', 'id': 9, 'name': 'boat'},\n 10: {'supercategory': 'outdoor', 'id': 10, 'name': 'traffic light'},\n 11: {'supercategory': 'outdoor', 'id': 11, 'name': 'fire hydrant'},\n 12: {'supercategory': 'outdoor', 'id': 13, 'name': 'stop sign'},\n 13: {'supercategory': 'outdoor', 'id': 14, 'name': 'parking meter'},\n 14: {'supercategory': 'outdoor', 'id': 15, 'name': 'bench'},\n 15: {'supercategory': 'animal', 'id': 16, 'name': 'bird'},\n 16: {'supercategory': 'animal', 'id': 17, 'name': 'cat'},\n 17: {'supercategory': 'animal', 'id': 18, 'name': 'dog'},\n 18: {'supercategory': 'animal', 'id': 19, 'name': 'horse'},\n 19: {'supercategory': 'animal', 'id': 20, 'name': 'sheep'},\n 20: {'supercategory': 'animal', 'id': 21, 'name': 'cow'},\n 21: {'supercategory': 'animal', 'id': 22, 'name': 'elephant'},\n 22: {'supercategory': 'animal', 'id': 23, 'name': 'bear'},\n 23: {'supercategory': 'animal', 'id': 24, 'name': 'zebra'},\n 24: {'supercategory': 'animal', 'id': 25, 'name': 'giraffe'},\n 25: {'supercategory': 'accessory', 'id': 27, 'name': 'backpack'},\n 26: {'supercategory': 'accessory', 'id': 28, 'name': 'umbrella'},\n 27: {'supercategory': 'accessory', 'id': 31, 'name': 'handbag'},\n 28: {'supercategory': 'accessory', 'id': 32, 'name': 
'tie'},\n 29: {'supercategory': 'accessory', 'id': 33, 'name': 'suitcase'},\n 30: {'supercategory': 'sports', 'id': 34, 'name': 'frisbee'},\n 31: {'supercategory': 'sports', 'id': 35, 'name': 'skis'},\n 32: {'supercategory': 'sports', 'id': 36, 'name': 'snowboard'},\n 33: {'supercategory': 'sports', 'id': 37, 'name': 'sports ball'},\n 34: {'supercategory': 'sports', 'id': 38, 'name': 'kite'},\n 35: {'supercategory': 'sports', 'id': 39, 'name': 'baseball bat'},\n 36: {'supercategory': 'sports', 'id': 40, 'name': 'baseball glove'},\n 37: {'supercategory': 'sports', 'id': 41, 'name': 'skateboard'},\n 38: {'supercategory': 'sports', 'id': 42, 'name': 'surfboard'},\n 39: {'supercategory': 'sports', 'id': 43, 'name': 'tennis racket'},\n 40: {'supercategory': 'kitchen', 'id': 44, 'name': 'bottle'},\n 41: {'supercategory': 'kitchen', 'id': 46, 'name': 'wine glass'},\n 42: {'supercategory': 'kitchen', 'id': 47, 'name': 'cup'},\n 43: {'supercategory': 'kitchen', 'id': 48, 'name': 'fork'},\n 44: {'supercategory': 'kitchen', 'id': 49, 'name': 'knife'},\n 45: {'supercategory': 'kitchen', 'id': 50, 'name': 'spoon'},\n 46: {'supercategory': 'kitchen', 'id': 51, 'name': 'bowl'},\n 47: {'supercategory': 'food', 'id': 52, 'name': 'banana'},\n 48: {'supercategory': 'food', 'id': 53, 'name': 'apple'},\n 49: {'supercategory': 'food', 'id': 54, 'name': 'sandwich'},\n 50: {'supercategory': 'food', 'id': 55, 'name': 'orange'},\n 51: {'supercategory': 'food', 'id': 56, 'name': 'broccoli'},\n 52: {'supercategory': 'food', 'id': 57, 'name': 'carrot'},\n 53: {'supercategory': 'food', 'id': 58, 'name': 'hot dog'},\n 54: {'supercategory': 'food', 'id': 59, 'name': 'pizza'},\n 55: {'supercategory': 'food', 'id': 60, 'name': 'donut'},\n 56: {'supercategory': 'food', 'id': 61, 'name': 'cake'},\n 57: {'supercategory': 'furniture', 'id': 62, 'name': 'chair'},\n 58: {'supercategory': 'furniture', 'id': 63, 'name': 'couch'},\n 59: {'supercategory': 'furniture', 'id': 64, 'name': 'potted plant'},\n 60: {'supercategory': 'furniture', 'id': 65, 'name': 'bed'},\n 61: {'supercategory': 'furniture', 'id': 67, 'name': 'dining table'},\n 62: {'supercategory': 'furniture', 'id': 70, 'name': 'toilet'},\n 63: {'supercategory': 'electronic', 'id': 72, 'name': 'tv'},\n 64: {'supercategory': 'electronic', 'id': 73, 'name': 'laptop'},\n 65: {'supercategory': 'electronic', 'id': 74, 'name': 'mouse'},\n 66: {'supercategory': 'electronic', 'id': 75, 'name': 'remote'},\n 67: {'supercategory': 'electronic', 'id': 76, 'name': 'keyboard'},\n 68: {'supercategory': 'electronic', 'id': 77, 'name': 'cell phone'},\n 69: {'supercategory': 'appliance', 'id': 78, 'name': 'microwave'},\n 70: {'supercategory': 'appliance', 'id': 79, 'name': 'oven'},\n 71: {'supercategory': 'appliance', 'id': 80, 'name': 'toaster'},\n 72: {'supercategory': 'appliance', 'id': 81, 'name': 'sink'},\n 73: {'supercategory': 'appliance', 'id': 82, 'name': 'refrigerator'},\n 74: {'supercategory': 'indoor', 'id': 84, 'name': 'book'},\n 75: {'supercategory': 'indoor', 'id': 85, 'name': 'clock'},\n 76: {'supercategory': 'indoor', 'id': 86, 'name': 'vase'},\n 77: {'supercategory': 'indoor', 'id': 87, 'name': 'scissors'},\n 78: {'supercategory': 'indoor', 'id': 88, 'name': 'teddy bear'},\n 79: {'supercategory': 'indoor', 'id': 89, 'name': 'hair drier'},\n 80: {'supercategory': 'indoor', 'id': 90, 'name': 'toothbrush'},\n 81: {'supercategory': 'robot', 'id': 100, 'name': 'ropose robot'}}\n\nyolo_cocoClassMap = {\n 1: 0,\n 2: 1,\n 3: 2,\n 4: 3,\n 5: 4,\n 6: 5,\n 7: 6,\n 8: 7,\n 9: 8,\n 
10: 9,\n 11: 10,\n 13: 11,\n 14: 12,\n 15: 13,\n 16: 14,\n 17: 15,\n 18: 16,\n 19: 17,\n 20: 18,\n 21: 19,\n 22: 20,\n 23: 21,\n 24: 22,\n 25: 23,\n 27: 24,\n 28: 25,\n 31: 26,\n 32: 27,\n 33: 28,\n 34: 29,\n 35: 30,\n 36: 31,\n 37: 32,\n 38: 33,\n 39: 34,\n 40: 35,\n 41: 36,\n 42: 37,\n 43: 38,\n 44: 39,\n 46: 40,\n 47: 41,\n 48: 42,\n 49: 43,\n 50: 44,\n 51: 45,\n 52: 46,\n 53: 47,\n 54: 48,\n 55: 49,\n 56: 50,\n 57: 51,\n 58: 52,\n 59: 53,\n 60: 54,\n 61: 55,\n 62: 56,\n 63: 57,\n 64: 58,\n 65: 59,\n 67: 60,\n 70: 61,\n 72: 62,\n 73: 63,\n 74: 64,\n 75: 65,\n 76: 66,\n 77: 67,\n 78: 68,\n 79: 69,\n 80: 70,\n 81: 71,\n 82: 72,\n 84: 73,\n 85: 74,\n 86: 75,\n 87: 76,\n 88: 77,\n 89: 78,\n 90: 79,\n 100: 80,\n}\n\ncoco_catIDs = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',\n 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',\n 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush']\n\ntinyYoloClasses = {\n 0: {'supercategory': 'unknown', 'id': 0, 'name': 'unknown'},\n 1: {'supercategory': 'person', 'id': 1, 'name': 'person'},\n 2: {'supercategory': 'robot', 'id': 2, 'name': 'robot'}\n}\n\ntinyYoloClassMap = {1: 0,\n 2: 1}\n\nyolo_FromScratchParams = os.path.join(projectDir, \"yolo_cfg/hyp.scratch.yaml\")\nyolo_FinetuneParams = os.path.join(projectDir, \"yolo_cfg/hyp.finetune.yaml\")\nyolo_Classes = tinyYoloClasses\nyoloClassMap = tinyYoloClassMap\nyolo_RoposeClassNum = 1\nyolo_ConfigFilePath = os.path.join(projectDir, \"yolo_cfg/yolov3_colropose.cfg\")\n\nyolo_HumanClassNum = 0\nyolo_MaxBoxCount = 50","repo_name":"guthom/ropose","sub_path":"ropose/pytorch_config.py","file_name":"pytorch_config.py","file_ext":"py","file_size_in_byte":16358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12001891199","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\n\nfrom mosta.api.serializers.sim import SimSerializer\nfrom mosta.phone.models import CallHistory, Sim\n\n\nclass CallHistorySerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.email')\n issuer = SimSerializer(read_only=True)\n\n class Meta:\n model = CallHistory\n fields = (\n 'owner',\n 'issuer',\n 'source_number',\n 'destination_number',\n 'started',\n 'ended',\n 'direction',\n 'hangup_reason'\n )\n depth = 1\n\n\nclass CallHistoryCreateSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.email')\n issuer = SimSerializer()\n\n def create(self, validated_data):\n sim_label = validated_data.pop('issuer')['label']\n sim = get_object_or_404(Sim, owner=self.context['request'].user, label=sim_label)\n call_history = CallHistory.objects.create(\n owner=self.context['request'].user,\n issuer=sim,\n source_number=validated_data.get('source_number'),\n 
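The hand-maintained yolo_cocoClassMap that closes above pairs sparse COCO category ids with contiguous 0-based indices. The same table can be derived from the class dictionary instead of being written out by hand; a sketch over a three-entry stand-in for coco_classes, keeping its convention that key 0 is the reserved 'unknown' entry:

```python
coco_classes = {0: {"id": 0, "name": "unknown"},
                1: {"id": 1, "name": "person"},
                2: {"id": 2, "name": "bicycle"},
                81: {"id": 100, "name": "ropose robot"}}

# Skip the 'unknown' entry, then number the remaining ids contiguously.
class_map = {info["id"]: idx
             for idx, info in enumerate(
                 v for k, v in sorted(coco_classes.items()) if k != 0)}
print(class_map)  # {1: 0, 2: 1, 100: 2}, matching the hand-written pattern
```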
destination_number=validated_data.get('destination_number'),\n started=validated_data.get('started'),\n ended=validated_data.get('ended'),\n direction=validated_data.get('direction'),\n hangup_reason=validated_data.get('hangup_reason')\n )\n return call_history\n\n class Meta:\n model = CallHistory\n fields = (\n 'owner',\n 'issuer',\n 'source_number',\n 'destination_number',\n 'started',\n 'ended',\n 'direction',\n 'hangup_reason'\n )\n depth = 1\n","repo_name":"protux/Mobile_State_Server","sub_path":"src/mosta/api/serializers/call_history.py","file_name":"call_history.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23524901697","text":"import requests\nimport aiohttp\n\nSLOTS = {}\n\ndef calculate_number_of_tokens(line):\n return len(line) / 2.7\n\ndef get_user_name(user):\n return user.username or ((user.first_name or \"\") + \" \" + (user.last_name or \"\"))\n\nasync def prepare_prompt(messages, active_prompt, model, add_persona=True, chat=None):\n chat_log = \"\"\n current_tokens = 0\n persona_name = active_prompt['persona_name']\n\n if chat is None:\n chat = messages[-1].chat\n\n if chat.type == \"private\":\n base_prompt = model['private_base_prompt'].replace(\"{{char}}\", persona_name)\\\n .replace(\"{{username}}\", chat.username or \"\")\\\n .replace(\"{{first_name}}\", chat.first_name or \"\")\\\n .replace(\"{{last_name}}\", chat.last_name or \"\")\\\n .replace(\"{{bio}}\", chat.bio or \"\")\n else:\n base_prompt = model['group_base_prompt'].replace(\"{{char}}\", persona_name)\\\n .replace(\"{{room_title}}\", chat.title or \"\")\\\n .replace(\"{{room_description}}\", chat.description or \"\")\n prompt_calc = f\"{base_prompt}\\n{model['log_start']}\\n{model['user_prepend']}{persona_name}{model['user_append']}\"\n initial_prompt_tokens = calculate_number_of_tokens(prompt_calc)\n max_tokens = model['max_tokens'] - initial_prompt_tokens\n\n chat_log_lines = []\n seen_info = set()\n\n for msg in messages:\n name = get_user_name(msg.from_user)\n # check if the message is from our telegram bot\n # if name == bot.get_me().username:\n # name = persona_name\n # if name == active_prompt.users[0].username:\n # name = user_name\n # elif name == active_prompt.users[1].username:\n # name = persona_name\n if (msg.reply_to_message is not None):\n chat_log_lines.append(f\"{model['user_prepend']}{name} (in reply to {get_user_name(msg.reply_to_message.from_user)}){model['user_append']}{msg.text}\")\n else:\n chat_log_lines.append(f\"{model['user_prepend']}{name}{model['user_append']}{msg.text}\")\n\n for line in reversed(chat_log_lines):\n line_tokens = calculate_number_of_tokens(line)\n\n # matched_entries = find_matches(line)\n # info_tokens = 0\n # info_text = ''\n # for entry in matched_entries:\n # if entry not in seen_info:\n # formatted_entry = f\"### INFO: {entry}\"\n # info_tokens += calculate_number_of_tokens(formatted_entry)\n # info_text += f\"{formatted_entry}\\n\"\n # seen_info.add(entry)\n\n if (current_tokens + line_tokens) <= max_tokens:\n chat_log = f\"{model['line_separator']}{line}\\n{chat_log}\"\n current_tokens += line_tokens\n\n # if info_text:\n # print(\"adding info text\", info_text)\n # chat_log = f\"{info_text}{chat_log}\"\n # current_tokens += info_tokens\n else:\n break\n \n if add_persona:\n return f\"{base_prompt}\\n{model['log_start']}\\n{chat_log}{model['line_separator']}{model['user_prepend']}{persona_name} (in reply to 
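prepare_prompt above fills the context window by walking the chat log newest-first and prepending lines until the estimated token budget is spent, so truncation always drops the oldest messages. A standalone sketch of that loop, reusing the same rough one-token-per-2.7-characters estimate:

```python
def truncate_log(lines, max_tokens):
    kept, used = [], 0.0
    for line in reversed(lines):          # newest message first
        cost = len(line) / 2.7            # same rough token estimate as above
        if used + cost > max_tokens:
            break
        kept.insert(0, line)              # restore chronological order
        used += cost
    return "\n".join(kept)

log = ["alice: hi", "bob: hello there", "alice: how are you?"]
print(truncate_log(log, max_tokens=12))   # only the newest line fits
```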
{get_user_name(messages[-1].from_user)}){model['user_append']}\"\n else:\n return f\"{base_prompt}\\n{model['log_start']}\\n{chat_log}{model['line_separator']}\"\n\nasync def complete(prompt, model, stop_sequences, length=None, chat_id=\"0\"):\n print(prompt)\n params = {\n \"prompt\": prompt,\n \"temperature\": model['temperature'],\n \"top_p\": model['top_p'],\n \"top_k\": model['top_k'],\n }\n\n if chat_id in SLOTS:\n session, slot_id = SLOTS[chat_id]\n else:\n session, slot_id = aiohttp.ClientSession(), -1\n\n if model['engine'] == \"kobold\":\n params.update({\n \"n\": 1,\n \"max_context_length\": model['max_tokens'],\n \"max_length\": length is None and model['max_length'] or length,\n \"rep_pen\": 1.08,\n \"top_a\": 0,\n \"typical\": 1,\n \"tfs\": 1,\n \"rep_pen_range\": 1024,\n \"rep_pen_slope\": 0.7,\n \"sampler_order\": model['sampler_order'],\n \"quiet\": True,\n \"stop_sequence\": stop_sequences,\n \"use_default_badwordsids\": False\n })\n elif model['engine'] == \"llamacpp\":\n print(\"slot_id\", slot_id)\n # slot_id = model['slot_id'] is None and -1 or model['slot_id']\n params.update({\n \"n_predict\": length is None and model['max_length'] or length,\n \"slot_id\": slot_id,\n \"cache_prompt\": True,\n \"typical_p\": 1,\n \"tfs_z\": 1,\n \"stop\": stop_sequences,\n \"use_default_badwordsids\": False\n })\n elif model['engine'] == \"openai\":\n params.update({\n \"n\": 1,\n \"stop\": stop_sequences,\n \"max_tokens\": length is None and model['max_length'] or length,\n })\n\n async with session.post(model['api_url'], json=params) as response:\n\n if response.status == 200:\n # Simulate the response (you will need to replace this with actual API response handling)\n response_data = await response.json()\n\n if model['engine'] == \"kobold\":\n print(response_data)\n return_data = False, response_data['results'][0]['text']\n\n elif model['engine'] == \"llamacpp\":\n # model['slot_id'] = response_data['slot_id']\n slot_id = response_data['slot_id']\n stopped = response_data['stopped_eos'] or response_data['stopped_word']\n return_data = stopped, response_data['content']\n\n elif model['engine'] == \"openai\":\n return_data = False, response_data.choices[0]['text']\n \n SLOTS[chat_id] = session, response_data['slot_id']\n\n return return_data\n else:\n print(f\"Error: Request failed with status code {response.status}\")\n return True, None\n\n","repo_name":"moshemalawach/libertai-tg-bot","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8627046732","text":"def palindrom_check2(s):\n qu = []\n st = []\n for x in s:\n if x.isalpha():\n qu.append(x.lower())\n st.append(x.lower())\n while qu:\n if qu.pop(0) != st.pop():\n return False\n\n return True\n\n\npstringList = ['역삼역', '구로구', 'Mom', '기러기', '비둘기', '기특한 특기', 'racer', 'father', '봄', '여름']\n\nfor s in pstringList:\n #idx = pstringList.index(s)\n #print(s, ' = ', palindrom_check2(pstringList[idx]))\n print('{0} = {1}'.format(s, palindrom_check2(s)))\n","repo_name":"hellen1221/2023pythonclass","sub_path":"class13/pal2.py","file_name":"pal2.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2137150036","text":"import tkinter\r\n\r\n\r\n\r\nDEFAULT_FONT = ('Calibri', 14)\r\n\r\nclass inputDialog:\r\n def __init__(self):\r\n self._dialog_window = tkinter.Toplevel()\r\n #INTRODUCTION 
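The palindrome checker just shown is quadratic because list.pop(0) shifts the whole list on every call. Filtering the letters once and comparing against the reverse gives the same answers in linear time:

```python
def palindrome_check(s: str) -> bool:
    letters = [c.lower() for c in s if c.isalpha()]  # isalpha() covers Hangul too
    return letters == letters[::-1]

assert palindrome_check("기러기") is True
assert palindrome_check("father") is False
```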
TO DIALOG\r\n intro_label = tkinter.Label(\r\n master = self._dialog_window,\r\n text = ('PLEASE SPECIFY GAME PRESETS:'), font = DEFAULT_FONT)\r\n intro_label.grid(\r\n row = 0, column = 0, columnspan = 3, sticky = tkinter.W + tkinter.E + tkinter.S + tkinter.N)\r\n \r\n #ROW LABEL AND ENTRY BOX\r\n rows_label = tkinter.Label(\r\n master = self._dialog_window,\r\n text = 'Rows: (between 4-16 even)', font = DEFAULT_FONT)\r\n rows_label.grid(\r\n row = 1, column = 0, columnspan = 2, padx = 5, pady = 5,\r\n sticky = tkinter.W + tkinter.E + tkinter.S + tkinter.N)\r\n self._rows_entry = tkinter.Entry(\r\n master = self._dialog_window, width = 10, font = DEFAULT_FONT)\r\n self._rows_entry.grid(\r\n row = 1, column = 3, sticky = tkinter.W)\r\n\r\n #COLUMN LABEL AND ENTRY BOX\r\n columns_label = tkinter.Label(\r\n master = self._dialog_window,\r\n text = 'Columns: (between 4-16 even)', font = DEFAULT_FONT)\r\n columns_label.grid(\r\n row = 2, column = 0, columnspan = 2, padx = 5, pady = 5,\r\n sticky = tkinter.W + tkinter.E + tkinter.S + tkinter.N)\r\n self._columns_entry = tkinter.Entry(\r\n master = self._dialog_window, width = 10, font = DEFAULT_FONT)\r\n self._columns_entry.grid(\r\n row = 2, column = 3, sticky = tkinter.W)\r\n\r\n #FIRST TURN LABEL AND ENTRY BOX\r\n first_turn_label = tkinter.Label(\r\n master = self._dialog_window,\r\n text = 'First Turn: (Black or White)', font = DEFAULT_FONT)\r\n first_turn_label.grid(\r\n row = 3, column = 0, columnspan = 2, padx = 5, pady = 5,\r\n sticky = tkinter.W + tkinter.E + tkinter.S + tkinter.N)\r\n self._first_turn_entry = tkinter.Entry(\r\n master = self._dialog_window, width = 10, font = DEFAULT_FONT)\r\n self._first_turn_entry.grid(\r\n row = 3, column = 3, sticky = tkinter.W)\r\n\r\n #DISC ARRANGEMENT LABEL AND ENTRY BOX\r\n disc_arrangement_label = tkinter.Label(\r\n master = self._dialog_window,\r\n text = 'Four Discs Arrangement: (Black or White)', font = DEFAULT_FONT)\r\n disc_arrangement_label.grid(\r\n row = 4, column = 0, columnspan = 2, padx = 5, pady = 5,\r\n sticky = tkinter.W + tkinter.E + tkinter.S + tkinter.N)\r\n self._disc_arrangement_entry = tkinter.Entry(\r\n master = self._dialog_window, width = 10, font = DEFAULT_FONT)\r\n self._disc_arrangement_entry.grid(\r\n row = 4, column = 3, sticky = tkinter.W)\r\n\r\n #WINNER SPECIFIER LABEL AND ENTRY BOX\r\n winner_specifier_label = tkinter.Label(\r\n master = self._dialog_window,\r\n text = 'Winner Specifier: Winner = (MORE or LESS)', font = DEFAULT_FONT)\r\n winner_specifier_label.grid(\r\n row = 5, column = 0, columnspan = 2, padx = 5, pady = 5,\r\n sticky = tkinter.W + tkinter.E + tkinter.S + tkinter.N)\r\n self._winner_specifier_entry = tkinter.Entry(\r\n master = self._dialog_window, width = 10, font = DEFAULT_FONT)\r\n self._winner_specifier_entry.grid(\r\n row = 5, column = 3, sticky = tkinter.W)\r\n\r\n # OK AND CANCEL BUTTONS\r\n button_frame = tkinter.Frame(master = self._dialog_window)\r\n button_frame.grid(\r\n row = 6, column = 3, columnspan = 3, padx = 5, pady = 5,\r\n sticky = tkinter.W + tkinter.E + tkinter.S + tkinter.N)\r\n ok_button = tkinter.Button(master = button_frame, text = 'OK', font = DEFAULT_FONT,\r\n command = self._on_ok_button)\r\n ok_button.grid(row = 0, column = 0, padx = 5, pady = 5)\r\n cancel_button = tkinter.Button(master = button_frame, text = 'CANCEL', font = DEFAULT_FONT,\r\n command = self._on_cancel_button)\r\n cancel_button.grid(row = 0, column = 1, padx = 5, pady = 5)\r\n\r\n #BALANCE ROWS AND COLUMNS ON INPUT DIALOG RESIZE\r\n 
self._dialog_window.rowconfigure(0, weight = 1)\r\n self._dialog_window.columnconfigure(0, weight = 1)\r\n\r\n #VARIABLES FOR USER INPUTS and BOOL INDICATORS \r\n self._ok_clicked = False\r\n self._rows = 4\r\n self._columns = 4\r\n self._first_turn = ''\r\n self._disc_arrangement = ''\r\n self._winner_specifier = ''\r\n\r\n def _on_ok_button(self) -> None:\r\n self._ok_clicked = True\r\n self._rows = self._rows_entry.get()\r\n self._columns = self._columns_entry.get()\r\n self._first_turn = self._first_turn_entry.get()\r\n self._disc_arrangement = self._disc_arrangement_entry.get()\r\n self._winner_specifier = self._winner_specifier_entry.get()\r\n\r\n self._dialog_window.destroy()\r\n\r\n def _on_cancel_button(self) -> None:\r\n self._dialog_window.destroy()\r\n\r\n def was_ok_clicked(self) -> bool:\r\n return self._ok_clicked\r\n\r\n def get_inputs(self) -> list:\r\n temp_list = []\r\n temp_list.append(self._rows)\r\n temp_list.append(self._columns)\r\n temp_list.append(self._first_turn)\r\n temp_list.append(self._disc_arrangement)\r\n temp_list.append(self._winner_specifier)\r\n return temp_list\r\n\r\n def show(self) -> None:\r\n self._dialog_window.grab_set()\r\n self._dialog_window.wait_window()\r\n \r\n\r\n \r\n \r\n\r\nclass Layout:\r\n def __init__(self):\r\n \r\n self._root_window = tkinter.Tk()\r\n \r\n self._canvas = tkinter.Canvas(\r\n master = self._root_window,\r\n width = 500, height = 500,\r\n background = 'blue')\r\n \r\n self._canvas.grid(\r\n row = 0, column = 0, padx = 10, pady = 10,\r\n sticky = tkinter.N + tkinter.S + tkinter.W + tkinter.E)\r\n \r\n #STRING VARIABLE FOR LABEL AND UPDATES\r\n self._score = tkinter.StringVar(value = 'test string var')\r\n self._score.set('resized string var test')\r\n\r\n current_score = tkinter.Label(\r\n master = self._root_window, font = DEFAULT_FONT,\r\n textvariable = self._score)\r\n current_score.grid(\r\n row = 1, column = 0, sticky = tkinter.W)\r\n \r\n #BUTTON TO GET USER INPUTS\r\n button1 = tkinter.Button(\r\n master = self._root_window, font = DEFAULT_FONT, text = 'GET USER INPUTS',\r\n command = self._on_button1_clicked)\r\n button1.grid(\r\n row = 1, column = 1, \r\n sticky = tkinter.N + tkinter.S + tkinter.W + tkinter.E)\r\n\r\n #Draws a rectangle\r\n rec = self._canvas.create_rectangle(0, 0, 100, 100)\r\n \r\n #BALANCE ROWS AND COLUMNS ON WINDOW RESIZE\r\n self._root_window.rowconfigure(0, weight = 1)\r\n self._root_window.columnconfigure(0, weight = 1)\r\n\r\n def start(self) -> None:\r\n self._root_window.mainloop()\r\n\r\n def _on_button1_clicked(self) -> None:\r\n dialog = inputDialog()\r\n dialog.show()\r\n\r\n if dialog.was_ok_clicked():\r\n input_list = dialog.get_inputs()\r\n for element in input_list:\r\n print(element)\r\n\r\nif __name__ == '__main__':\r\n app = Layout()\r\n app.start()\r\n","repo_name":"dblam/Duy-s-Python-Projects","sub_path":"PYTHON 32/Project 5/ModuleA1.py","file_name":"ModuleA1.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38474167691","text":"import time \nimport torch \nimport random \nimport platform \nimport subprocess \nimport torchvision\nimport torch.nn as nn\nimport numpy as np \n\n\n\ndef load_classes(path):\n\twith open(path, 'r') as fp:\n\t\tnames = fp.read().splitlines()\n\treturn names \n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif 
classname.find(\"BatchNorm2d\") != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0.0)\n\t\n\ndef xywh2xyxy_np(x):\n y = np.zeros_like(x)\n y[..., 0] = x[..., 0] - x[..., 2] / 2\n y[..., 1] = x[..., 1] - x[..., 3] / 2\n y[..., 2] = x[..., 0] + x[..., 2] / 2\n y[..., 3] = x[..., 1] + x[..., 3] / 2\n return y \n\t\t\n\ndef worker_seed_set(worker_id):\n # See for details of numpy:\n # https://github.com/pytorch/pytorch/issues/5059#issuecomment-817392562\n # See for details of random:\n # https://pytorch.org/docs/stable/notes/randomness.html#dataloader\n\n # NumPy\n uint64_seed = torch.initial_seed()\n ss = np.random.SeedSequence([uint64_seed])\n np.random.seed(ss.generate_state(4))\n\n # random\n worker_seed = torch.initial_seed() % 2**32\n random.seed(worker_seed)\n\n\ndef provide_determinism(seed=42):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n\ndef to_cpu(tensor):\n return tensor.detach().cpu()\n\n\ndef rescale_boxes(boxes, current_dim, original_shape):\n \"\"\"\n Rescales bounding boxes to the original shape\n \"\"\"\n orig_h, orig_w = original_shape\n\n # The amount of padding that was added\n pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))\n pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))\n\n # Image height and width after padding is removed\n unpad_h = current_dim - pad_y\n unpad_w = current_dim - pad_x\n\n # Rescale bounding boxes to dimension of original image\n boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w\n boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h\n boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w\n boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h\n return boxes\n\n\ndef xywh2xyxy(x):\n y = x.new(x.shape)\n y[..., 0] = x[..., 0] - x[..., 2] / 2\n y[..., 1] = x[..., 1] - x[..., 3] / 2\n y[..., 2] = x[..., 0] + x[..., 2] / 2\n y[..., 3] = x[..., 1] + x[..., 3] / 2\n return y\n\n\ndef non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None):\n \"\"\"Performs Non-Maximum Suppression (NMS) on inference results\n Returns:\n detections with shape: nx6 (x1, y1, x2, y2, conf, cls)\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n\n # Settings\n # (pixels) minimum and maximum box width and height\n max_wh = 4096\n max_det = 300 # maximum number of detections per image\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 1.0 # seconds to quit after\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [torch.zeros((0, 6), device=\"cpu\")] * prediction.shape[0]\n\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[x[..., 4] > conf_thres] # confidence\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, 
j.float()), 1)[conf.view(-1) > conf_thres]\n\n        # Filter by class\n        if classes is not None:\n            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n        # Check shape\n        n = x.shape[0]  # number of boxes\n        if not n:  # no boxes\n            continue\n        elif n > max_nms:  # excess boxes\n            # sort by confidence\n            x = x[x[:, 4].argsort(descending=True)[:max_nms]]\n\n        # Batched NMS\n        c = x[:, 5:6] * max_wh  # classes\n        # boxes (offset by class), scores\n        boxes, scores = x[:, :4] + c, x[:, 4]\n        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS\n        if i.shape[0] > max_det:  # limit detections\n            i = i[:max_det]\n\n        output[xi] = to_cpu(x[i])\n\n        if (time.time() - t) > time_limit:\n            print(f'WARNING: NMS time limit {time_limit}s exceeded')\n            break  # time limit exceeded\n\n    return output\n\n\ndef print_environment_info():\n    \"\"\"\n    Prints info about the environment and the system.\n    This should help when people attach the printout to issues.\n    \"\"\"\n\n    print(\"Environment information:\")\n\n    # Print OS information\n    print(f\"System: {platform.system()} {platform.release()}\")\n\n    # Print poetry package version\n    try:\n        print(f\"Current Version: {subprocess.check_output(['poetry', 'version'], stderr=subprocess.DEVNULL).decode('ascii').strip()}\")\n    except (subprocess.CalledProcessError, FileNotFoundError):\n        print(\"Not using the poetry package\")\n\n    # Print commit hash if possible\n    try:\n        print(f\"Current Commit Hash: {subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], stderr=subprocess.DEVNULL).decode('ascii').strip()}\")\n    except (subprocess.CalledProcessError, FileNotFoundError):\n        print(\"No git or repo found\")\n","repo_name":"xiaoxioamu/yolo_v3","sub_path":"pytorchyolo/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"8025479399","text":"import datetime as dt\nimport requests\n\n\n\"\"\"\nREAD ME:\npip install requests\nget the API key from here: https://home.openweathermap.org/\n\n\"\"\"\nBASE_URL = \"http://api.openweathermap.org/data/2.5/weather?\"\nAPI_KEY = \"d1e390c59df437eb10ad140efdb81f5b\"\nCITY = \"Madrid\"\n\ndef kelvin_to_celsius_fahrenheit(kelvin):\n    celsius = kelvin - 273.15\n    fahrenheit = celsius * (9/5) + 32\n    return celsius, fahrenheit\n\nurl = BASE_URL + \"appid=\" + API_KEY + \"&q=\" + CITY\n\nresponse = requests.get(url).json()\n\n\ntemp_kelvin = response['main']['temp']\ntemp_celsius, temp_fahrenheit = kelvin_to_celsius_fahrenheit(temp_kelvin)\ntiempo = response['weather'][0]['main']\ngrados_viento = response['wind']['deg']\nvelocidad_viento = response['wind']['speed']\n\nprint(response)\n","repo_name":"sroaks/FUMADA-ROMANA","sub_path":"tiempop.py","file_name":"tiempop.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"6477231007","text":"\nfrom datetime import datetime\nimport os\nimport shutil\n\nclass FileUtils:\n    def __init__(self):\n        pass\n\n    def make_dir(self, *subpaths):\n        path = self.abs_path(*subpaths)\n        os.makedirs(path, exist_ok=True)\n        return path\n\n    def abs_path(self, *subpaths):\n        # note: passing self explicitly here would shift every argument by one\n        return self.abs_path_with_splits(None, *subpaths)\n\n    def abs_path_with_splits(self, number_of_splits=None, *subpaths):\n        path = os.path.join(*subpaths)\n        if os.path.isabs(path):\n            return path\n        if '__file__' in globals():\n            parts = os.path.split(__file__)[:-1]\n\n            if number_of_splits is not None:\n                p = parts\n\n                for i 
in range(number_of_splits):\n p = p[0]\n p = os.path.split(p)\n\n parts = p[:-1]\n\n parts += subpaths\n\n return os.path.join(*parts)\n return os.path.abspath(*subpaths)\n\n def clear_dir_and_files(self, path):\n for root, dirs, files in os.walk(path):\n for file in files:\n os.remove(os.path.join(root, file))\n\n def create_dirs(self, *sub_paths):\n dir_path = self.abs_path_with_splits(1, *sub_paths)\n\n if os.path.isdir(dir_path) is False:\n os.makedirs(dir_path)\n\n return dir_path\n\n def create_sub_dir(self, dir_path, label_name):\n sub_dir = os.path.join(dir_path, label_name)\n\n if os.path.isdir(sub_dir) is False:\n os.mkdir(sub_dir)\n else:\n self.clear_dir_and_files(sub_dir)\n print(\"[{}] directory already exists.\".format(sub_dir))\n\n return sub_dir\n\n def create_path_dir(self, label_name, *sub_paths):\n dir_path = self.create_dirs(*sub_paths)\n sub_dir = self.create_sub_dir(dir_path, label_name)\n return sub_dir\n\n def list_files(self, *subpaths, paths_only=False):\n dir_path = self.abs_path(*subpaths)\n res = []\n for f in os.listdir(dir_path):\n path = os.path.join(dir_path, f)\n if os.path.isfile(path):\n if paths_only:\n res.append(path)\n else:\n res.append((path, f))\n return res\n\n def clear_dir(self, *subpaths):\n dir_path = self.abs_path(*subpaths)\n for root, dirs, files in os.walk(dir_path):\n for f in files:\n os.unlink(os.path.join(root, f))\n for d in dirs:\n shutil.rmtree(os.path.join(root, d))\n\n def clear_or_make_dir(self, *subpaths):\n dir_path = self.abs_path(*subpaths)\n if os.path.exists(dir_path):\n self.clear_dir(dir_path)\n else:\n self.make_dir(dir_path)\n\n def copy_files(self, from_path, to_path, clear_first=True, move=False, rename_cb=None):\n from_path = self.abs_path(from_path) if type(from_path) == str else self.abs_path(*from_path)\n to_path = self.abs_path(to_path) if type(to_path) == str else self.abs_path(*to_path)\n if os.path.exists(to_path):\n if clear_first:\n self.clear_dir(to_path)\n else:\n self.make_dir(to_path)\n for from_file_path, from_file_name in self.list_files(from_path, paths_only=True):\n to_file_name = rename_cb(from_file_name) if rename_cb else from_file_name\n to_file_path = os.path.join(to_path, to_file_name)\n shutil.copy(from_file_path, to_file_path)\n\n def get_path(self, *sub_paths):\n dir_path = self.abs_path_with_splits(1, *sub_paths)\n print(\"[{}] directory exists.\".format(dir_path))\n return dir_path\n\n def cur_datetime(self):\n return datetime.now().strftime('%Y%m%d_%H%M%S')\n\n def rename(self, old_name, new_name):\n os.rename(old_name, new_name)\n\n\n\n\n\n","repo_name":"hebertluchetti/HandGestureRecog","sub_path":"src/utils/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37039087724","text":"with open(\"input\", \"r\") as file:\n text = file.read().splitlines()\n text = sorted(map(int, text))\n\njolt3 = 1\njolt1 = 1\n\nfor i in range(1, len(text)):\n jolt3 += 1 if text[i] - text[i-1] == 3 else 0\n jolt1 += 1 if text[i] - text[i-1] == 1 else 0\n\nprint(jolt3*jolt1)","repo_name":"GrbavaCigla/AdventOfCode","sub_path":"2020/10/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22957026493","text":"\nfrom vsg import parser\n\nfrom vsg.token import sensitivity_list as token\n\nfrom vsg.vhdlFile import utils\n\n\ndef classify(iToken, 
lObjects):\n '''\n subtype_indication ::=\n [ resolution_indication ] type_mark [ constraint ]\n '''\n\n iCurrent = iToken\n iStop = len(lObjects) - 1\n iOpenParenthesis = 0\n iCloseParenthesis = 0\n while iCurrent < iStop:\n iCurrent = utils.find_next_token(iCurrent, lObjects)\n if utils.token_is_open_parenthesis(iCurrent, lObjects):\n iOpenParenthesis += 1\n if utils.token_is_close_parenthesis(iCurrent, lObjects):\n iCloseParenthesis += 1\n if iOpenParenthesis < iCloseParenthesis:\n break\n else:\n if utils.is_next_token(',', iCurrent, lObjects):\n utils.assign_token(lObjects, iCurrent, token.comma)\n else:\n utils.assign_token(lObjects, iCurrent, parser.todo)\n return iCurrent\n\n\ndef classify_until(lUntils, iToken, lObjects):\n '''\n sensitivity_list ::=\n *signal*_name { , *signal*_name}\n '''\n\n iCurrent = iToken\n iLast = 0\n while iLast != iCurrent:\n iLast = iCurrent\n if lObjects[utils.find_next_token(iCurrent, lObjects)].get_value().lower() in lUntils:\n return iCurrent\n iCurrent = utils.assign_next_token_if(',', token.comma, iCurrent, lObjects)\n iCurrent = utils.assign_next_token(parser.todo, iCurrent, lObjects)\n","repo_name":"jeremiah-c-leary/vhdl-style-guide","sub_path":"vsg/vhdlFile/classify/sensitivity_list.py","file_name":"sensitivity_list.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"61"} +{"seq_id":"35567959065","text":"# File: Wordle.py\n\n\"\"\"\nThis module is the starter file for the Wordle assignment.\n\"\"\"\n\nimport random\nfrom WordleDictionary import FIVE_LETTER_WORDS as words\nfrom WordleGraphics import WordleGWindow, N_COLS, CORRECT_COLOR, PRESENT_COLOR, MISSING_COLOR, UNKNOWN_COLOR\n\ndef wordle():\n def change_color(enteredWord, word):\n is_color_blind = gw.get_color_mode()\n square_colors, key_colors = set_color_squares(is_color_blind)\n\n rowNum = gw.get_current_row()\n word_dict = {}\n\n # Create a dictionary to count the occurrences of each letter in the target word\n for i in range(N_COLS):\n if word[i] in word_dict:\n word_dict[word[i]] += 1\n else:\n word_dict[word[i]] = 1\n\n # all of the greens must be compared first, not letter by letter with yellows\n for i in range(N_COLS):\n if enteredWord[i] == word[i]:\n square_color = square_colors[\"correct\"]\n gw.set_square_color(rowNum, i, square_color)\n gw.set_key_color(enteredWord[i].upper(), square_color)\n # if correct then take letter out of dictionary\n word_dict[enteredWord[i]] -= 1\n\n for i in range(N_COLS):\n # if the color isn't already set to \"correct\" then make it either yellow or gray\n if gw.get_square_color(rowNum, i) != square_colors[\"correct\"]:\n if enteredWord[i] in word_dict and word_dict[enteredWord[i]] > 0:\n square_color = square_colors[\"present\"]\n else:\n square_color = square_colors[\"missing\"]\n \n gw.set_square_color(rowNum, i, square_color)\n if gw.get_key_color(enteredWord[i].upper()) == square_colors[\"missing\"] or gw.get_key_color(enteredWord[i].upper()) == square_colors[\"unknown\"]:\n gw.set_key_color(enteredWord[i].upper(), square_color)\n\n # Decrement the count if the letter appears in the dictionary (for yellows only)\n if enteredWord[i] in word and word_dict[enteredWord[i]] > 0:\n word_dict[enteredWord[i]] -= 1\n\n def set_color_squares(is_color_blind):\n if is_color_blind:\n square_colors = {\n \"correct\": \"BLUE\",\n \"present\": \"ORANGE\",\n \"missing\": \"RED\",\n \"unknown\": UNKNOWN_COLOR,\n }\n key_colors = {\n \"correct\": \"BLUE\",\n \"present\": 
\"ORANGE\",\n                \"missing\": \"RED\",\n            }\n        else:\n            square_colors = {\n                \"correct\": CORRECT_COLOR,\n                \"present\": PRESENT_COLOR,\n                \"missing\": MISSING_COLOR,\n                \"unknown\": UNKNOWN_COLOR,\n            }\n            key_colors = {\n                \"correct\": CORRECT_COLOR,\n                \"present\": PRESENT_COLOR,\n                \"missing\": MISSING_COLOR,\n            }\n        return square_colors, key_colors\n\n    def check_correct_letters(enteredWord, word): #checks if the word is the selected random word\n        if enteredWord == word:\n            return True \n        else: \n            return False \n\n    def check_word(enteredWord): #checks if the word entered is in the word dictionary\n\n        if enteredWord in words:\n            return True\n        else:\n            return False\n    \n    \n\n    # def choose_alternate_colors():\n    #     while True:\n    #         user_choice = input(\"Do you want to use the alternate color scheme? (yes/no): \").strip().lower()\n    #         if user_choice == \"yes\":\n    #             return True\n    #         elif user_choice == \"no\":\n    #             return False\n    #         else:\n    #             print(\"Invalid input. Please enter 'yes' or 'no'.\") \n\n    def enter_action(s):\n        enteredWord = s.lower()\n\n        if check_word(enteredWord):\n            won = check_correct_letters(enteredWord, word)\n            # change_color(enteredWord, word, square_colors, key_colors, use_alternate_colors)\n            change_color(enteredWord, word)\n            currentRow = gw.get_current_row()\n\n            if currentRow < 5 and not won:\n                gw.set_current_row(currentRow + 1)\n                message = \"Keep going\"\n            elif won:\n                message = \"You won\"\n                gw.enable_button()\n\n            else:\n                message = \"Try again tomorrow\"\n\n        else:\n            message = \"Not in the word list\"\n\n        gw.show_message(message)\n\n    # I moved this code so that it worked with the button\n    # Call the function from WordleGraphics.py to choose the color scheme\n    # use_alternate_colors = choose_alternate_colors()\n\n    # if use_alternate_colors:\n    #     square_colors = {\n    #         \"correct\": \"blue\",\n    #         \"present\": \"orange\",\n    #         \"missing\": \"red\",\n    #         \"unknown\": UNKNOWN_COLOR,\n    #     }\n    #     key_colors = {\n    #         \"correct\": \"blue\",\n    #         \"present\": \"orange\",\n    #         \"missing\": \"red\",\n    #     }\n    # else:\n    #     square_colors = {\n    #         \"correct\": CORRECT_COLOR,\n    #         \"present\": PRESENT_COLOR,\n    #         \"missing\": MISSING_COLOR,\n    #         \"unknown\": UNKNOWN_COLOR,\n    #     }\n    #     key_colors = {\n    #         \"correct\": CORRECT_COLOR,\n    #         \"present\": PRESENT_COLOR,\n    #         \"missing\": MISSING_COLOR,\n    #     }\n\n    gw = WordleGWindow()\n\n    word = randomWord()\n\n    gw.add_enter_listener(enter_action)\n\ndef randomWord():\n    min_value = 0\n    max_value = len(words) - 1 # randint is inclusive on both ends, so stay within the list bounds\n\n    random_integer = random.randint(min_value, max_value)\n\n    word = words[random_integer]\n\n    # not necessarily needed but good for testing\n    print(word)\n    return word\n\n# Startup code\nif __name__ == \"__main__\":\n    wordle()\n\n\n","repo_name":"matkinson5/wordle","sub_path":"WordleStarter/Wordle.py","file_name":"Wordle.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"27367496692","text":"import json\nimport math\nimport socket\nfrom operator import attrgetter\n\nfrom kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.widget import Widget\nfrom kivy.core.window import Window\n\nfrom improvement4.edges import RouteEdge\nfrom nodes import *\nfrom signaler import Signaler\nfrom route import Route\n\nN_SIZE = 3\nWIDTH = 2048\nHEIGHT = 1600\nLEXICON_PATH = 'lexicon.txt'\nSENTENCES_PATH = 'sentences.txt'\nSHOW_FULL_LEXICON = False\n\nIP, PORT = '127.0.0.1', 62236\n\nWindow.size = WIDTH / 2, HEIGHT / 2\n\nclass Network(Widget):\n    def 
__init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.nodes = {}\n self.edges = {}\n self.lexicon = {}\n self.features = {}\n self.categories = []\n self.sentences = []\n self.current_sentence_index = 0\n self.merge = None\n self.merge_pair = None\n self.merge_ok = None\n self.signaler = None\n self.route_mode = False\n self.ongoing_sentence = \"\"\n self.ongoing_sentence_label = Label(text=\"\")\n self.ongoing_sentence_label.x = WIDTH / 2\n self.ongoing_sentence_label.y = 100\n self.sentence_row_y = 0\n self.kataja_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.next_button = Button(text='Next step', font_size=14)\n self.next_button.x = 120\n self.next_button.y = 10\n self.add_widget(self.next_button)\n self.next_button.on_press = self.next_word\n self.next_sen_button = Button(text='Next sen', font_size=14)\n self.next_sen_button.x = 10\n self.next_sen_button.y = 10\n self.add_widget(self.next_sen_button)\n self.next_sen_button.on_press = self.next_sentence\n self.route_mode_button = Button(text='Routes /\\nNetwork', font_size=14)\n self.route_mode_button.x = WIDTH - 120\n self.route_mode_button.y = 10\n self.add_widget(self.route_mode_button)\n self.route_mode_button.on_press = self.toggle_route_mode\n self.counter = 0\n\n keyboard = Window.request_keyboard(self.handle_keyup, self)\n keyboard.bind(on_key_up=self.handle_keyup)\n Window.bind(on_request_close=self.on_request_close)\n\n def on_request_close(self, *args):\n if self.kataja_socket:\n # Sockets should be closed on garbage collection, probably this is not necessary\n self.kataja_socket.close()\n\n def handle_keyup(self, window, keycode):\n if keycode:\n if keycode[1] == 'right':\n self.next_word()\n elif keycode[1] == 'down':\n self.next_sentence()\n elif keycode[1] == 'up':\n self.prev_sentence()\n\n def clear_grammar(self):\n self.nodes = {}\n self.edges = {}\n self.lexicon = {}\n self.features = {}\n self.categories = []\n self.merge = None\n self.merge_pair = None\n self.merge_ok = None\n\n def toggle_route_mode(self):\n self.route_mode = not self.route_mode\n if self.route_mode:\n self.draw_sentence_circle()\n else:\n self.draw_sentence_row()\n self.update_canvas()\n\n def update_canvas(self, *args):\n self.canvas.clear()\n self.clear_widgets()\n if self.route_mode:\n for edge in self.edges.values():\n if edge.draw_in_route_mode:\n edge.draw()\n else:\n for edge in self.edges.values():\n if edge.draw_in_feature_mode:\n edge.draw()\n if self.route_mode:\n for node in self.nodes.values():\n if node.draw_in_route_mode:\n node.draw()\n else:\n for node in self.nodes.values():\n if node.draw_in_feature_mode:\n node.draw()\n if self.route_mode:\n for node in self.nodes.values():\n if node.draw_in_route_mode:\n node.add_label()\n else:\n for node in self.nodes.values():\n if node.draw_in_feature_mode:\n node.add_label()\n self.add_widget(self.next_button)\n self.add_widget(self.next_sen_button)\n self.add_widget(self.ongoing_sentence_label)\n self.add_widget(self.route_mode_button)\n\n def merge_signals(self, old_signal, new_signal):\n for node in self.nodes.values():\n node.merge_activations(old_signal, new_signal)\n if isinstance(node, LexicalNode):\n for route in node.routes_down:\n #if route.wp.signal == old_signal:\n route.rs.merge_signals(old_signal, new_signal)\n\n def read_lexicon(self, lexicon_file, append=False, only_these=None):\n if not append:\n self.lexicon.clear()\n new_lexicon = {}\n with open(lexicon_file) as lines:\n for line in lines:\n line = line.strip()\n if not line or 
line.startswith('#'):\n continue\n word, feats = line.split('::', 1)\n word = word.strip()\n if only_these and word not in only_these:\n continue\n word_parts = feats.split(',')\n first = True\n lex_parts = []\n for feats in word_parts:\n if not first:\n word = f\"{word}'\"\n cats = []\n neg_feats = []\n pos_feats = []\n for feat in feats.strip().split():\n if feat.startswith('cat:'):\n cats.append(self.add(CategoryNode, feat))\n elif feat[0] in NegFeatureNode.signs:\n neg_feats.append(self.add(NegFeatureNode, feat))\n else:\n pos_feats.append(self.add(PosFeatureNode, feat))\n lex_node = self.add(LexicalNode, word, cats, neg_feats + pos_feats, lex_parts)\n lex_parts.append(lex_node)\n new_lexicon[word] = lex_node\n first = False\n if only_these:\n self.lexicon.clear()\n for word in only_these:\n lex_node = new_lexicon[word]\n self.lexicon[word] = lex_node\n if lex_node.lex_parts:\n for part in lex_node.lex_parts:\n self.lexicon[part.id] = part\n\n else:\n self.lexicon = new_lexicon\n\n def find_by_signal(self, signal):\n for lex_item in reversed(self.lexicon.values()):\n if signal in lex_item.activations:\n return lex_item\n\n def get_wp(self, signal):\n for wp in self.signaler.word_parts:\n if wp.signal == signal:\n return wp\n\n def get_first_wp(self, signal):\n for wp in self.signaler.word_parts:\n if wp.signal == signal:\n return wp\n\n def get_last_wp(self, signal):\n for wp in reversed(self.signaler.word_parts):\n if wp.signal == signal:\n return wp\n\n def add_merge(self, head_signal, arg_signal):\n if head_signal < arg_signal or True:\n head = self.get_first_wp(head_signal)\n arg = self.get_first_wp(arg_signal)\n else:\n head = self.get_first_wp(head_signal)\n arg = self.get_last_wp(arg_signal)\n if head and arg:\n LexicalNode.add_merge(head, arg)\n\n def add_adjunction(self, first_signal, second_signal):\n head = self.get_wp(first_signal)\n adj = self.get_wp(second_signal)\n if head and adj:\n LexicalNode.add_adjunction(head, adj)\n\n def update_sentence(self, text=\"\"):\n if not text:\n text = f'{self.current_sentence_index + 1}/{len(self.sentences)}. 
' + self.sentences[\n self.current_sentence_index]\n self.ongoing_sentence = text\n self.ongoing_sentence_label.text = self.ongoing_sentence\n\n def send(self, data):\n if self.kataja_socket:\n try:\n self.kataja_socket = socket.create_connection((IP, PORT))\n self.kataja_socket.send(str(data).encode('utf-8'))\n self.kataja_socket.close()\n return True\n except ConnectionRefusedError:\n self.kataja_socket = None\n\n def reset(self):\n self.signaler.reset()\n self.counter = 0\n for edge in list(self.edges.values()):\n if isinstance(edge, (MergeEdge, AdjunctEdge, RouteEdge)):\n del self.edges[edge.id]\n for node in self.nodes.values():\n node.reset()\n if isinstance(node, LexicalNode):\n node.head_edges.clear()\n node.arg_edges.clear()\n node.adjunctions.clear()\n node.adjunct_to.clear()\n node.routes_down.clear()\n node.route_edges.clear()\n self.clear_activations()\n\n def decay_signals(self):\n for edge in self.edges.values():\n edge.decay()\n for node in self.nodes.values():\n node.decay()\n\n def next_word(self):\n if not self.signaler:\n return\n if self.signaler.current_item.signal == 1:\n self.update_canvas()\n leaf_constituent = Route(None, wp=self.signaler.current_item)\n leaf_constituent.wp.li.routes_down.append(leaf_constituent)\n if self.signaler.pick_next():\n self.update_sentence(' '.join([wp.li.id for wp in self.signaler.word_parts]))\n else:\n self.reset()\n self.signaler.pick_first()\n self.update_sentence()\n print()\n print(f'** Activating {self.signaler.current_item} ***')\n if self.signaler.can_merge():\n self.decay_signals()\n self.signaler.activate_current_words()\n self.update_canvas()\n print()\n print(f'*** Handling {self.signaler.current_item} ***')\n leaf_constituent = Route(None, wp=self.signaler.current_item)\n leaf_constituent.wp.li.routes_down.append(leaf_constituent)\n leaf_constituent.walk_all_routes_up()\n \"\"\"\n for wp in self.signaler.word_parts[:-1]:\n print()\n print(f' *** Revisiting {wp} ***')\n print(f' routes to walk: {wp.li.routes_down}')\n for route in wp.li.routes_down:\n if route.wp is wp and len(route) == 1:\n route.walk_all_routes_up()\n\n for wp in self.signaler.word_parts:\n if wps_to_merge := self.should_merge_signals(wp):\n print(f'*** merging signals {wps_to_merge} -> {wp}')\n for wp_to_merge in wps_to_merge:\n wp_to_merge.merged = True\n self.merge_signals(wp_to_merge.signal, wp.signal)\n wp_to_merge.signal = wp.signal\n \"\"\"\n if self.signaler.is_last() and False:\n for wp in self.signaler.word_parts:\n print()\n print(f' *** Revisiting one last time: {wp} ***')\n print(f' routes to walk: {wp.li.routes_down}')\n for route in wp.li.routes_down:\n if route.wp is wp and len(route) == 1:\n route.walk_all_routes_up()\n\n print('****************************************')\n print('* *')\n print('* Done parsing, now pick optimal route *')\n print('* *')\n print('****************************************')\n self.pick_optimal_route()\n else:\n self.show_current_routes()\n self.update_canvas()\n\n def should_merge_signals(self, wp):\n my_routes = [r for r in wp.li.routes_down if r.wp.signal == wp.signal]\n wps_to_merge = set()\n if my_routes:\n top_route = my_routes[0]\n if top_route.arg and top_route.arg.wp.signal < wp.signal:\n print(f'sig_merge {top_route.arg.wp} -> {wp} because it is argument at route {top_route}')\n wps_to_merge.add(top_route.arg.wp)\n arg_part = top_route.arg.part\n while arg_part:\n print(f'sig_merge {arg_part.wp} -> {wp} because it is part of merged argument')\n wps_to_merge.add(arg_part.wp)\n arg_part = 
arg_part.part\n if top_route.wp.merged and top_route.part and False:\n part = top_route.part\n while part:\n if part.wp.signal != wp.signal:\n print(f'sig_merge {part.wp} -> {wp} because it is my part')\n wps_to_merge.add(part.wp)\n part = part.part\n for adjunct in top_route.adjuncts:\n if adjunct.wp.signal != wp.signal:\n print(f'sig_merge {adjunct.wp} -> {wp} because it is adjunct of this wp')\n wps_to_merge.add(adjunct.wp)\n return wps_to_merge\n\n def next_sentence(self):\n self.current_sentence_index += 1\n if self.current_sentence_index == len(self.sentences):\n self.current_sentence_index = 0\n self._reset_sentence()\n\n def prev_sentence(self):\n self.current_sentence_index -= 1\n if self.current_sentence_index < 0:\n self.current_sentence_index = len(self.sentences) - 1\n self._reset_sentence()\n\n def _reset_sentence(self):\n self.clear_activations()\n self.reset()\n self.parse(self.sentences[self.current_sentence_index])\n self.update_canvas()\n\n def add(self, node_class, label, *args, **kwargs):\n if label in self.nodes:\n return self.nodes[label]\n node = node_class(label, *args, **kwargs)\n self.nodes[label] = node\n return node\n\n def parse(self, sentence):\n if not SHOW_FULL_LEXICON:\n self.clear_grammar()\n self.read_lexicon(LEXICON_PATH, only_these=sentence.split())\n self.draw_grammar()\n elif not self.lexicon:\n self.read_lexicon(LEXICON_PATH)\n self.draw_grammar()\n self.signaler = Signaler(sentence.split(), self.lexicon)\n self.signaler.pick_first()\n self.update_sentence()\n\n def draw_grammar(self):\n row = 1\n row_height = HEIGHT / 6\n self.merge = self.add(SymmetricMergeNode, 'M(AB)') # A→B\n self.merge.set_pos(WIDTH / 2 - WIDTH / 8, row * row_height)\n self.merge_pair = self.add(SymmetricPairMergeNode, 'M(A<->B)')\n self.merge_pair.set_pos(WIDTH / 2 + WIDTH / 8, row * row_height)\n self.merge_ok = self.add(MergeOkNode, 'OK')\n self.merge_ok.set_pos(100, HEIGHT / 2)\n\n row += 2\n for n, cat_node in enumerate(self.categories):\n cat_node.set_pos(WIDTH / (len(self.categories) + 1) * (n + 1), row * row_height)\n self.merge.connect(self.merge_ok)\n self.merge_pair.connect(self.merge_ok)\n row += 1\n y_shift = row_height / -2\n sorted_features = sorted(self.features.values(), key=FeatureNode.sortable)\n for n, feat_node in enumerate(sorted_features):\n x = WIDTH / (len(self.features) + 1) * (n + 1)\n y = row * row_height + y_shift\n y_shift += 50\n if y_shift > 50:\n y_shift = -100\n if feat_node.sign == '=':\n feat_node.connect(self.merge)\n feat_node.connect_positive()\n elif feat_node.sign == '-':\n feat_node.connect(self.merge_pair)\n feat_node.connect_positive()\n feat_node.set_pos(x, y)\n row += 1\n print('row: ', row)\n self.sentence_row_y = row * row_height\n for n, lex_node in enumerate(self.lexicon.values()):\n lex_node.connect_lex_parts()\n if self.route_mode:\n self.draw_sentence_circle()\n else:\n self.draw_sentence_row()\n self.update_canvas()\n\n def draw_sentence_row(self):\n y_shift = 0\n for n, lex_node in enumerate(self.lexicon.values()):\n x = WIDTH / (len(self.lexicon) + 1) * (n + 1)\n y = self.sentence_row_y + y_shift\n y_shift += 20\n if y_shift > 60:\n y_shift = 0\n lex_node.set_pos(x, y)\n\n def draw_sentence_circle(self):\n for n, lex_node in enumerate(self.lexicon.values()):\n pi_step = (math.pi * 2 / len(self.lexicon)) * n + math.pi / 2\n x = (math.cos(pi_step) * (WIDTH / -2 * 0.8)) + WIDTH / 2\n y = (math.sin(pi_step) * (HEIGHT / 2 * 0.8)) + HEIGHT / 2 + HEIGHT / 20\n lex_node.set_pos(x, y)\n\n def build(self):\n self.sentences = 
[row.strip() for row in open(SENTENCES_PATH).readlines()\n if row.strip() and not row.strip().startswith('#')]\n self.parse(self.sentences[self.current_sentence_index])\n\n def clear_activations(self):\n for node in self.nodes.values():\n node.activations = {}\n node.active = False\n for edge in self.edges.values():\n edge.activations = {}\n\n def show_current_routes(self):\n c = 0\n for word_part in self.signaler.word_parts:\n indent = ' ' * word_part.signal\n print(f'{indent}*** routes down from {word_part}: ({len(word_part.li.routes_down)})')\n for route in word_part.li.routes_down:\n c += 1\n if route.wp is not word_part:\n continue\n print(f'{indent}{route.print_route()} {route.rs.low}-{route.rs.high}, '\n f'movers: {route.rs.movers} used:{route.rs.used_movers} w:{route.weight}, order:{route.order}')\n print('routes total at this point: ', c)\n\n def pick_optimal_route(self):\n total_routes = 0\n good_routes = []\n for word_part in self.signaler.word_parts:\n indent = ' ' * word_part.signal\n print(f'{indent}*** routes down from {word_part}: ({len(word_part.li.routes_down)})')\n for route in word_part.li.routes_down:\n if route.wp is not word_part:\n continue\n total_routes += 1\n print(f'{indent} {route.print_route()} {route.rs.low}-{route.rs.high}, '\n f'{route.rs.movers} wp: {route.wp}, len: {len(route)}, '\n f'signals: {len({wp.signal for wp in route.wps})}, '\n f'used_movers: {route.rs.used_movers}, weight: {route.weight}, order: {route.order}')\n\n if len(route) == len(self.signaler.word_parts) and not route.rs.movers:\n if route not in good_routes:\n good_routes.append(route)\n print(route.tree())\n\n print(f'{indent} routes len: {len(word_part.li.routes_down)}')\n\n if good_routes:\n good_route_strs = []\n good_routes.sort(key=attrgetter('size', 'weight'), reverse=True)\n for route in good_routes:\n good_route = route.tree()\n good_route_strs.append(good_route)\n good_route_strs.append(\"\")\n print(route, route.rs, route.weight, route.order)\n\n if self.send(json.dumps(good_route_strs)):\n print(f'sent {len(good_routes)} good routes to kataja')\n else:\n print(f'found {len(good_routes)} good routes')\n print('total routes: ', total_routes)\n\n def add_route_edge(self, start, end, origin):\n if not RouteEdge.exists(start, end, origin):\n edge = RouteEdge(start, end, origin)\n if edge not in end.li.route_edges:\n end.li.route_edges.append(edge)\n\n\nclass NetworkApp(App):\n def build(self):\n g = Network()\n ctrl.post_initialize(g)\n g.build()\n return g\n\n\nif __name__ == '__main__':\n NetworkApp().run()\n\n","repo_name":"jpurma/NodeMerge","sub_path":"improvement4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32408744995","text":"import urllib.request\nimport json\nimport dml\nimport prov.model\nimport datetime\nimport uuid\nimport requests\n\nclass permit(dml.Algorithm):\n contributor = 'lc546_jofranco'\n reads = []\n writes = ['lc546_jofranco.permit']\n @staticmethod\n def execute(trial = False):\n startTime = datetime.datetime.now()\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate(\"lc546_jofranco\", \"lc546_jofranco\")\n # url = 'https://data.cityofboston.gov/resource/fdxy-gydq.json'\n url = 'https://data.boston.gov/export/f1e/137/f1e13724-284d-478c-b8bc-ef042aa5b70b.json'\n # import requests\n #import json\n r = requests.get('https://data.boston.gov'+\\\n 
'/export/f1e/137/'+\\\n                         'f1e13724-284d-478c-b8bc-ef042aa5b70b.json')\n        t = r.text.replace(\"\\n],\\n\", \",\\n\")\n        p = json.loads('{\"data\":'+t+']}')\n        #response = urllib.request.urlopen(url).read().decode(\"utf-8\")\n        # response = open('/Users/Jesus/Desktop/project1/course-2017-fal-proj/lc546_jofranco/fixedpermits.txt').read()\n        # print(\"this\", response)\n        # r = json.loads(str(p))\n        #print(p)\n        s = json.dumps(p, sort_keys = True, indent = 2)\n        repo.dropCollection(\"permit\")\n        repo.createCollection(\"permit\")\n        repo[\"lc546_jofranco.permit\"].insert_many([p])\n        repo[\"lc546_jofranco.permit\"].metadata({'complete':True})\n        print(repo[\"lc546_jofranco.permit\"].metadata())\n        repo.logout()\n        endTime = datetime.datetime.now()\n        return {\"start\":startTime, \"end\":endTime}\n    @staticmethod\n    def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\n        client = dml.pymongo.MongoClient()\n        repo = client.repo\n        repo.authenticate(\"lc546_jofranco\", \"lc546_jofranco\")\n        doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in # format.\n        doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in # format.\n        doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n        doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n        doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n        this_script = doc.agent('alg:lc546_jofranco#retrievedata', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n        resource = doc.entity('bdp:xgbq-327x', {'prov:label':'permit', prov.model.PROV_TYPE:'ont:DataResource', 'ont:Extension':'json'})\n        get_permitinfo = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime, {prov.model.PROV_LABEL:'permit', prov.model.PROV_TYPE:'ont:DataSet'})\n        doc.wasAssociatedWith(get_permitinfo, this_script)\n        doc.usage(get_permitinfo, resource, startTime)\n        permit = doc.entity('dat:lc546_jofranco#permit', {prov.model.PROV_LABEL:'restaurant permit', prov.model.PROV_TYPE:'ont:DataSet'})\n        doc.wasAttributedTo(permit, this_script)\n        doc.wasGeneratedBy(permit, get_permitinfo, endTime)\n        doc.wasDerivedFrom(permit, resource, get_permitinfo, get_permitinfo, get_permitinfo)\n        return doc\n\n\n\npermit.execute()\ndoc = permit.provenance()\nprint(doc.get_provn())\nprint(json.dumps(json.loads(doc.serialize()), indent=4))\n","repo_name":"data-mechanics/course-2017-fal-proj","sub_path":"lc546_jofranco/permit.py","file_name":"permit.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"1949372029","text":"\"\"\"\nFlask initialization\n\"\"\"\nimport os\n\n__version__ = '0.1'\n\nimport connexion\nfrom flask_environments import Environments\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom flask_mail import Mail, Message \nimport config\n\ndb = None\nmigrate = None\ndebug_toolbar = None\nredis_client = None\napp = None\napi_app = None\nlogger = None\n\n\ndef create_app():\n    \"\"\"\n    This method creates the Flask application.\n    :return: Flask App Object\n    \"\"\"\n    global db\n    global app\n    global migrate\n    global api_app\n\n    # first initialize the logger\n    init_logger()\n\n    api_app = connexion.FlaskApp(\n        __name__,\n        server='flask',\n        specification_dir='openapi/',\n    )\n\n    # getting the flask app\n    app = api_app.app\n\n    flask_env = 
os.getenv('FLASK_ENV', 'None')\n    if flask_env == 'development':\n        config_object = config.DevConfig\n    elif flask_env == 'testing':\n        config_object = config.TestConfig\n    elif flask_env == 'production':\n        config_object = config.ProdConfig\n    else:\n        raise RuntimeError(\n            \"%s is not recognized as a valid app environment. You have to set up the environment!\" % flask_env)\n\n    # Load config\n    env = Environments(app)\n    env.from_object(config_object)\n\n    # registering db\n    db = SQLAlchemy(\n        app=app\n    )\n\n    # requiring the list of models\n    import mib.models\n\n    # creating migrate\n    migrate = Migrate(\n        app=app,\n        db=db\n    )\n\n    # checking the environment\n    if flask_env == 'testing':\n        # we need to populate the db\n        db.create_all()\n    #db.create_all()\n    # registering to api app all specifications\n    register_specifications(api_app)\n\n    return app\n\n\ndef init_logger():\n    global logger\n    \"\"\"\n    Initialize the internal application logger.\n    :return: None\n    \"\"\"\n    logger = logging.getLogger(__name__)\n    from flask.logging import default_handler\n    logger.addHandler(default_handler)\n\n\ndef register_specifications(_api_app):\n    \"\"\"\n    This function registers all resources in the flask application\n    :param _api_app: Flask Application Object\n    :return: None\n    \"\"\"\n\n    # we need to scan the specifications package and add all yaml files.\n    from importlib_resources import files\n    folder = files('mib.specifications')\n    for _, _, files in os.walk(folder):\n        for file in files:\n            if file.endswith('.yaml') or file.endswith('.yml'):\n                file_path = folder.joinpath(file)\n                _api_app.add_api(file_path)\n","repo_name":"francesboc/mib-message","sub_path":"mib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"18024709740","text":"import xlrd\nimport jieba\nimport torch\nfrom transformers import BertTokenizer, BertModel\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom torch.utils.data import DataLoader,TensorDataset\nfrom tqdm import tqdm\n# from BERT.utils import get_news_label_embed\nimport random\nimport time\nfrom datetime import timedelta\nPAD, CLS = '[PAD]', '[CLS]'  # padding symbol and BERT's aggregate-information symbol\n\n\n# Load pretrained model/tokenizer\ntokenizer = BertTokenizer.from_pretrained('.\\data\\BERT_cased_L-12_H-768_A-12')\nBERT_model = BertModel.from_pretrained('.\\data\\BERT_cased_L-12_H-768_A-12')\n\n\n# tokenizer = BertTokenizer.from_pretrained('.\\\\data\\\\uncased_L-12_H-768_A-12')\n# BERT_model = BertModel.from_pretrained('.\\\\data\\\\uncased_L-12_H-768_A-12')\n######\n#### build the stop-word list\n# stopwords = open('./data/hit_stopwords.txt', encoding='utf-8')\n# stopwords_list = stopwords.readlines()\n# stopwords = [x.strip() for x in stopwords_list] #strip whitespace from both ends of each line\n\ndef load_dataset(path, max_sen):\n    data_list = []\n    label_list = []\n    with open(path, 'r', encoding='UTF-8') as f:\n        for line in tqdm(f):\n            lin = line.strip()\n            if not lin:\n                continue\n            content, label = lin.split('\\t')\n            content = content.strip('\\n')\n            # cut_data = jieba.lcut(content.strip('\\n')) ## first segmentation pass\n            # ## remove stop words\n            # final = ''\n            # for seg in cut_data:\n            #     if seg not in stopwords:\n            #         final += seg\n            tokenized_text = tokenizer(content, padding='max_length', max_length=max_sen, truncation=True,\n                                       return_tensors=\"pt\")  # tokenizer initialization\n            with torch.no_grad():\n                output = BERT_model(tokenized_text[\"input_ids\"])\n                output = output[0]\n            data_list.append(output)\n            label_list.append(int(label))\n    return 
data_list, label_list\n\n\ndef get_dataloder(train_embed_align, y_train, batch_size):\n    # train_embed_align = np.array(train_embed_align)\n    train_embed_align= [t.numpy() for t in train_embed_align] ## convert to numpy first, since multi-dimensional torch tensors can't be converted directly\n    train_embed_align = np.array(train_embed_align)\n    train_embed_align = torch.Tensor(train_embed_align)\n    y_train = np.array(y_train)\n    y_train = torch.LongTensor(y_train)\n    deal_dataset = TensorDataset(train_embed_align, y_train)\n    data_loader = DataLoader(dataset=deal_dataset, batch_size=batch_size, shuffle=True, num_workers=0)\n    return data_loader\n\ndef get_time_dif(start_time):\n    \"\"\"Get the elapsed time.\"\"\"\n    end_time = time.time()\n    time_dif = end_time - start_time\n    return timedelta(seconds=int(round(time_dif)))","repo_name":"ceceliax/LIE","sub_path":"LIE2/BERT/data_for_transfomer.py","file_name":"data_for_transfomer.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"17788221015","text":"import matplotlib.pyplot as plt\nfrom matplotlib.colors import Normalize\nimport numpy as np\nfrom copy import deepcopy\nfrom geom import geom\n\nclass geom_point(geom):\n    VALID_AES = ['x', 'y', 'size', 'color', 'alpha', 'shape', 'marker', 'label', 'cmap']\n\n    def plot_layer(self, layer):\n        layer = {k: v for k, v in layer.iteritems() if k in self.VALID_AES}\n        layer.update(self.manual_aes)\n\n        if \"size\" in layer:\n            layer[\"s\"] = layer[\"size\"]\n            del layer[\"size\"]\n\n        if \"cmap\" in layer:\n            layer[\"c\"] = layer[\"color\"]\n            del layer[\"color\"]\n        plt.scatter(**layer)\n\n","repo_name":"vdt/ggplot","sub_path":"ggplot/geoms/geom_point.py","file_name":"geom_point.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
{"seq_id":"16806505513","text":"# BOJ 16234 Population Movement\n# 220926\n\nimport collections\nimport sys\ninput = sys.stdin.readline\n\nN, L, R = map(int, input().split())\narr = [list(map(int, input().split())) for _ in range(N)]\n\ndelta = [[0,1],[0,-1],[1,0],[-1,0]]\n\ndef bfs(i, j):\n\n    queue = collections.deque()\n    queue.append((i, j))\n    \n    union = []\n    country = 0\n    population = 0\n    while queue:\n        si, sj = queue.popleft()\n        visited[si][sj] = 1\n        country += 1\n        population += arr[si][sj]\n        if (si, sj) not in union:\n            union.append((si, sj))\n        else:\n            continue\n        \n        for k in range(4):\n            ni, nj = si + delta[k][0], sj + delta[k][1]\n            if 0 <= ni < N and 0 <= nj < N and visited[ni][nj] == 0 and L <= abs(arr[ni][nj] - arr[si][sj]) <= R:\n                queue.append((ni, nj))\n    return union\n\ndays = 0\nwhile True:\n    visited = [[0] * N for _ in range(N)]\n    status = False\n    for i in range(N):\n        for j in range(N):\n            if visited[i][j] == 0:\n                visited[i][j] = 1\n                union = bfs(i, j)\n                if len(union) > 1:\n                    status = True\n                    average = sum([arr[x][y] for x, y in union]) // len(union)\n                    for x, y in union:\n                        arr[x][y] = average\n    if not status:\n        break\n    days += 1\nprint(days)","repo_name":"joney0715/Algorithm_GroupStudy","sub_path":"angielxx/0928/BOJ_16234_인구이동.py","file_name":"BOJ_16234_인구이동.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"38304993588","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nimport os\nimport glob\nimport csv\nfrom pathlib import Path\nfrom umi_tools import UMIClusterer\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport requests\n\n#reading all txt files as 
pandas DataFrames and generating a CSV of barcodes \n#and read counts\n\nfolder='data_output/'\nfor file in Path(folder).glob('*/*barcodes_clean.txt'):\n    df=pd.read_csv(file, header=None)\n    df.columns=[\"barcodes\"]\n    df=df[\"barcodes\"].value_counts().rename_axis('barcodes').reset_index(name='read_count')\n    df.to_csv(file.with_suffix('.csv'), index = False)\n\n\n#Preparing each barcode list for clustering\n\n#We use the UMI-Tools adjacency clustering methods (doi: 10.1101/gr.209601.116; Smith, et al. 2017)\n\n\nclusterer = UMIClusterer(cluster_method=\"adjacency\")\n\nfor file in Path(folder).glob('*/*barcodes_clean.csv'):\n    new_data = pd.read_csv(file, sep=',')\n    new_data = new_data[['barcodes', 'read_count']]\n    new_data.sort_values('read_count', ascending=False, inplace=True)\n    new_data_dict = new_data.set_index('barcodes')['read_count'].to_dict()\n    new_data_dict = {k.encode('utf8'): v for k, v in new_data_dict.items()}\n    #applying the UMI-clusterer:\n    clustered_new_data_bc = clusterer(new_data_dict, threshold=1)\n    total_read_count_per_cluster_list = []\n    for cluster in clustered_new_data_bc:\n        clustertotal=0\n        for barcode in cluster:\n            clustertotal += new_data_dict[barcode]\n        total_read_count_per_cluster_list.append(clustertotal)\n    clustered_rcs = []\n    for cluster in clustered_new_data_bc:\n        tempreadlist = []\n        for barcode in cluster:\n            tempreadlist.append(new_data_dict[barcode])\n        clustered_rcs.append(tempreadlist)\n    clustered_bc_df = pd.DataFrame(clustered_new_data_bc)\n    clustered_rcs_df = pd.DataFrame(clustered_rcs)\n    clustered_bc_df_first = clustered_bc_df[clustered_bc_df.columns[0]]\n    clustered_rcs_df_first = clustered_rcs_df[clustered_rcs_df.columns[0]]\n    clusters = pd.concat([clustered_bc_df_first, clustered_rcs_df_first], axis=1)\n    clusters.columns = ['barcode_clusters', 'read_count']\n    num_reads = new_data['read_count'].sum()\n    clusters_temp = clusters['read_count'].div(num_reads).mul(100).to_frame('frequency (%)')\n    clusters_final = pd.concat([clusters, clusters_temp], axis=1)\n    clusters_final['barcode_clusters'] = clusters_final['barcode_clusters'].str.decode(\"utf-8\")\n    clusters_final = clusters_final.sort_values(by=[\"frequency (%)\"], ascending=False)\n    clusters_final.to_csv(file.with_suffix('.csv'), index = False)\n    \n#Output will be a CSV file with clustered barcodes, read counts and relative frequencies\n","repo_name":"haddocksoto/barcoded_flu_analysis","sub_path":"amplicon_sequencing/barcode_quantification.py","file_name":"barcode_quantification.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"26399091375","text":"import unittest\nfrom robot.robot import *\nfrom queue import Queue\nfrom threading import Thread\nimport time\n\n\"\"\" python -m unittest test.test_robot \"\"\"\n\n\n# def serial_auto_responder_testing(auto_response):\n#     \"\"\" This should run in its own thread to act as robotcontroller-responder for PC serial commands\"\"\"\n#     controller_port = MySerial(\"COM7\")\n#     # Wait for first message, get_response is blocking so it will wait here\n#     controller_port.get_response()\n#     controller_port.send_melfa_msg(auto_response)\n#     controller_port.close()\n\n\n\nclass MockMySerialUp(MySerial):\n    \"\"\" Can be used either to supply the serial response consumed by the code under test\n    or to return the MelfaMessage's content string for assertions\"\"\"\n    def __init__(self, test_output: str):\n        self.test_output=test_output\n        self.sent_msgs = list()\n        
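# keep a history of every message sent so tests can assert on the full sequence\n        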
super().__init__(\"\")\n\n    def open_serial(self,comport, w_timeout):\n        pass # This is a mock so skip\n\n    def send_melfa_msg(self, msg: MelfaMessage):\n        self._set_last_msg(msg)\n        self.sent_msgs.append(msg)\n        print(\"in mock, last msg content:\"+self.get_last_msg_content())\n        return self.test_output\n\n    def get_sent_msgs(self):\n        return self.sent_msgs\n\n    def close(self):\n        pass\n\n\nstandard_pos = \"+500.00,+0.00,-46.30,+0.01,-179.99,R,A,C\"\nstandard_pos_simple = \"500.0,0.0,-46.3,0.01,-179.99,R,A,C\"\n\n\ndef assert_standard_pos(test: unittest.TestCase, myPos: Position):\n    test.assertEqual(myPos.x, 500)\n    test.assertEqual(myPos.y, 0)\n    test.assertEqual(myPos.z, -46.3)\n    test.assertEqual(myPos.A, 0.01)\n    test.assertEqual(myPos.B, -179.99)\n    test.assertEqual(myPos.arg1, \"R\")\n    test.assertEqual(myPos.arg2, \"A\")\n    test.assertEqual(myPos.grip, \"C\")\n\nclass MelfaMessageTest(unittest.TestCase):\n\n    def test_validation_noresponse1(self):\n        msg = MelfaMessage(\"TEST\", MelfaResponseType.NONE)\n        msg.validate_response(\"N\")\n\n    def test_validation_response(self):\n        msg = MelfaMessage(\"TEST\", MelfaResponseType.POSITION)\n        self.assertTrue(msg.expects_response())\n\n    def test_validation_bad_position(self):\n        \"\"\"Missing a comma in the position\"\"\"\n        msg = MelfaMessage(\"TEST\", MelfaResponseType.POSITION)\n        self.assertRaises(NameError, msg.validate_response, \"+500.00,+0.00,+46.30,+0.00+179.99,R,A,O\")\n\n    def test_validation_ok_position(self):\n        \"\"\"position string complete\"\"\"\n        msg = MelfaMessage(\"TEST\", MelfaResponseType.POSITION)\n        msg.validate_response(\"+500.00,+0.00,+46.30,+0.00,+179.99,R,A,O\")\n\nclass MySerialTest(unittest.TestCase):\n    \"\"\" COM6 and COM7 are virtual COM-ports which are connected to each other on the development-machine\"\"\"\n\n    def test_open_close(self):\n        ser = MySerial(\"COM6\")\n        ser.close()\n\n    def send_receive_enqueue(self, port: MySerial, msg: MelfaMessage, queue: Queue):\n        response = port.send_melfa_msg(msg)\n        queue.put(response)\n\n    def test_receive_melfa_msg(self):\n        pos_str = \"+500.00,+0.00,+46.30,+0.00,+179.99,R,A,O\"\n        pc_WH_Msg = MelfaMessage(\"WH\", MelfaResponseType.POSITION)\n        pos_response_msg = MelfaMessage(pos_str, MelfaResponseType.NONE)\n\n        pc_port = MySerial(\"COM6\")\n        controller_port = MySerial(\"COM7\")\n\n        # The send cmd needs to be in another thread so we can send the response while it's waiting\n        q = Queue()\n        pc_send_thread = Thread(target=self.send_receive_enqueue, args=(pc_port, pc_WH_Msg, q,))\n        pc_send_thread.start()\n        time.sleep(1)\n\n        # Has the controller received the cmd sent from the PC?\n        _, resp = controller_port.get_response()\n        self.assertEqual(resp, \"WH\")\n        controller_port.send_melfa_msg(pos_response_msg)\n\n        response_to_sentcmd = q.get()\n\n        # Check that the PC received the position from the simulated controller\n        self.assertEqual(response_to_sentcmd, pos_str)\n\n        pc_send_thread.join()\n        pc_port.close()\n        controller_port.close()\n\n    def test_receive_melfa_msg_timeout(self):\n        pc_WH_Msg = MelfaMessage(\"WH\", MelfaResponseType.POSITION)\n\n        pc_port = MySerial(\"COM6\")\n        controller_port = MySerial(\"COM7\") # we need this so pc_port won't timeout\n\n\n        # it will wait 2s for a Position response that will never arrive\n        self.assertRaises(TimeoutError, pc_port.send_melfa_msg, pc_WH_Msg)\n\n        pc_port.close()\n        controller_port.close()\n\nclass RobotMovementTest(unittest.TestCase):\n\n    def test_co_validation(self):\n        rm = RobotMovement(None)\n        rm.validate_limits(-1000, 1000, 999, -999, 0)\n        
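# values outside the configured limits should raise ValueError, non-numeric ones TypeError\n        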
self.assertRaises(ValueError, rm.validate_limits, -999, 999, 1001)\n        self.assertRaises(TypeError, rm.validate_limits, -999, 999, \"a\")\n\n    def test_move_straight(self):\n        mys = MockMySerialUp(\"\")\n        rm = RobotMovement(mys)\n        rm.move_straight(y=5,x=-10)\n        movecmd_str = mys.get_last_msg_content()\n        self.assertEqual(movecmd_str,\"DS -10,5,0\")\n\n\n    def test_get_position(self):\n        mys = MockMySerialUp(standard_pos)\n        rm = RobotMovement(mys)\n        pos_obj = rm.get_position()\n        assert_standard_pos(self, pos_obj)\n\n    def test_close_gripper(self):\n        \"\"\" Make sure it first activates, and then after a while, sends deactivation of output \"\"\"\n        mys = MockMySerialUp(standard_pos)\n        rm = RobotMovement(mys)\n        rm.close_gripper()\n        num_msgs_sent = len(mys.get_sent_msgs())\n        self.assertEqual(num_msgs_sent, 1)\n        time.sleep(4) # wait for the timer to send de_activation_msgs\n        num_msgs_sent = len(mys.get_sent_msgs())\n        self.assertEqual(num_msgs_sent, 3)\n        self.assertEqual(mys.get_last_msg_content(), \"OB -1\")\n\n    def test_read_position_inx(self):\n        mys = MockMySerialUp(standard_pos)\n        rm = RobotMovement(mys)\n        std_pos_obj = rm.read_position_inx(99)\n        self.assertEqual(str(std_pos_obj), standard_pos_simple)\n\n\nclass PositionTest(unittest.TestCase):\n\n    def test_pos_string_to_dict(self):\n        pos = \"+500.00,+0.00,+46.30,+0.00,-179.99,R,A,O\"\n        dict = Position.pos_string_to_dict(pos)\n        self.assertEqual(dict.__len__(), 8)\n        self.assertEqual(dict.get(\"B\"), -179.99)\n\n    def test_constructor(self):\n        myPos = Position(y=5)\n        self.assertEqual(myPos.y, 5)\n\n    def test_classmethod(self):\n        myPos = Position.from_string(standard_pos)\n        assert_standard_pos(self, myPos)\n\n    def test_is_empty(self):\n        pos = \"+500.00,+0.00,+46.30,+0.00,-179.99,R,A,O\"\n        pos_obj = Position.from_string(pos)\n        self.assertFalse(pos_obj.is_empty())\n        pos_obj = Position(z=0)\n        self.assertTrue(pos_obj.is_empty())\n\n    def test_to_string(self):\n        pos = \"+500.00,+0.00,+46.30,+0.00,-179.99,R,A,O\"\n        pos_obj = Position.from_string(pos)\n        self.assertEqual(str(pos_obj),\"500.0,0.0,46.3,0.0,-179.99,R,A,O\")\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"tonybjorkman/robotic_scheduler","sub_path":"python/test/test_robot.py","file_name":"test_robot.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"10153245125","text":"#!/usr/bin/python\r\nfrom sys import argv\r\n\r\ndef get_training_list(training_IDs):\r\n    training_list = list()\r\n    with open(training_IDs) as f:\r\n        for line in f:\r\n            line = line.strip()\r\n            training_list.append(line)\r\n    return training_list\r\n\r\ndef remove_seq(training_list, complete_fasta_file, fasta_file_without_training):\r\n    with open(complete_fasta_file) as f1, open(fasta_file_without_training, 'w') as f2: \r\n        copy = False\r\n        for line in f1:\r\n            line = line.rstrip() \r\n            if line.startswith('>'): \r\n                id = line.split('|')[1] \r\n                if id in training_list: #if the sequence is in the training list\r\n                    copy = False\r\n                    print(line)\r\n                else: #if the sequence is not in the training list\r\n                    copy = True\r\n                    f2.write(line + '\\n')\r\n            else: #sequence line\r\n                if copy == True: \r\n                    f2.write(line + '\\n')\r\n    \r\n    \r\n\r\nif __name__ == \"__main__\":\r\n    if len(argv) != 4:\r\n        print(\"ERROR: incorrect number of arguments.\")\r\n        exit(1)\r\n    training_IDs = argv[1]\r\n    complete_fasta_file = argv[2]\r\n    fasta_file_without_training = argv[3]\r\n    training_list = get_training_list(training_IDs)\r\n    
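# write a copy of the FASTA that keeps only sequences absent from the training set\r\n    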
remove_seq(training_list, complete_fasta_file, fasta_file_without_training)\r\n    \r\n","repo_name":"DOnofrio-Irene/Kunitz_HMM_prj","sub_path":"py.scripts/rem_fasta_seqs.py","file_name":"rem_fasta_seqs.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30402564571","text":"import pygame\n\nclass statebasedSprite(pygame.sprite.Sprite):\n\t\"\"\"sprites that are game state dependent. Will read off the save file when initializing the sprites whether\n\tthe sprite should be rendered or not\"\"\"\n\n\tdef __init__(self, location, cell, *groups):\n\n\t\tsuper(statebasedSprite, self).__init__(*groups)\n\t\tself.image = pygame.image.load(cell['src'])\n\t\tself.defaultImage = self.image.copy()\n\t\tself.width = int(cell['width'])\n\t\tself.height = int(cell['height'])\n\t\tself.rect = pygame.Rect(location, (self.width,self.height))\n\t\tself.location = location\n\t\tself.currLocation = location\n\t\tself.saveIndex = int(cell['saveIndex'])\n\t\tself.hasInteraction = False \n","repo_name":"QuantumChamploo/exeDev","sub_path":"pyIn/pygamefire/newScript/classes/statebasedSprite.py","file_name":"statebasedSprite.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33523174332","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# \n# # NLP Pipeline\n\n# ## A General NLP Pipeline\n\n# ![nlp-pipeline](../images/nlp-pipeline.png)\n\n# ### Variations of the NLP Pipelines\n# \n# - The process may not always be linear.\n# - There are loops in between.\n# - These procedures may depend on the specific task at hand.\n\n# ## Data Collection\n\n# ### Data Acquisition: Heart of ML System\n# \n# - Ideal Setting: We have everything needed.\n# - Labels and Annotations\n# - Very often we are dealing with less-than-ideal scenarios\n\n# ### Less-than-ideal Scenarios\n# \n# - Initial datasets with limited annotations/labels\n# - Initial datasets labeled based on regular expressions or heuristics\n# - Public datasets (cf. [Google Dataset Search](https://datasetsearch.research.google.com/))\n# - Scrape data\n# - Product intervention\n# - Data augmentation\n\n# ### Data Augmentation\n# \n# - It is a technique to exploit language properties to create texts that are syntactically similar to the source text data.\n# - Types of strategies:\n# - synonym replacement\n# - Related word replacement (based on association metrics)\n# - Back translation\n# - Replacing entities\n# - Adding noise to data (e.g. spelling errors, random words)\n\n# ## Text Extraction and Cleanup\n\n# ### Text Extraction\n# \n# - Extracting raw texts from the input data\n# - HTML\n# - PDF\n# - Relevant vs. 
irrelevant information\n# - non-textual information\n# - markup\n# - metadata\n# - Encoding format\n\n# #### Extracting texts from webpages\n\n# In[1]:\n\n\nimport requests \nfrom bs4 import BeautifulSoup\nimport pandas as pd\n \n \nurl = 'https://news.google.com/topics/CAAqJQgKIh9DQkFTRVFvSUwyMHZNRFptTXpJU0JYcG9MVlJYS0FBUAE?hl=zh-TW&gl=TW&ceid=TW%3Azh-Hant'\nr = requests.get(url)\nweb_content = r.text\nsoup = BeautifulSoup(web_content,'html.parser')\ntitle = soup.find_all('a', class_='DY5T1d')\nfirst_art_link = title[0]['href'].replace('.','https://news.google.com',1)\n\n#print(first_art_link)\nart_request = requests.get(first_art_link)\nart_request.encoding='utf8'\nsoup_art = BeautifulSoup(art_request.text,'html.parser')\n\nart_content = soup_art.find_all('p')\nart_texts = [p.text for p in art_content]\nprint(art_texts)\n\n\n# #### Extracting texts from scanned PDF\n\n# In[2]:\n\n\nfrom PIL import Image\nfrom pytesseract import image_to_string\n\nfilename = '../../../RepositoryData/data/pdf-firth-text.png'\ntext = image_to_string(Image.open(filename))\nprint(text)\n\n\n# #### Unicode normalization\n\n# In[3]:\n\n\ntext = 'I feel really 😡. GOGOGO!! 💪💪💪 🤣🤣'\nprint(text)\ntext2 = text.encode('utf-8')\nprint(text2)\n\n\n# - Other useful libraries\n# - Spelling check: pyenchant, Microsoft REST API\n# - PDF: PyPDF, PDFMiner\n# - OCR: pytesseract\n# \n\n# ### Cleanup\n# \n# - Preliminaries\n# - Sentence segmentation\n# - Word tokenization\n# \n\n# #### Segmentation and Tokenization\n\n# In[4]:\n\n\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n\ntext = '''\nPython is an interpreted, high-level and general-purpose programming language. Python's design philosophy emphasizes code readability with its notable use of significant whitespace. Its language constructs and object-oriented approach aim to help programmers write clear, logical code for small and large-scale projects.\n'''\n\n## sent segmentation\nsents = sent_tokenize(text)\n\n## word tokenization\nfor sent in sents:\n    print(sent)\n    print(word_tokenize(sent))\n\n\n# - Frequent preprocessing\n# - Stopword removal\n# - Stemming and/or lemmatization\n# - Digits/Punctuations removal\n# - Case normalization\n# \n\n# #### Removing stopwords, punctuations, digits\n\n# In[5]:\n\n\nfrom nltk.corpus import stopwords\nfrom string import punctuation\n\neng_stopwords = stopwords.words('english')\n\ntext = \"Mr. 
John O'Neil works at Wonderland, located at 245 Goleta Avenue, CA., 74208.\"\n\nwords = word_tokenize(text)\n\nprint(words)\n\n# remove stopwords, punctuations, digits\nfor w in words:\n if w not in eng_stopwords and w not in punctuation and not w.isdigit():\n print(w)\n\n\n# #### Stemming and lemmatization\n\n# In[6]:\n\n\n## Stemming\nfrom nltk.stem.porter import PorterStemmer\nstemmer = PorterStemmer()\n\nwords = ['cars','revolution', 'better']\nprint([stemmer.stem(w) for w in words])\n\n\n# In[7]:\n\n\n## Lemmatization\nfrom nltk.stem import WordNetLemmatizer\nlemmatizer = WordNetLemmatizer()\n\n## Wordnet requires POS of words\nposs = ['n','n','a']\n\nfor w,p in zip(words,poss):\n print(lemmatizer.lemmatize(w, pos=p))\n\n\n# - Task-specific preprocessing\n# - Unicode normalization\n# - language detection\n# - code mixing\n# - transliteration\n# \n\n# - Automatic annotations\n# - POS tagging\n# - Parsing\n# - Named entity recognition\n# - coreference resolution\n# \n\n# ### Important Reminders for Preprocessing\n# \n# - Not all steps are necessary\n# - These steps are NOT sequential\n# - These steps are task-dependent\n\n# ## Feature Engineering\n\n# ### What is feature engineering?\n# \n# - It refers to a process to feed the extracted and preprocessed texts into a machine-learning algorithm.\n# - It aims at capturing the characteristics of the text into a numeric vector that can be understood by the ML algorithms. (Cf. *construct*, *operational definitions*, and *measurement* in experimental science)\n# - In short, it concerns how to meaningfully represent texts quantitatively, i.e., text representation.\n\n# ### Feature Engineering for Classical ML\n# \n# - word-based frequency lists\n# - bag-of-words representations\n# - domain-specific word frequency lists\n# - handcrafted features based on domain-specific knowledge\n\n# ### Feature Engineering for DL\n# \n# - DL directly takes the texts as inputs to the model.\n# - The DL model is capable of learning features from the texts (e.g., embeddings)\n# - Less interpretable.\n# \n\n# ## Modeling\n\n# ### From Simple to Complex\n# \n# - Start with heuristics or rules\n# - Experiment with different ML models\n# - from heuristics to features\n# - from manual annotation to automatic extraction\n# - feature importance (weights)\n# - Find the most optimal model\n# - Ensemble and stacking\n# - Redo feature engineering\n# - Transfer learning\n# - Reapply heuristics\n\n# ## Evaluation\n\n# ### Why evaluation?\n# \n# - We need to know how *good* the model we've built is -- \"Goodness\"\n# - Factors relating to the evaluation methods\n# - model building\n# - deployment\n# - production\n# - ML metrics vs. business metrics\n# \n\n# ### Intrinsic vs. 
Extrinsic Evaluation\n# \n# - Take a spam-classification system as an example\n# - Intrinsic:\n# - the precision and recall of the spam classification/prediction\n# - Extrinsic:\n# - the amount of time users spent on a spam email\n# \n\n# ### General Principles\n# \n# - Do intrinsic evaluation before extrinsic.\n# - Extrinsic evaluation is more expensive because it often involves project stakeholders outside the AI team.\n# - Only when we get consistently good results in intrinsic evaluation should we go for extrinsic evaluation.\n# - Bad results in intrinsic often imply bad results in extrinsic as well.\n\n# ### Common Intrinsic Metrics\n# \n# - Principles for Evaluation Metrics Selection\n# - Data type of the labels (ground truths)\n# - Binary (e.g., sentiment)\n# - Ordinal (e.g., information retrieval)\n# - Categorical (e.g., POS tags)\n# - Textual (e.g., named entity, machine translation, text generation)\n# - Automatic vs. Human Evaluation\n\n# ## Post-Modeling Phases\n\n# ### Post-Modeling Phases\n# \n# - Deployment of the model in a production environment (e.g., web service)\n# - Monitoring system performance on a regular basis\n# - Updating the system with newly arriving data\n\n# ## References\n\n# - Chapter 2 of Practical Natural Language Processing. {cite}`vajjala2020`\n\n# ```{bibliography} ../book.bib\n# :filter: docname in docnames\n# :style: unsrt\n# ```\n","repo_name":"alvinntnu/python-notes","sub_path":"_build/jupyter_execute/nlp/nlp-pipeline.py","file_name":"nlp-pipeline.py","file_ext":"py","file_size_in_byte":7836,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"12120353361","text":"import numpy as np\nimport argparse\nimport pickle\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(formatter_class = argparse.RawTextHelpFormatter)\n    parser.add_argument(\"--num_nodes\", type = int)\n    parser.add_argument(\"--dim\", type = int)\n    args = parser.parse_args()\n    fea = np.random.normal(size = [args.num_nodes, args.dim])\n    with open(\"features.pkl\", \"wb\") as f:\n        pickle.dump(fea, f)\n","repo_name":"YunerWang/TCNE","sub_path":"tools/random_features.py","file_name":"random_features.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12528380623","text":"# The cache lives in the current file's directory, in a generated cache.json directory\ndef __get_cache_name(url, md5):\n    url = url.replace(\"http://\", \"\")\n    url = url.replace(\"/\", \"_\")\n    cache_dir = os.path.join(conf.root_path, \"data\", \"http_cache\", url)\n    if not os.path.exists(cache_dir):\n        logger.info(\"Cache directory [%s] does not exist, creating it!\", cache_dir)\n        os.makedirs(cache_dir)\n\n    # Determine the json cache file name\n    json_path = os.path.join(cache_dir, md5 + \".json\")\n    return json_path\n\n\ndef __load_cache(url, md5):\n    json_path = __get_cache_name(url, md5)\n\n    if os.path.exists(json_path):\n        logger.debug(\"Cache file [%s] exists, loading it\", json_path)\n        with open(json_path, \"r\", encoding=\"utf-8\") as f:\n            j_data = f.read()\n            j_data = j_data.replace(\"'\", '\"')\n            data = json.loads(j_data)\n            return data\n    else:\n        logger.debug(\"Failed to load cache, cache file [%s] does not exist\", json_path)\n        return None\n\n\ndef __cache_me(url, result_data, md5):\n    if result_data is None:\n        logger.warning(\"Abnormal response or non-zero error code, not caching\")\n        return\n\n    # Error code is not 0, do not cache\n    if result_data.get('code', None) and result_data['code'] != '0':\n        # logger.debug(\"Result empty or call unsuccessful, not caching\")\n        return\n\n    json_path = __get_cache_name(url, md5)\n    with open(json_path, \"w\") as jf:\n        jf.write(str(result_data))\n    
logger.debug(\"[url]调用缓存到:%s\", json_path)\n\n","repo_name":"piginzoo/mfm_learner","sub_path":"temp/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"zh","doc_type":"code","stars":70,"dataset":"github-code","pt":"61"} +{"seq_id":"16063056826","text":"# I made this just for creating the csv since my trifacta wrangler trial is over :(\n\nimport itertools\noutfile = open('transcript.csv', 'a')\nVIDEO = '10-10'\nwith open(f'transcripts/{VIDEO}.txt') as infile:\n for line1, line2 in itertools.zip_longest(*[infile]*2):\n time = line1.split(':', 1)\n seconds = (int(time[0]) * 60) + int(time[1])\n outfile.write(f'\"{VIDEO}\",\"' + line1.strip() +\n '\",\"' + line2.strip() + '\"\\n')\n\n # adding lines with matches to final list\n # results.append('

{}'.format(\n # seconds, line1.strip()) + ' ' + line2.strip() + '

')\n\n # return str(results).replace('[', '').replace(']', '').replace(\"'\", '').replace(',', '')\n","repo_name":"jheym/helper138","sub_path":"createcsv.py","file_name":"createcsv.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29201121737","text":"import copy\nfrom munch import Munch\nfrom models.generator import Generator\nfrom models.discriminator import Discriminator\nfrom models.style_transformer import StyleTransformer\nfrom models.style_encoder import StyleEncoder\nfrom models.fan import FAN\nfrom models.resnet18 import ResNet18\n\n\ndef build_model(args):\n generator = Generator(args)\n style_transformer = StyleTransformer(args)\n style_encoder = StyleEncoder(args)\n discriminator = Discriminator(args)\n generator_ema = copy.deepcopy(generator)\n style_transformer_ema = copy.deepcopy(style_transformer)\n style_encoder_ema = copy.deepcopy(style_encoder)\n\n nets = Munch(generator=generator,\n style_transformer=style_transformer,\n style_encoder=style_encoder,\n discriminator=discriminator)\n nets_ema = Munch(generator=generator_ema,\n style_transformer=style_transformer_ema,\n style_encoder=style_encoder_ema)\n\n if args.w_hpf > 0:\n fan = FAN(fname_pretrained=args.wing_path).eval()\n nets.fan = fan\n nets_ema.fan = fan\n\n resnet18 = ResNet18(args)\n nets.resnet18 = resnet18\n nets_ema.resnet18 = resnet18\n\n return nets, nets_ema\n","repo_name":"songquanpeng/L2M-GAN","sub_path":"models/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"13889969925","text":"import os\nimport psutil\nimport json\nimport sqlite3\nimport threading\nfrom datetime import datetime, timezone\nfrom websocket import create_connection\n\nclass CustomHandler:\n\n def __init__(self, dbPath):\n\n self.working = False\n self.counter = 0\n self.ws = None\n self.dbPath = dbPath\n if self.dbReady(self.dbPath):\n self.setStatus(True, 'Function handler on standby')\n else:\n self.setStatus(False, 'Database error, cannot start service')\n \n def dbReady(self, path) -> bool:\n try:\n self.db = sqlite3.connect(path, check_same_thread=False)\n self.cursor = self.db.cursor()\n self.cursor.execute('''CREATE TABLE IF NOT EXISTS stats(\\\n id INTEGER PRIMARY KEY,\\\n country_name TEXT,\\\n change_size INTEGER)''')\n self.db.commit()\n return True\n except sqlite3.OperationalError:\n return False\n\n def worker(self, stop_event):\n \n while not stop_event.is_set():\n result = self.ws.recv()\n country = None\n if \"geo_ip\" in result:\n j_dict = json.loads(result)\n geo = j_dict.get(\"geo_ip\")\n country = geo.get(\"country_name\")\n change = j_dict.get(\"change_size\")\n if change is None:\n change = 0\n\n if country is not None:\n self.cursor.execute('''INSERT INTO stats(country_name, change_size) VALUES(?,?)''', (country, change))\n self.db.commit()\n self.counter += 1\n\n def setStatus(self, status, msg):\n\n self.status = status\n self.message = msg\n\n def getStatus(self) -> json:\n \n stat_result = os.stat(self.dbPath)\n modified = datetime.fromtimestamp(stat_result.st_mtime, tz=timezone.utc).strftime(\"%m/%d/%Y, %H:%M:%S\")\n msg = {\"Status\": self.status, \"Message\": self.message, \"Working in background\": self.working, \"Records in session\": self.counter, \"DB size (bytes)\": stat_result.st_size, \"Modified\": modified}\n return msg\n\n def getMemory(self) -> json:\n memory 
= 1024 * 1024\n        proc = psutil.Process(os.getpid())\n        mem0 = proc.memory_info().rss\n        msg = str(mem0/memory) + 'Mb'\n        return {'Memory use': msg}\n    \n    def getTotals(self) -> json:\n\n        data = {}\n        self.cursor.execute('''SELECT country_name, SUM(change_size) FROM stats GROUP BY country_name''')\n        for row in self.cursor:\n            data[row[0]] = row[1]\n        msg = json.dumps(data)\n        return msg\n\n    def getCounts(self) -> json:\n\n        data = {}\n        self.cursor.execute('''SELECT country_name, COUNT(country_name) FROM stats GROUP BY country_name''')\n        for row in self.cursor:\n            data[row[0]] = row[1]\n        msg = json.dumps(data)\n        return msg\n\n    def stopWork(self) -> json:\n\n        self.ws.close()\n        self.working = False\n        self.kill_switch.set()\n        self.t.join()\n        self.setStatus(True, 'Function handler on standby')\n        msg = 'Function handler background work stopped'\n        return {'message': msg}\n\n    def startWork(self) -> json:\n\n        if self.working:\n            msg = 'Function handler already working in background, ignoring request'\n            return {\"message\": msg}\n\n        else:\n            self.ws = create_connection(\"ws://wikimon.hatnote.com:9000\")\n            self.working = True\n            self.setStatus(True, 'Function handler working in background')\n            self.kill_switch = threading.Event()\n            self.t = threading.Thread(target=self.worker, args=(self.kill_switch,))\n            self.t.start()\n\n            msg = 'Function handler background work started'\n            return {'message': msg}\n    \n","repo_name":"jneethling/WikiStats","sub_path":"src/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71667331714","text":"import yaml\nimport os\nimport logging\nfrom sqlalchemy import create_engine\nimport grp\nimport pwd\nimport subprocess\nfrom datetime import date\nimport re\n\nlog = logging.getLogger(__name__)\n\n\ndef load_config(filename=None):\n    '''\n    load a yaml config file\n\n    If filename is not given, the function looks first if there\n    is an ERNA_CONFIG environment variable then if there is an `erna.yaml` in\n    the current directory\n    '''\n    if filename is None:\n        if 'ERNA_CONFIG' in os.environ:\n            filename = os.environ['ERNA_CONFIG']\n        elif os.path.isfile('erna.yaml'):\n            filename = 'erna.yaml'\n        else:\n            raise ValueError('No config file found')\n\n    log.debug('Loading config file {}'.format(filename))\n\n    with open(filename, 'r') as f:\n        config = yaml.safe_load(f)\n\n    return config\n\n\ndef create_mysql_engine(user, password, host, database, port=3306):\n    return create_engine(\n        'mysql+pymysql://{user}:{password}@{host}:{port}/{database}'.format(\n            user=user,\n            password=password,\n            host=host,\n            database=database,\n            port=port,\n        )\n    )\n\n\ndef chown(path, username=None, groupname=None):\n    '''\n    Change ownership of given path to username:groupname\n    '''\n    uid = pwd.getpwnam(username).pw_uid if username else -1\n    gid = grp.getgrnam(groupname).gr_gid if groupname else -1\n    os.chown(path, uid, gid)\n\n\ndef night_int_to_date(night):\n    ''' Convert the crazy FACT int to a date instance'''\n    return date(night // 10000, (night % 10000) // 100, night % 100)\n\n\ndef date_to_night_int(night):\n    ''' convert a date or datetime instance to the crazy FACT int '''\n    return 10000 * night.year + 100 * night.month + night.day\n\n\ndef assemble_facttools_call(jar, xml, input_path, output_path, aux_source_path=None):\n    ''' Assemble the call for fact-tools with the given combinations\n    of jar, xml, input_path and output_path. 
The aux_source_path is optional\n    for the case where an aux source file is needed\n    '''\n    call = [\n        'java',\n        '-XX:MaxHeapSize=1024m',\n        '-XX:InitialHeapSize=512m',\n        '-XX:CompressedClassSpaceSize=64m',\n        '-XX:MaxMetaspaceSize=128m',\n        '-XX:+UseConcMarkSweepGC',\n        '-XX:+UseParNewGC',\n        '-jar',\n        jar,\n        xml,\n        '-Dinput=file:{}'.format(input_path),\n        '-Doutput=file:{}'.format(output_path),\n    ]\n\n    if aux_source_path is not None:\n        call.append('-Daux_source=file:{}'.format(aux_source_path))\n    return call\n\n\ndef check_environment_on_node():\n    ''' Check memory, java executable and version'''\n    subprocess.check_call(['which', 'java'])\n    subprocess.check_call(['free', '-m'])\n    subprocess.check_call(['java', '-Xmx512m', '-version'])\n\n\ndef create_filename_from_format(filename_format, basename, num):\n    \"\"\"\n    Given a special format string, create a filename from the basename and a given number.\n    There are two named variables that can be used, one is basename which inserts the basename\n    and the second one is num which is mandatory.\n    \"\"\"\n    m = re.search(r'\\{num', filename_format)\n    if not m:\n        raise ValueError(\"Missing named placeholder 'num' in format string\")\n    return filename_format.format(**{\"basename\":basename, \"num\":num})\n","repo_name":"fact-project/erna","sub_path":"erna/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"20224099466","text":"import time\nimport requests\nimport statistics\nimport concurrent.futures\n\nNUM_REQUESTS = 20\nNUM_THREADS = 10  # Number of threads to use for concurrent requests\n\nBACKEND_URL = \"http://localhost\"  # The Nginx load balancer URL\nLOAD_TEST_DURATION = 30  # Load test duration in seconds\n\ndef make_requests():\n    response_times = []\n    for _ in range(NUM_REQUESTS):\n        start_time = time.time()\n        response = requests.get(BACKEND_URL)\n        end_time = time.time()\n        response_times.append(end_time - start_time)\n    return response_times\n\ndef calculate_summary_statistics(response_times):\n    if not response_times:\n        return 0, 0, 0, 0  # Return default values if response_times is empty\n    min_time = min(response_times)\n    max_time = max(response_times)\n    avg_time = statistics.mean(response_times)\n    stdev_time = statistics.stdev(response_times)\n    return min_time, max_time, avg_time, stdev_time\n\ndef run_load_testing():\n    print(\"Testing the approach of dynamically scaling service replicas with environment variables...\\n\")\n\n    print(f\"Testing backend: {BACKEND_URL} using {NUM_THREADS} threads and {NUM_REQUESTS} requests per thread...\\n\")\n\n    start_time = time.time()\n\n    all_response_times = []\n    with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:\n        futures = [executor.submit(make_requests) for _ in range(NUM_THREADS)]\n        for future in concurrent.futures.as_completed(futures):\n            try:\n                response_times = future.result()\n                all_response_times.extend(response_times)\n            except Exception as exc:\n                print(f\"Thread raised an exception: {exc}\")\n\n    end_time = time.time()\n    total_time = end_time - start_time\n\n    print(f\"All threads completed in {total_time:.2f} seconds.\\n\")\n\n    # Calculate and display summary statistics\n    min_time, max_time, avg_time, stdev_time = calculate_summary_statistics(all_response_times)\n    print(f\"Minimum Response Time: {min_time:.4f} seconds\")\n    print(f\"Maximum Response Time: {max_time:.4f} seconds\")\n    print(f\"Average Response Time: {avg_time:.4f} seconds\")\n    print(f\"Standard 
Deviation of Response Time: {stdev_time:.4f} seconds\")\n\n # Calculate and display Requests Per Second (RPS)\n total_requests = NUM_REQUESTS * NUM_THREADS\n rps = total_requests / total_time\n print(f\"Requests Per Second (RPS): {rps:.2f}\")\n\nif __name__ == \"__main__\":\n run_load_testing()\n","repo_name":"kostnerek/load-balancer","sub_path":"load_test.py","file_name":"load_test.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"385887532","text":"\"\"\"Kata url: https://www.codewars.com/kata/5b16490986b6d336c900007d.\"\"\"\n\n\ndef my_languages(results):\n return [\n lang for lang, score in sorted(\n results.items(),\n key=lambda x: x[1],\n reverse=True\n ) if score >= 60\n ]\n\n\ndef test_my_languages():\n assert my_languages(\n {\"Java\": 10, \"Ruby\": 80, \"Python\": 65}\n ) == [\"Ruby\", \"Python\"]\n assert my_languages(\n {\"Hindi\": 60, \"Greek\": 71, \"Dutch\": 93}\n ) == [\"Dutch\", \"Greek\", \"Hindi\"]\n assert my_languages({\"C++\": 50, \"ASM\": 10, \"Haskell\": 20}) == []\n","repo_name":"Sigmanificient/codewars","sub_path":"src/python/katas/py7kyu/my_language_skill.py","file_name":"my_language_skill.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"27177741463","text":"import csv\nimport json\nimport os\nimport pandas as pd\nimport re\nimport lusSTR\nfrom lusSTR.scripts.marker import get_str_metadata_file, STRMarkerObject\nfrom lusSTR.scripts.repeat import collapse_all_repeats, collapse_repeats_by_length\nfrom lusSTR.scripts.repeat import sequence_to_bracketed_form, split_by_n\nfrom lusSTR.scripts.repeat import reverse_complement, reverse_complement_bracketed\n\n\nwith open(get_str_metadata_file(), \"r\") as fh:\n str_marker_data = json.load(fh)\n\n\ndef format_table(input, uas=False, kit=\"forenseq\"):\n \"\"\"\n Function to format final output table and the flanking report (if necessary).\n \"\"\"\n data = pd.read_csv(input, keep_default_na=False)\n data.iloc[:, 3] = data.iloc[:, 3].astype(str)\n list_of_lists = []\n flanks_list = []\n for i, row in data.iterrows():\n locus = data.iloc[i, 0].upper()\n reads = data.iloc[i, 1]\n sequence = data.iloc[i, 2]\n sampleid = re.sub(\" \", \"_\", data.iloc[i, 3])\n try:\n project = re.sub(\" \", \"_\", data.iloc[i, 4])\n analysis = re.sub(\" \", \"_\", data.iloc[i, 5])\n except IndexError:\n project = \"NA\"\n analysis = \"NA\"\n except TypeError:\n project = data.iloc[i, 4]\n analysis = data.iloc[i, 5]\n if locus == \"PENTAD\" or locus == \"PENTA_D\":\n locus = \"PENTA D\"\n if locus == \"PENTAE\" or locus == \"PENTA_E\":\n locus = \"PENTA E\"\n if locus == \"DYS385A-B\" or locus == \"DYS385\":\n locus = \"DYS385A-B\"\n if locus == \"AMELOGENIN\":\n continue\n metadata = str_marker_data[locus]\n if kit == \"forenseq\":\n remove_5p = metadata[\"Foren_5\"]\n remove_3p = metadata[\"Foren_3\"]\n else:\n remove_5p = metadata[\"Power_5\"]\n remove_3p = metadata[\"Power_3\"]\n if len(sequence) <= (remove_5p + remove_3p) and not uas:\n flank_summary = [\n sampleid,\n project,\n analysis,\n locus,\n reads,\n \"NA\",\n sequence,\n \"NA\",\n \"NA\",\n \"NA\",\n \"Partial sequence\",\n ]\n flanks_list.append(flank_summary)\n continue\n elif \"N\" in sequence:\n flank_summary = [\n sampleid,\n project,\n analysis,\n locus,\n reads,\n \"NA\",\n sequence,\n \"NA\",\n \"NA\",\n \"NA\",\n \"Sequence contains Ns\",\n ]\n 
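# record the unusable read in the flanking report, then skip to the next row\n            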
flanks_list.append(flank_summary)\n continue\n\n marker = STRMarkerObject(locus, sequence, uas=uas, kit=kit)\n summary = [sampleid, project, analysis, locus] + marker.summary + [reads]\n list_of_lists.append(summary)\n\n if not uas:\n flank_summary = [\n sampleid,\n project,\n analysis,\n locus,\n reads,\n marker.canonical,\n marker.sequence,\n marker.flank_5p,\n marker.convert,\n marker.flank_3p,\n marker.indel_flag,\n ]\n flanks_list.append(flank_summary)\n\n columns = [\n \"SampleID\",\n \"Project\",\n \"Analysis\",\n \"Locus\",\n \"UAS_Output_Sequence\",\n \"Forward_Strand_Sequence\",\n \"UAS_Output_Bracketed_Notation\",\n \"Forward_Strand_Bracketed_Notation\",\n \"CE_Allele\",\n \"LUS\",\n \"LUS_Plus\",\n \"Reads\",\n ]\n if not list_of_lists:\n final_output = pd.DataFrame(list_of_lists, columns=columns)\n else:\n final_output = sort_table(pd.DataFrame(list_of_lists, columns=columns))\n if not uas:\n flanks_columns = [\n \"SampleID\",\n \"Project\",\n \"Analysis\",\n \"Locus\",\n \"Reads\",\n \"CE_Allele\",\n \"Full_Sequence\",\n \"5_Flank_Bracketed_Notation\",\n \"UAS_Region_Bracketed_Notation\",\n \"3_Flank_Bracketed_Notation\",\n \"Potential_Issues\",\n ]\n if not flanks_list:\n final_flank_output = pd.DataFrame(flanks_list, columns=flanks_columns)\n else:\n final_flank_output = sort_table(pd.DataFrame(flanks_list, columns=flanks_columns))\n else:\n final_flank_output = \"\"\n return final_output, final_flank_output, columns\n\n\ndef combine_reads(table, columns):\n comb_table = table.groupby(columns[:-1], as_index=False)[\"Reads\"].sum()\n sorted = sort_table(comb_table)\n return sorted\n\n\ndef sort_table(table):\n sorted_table = table.sort_values(\n by=[\"SampleID\", \"Project\", \"Analysis\", \"Locus\", \"Reads\", \"CE_Allele\"], ascending=False\n )\n return sorted_table\n\n\ndef main(input, out, kit, uas, sex, nocombine):\n input = str(input)\n out = str(out)\n output_name = os.path.splitext(out)[0]\n input_name = os.path.splitext(input)[0]\n autosomal_final_table, autosomal_flank_table, columns = format_table(input, uas, kit)\n if sex:\n sex_final_table, sex_flank_table, columns = format_table(\n f\"{input_name}_sexloci.csv\", uas, kit\n )\n if not uas:\n if not sex_final_table.empty:\n sex_flank_table.to_csv(f\"{output_name}_sexloci_flanks.txt\", sep=\"\\t\", index=False)\n if nocombine:\n sex_final_table.to_csv(\n f\"{output_name}_sexloci_no_combined_reads.txt\", index=False\n )\n sex_final_table = combine_reads(sex_final_table, columns)\n sex_final_table.to_csv(f\"{output_name}_sexloci.txt\", sep=\"\\t\", index=False)\n else:\n sex_final_table.to_csv(f\"{output_name}_sexloci.txt\", sep=\"\\t\", index=False)\n if not uas:\n if not autosomal_final_table.empty:\n autosomal_flank_table.to_csv(f\"{output_name}_flanks.txt\", sep=\"\\t\", index=False)\n if nocombine:\n autosomal_final_table.to_csv(\n f\"{output_name}_no_combined_reads.txt\", sep=\"\\t\", index=False\n )\n autosomal_final_table = combine_reads(autosomal_final_table, columns)\n autosomal_final_table.to_csv(out, sep=\"\\t\", index=False)\n else:\n autosomal_final_table.to_csv(out, sep=\"\\t\", index=False)\n\n\nif __name__ == \"__main__\":\n main(\n snakemake.input,\n snakemake.output,\n kit=snakemake.params.kit,\n uas=snakemake.params.uas,\n sex=snakemake.params.sex,\n nocombine=snakemake.params.nocombine,\n 
)\n","repo_name":"bioforensics/lusSTR","sub_path":"lusSTR/wrappers/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":6706,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"33410665078","text":"# from datetime import date as dt #caso de uso: se for chamado somente dentro da def(função), economiza memoria\n\n\ndef vote(year):\n from datetime import date as dt\n actual = dt.today().year\n age = actual - year\n if year < 16:\n return f'With your age {age}, you NOT can vote'\n elif 16 <= age < 18 or age > 65:\n return f'With your age {age}, you can vote (OPTIONAL)'\n else:\n return f'With your age {age}, you NEED vote'\n\nborn = int(input('Enter your year of birth: '))\nprint(vote(born))\n","repo_name":"UskOops/scripts.py","sub_path":"MUNDO 3/Voting functions.py","file_name":"Voting functions.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23589480521","text":"from __future__ import division\n\nimport os\n#import sys\n#from math import log, floor, ceil, sqrt, pi\n#from random import randint, choice, shuffle\nfrom collections import Counter\n#from heapq import heappush, heappop, heapify\n#inf = 10**20\n\nname = 'A-large.in'\n\ndef _solve(p, groups):\n all_patterns = {\n 2: [[0], [1, 1]],\n 3: [[0], [1, 2], [1, 1, 1], [2, 2, 2]],\n 4: [[0], [1, 3], [2, 2], [1, 1, 2], [3, 3, 2], [1, 1, 1, 1], [3, 3, 3, 3]]\n }\n cnts = [0] * p\n for g in groups:\n cnts[g % p] += 1\n patterns = list(map(Counter, all_patterns[p]))\n res = 0\n while True:\n found = False\n for pattern in patterns:\n if all(cnts[key] >= pattern[key] for key in pattern):\n for key in pattern:\n cnts[key] -= pattern[key]\n found = True\n res += 1\n break\n if not found:\n break\n if sum(cnts) > 0:\n res += 1\n return res\n\ndef solve(*args, **kwargs):\n res = _solve(*args, **kwargs)\n return res\n\ninp_path = '/home/mama/Downloads/%s'%name\nif os.path.isfile(inp_path):\n os.system('mv %s .' 
% inp_path)\ninp_file = open(name)\nout_file = open('%s.out'%name, 'w')\ncases = int(inp_file.readline())\nfor caseno in range(cases):\n (n, p) = tuple(map(int, inp_file.readline().split()))\n groups = tuple(map(int, inp_file.readline().split()))\n res = solve(p, groups)\n print(caseno, res)\n print('---')\n out_file.write('Case #%d: %s\\n'%((caseno+1), res))\n out_file.flush()\nout_file.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_212/40.py","file_name":"40.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17493144159","text":"import os\nimport time\nimport random\nimport logging\nimport win32con\nimport win32api\nimport win32gui\nimport ctypes\nimport ctypes.wintypes\nfrom threading import Thread\nfrom threading import Semaphore\nfrom threading import Lock\nfrom threading import Event\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options \nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import SessionNotCreatedException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import WebDriverException\nfrom configparser import ConfigParser\n\nCONF_FILE = \"bo.conf\"\nLOG_FILE = \"bo.log\"\ng_mutex = Lock()\ng_sem = Semaphore(1)\ng_run_event = Event()\ng_run_event.set()\n\ndef init_log_config():\n if os.path.exists(LOG_FILE):\n fd = open(LOG_FILE, \"rb+\")\n fd.truncate()\n fd.close()\n cm = ConfigManager()\n logging.basicConfig(level=(cm.get_log_level() * 10),format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', \\\n datefmt='%a, %d %b %Y %H:%M:%S',filename=LOG_FILE,filemode='a')\n\ndef get_random_idstring(min, max):\n id = random.randint(min, max)\n return str(id)\n\nclass ConfigManager():\n def __init__(self):\n self.__cp = ConfigParser()\n self.__cp.read(CONF_FILE, encoding='utf-8-sig')\n self.__skw_keys = self.get_skws()\n\n def get_skws(self):\n option_ls = self.__cp.options('search_key_word')\n skw_ls = list()\n for option in option_ls:\n skw_ls.append(self.__cp.get('search_key_word', option))\n \n logging.info(\"Reading search key words : %s\" % str(skw_ls))\n return skw_ls\n\n def get_random_skw(self):\n index = random.randint(0, len(self.__skw_keys)-1)\n skw = self.__skw_keys[index]\n logging.debug(\"Getting key word : %s\" % skw)\n return skw\n\n def get_sleep_sec(self):\n return self.__cp.getint('default', 'sleep_seconds')\n\n def get_max_windou(self):\n return self.__cp.getint('default', 'max_windou')\n\n def get_default_url(self):\n return self.__cp.get('default', 'default_url')\n\n def get_log_level(self):\n return self.__cp.getint('default', 'log_level')\n\n def get_broswer_location(self):\n return self.__cp.get('default', 'broswer_location')\n\nclass Driver():\n def __init__(self):\n self.__cm = ConfigManager()\n self.__bd_num = 0\n self.__sleep_sec = self.__cm.get_sleep_sec()\n\n def __get_one_broswer_driver(self):\n logging.info(\"Creating new broswer driver...\")\n print(\"Creating new broswer driver...\")\n user_options = Options()\n user_options.add_argument('disable-infobars')\n user_options.add_argument('--start-maximized')\n user_options.add_argument('--no-sandbox')\n user_options.add_argument('--disable-dev-shm-usage')\n user_options.add_argument('--incognito')\n user_options.add_argument('--disable-plugins')\n 
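# strip scripting, plugins and GPU work so each throwaway window stays lightweight\n        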
user_options.add_argument('--disable-javascript')\n        user_options.add_argument('--disable-java')\n        user_options.add_argument('--disable-gpu')\n        user_options.add_argument('--single-process')\n\n        user_options.binary_location = self.__cm.get_broswer_location()\n\n        prefs = {'profile.default_content_setting_values' :{'notifications' : 2}}\n        user_options.add_experimental_option(\"prefs\", prefs)\n\n        bd = None\n        try:\n            bd = webdriver.Chrome(chrome_options=user_options)\n        except SessionNotCreatedException as e:\n            logging.error(\"Failed to create new broswer driver, [SessionNotCreatedException] %s.\" % e.msg)\n            print(\"Failed to create new broswer driver, [SessionNotCreatedException] %s.\" % e.msg)\n        finally:\n            return bd\n    \n    def once_search_task(self):\n        bd = self.__get_one_broswer_driver()\n        if bd == None:\n            g_sem.release()\n            return\n\n        g_mutex.acquire()\n        self.__bd_num = self.__bd_num + 1\n        g_mutex.release()\n        logging.info(\"Created active broswer driver %d\" % self.__bd_num)\n        print(\"Created active broswer driver %d\" % self.__bd_num)\n\n        try:\n            bd.get(self.__cm.get_default_url())\n            time.sleep(1)\n            input_kw = bd.find_element_by_class_name(\"sch_inbox\")\n            input_kw = input_kw.find_element_by_name(\"word\")\n            input_kw.send_keys(self.__cm.get_random_skw())\n\n            bd.find_element_by_id('j_search_sbm').click()\n            time.sleep(1)\n\n            bd.switch_to.window(bd.window_handles[0])\n            res = bd.find_element_by_class_name('mz-list').find_elements_by_name('2')\n            t = 2\n            while(t > 0):\n                bd.switch_to.window(bd.window_handles[0])\n                res[random.randint(0, len(res)-1)].click()\n                time.sleep(1)\n                t = t - 1\n\n            bd.minimize_window()\n        except Exception as e:\n            logging.error(\"Driver internal error, msg: %s.\" % e.msg)\n            print(\"Driver internal error, msg: %s.\" % e.msg) \n        finally:\n            g_sem.release()\n            time.sleep(self.__sleep_sec)\n            g_mutex.acquire()\n            self.__bd_num = self.__bd_num - 1 \n            g_mutex.release() \n            bd.quit()\n            print('*******************************A broswer exiting...****************************************')\n            return\n\n    def run(self):\n        print(\"The primary thread is running...\")\n        while (True):\n            print(\"Number of running work thread is %d\" % self.__bd_num)\n            if not(self.__bd_num < self.__cm.get_max_windou()):\n                time.sleep(1)\n                continue \n            #If the internal flag is set to False, the main thread blocks until a keyboard run event arrives\n            g_run_event.wait()\n            print(\"The primary thread has not received the pause event...\")\n            #If the previous driver has not finished starting up, the main thread blocks\n            g_sem.acquire()\n            print(\"The last open task has been finished...\")\n\n            th = Thread(target=self.once_search_task)\n            #th.setDaemon(True)\n            th.start()\n\nclass HotKey(Thread):\n    def __init__(self,name):\n        Thread.__init__(self)\n        self.name = name\n\n    def run(self):\n        logging.info(\"Starting thread \" + str(self.name))\n        hot_key_main()\n        logging.info(\"Stopping thread \" + str(self.name))\n\n    \ndef hot_key_main():\n    user32 = ctypes.windll.user32\n    while(True):\n        if not user32.RegisterHotKey(None, 98, 0, win32con.VK_F9):\n            logging.error(\"Unable to register id 98 for run command.\")\n            print(\"Unable to register id 98 for run command.\")\n        if not user32.RegisterHotKey(None, 99, 0, win32con.VK_F10):\n            logging.error(\"Unable to register id 99 for pause command.\")\n            print(\"Unable to register id 99 for pause command.\")\n        try:\n            msg = ctypes.wintypes.MSG()\n            if user32.GetMessageA(ctypes.byref(msg), None, 0, 0) != 0:\n                if msg.message == win32con.WM_HOTKEY:\n                    if msg.wParam == 98:\n                        g_run_event.set()\n                        logging.info(\"Program received running event from keyboard...\")\n                        print(\"Program received running event from keyboard...\")\n                    elif msg.wParam == 
99:\n                        g_run_event.clear()\n                        logging.info(\"Program received pause event from keyboard...\")\n                        print(\"Program received pause event from keyboard...\")\n\n            user32.TranslateMessage(ctypes.byref(msg))\n            user32.DispatchMessageA(ctypes.byref(msg))\n        finally:\n            del msg\n            user32.UnregisterHotKey(None, 98)\n            user32.UnregisterHotKey(None, 99)\n\nif __name__ == \"__main__\":\n    try:\n        c_service = Service(\"chromedriver.exe\")\n        c_service.command_line_args()\n        c_service.start()\n    except WebDriverException as e:\n        logging.error(\"Failed to start broswer service\")\n\n    try:\n        init_log_config()\n        thread_hotKey = HotKey(\"thread_hotKey\")\n        thread_hotKey.setDaemon(True)\n        thread_hotKey.start()\n        print(\"The thread listening for keyboard events is running...\")\n\n        driver = Driver()\n        driver.run()\n    except KeyboardInterrupt as e:\n        logging.warning(\"Handle KeyboardInterrupt.\")\n        c_service.stop()\n        os.sys.exit(1)\n\n","repo_name":"hebostary/broswer-opt","sub_path":"broswer_opt.py","file_name":"broswer_opt.py","file_ext":"py","file_size_in_byte":8576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23390390931","text":"\r\nimport os\r\nimport sys\r\n\r\n\r\n\r\n################################################################################\r\n\r\n\r\ndef mowLawn(lawn, row, column, previousMax):\r\n    \r\n    maxHeight = max(lawn)\r\n    \r\n    while (0 < maxHeight):\r\n        \r\n        for index in range(0, len(lawn)):\r\n            \r\n            # for every maximum height, there must either be a mowable ...\r\n            if(maxHeight == lawn[index]):\r\n                \r\n                # ... row\r\n                base = index - (index % column)\r\n                rc = True\r\n                \r\n                for columnindex in range(0, column):\r\n                    if (0 == lawn[columnindex + base]):\r\n                        rc = False\r\n                        break\r\n                \r\n                if rc:\r\n                    continue\r\n                \r\n                # ... 
or column\r\n base = index % column\r\n \r\n for rowindex in range(0, row):\r\n if (0 == lawn[(rowindex * column) + base]):\r\n return False\r\n \r\n for index in range(0, len(lawn)):\r\n if(maxHeight == lawn[index]):\r\n lawn[index] = 0\r\n \r\n maxHeight = max(lawn)\r\n \r\n return True\r\n\r\n \r\n################################################################################\r\n\r\n\r\n\r\ndef processFile(inputFile, outputFile):\r\n \r\n input = open(inputFile, 'r')\r\n output = open(outputFile, 'w')\r\n \r\n entry = ''\r\n caseNumber = 1\r\n limit = 0\r\n \r\n row = 0\r\n column = 0\r\n rowCounter = 0\r\n \r\n for line in input:\r\n line = line.rstrip('\\r\\n')\r\n \r\n if (0 == limit):\r\n limit = int(line)\r\n elif (0 == row):\r\n lines = line.split(' ')\r\n row = int(lines[0])\r\n column = int(lines[1])\r\n else:\r\n entry = entry + line + ' '\r\n rowCounter += 1\r\n \r\n if (rowCounter == row):\r\n \r\n entries = entry.rstrip(' ').split(' ')\r\n lawn = []\r\n for index in range(0, len(entries)):\r\n lawn.append(int(entries[index]))\r\n \r\n if mowLawn(lawn, row, column, -1):\r\n output.write(\"Case #\" + str(caseNumber) + \": YES\\n\")\r\n else:\r\n output.write(\"Case #\" + str(caseNumber) + \": NO\\n\")\r\n \r\n caseNumber += 1\r\n row = 0\r\n column = 0\r\n rowCounter = 0\r\n entry = ''\r\n \r\n output.close()\r\n input.close()\r\n\r\n\r\ndef main(argv):\r\n \r\n processFile(argv[0], argv[1])\r\n\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv[1:])","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_117/991.py","file_name":"991.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9002632593","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Movie, Score\nfrom .forms import ScoreForm\nfrom django.http import HttpResponseForbidden\nfrom django.contrib.auth.decorators import login_required\n\nfrom bs4 import BeautifulSoup as bs\nimport datetime\nimport requests\n\n\n# Create your views here.\ndef movie_db(request):\n API_KEY = 'e4782a1ecd7ea361da31b4c4f2f5a02b'\n today = ''.join(str(datetime.date.today() - datetime.timedelta(days=1)).split('-')) # 오늘 날짜\n\n url_boxoffice = 'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchDailyBoxOfficeList.json?key={}&targetDt={}&weekGb=0'.format(API_KEY, today)\n\n res = requests.get(url_boxoffice)\n res_data = res.json().get('boxOfficeResult').get('dailyBoxOfficeList')\n boxoffice = []\n audience = []\n for i in range(len(res_data)):\n boxoffice.append(res_data[i]['movieCd'])\n audience.append(res_data[i]['audiAcc'])\n\n movies = []\n for movie_code in boxoffice:\n url_movie = \"http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?key={}&movieCd={}\".format(API_KEY, movie_code)\n res = requests.get(url_movie)\n res_data = res.json().get('movieInfoResult').get('movieInfo')\n\n movies.append({\n 'movie_code': res_data.get('movieCd'),\n 'movie_name_ko': res_data.get('movieNm'),\n 'movie_name_en': res_data.get('movieNmEn'),\n 'showTm': res_data.get('showTm'),\n 'openDt': '.'.join([res_data.get('openDt')[:4],res_data.get('openDt')[4:6],res_data.get('openDt')[6:]]),\n 'genre': ', '.join([x['genreNm'] for x in res_data.get('genres')]),\n 'watch_grade_nm': res_data.get('audits')[0].get('watchGradeNm'),\n })\n\n headers_naver = {\n 'X-Naver-Client-Id': 'bS2KxFME66tpq5UJPclw',\n 'X-Naver-Client-Secret': 'ZUOJVkLobP'\n }\n\n for i 
in range(len(movies)):\n url_naver = \"https://openapi.naver.com/v1/search/movie.json?query={}\".format(movies[i].get('movie_name_ko'))\n\n res = requests.get(url_naver, headers=headers_naver)\n res_data = res.json().get('items')\n\n if res_data:\n res_data = res_data[0]\n movies[i]['audience'] = audience[i]\n # movies[i]['score'] = res_data.get('userRating')\n movies[i]['actors'] = res_data.get('actor')[:-1]\n movies[i]['directors'] = res_data.get('director')[:-1]\n url_content = res_data.get('link')\n\n response = requests.get(url_content).text\n document = bs(response, 'html.parser')\n temp_content = document.select('.story_area .con_tx')\n content = str(temp_content)[19:-5].replace('\\r', '').split('
\\xa0')\n movies[i]['description'] = '\\n'.join(content)\n\n temp_img = document.select('.wide_info_area .poster')\n movies[i]['poster_url'] = str(temp_img).split('src=')[2].split('?type=')[0].replace('\"', '')\n\n for movie_data in movies:\n movie = Movie()\n movie.movie_code = movie_data['movie_code']\n movie.movie_name_ko = movie_data['movie_name_ko']\n movie.movie_name_en = movie_data['movie_name_en']\n movie.showTm = movie_data['showTm']\n movie.openDt = movie_data['openDt']\n movie.watch_grade_nm = movie_data['watch_grade_nm']\n movie.audience = movie_data['audience']\n movie.actors = movie_data['actors']\n movie.directors = movie_data['directors']\n movie.description = movie_data['description']\n movie.poster_url = movie_data['poster_url']\n movie.genre = movie_data['genre']\n movie.save()\n\n return redirect('movies:movie_list')\n\n\ndef movie_db2(request):\n movie = {}\n\n url_rank = 'https://movie.naver.com/movie/sdb/rank/rmovie.nhn'\n\n response = requests.get(url_rank).text\n document = bs(response, 'html.parser')\n temp_rank = document.select('.list_ranking .title a')\n\n temp_li = str(temp_rank).split('href=\"')[1:]\n for i in range(len(temp_li)):\n temp_li[i] = temp_li[i].split('\" title')[0]\n\n for i in range(1):\n url_movie = 'https://movie.naver.com' + temp_li[i]\n response = requests.get(url_movie).text\n document = bs(response, 'html.parser')\n\n temp_content = document.select('.story_area .con_tx')\n content = str(temp_content)[19:-5].replace('\\r', '').split('
\\xa0')\n movie['description'] = '\\n'.join(content)\n\n temp_img = document.select('.wide_info_area .poster')\n movie['poster_url'] = str(temp_img).split('src=')[2].split('?type=')[0].replace('\"', '')\n\n temp_title = document.select('.wide_info_area .mv_info .h_movie a')\n movie['movie_name_ko'] = str(temp_title).split('')[0].split('\">')[1]\n\n return render(request, 'movies/index.html')\n\n\ndef movie_list(request):\n movies = Movie.objects.all()[:10]\n return render(request, 'movies/list.html', {'movies': movies})\n\n\ndef detail(request, movie_id):\n movie = Movie.objects.get(pk=movie_id)\n if request.GET.get(\"theaterCode\"):\n theatercode = request.GET.get(\"theaterCode\")\n else:\n theatercode = '0056'\n\n url_movie = 'http://www.cgv.co.kr/theaters/'+f'?theaterCode={theatercode}'\n response = requests.get(url_movie).text\n document = bs(response, 'html.parser')\n\n temp_address = document.select('.wrap-theater .sect-theater .box-contents .title')\n address = str(temp_address).split('
')[1].split('')[0]\n\n    url_geocode = f'https://naveropenapi.apigw.ntruss.com/map-geocode/v2/geocode?query={address}'\n\n    res = requests.get(url_geocode, headers=headers_naver_map)\n    res_data = res.json()\n\n    theaters = {\n        '0056': 'CGV강남', '0001': 'CGV강변', '0229': 'CGV건대입구', '0010': 'CGV구로', '0063': 'CGV대학로',\n        '0252': 'CGV동대문', '0230': 'CGV등촌', '0009': 'CGV명동', '0105': 'CGV명동역 씨네라이브러리', '0011': 'CGV목동',\n        '0057': 'CGV미아', '0030': 'CGV불광', '0046': 'CGV상봉', '0083': 'CGV성신여대입구', '0088': 'CGV송파',\n        '0276': 'CGV수유', '0150': 'CGV신촌아트레온', '0040': 'CGV압구정', '0112': 'CGV여의도', '0059': 'CGV영등포',\n        '0074': 'CGV왕십리', '0013': 'CGV용산아이파크몰', '0131': 'CGV중계', '0199': 'CGV천호', '0107': 'CGV청담씨네시티',\n        '0223': 'CGV피카디리1958', '0164': 'CGV하계', '0191': 'CGV홍대', '0040': 'CINE de CHEF 압구정', '0013': 'CINE de CHEF 용산아이파크몰',\n        '0012': 'CGV수원', '0253': 'CGV해운대',\n    }\n\n    headers_naver = {\n        'X-Naver-Client-Id': 'bS2KxFME66tpq5UJPclw',\n        'X-Naver-Client-Secret': 'ZUOJVkLobP'\n    }\n\n    url_search = f'https://openapi.naver.com/v1/search/local.json?query={address} 맛집'\n    res = requests.get(url_search, headers=headers_naver)\n    restaurants = res.json().get('items')\n\n    for r in restaurants:\n        address = r.get('address')\n        url_geocode = f'https://naveropenapi.apigw.ntruss.com/map-geocode/v2/geocode?query={address}'\n\n        res = requests.get(url_geocode, headers=headers_naver_map)\n        res_data1 = res.json()\n        r['x_axis'] = res_data1.get('addresses')[0].get('x')\n        r['y_axis'] = res_data1.get('addresses')[0].get('y')\n\n    sum_score = 0\n    avg_score = 0\n    list_score = movie.score_set.all()\n    if list_score:\n        for m_score in list_score:\n            sum_score += m_score.value\n        avg_score = round(sum_score / len(list_score), 2)\n\n    context = {\n        'movie': movie,\n        'x_axis': res_data.get('addresses')[0].get('x'),\n        'y_axis': res_data.get('addresses')[0].get('y'),\n        'theater_code': theaters,\n        'theater': theaters.get(str(theatercode)),\n        'restaurants': restaurants,\n        'avg_score': avg_score,\n    }\n\n    return render(request, 'movies/detail.html', context)\n\n\n@login_required\ndef create_score(request, movie_id):\n    if request.method == 'POST':\n        score = ScoreForm(request.POST)\n        if score.is_valid():\n            form = score.save(commit=False)\n            form.movie_id = movie_id\n            form.user = request.user\n            form.save()\n            return redirect('movies:detail', movie_id=movie_id)\n        return redirect('movies:detail', movie_id=movie_id)\n    return redirect('movies:detail')\n\n\n@login_required\ndef delete_score(request, movie_id, score_id):\n    if request.method == 'POST':\n        score = get_object_or_404(Score, id=score_id)\n        score.delete()\n        return redirect('movies:detail', movie_id=movie_id)\n    return redirect('movies:movie_list')\n\n\n@login_required\ndef update_score(request, score_id, movie_id):\n    score = get_object_or_404(Score, id=score_id)\n    movie = get_object_or_404(Movie, id=movie_id)\n    if score.user != request.user:\n        return HttpResponseForbidden(\"You are not allowed to update this score\")\n\n    if request.method == 'POST':\n        form = ScoreForm(request.POST, instance=score)\n        if form.is_valid():\n            score_form = form.save(commit=False)\n            score_form.movie_id = movie_id\n            score_form.user = request.user\n            
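# keep the edited score bound to the same movie and requesting user\n            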
score_form.save()\n return redirect('movies:detail', movie_id=movie_id)\n else:\n form = ScoreForm(instance=score)\n return render(request, 'movies/update.html', {'form': form, 'movie': movie})\n","repo_name":"Hansung-Lee/SSAFY","sub_path":"ssafy_project/last_project/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5259373986","text":"#\r\n# PAOFLOW\r\n#\r\n# Copyright 2016-2022 - Marco BUONGIORNO NARDELLI (mbn@unt.edu)\r\n#\r\n# Reference:\r\n#\r\n# F.T. Cerasoli, A.R. Supka, A. Jayaraj, I. Siloi, M. Costa, J. Slawinska, S. Curtarolo, M. Fornari, D. Ceresoli, and M. Buongiorno Nardelli,\r\n# Advanced modeling of materials with PAOFLOW 2.0: New features and software design, Comp. Mat. Sci. 200, 110828 (2021).\r\n#\r\n# M. Buongiorno Nardelli, F. T. Cerasoli, M. Costa, S Curtarolo,R. De Gennaro, M. Fornari, L. Liyanage, A. Supka and H. Wang, \r\n# PAOFLOW: A utility to construct and operate on ab initio Hamiltonians from the Projections of electronic wavefunctions on \r\n# Atomic Orbital bases, including characterization of topological materials, Comp. Mat. Sci. vol. 143, 462 (2018).\r\n#\r\n# This file is distributed under the terms of the\r\n# GNU General Public License. See the file `License'\r\n# in the root directory of the present distribution,\r\n# or http://www.gnu.org/copyleft/gpl.txt .\r\n\r\nimport numpy as np\r\nimport scipy.linalg as la\r\n\r\ndef doubling_HRs ( data_controller ):\r\n from scipy.fftpack import fftshift\r\n from mpi4py import MPI\r\n\r\n if MPI.COMM_WORLD.Get_rank() != 0:\r\n return\r\n\r\n arry,attr = data_controller.data_dicts()\r\n\r\n nx = attr['nx'] \r\n ny = attr['ny'] \r\n nz = attr['nz'] \r\n nk1 = attr['nk1'] \r\n nk2 = attr['nk2'] \r\n nk3 = attr['nk3'] \r\n nspin = attr['nspin'] \r\n\r\n nkpts=nk1*nk2*nk3\r\n\r\n cell_index=np.zeros((nk1,nk2,nk3,3),dtype=int)\r\n new_index=np.zeros((3,nkpts),dtype=int)\r\n \r\n for i in range(nk1):\r\n for j in range(nk2):\r\n for k in range(nk3):\r\n n = k + j*nk3 + i*nk2*nk3\r\n Rx = float(i)/float(nk1)\r\n Ry = float(j)/float(nk2)\r\n Rz = float(k)/float(nk3)\r\n if Rx >= 0.5: Rx=Rx-1.0\r\n if Ry >= 0.5: Ry=Ry-1.0\r\n if Rz >= 0.5: Rz=Rz-1.0\r\n Rx -= int(Rx)\r\n Ry -= int(Ry)\r\n Rz -= int(Rz)\r\n # the minus sign in Rx*nk1 is due to the Fourier transformation (Ri-Rj)\r\n ix=int(-round(Rx*nk1,0))\r\n iy=int(-round(Ry*nk2,0))\r\n iz=int(-round(Rz*nk3,0))\r\n \r\n cell_index[ix,iy,iz,0]=i\r\n cell_index[ix,iy,iz,1]=j\r\n cell_index[ix,iy,iz,2]=k \r\n\r\n new_index[0,n]=ix\r\n new_index[1,n]=iy\r\n new_index[2,n]=iz\r\n\r\n # This construction is doubling along the X direction nx times \r\n for dx in range(nx):\r\n \r\n HR_double= np.zeros((2*attr['nawf'],2*attr['nawf'],nk1,nk2,nk3,nspin),dtype=complex)\r\n for ix in range(min(new_index[0,:]),max(new_index[0,:])+1):\r\n for iy in range(min(new_index[1,:]),max(new_index[1,:])+1):\r\n for iz in range(min(new_index[2,:]),max(new_index[2,:])+1):\r\n \r\n i,j,k = cell_index[ix,iy,iz,:] # doubled cell index\r\n\r\n if ( ((2*ix) >= min(new_index[0,:])) and ((2*ix) <= (max(new_index[0,:])))):\r\n i,j,k = cell_index[ix,iy,iz,:]\r\n m,n,l = cell_index[2*ix,iy,iz,:] \r\n # Upper left HR_double block \r\n HR_double[0:attr['nawf'],0:attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n # Lower right HR_double block \r\n HR_double[attr['nawf']:2*attr['nawf'],attr['nawf']:2*attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n \r\n if ( 
((2*ix+1) >= min(new_index[0,:])) and ((2*ix+1) <= (max(new_index[0,:])))):\r\n i,j,k = cell_index[ix,iy,iz,:]\r\n m,n,l = cell_index[2*ix+1,iy,iz,:]\r\n #Upper right HR_double block \r\n HR_double[0:attr['nawf'],attr['nawf']:2*attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n \r\n if ( ((2*ix-1) >= min(new_index[0,:])) and ((2*ix-1) <= (max(new_index[0,:])))):\r\n i,j,k = cell_index[ix,iy,iz,:]\r\n m,n,l = cell_index[2*ix-1,iy,iz,:]\r\n #Lower left HR_double block\r\n HR_double[attr['nawf']:2*attr['nawf'],0:attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n\r\n arry['HRs'] = HR_double\r\n HR_double = None\r\n arry['tau'] = np.append(arry['tau'],arry['tau'][:,:]+arry['a_vectors'][0,:]*attr['alat'],axis=0)\r\n arry['a_vectors'][0,:]=2*arry['a_vectors'][0,:]\r\n doubling_attr_arry(data_controller)\r\n\r\n\r\n\r\n # This construction is doubling along the Y direction ny times \r\n for dy in range(ny):\r\n HR_double= np.zeros((2*attr['nawf'],2*attr['nawf'],nk1,nk2,nk3,nspin),dtype=complex)\r\n\r\n for ix in range(min(new_index[0,:]),max(new_index[0,:])+1):\r\n for iy in range(min(new_index[1,:]),max(new_index[1,:])+1):\r\n for iz in range(min(new_index[2,:]),max(new_index[2,:])+1):\r\n \r\n i,j,k = cell_index[ix,iy,iz,:] # doubled cell index\r\n\r\n if ( ((2*iy) >= min(new_index[1,:])) and ((2*iy) <= (max(new_index[1,:])))):\r\n i,j,k = cell_index[ix,iy,iz,:]\r\n m,n,l = cell_index[ix,2*iy,iz,:] \r\n # Upper left HR_double block \r\n HR_double[0:attr['nawf'],0:attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n # Lower right HR_double block \r\n HR_double[attr['nawf']:2*attr['nawf'],attr['nawf']:2*attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n \r\n if ( ((2*iy+1) >= min(new_index[1,:])) and ((2*iy+1) <= (max(new_index[1,:])))):\r\n i,j,k = cell_index[ix,iy,iz,:]\r\n m,n,l = cell_index[ix,2*iy+1,iz,:]\r\n #Upper right HR_double block \r\n HR_double[0:attr['nawf'],attr['nawf']:2*attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n \r\n if ( ((2*iy-1) >= min(new_index[1,:])) and ((2*iy-1) <= (max(new_index[1,:])))):\r\n i,j,k = cell_index[ix,iy,iz,:]\r\n m,n,l = cell_index[ix,2*iy-1,iz,:]\r\n #Lower left HR_double block \r\n HR_double[attr['nawf']:2*attr['nawf'],0:attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n \r\n\r\n arry['HRs'] = HR_double\r\n HR_double = None\r\n arry['tau'] = np.append(arry['tau'],arry['tau'][:,:]+arry['a_vectors'][1,:]*attr['alat'],axis=0)\r\n arry['a_vectors'][1,:]=2*arry['a_vectors'][1,:]\r\n doubling_attr_arry(data_controller)\r\n\r\n # This construction is doubling along the Z direction nz times \r\n delete_index=0\r\n for dz in range(nz):\r\n\r\n HR_double= np.zeros((2*attr['nawf'],2*attr['nawf'],nk1,nk2,nk3,nspin),dtype=complex)\r\n \r\n for ix in range(min(new_index[0,:]),max(new_index[0,:])+1):\r\n for iy in range(min(new_index[1,:]),max(new_index[1,:])+1):\r\n for iz in range(min(new_index[2,:]),max(new_index[2,:])+1):\r\n \r\n i,j,k = cell_index[ix,iy,iz,:] # doubled cell index\r\n\r\n if ( ((2*iz) >= min(new_index[2,:])) and ((2*iz) <= (max(new_index[2,:])))):\r\n i,j,k = cell_index[ix,iy,iz,:]\r\n m,n,l = cell_index[ix,iy,2*iz,:] \r\n # Upper left HR_double block \r\n HR_double[0:attr['nawf'],0:attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n # Lower right HR_double block \r\n HR_double[attr['nawf']:2*attr['nawf'],attr['nawf']:2*attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n \r\n if ( ((2*iz+1) >= min(new_index[2,:])) and ((2*iz+1) <= (max(new_index[2,:])))):\r\n i,j,k = cell_index[ix,iy,iz,:]\r\n m,n,l = 
cell_index[ix,iy,2*iz+1,:]\r\n #Upper right HR_double block \r\n HR_double[0:attr['nawf'],attr['nawf']:2*attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n \r\n if ( ((2*iz-1) >= min(new_index[2,:])) and ((2*iz-1) <= (max(new_index[2,:])))):\r\n i,j,k = cell_index[ix,iy,iz,:]\r\n m,n,l = cell_index[ix,iy,2*iz-1,:]\r\n #Lower left HR_double block \r\n HR_double[attr['nawf']:2*attr['nawf'],0:attr['nawf'],i,j,k,:] = arry['HRs'][:,:,m,n,l,:]\r\n \r\n arry['HRs'] = HR_double\r\n HR_double = None\r\n arry['tau'] = np.append(arry['tau'],arry['tau'][:,:]+arry['a_vectors'][2,:]*attr['alat'],axis=0)\r\n arry['a_vectors'][2,:]=2*arry['a_vectors'][2,:]\r\n doubling_attr_arry(data_controller)\r\n\r\n\r\ndef doubling_attr_arry ( data_controller ):\r\n\r\n arry,attr = data_controller.data_dicts()\r\n\r\n # Increassing nawf/natoms\r\n attr['nawf'] = 2*attr['nawf']\r\n if 'natoms' in attr: attr['natoms'] = 2*attr['natoms']\r\n if 'nelec' in attr: attr['nelec'] = 2*attr['nelec']\r\n if 'nbnds' in attr: attr['nbnds'] = 2*attr['nbnds']\r\n if 'bnd' in attr: attr['bnd'] = 2*attr['bnd']\r\n #doubling the atom number of orbitals / orbital character / multiplicity\r\n if 'naw' in arry: arry['naw'] = np.append(arry['naw'],arry['naw'])\r\n if 'sh' in arry: arry['sh'] = np.append(arry['sh'],arry['sh'])\r\n if 'nl' in arry: arry['nl'] = np.append(arry['nl'],arry['nl'])\r\n if 'atoms' in arry: arry['atoms'] = np.append(arry['atoms'],arry['atoms'])\r\n # if Sj is already computed, then double it\r\n if 'Sj' in arry:\r\n Sj_double = np.zeros((3,attr['nawf'],attr['nawf']),dtype=complex)\r\n for spol in range(3):\r\n Sj = arry['Sj'][spol]\r\n Sj_double[spol] = la.block_diag(*[Sj,Sj])\r\n\r\n arry['Sj'] = Sj_double\r\n Sj_double = None\r\n\r\n # If the SOC is included pertubative\r\n if 'do_spin_orbit' in attr and (attr['do_spin_orbit']):\r\n if 'lambda_p' in arry: arry['lambda_p'] = np.append(arry['lambda_p'],arry['lambda_p'])\r\n if 'lambda_d' in arry: arry['lambda_d'] = np.append(arry['lambda_d'],arry['lambda_d'])\r\n if 'orb_pseudo' in arry: arry['orb_pseudo'] = np.append(arry['orb_pseudo'],arry['orb_pseudo'])\r\n\r\n","repo_name":"marcobn/PAOFLOW","sub_path":"src/defs/do_doubling.py","file_name":"do_doubling.py","file_ext":"py","file_size_in_byte":10834,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"30275940767","text":"\"\"\"\n素数\n\"\"\"\n\nfrom math import sqrt # 开根号\n\nnumber = int(input(\"请输入一个正整数: \"))\nresult = int(sqrt(number)) # 对输入的数字进行开根号计算\nis_Prime = True\n\nfor x in range(2, result + 1):\n if number % x == 0: # 判断数字的模\n is_Prime = False\n break\n\nif is_Prime and number != 1:\n print('%d是素数' % number)\nelse:\n print('%d不是素数' % number)\n","repo_name":"Bobby981229/Python-Learning","sub_path":"Day 04 - Loop Structure/Prime_Number.py","file_name":"Prime_Number.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"19814498547","text":"#!/usr/bin/env python3\n\"\"\"\nBootstrapping script for QryptoCrawler\n\"\"\"\nfrom exchanges.binance import Binance\nfrom datetime import datetime\nimport logging\nimport yaml\n\ndef setup_logging(path: str, level: int, console: bool=True, file: bool=True) -> None :\n # Basic logging setup\n logger = logging.getLogger()\n logger.setLevel(level)\n logFormatter = logging.Formatter(\"%(asctime)s [%(levelname)s] %(message)s\")\n \n # Log to file\n if file:\n fileHandler = logging.FileHandler(path)\n 
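# A minimal, self-contained sketch of the block-diagonal embedding that
# doubling_attr_arry above applies to the spin operator Sj: each (nawf, nawf)
# matrix is placed twice on the diagonal of the doubled cell. Shapes here are
# illustrative, not PAOFLOW's actual data layout.
import numpy as np
import scipy.linalg as la

def double_spin_operator(Sj):
    # Sj holds one (nawf, nawf) matrix per polarization; block_diag(S, S)
    # mirrors the upper-left/lower-right blocks assembled in the loops above.
    return np.stack([la.block_diag(S, S) for S in Sj])

Sj = np.random.rand(3, 4, 4) + 1j * np.random.rand(3, 4, 4)
assert double_spin_operator(Sj).shape == (3, 8, 8)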
fileHandler.setFormatter(logFormatter)\n logger.handlers.append(fileHandler)\n \n # Log to console\n if console:\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logFormatter)\n logger.handlers.append(consoleHandler)\n\nif __name__ == \"__main__\":\n\n # Config loading\n with open(\"config.yaml\", \"r\") as yamlfile:\n cfg = yaml.load(yamlfile, Loader=yaml.FullLoader) \n with open(\"api.yaml\", \"r\") as apifile:\n api = yaml.load(apifile, Loader=yaml.FullLoader)\n \n # Load the logging settings\n folder = cfg['logging']['folder']\n prefix = cfg['logging']['prefix']\n timeformat = cfg['logging']['timeformat']\n\n # Build filepath and setup logging\n logfile_name = folder + prefix + datetime.now().strftime(timeformat) + '.log'\n setup_logging(logfile_name, True, True)\n\n # Start of script\n logging.info('Current logfile is %s', logfile_name) \n binance = Binance(api['binance']['key'], api['binance']['secret'])","repo_name":"CryptoQuaternions/QryptoCrawler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15796138954","text":"import argparse\nimport concurrent.futures\nimport logging\nimport multiprocessing\nimport os\nimport psutil\nimport resource\nimport signal\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom object_database.util import (\n configureLogging,\n genToken,\n sslContextFromCertPathOrNone,\n validateLogLevel,\n)\nfrom object_database import (\n TcpServer,\n TcpProxyServer,\n connect,\n RedisPersistence,\n InMemoryPersistence,\n DisconnectedException,\n LoggingTransactionWatcher,\n)\nfrom object_database.service_manager.SubprocessServiceManager import SubprocessServiceManager\nfrom object_database.service_manager.logfiles import Logfile\n\n\nownDir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef startServiceManagerProcess(\n tempDirectoryName,\n port,\n authToken,\n *,\n loglevelName=\"INFO\",\n timeout=1.0,\n verbose=True,\n ownHostname=\"localhost\",\n dbHostname=\"localhost\",\n runDb=True,\n logDir=True,\n sslPath=None,\n proxyPort=None,\n):\n if not verbose:\n kwargs = dict(stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n else:\n kwargs = dict()\n\n cmd = [\n sys.executable,\n os.path.join(ownDir, \"service_manager.py\"),\n ownHostname,\n dbHostname,\n str(port),\n \"Master\" if runDb else \"Worker\",\n \"--service-token\",\n authToken,\n \"--shutdownTimeout\",\n str(timeout),\n \"--log-level\",\n loglevelName,\n \"--source\",\n os.path.join(tempDirectoryName, \"source\"),\n \"--storage\",\n os.path.join(tempDirectoryName, \"storage\"),\n ]\n if runDb:\n cmd.append(\"--run_db\")\n\n if proxyPort is not None:\n assert isinstance(proxyPort, int)\n\n cmd.append(\"--proxy-port\")\n cmd.append(str(proxyPort))\n\n if logDir:\n logsPath = os.path.join(tempDirectoryName, \"logs\")\n cmd.extend([\"--logdir\", logsPath])\n cmd.extend([\"--log-max-megabytes\", \"1\"])\n cmd.extend([\"--log-max-total-megabytes\", \"10\"])\n cmd.extend([\"--log-backup-count\", \"3\"])\n\n if sslPath:\n cmd.extend([\"--ssl-path\", sslPath])\n\n server = subprocess.Popen(cmd, **kwargs)\n try:\n # this should throw a subprocess.TimeoutExpired exception if the service did not crash\n server.wait(timeout)\n except subprocess.TimeoutExpired:\n pass\n else:\n if server.returncode:\n msg = f\"Failed to start service_manager (retcode:{server.returncode})\"\n\n if verbose and server.stderr:\n 
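# In the QryptoCrawler entry point above, setup_logging is declared as
# setup_logging(path, level, console=True, file=True), but __main__ calls
# setup_logging(logfile_name, True, True), so True lands in the int `level`
# parameter (logging treats it as level 1). A call matching the signature
# would pass the level explicitly, e.g.:
#
#     setup_logging(logfile_name, logging.INFO, console=True, file=True)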
error = b\"\".join(server.stderr.readlines())\n msg += \"\\n\" + error.decode(\"utf-8\")\n server.terminate()\n server.wait()\n raise Exception(msg)\n\n return server\n\n\ndef autoconfigureAndStartServiceManagerProcess(\n port=None, authToken=None, loglevelName=None, **kwargs\n):\n\n port = port or 8020\n authToken = authToken or genToken()\n\n if loglevelName is None:\n loglevelName = logging.getLevelName(logging.getLogger(__name__).getEffectiveLevel())\n\n tempDirObj = tempfile.TemporaryDirectory()\n tempDirectoryName = tempDirObj.name\n\n server = startServiceManagerProcess(\n tempDirectoryName, port, authToken, loglevelName=loglevelName, **kwargs\n )\n\n def cleanupFn(error=False):\n server.terminate()\n try:\n server.wait(timeout=15.0)\n except subprocess.TimeoutExpired:\n logging.getLogger(__name__).warning(\n \"Failed to gracefully terminate service manager after 15 seconds.\"\n \" Sending KILL signal\"\n )\n server.kill()\n try:\n server.wait(timeout=5.0)\n except subprocess.TimeoutExpired:\n logging.getLogger(__name__).warning(\"Failed to kill service manager process.\")\n\n if error or server.returncode:\n logging.getLogger(__name__).warning(\n \"Exited with an error. Leaving temporary directory around for inspection: %s\",\n tempDirectoryName,\n )\n else:\n tempDirObj.cleanup()\n\n return server, cleanupFn\n\n\ndef runProxyServer(shouldStop, dbHostname, dbPort, ownHostname, ownPort, authToken, sslPath):\n \"\"\"A background thread loop for running a proxy server.\n\n If we get disconnected, we'll attempt to reconnect.\"\"\"\n logger = logging.getLogger(__name__)\n\n ssl_ctx = sslContextFromCertPathOrNone(sslPath)\n\n logger.info(\"Started a proxy server on %s:%s\", ownHostname, ownPort)\n\n proxyServer = None\n\n while not shouldStop.is_set():\n if proxyServer is not None and proxyServer.disconnected.is_set():\n proxyServer.stop()\n proxyServer = None\n\n if proxyServer is None:\n try:\n proxyServer = TcpProxyServer(\n dbHostname,\n dbPort,\n ownHostname,\n ownPort,\n ssl_context=ssl_ctx,\n auth_token=authToken,\n )\n\n proxyServer.start()\n except (\n ConnectionRefusedError,\n DisconnectedException,\n concurrent.futures._base.TimeoutError,\n OSError,\n ):\n proxyServer = None\n logger.error(\"Failed to connect to central ODB. 
Sleeping and retrying\")\n time.sleep(5)\n\n shouldStop.wait(timeout=1.0)\n\n\ndef processHostnameArg(hostname):\n # if hostname is ip-XXX-XXX-XXX-XXX, convert to 'dot' form since our\n # default linux build doesnt have this hostname mapped correctly.\n if hostname.startswith(\"ip-\"):\n return hostname[3:].replace(\"-\", \".\")\n return hostname\n\n\ndef main(argv=None):\n if argv is None:\n # this is a needed pathway for the 'console_scripts' in setup.py\n argv = sys.argv\n\n parser = argparse.ArgumentParser(\n \"Run the main service manager and the object_database_service.\"\n )\n\n parser.add_argument(\"own_hostname\")\n parser.add_argument(\"db_hostname\")\n parser.add_argument(\"port\", type=int)\n parser.add_argument(\"placement_group\", type=str)\n parser.add_argument(\n \"--source\", help=\"path for the source trees used by services\", required=True\n )\n parser.add_argument(\n \"--storage\", help=\"path for local storage used by services\", required=True\n )\n parser.add_argument(\n \"--service-token\",\n type=str,\n required=True,\n help=\"the auth token to be used with this service\",\n )\n parser.add_argument(\n \"--run_db\",\n default=False,\n action=\"store_true\",\n help=\"run an odb server here, not just a service manager.\",\n )\n parser.add_argument(\n \"--proxy-port\",\n default=None,\n required=False,\n type=int,\n help=\"run an odb proxy server here, connecting back to the main dbserver. \"\n \"Services booted here will connect to the proxy server directly. \"\n \"This is the port they'll use.\",\n )\n\n parser.add_argument(\n \"--ssl-path\",\n default=None,\n required=False,\n help=\"path to (self-signed) SSL certificate\",\n )\n parser.add_argument(\"--redis_port\", type=int, default=None, required=False)\n parser.add_argument(\"--fd-limit\", type=int, default=4096, required=False)\n\n parser.add_argument(\"--max_gb_ram\", type=float, default=None, required=False)\n parser.add_argument(\"--max_cores\", type=int, default=None, required=False)\n parser.add_argument(\"--shutdownTimeout\", type=float, default=None, required=False)\n\n parser.add_argument(\"--logdir\", default=None, required=False)\n parser.add_argument(\"--log-level\", required=False, default=\"INFO\")\n parser.add_argument(\"--log-max-megabytes\", required=False, default=\"100\")\n parser.add_argument(\"--log-max-total-megabytes\", required=False, default=\"20000\")\n parser.add_argument(\"--log-backup-count\", required=False, default=\"100\")\n parser.add_argument(\"--transaction-log\", required=False, default=None)\n\n parser.add_argument(\n \"--watch-aws-image-hash\",\n required=False,\n default=None,\n help=(\n \"Path to file containing our docker image. If the AWS docker image\"\n \"changes, we'll write the new image to this file and exit the process. 
\"\n \"The invoking image is expected to re-run this.\"\n ),\n )\n parser.add_argument(\"--max-service-instances\", required=False, type=int, default=-1)\n parser.add_argument(\"--is-gpu\", required=False, type=int, default=0)\n\n parsedArgs = parser.parse_args(argv[1:])\n\n level = parsedArgs.log_level.upper()\n level = validateLogLevel(level, fallback=\"INFO\")\n startTs = int(time.time())\n\n if parsedArgs.logdir:\n logfile = f\"{SubprocessServiceManager.SERVICE_NAME}-{startTs}.log\"\n assert Logfile.parseLogfileName(logfile) is not None, logfile\n\n if not os.path.isdir(parsedArgs.logdir):\n os.makedirs(parsedArgs.logdir)\n\n logpath = os.path.join(parsedArgs.logdir, logfile)\n\n updates = {\n \"handlers\": {\n \"rotating-file\": {\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": logpath,\n \"level\": 0, # this handler should apply to all levels\n \"formatter\": \"nyc\",\n \"maxBytes\": int(float(parsedArgs.log_max_megabytes) * 1024**2),\n \"backupCount\": int(parsedArgs.log_backup_count),\n }\n },\n \"root\": {\"handlers\": [\"default\", \"rotating-file\"]},\n }\n\n else:\n updates = {}\n\n configureLogging(\"service_manager\", level=level, config_updates=updates)\n logger = logging.getLogger(__name__)\n\n parsedArgs = parser.parse_args(argv[1:])\n\n if parsedArgs.redis_port is not None and not parsedArgs.run_db:\n sys.stderr.write(\"error: please add --run_db if you want to run a database\\n\")\n parser.print_help()\n return 2\n\n maxServiceInstances = parsedArgs.max_service_instances\n if maxServiceInstances == -1:\n maxServiceInstances = None\n elif maxServiceInstances < 0:\n sys.stderr.write(\n \"error: please pass a nonnegative integer for --max-service-instances\\n\"\n )\n parser.print_help()\n return 2\n\n ownHostname = processHostnameArg(parsedArgs.own_hostname)\n\n logger.info(\n \"ServiceManager on %s connecting to %s:%s\",\n ownHostname,\n parsedArgs.db_hostname,\n parsedArgs.port,\n )\n shouldStop = threading.Event()\n\n def shutdownCleanly(signalNumber, frame):\n logger.info(\"Received signal %s. 
Stopping.\", signalNumber)\n shouldStop.set()\n\n signal.signal(signal.SIGINT, shutdownCleanly)\n signal.signal(signal.SIGTERM, shutdownCleanly)\n\n softLimit, hardLimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n if parsedArgs.fd_limit <= hardLimit:\n fdLimit = parsedArgs.fd_limit\n else:\n fdLimit = hardLimit\n logger.warning(\n \"Requested FD count (%s) is higher than the hard limit (%s).\",\n parsedArgs.fd_limit,\n hardLimit,\n )\n\n resource.setrlimit(resource.RLIMIT_NOFILE, (fdLimit, hardLimit))\n\n databaseServer = None\n serviceManager = None\n\n if parsedArgs.transaction_log:\n if not parsedArgs.run_db:\n raise Exception(\n \"Makes no sense to ask for a transaction log if you're not running a DB\"\n )\n\n transactionWatcher = LoggingTransactionWatcher(parsedArgs.transaction_log)\n else:\n transactionWatcher = None\n\n try:\n if parsedArgs.run_db:\n ssl_ctx = sslContextFromCertPathOrNone(parsedArgs.ssl_path)\n databaseServer = TcpServer(\n ownHostname,\n parsedArgs.port,\n RedisPersistence(port=parsedArgs.redis_port)\n if parsedArgs.redis_port is not None\n else InMemoryPersistence(),\n ssl_context=ssl_ctx,\n auth_token=parsedArgs.service_token,\n transactionWatcher=transactionWatcher,\n )\n\n databaseServer.start()\n\n logger.info(\"Started a database server on %s:%s\", ownHostname, parsedArgs.port)\n\n if parsedArgs.proxy_port is not None:\n proxyThread = threading.Thread(\n target=runProxyServer,\n args=(\n shouldStop,\n parsedArgs.db_hostname,\n parsedArgs.port,\n # put proxy traffic on the loopback\n ownHostname,\n parsedArgs.proxy_port,\n parsedArgs.service_token,\n parsedArgs.ssl_path,\n ),\n daemon=True,\n )\n proxyThread.start()\n\n # ensure we can connect to the proxy server\n try:\n connect(\n ownHostname,\n parsedArgs.proxy_port,\n parsedArgs.service_token,\n timeout=2.0,\n retry=True,\n )\n except Exception:\n logging.warn(\n \"Failed to connect to the proxy server. Perhaps the upstream is down.\"\n )\n\n else:\n proxyThread = None\n\n serviceManager = None\n\n try:\n while not shouldStop.is_set():\n if serviceManager is None:\n try:\n serviceManager = SubprocessServiceManager(\n ownHostname,\n # if we're running a proxy, connect to ourselves\n ownHostname\n if parsedArgs.proxy_port is not None\n else parsedArgs.db_hostname,\n parsedArgs.port\n if parsedArgs.proxy_port is None\n else parsedArgs.proxy_port,\n parsedArgs.source,\n parsedArgs.storage,\n parsedArgs.service_token,\n placementGroup=parsedArgs.placement_group,\n maxGbRam=parsedArgs.max_gb_ram\n or int(\n psutil.virtual_memory().total / 1024.0 / 1024.0 / 1024.0 + 0.1\n ),\n maxCores=parsedArgs.max_cores or multiprocessing.cpu_count(),\n logfileDirectory=parsedArgs.logdir,\n shutdownTimeout=parsedArgs.shutdownTimeout,\n logLevelName=level,\n logMaxMegabytes=float(parsedArgs.log_max_megabytes),\n logMaxTotalMegabytes=float(parsedArgs.log_max_total_megabytes),\n logBackupCount=int(parsedArgs.log_backup_count),\n startTs=startTs,\n maxServiceInstances=maxServiceInstances,\n )\n logger.info(\"Connected the service-manager\")\n except (\n ConnectionRefusedError,\n DisconnectedException,\n concurrent.futures._base.TimeoutError,\n OSError,\n ):\n logger.exception(\"Failed to connect the ServiceManager to the ODB.\")\n serviceManager = None\n\n if serviceManager is None:\n logger.error(\n \"Failed to connect to service manager. 
Sleeping and retrying\"\n )\n time.sleep(10)\n else:\n serviceManager.start()\n\n else:\n timeout = max(0.1, serviceManager.shutdownTimeout / 10)\n shouldStop.wait(timeout=timeout)\n try:\n serviceManager.cleanup()\n\n if parsedArgs.watch_aws_image_hash:\n shouldReboot = serviceManager.checkAwsImageHash(\n parsedArgs.watch_aws_image_hash, parsedArgs.is_gpu\n )\n if shouldReboot:\n logger.info(\"Rebooting because docker image hash changed.\")\n serviceManager.stop(gracefully=False)\n serviceManager = None\n return 0\n except (\n ConnectionRefusedError,\n DisconnectedException,\n concurrent.futures._base.TimeoutError,\n OSError,\n ):\n # try to reconnect\n logger.error(\n \"Disconnected from object_database host. Attempting to reconnect.\"\n )\n serviceManager.stop(gracefully=False)\n serviceManager = None\n\n except Exception:\n logger.exception(\"Service manager cleanup failed:\")\n\n except KeyboardInterrupt:\n logger.warning(\"Exiting due to KeyboardInterrupt\")\n return 0\n\n return 0\n\n finally:\n if serviceManager is not None:\n try:\n serviceManager.stop(gracefully=True)\n except Exception:\n logger.exception(\"Failed to stop the service manager:\")\n\n if databaseServer is not None:\n try:\n databaseServer.stop()\n except Exception:\n logger.exception(\"Failed to stop the database server:\")\n\n if transactionWatcher is not None:\n transactionWatcher.stop()\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","repo_name":"APrioriInvestments/object_database","sub_path":"object_database/frontends/service_manager.py","file_name":"service_manager.py","file_ext":"py","file_size_in_byte":18134,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"25458277323","text":"from .models import Config, penny\nfrom . import td\nfrom . import cmdline\nfrom decimal import Decimal\nimport datetime\nimport sys\n\nimport logging\nlog = logging.getLogger(__name__)\n\n# Do we want config items to be able to have no value? No. They\n# should always have a valid value.\n\n# What do we do when the value in the database is invalid for the\n# type? Read it as \"None\". If we can't return None, return the\n# default.\n\n# What do we do when there's no database available? 
Just use the defaults\n\n\nclass ConfigItem:\n \"\"\"A configuration setting\n \"\"\"\n _keys = {}\n _listener = None\n\n def __init__(self, key, default, type=\"text\",\n display_name=None, description=None):\n # NB 'default' may be changed after init simply by setting the\n # attribute\n self.key = key\n self.default = default\n self.type = type\n self.display_name = display_name or key\n self.description = description or self.display_name\n self._value = None\n self._current = False\n self._keys[self.key] = self\n self._notify = []\n\n @classmethod\n def from_db(cls, s):\n \"\"\"Convert a string from the database to the appropriate python type\n \"\"\"\n return s\n\n @classmethod\n def to_db(cls, v):\n \"\"\"Convert a python value to a string for the database\n \"\"\"\n if v is None:\n return \"\"\n return v\n\n def notify_on_change(self, func):\n self._notify.append(func)\n\n @classmethod\n def _config_changed(cls, configitem):\n ci = cls._keys.get(configitem)\n if ci:\n log.debug(\"config changed: %s, clearing cache\", configitem)\n ci._value = None\n ci._current = False\n for func in ci._notify:\n func()\n else:\n log.debug(\"config changed: %s, no config found\", configitem)\n\n @classmethod\n def listen_for_changes(cls, listener):\n if not cls._listener:\n cls._listener = listener.listen_for('config', cls._config_changed)\n\n @classmethod\n def preload(cls):\n td.s.query(Config).all()\n for ci in cls._keys.values():\n ci._read()\n\n def _read(self):\n d = td.s.query(Config).get(self.key)\n if d is None:\n # The config option doesn't exist in the database. Initialise it\n # with the default.\n td.s.add(Config(key=self.key,\n value=self.to_db(self.default),\n type=self.type,\n display_name=self.display_name,\n description=self.description))\n self._value = self.default\n else:\n self._value = self.from_db(d.value)\n if d.type != self.type:\n d.type = self.type\n if d.display_name != self.display_name:\n d.display_name = self.display_name\n if d.description != self.description:\n d.description = self.description\n self._current = True\n\n def __call__(self, allow_none=False):\n if not self._current:\n self._read()\n if self._value is None:\n return None if allow_none else self.default\n return self._value\n\n @property\n def value(self):\n return self()\n\n def __str__(self):\n return str(self())\n\n\nclass MultiLineConfigItem(ConfigItem):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, type=\"multiline text\", **kwargs)\n\n\nclass IntConfigItem(ConfigItem):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, type=\"integer\", **kwargs)\n\n @classmethod\n def from_db(cls, s):\n try:\n return int(s)\n except Exception:\n return\n\n @classmethod\n def to_db(cls, v):\n return str(v)\n\n\nclass BooleanConfigItem(ConfigItem):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, type=\"boolean\", **kwargs)\n\n @classmethod\n def from_db(cls, s):\n if not s:\n # Empty, non-null string\n return False\n if s[0] in ('y', 'Y', 't', 'T'):\n return True\n return False\n\n @classmethod\n def to_db(cls, v):\n return \"Yes\" if v else \"No\"\n\n\nclass DateConfigItem(ConfigItem):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, type=\"date\", **kwargs)\n\n @classmethod\n def from_db(cls, s):\n try:\n return datetime.date(*(int(x) for x in s.split('-')))\n except Exception:\n return\n\n @classmethod\n def to_db(cls, v):\n return str(v)\n\n\nclass IntervalConfigItem(ConfigItem):\n _units = {\n 'w': 'weeks',\n 'week': 'weeks',\n 'weeks': 
'weeks',\n 'd': 'days',\n 'day': 'days',\n 'days': 'days',\n 'h': 'hours',\n 'hr': 'hours',\n 'hour': 'hours',\n 'hours': 'hours',\n 'm': 'minutes',\n 'min': 'minutes',\n 'minute': 'minutes',\n 'minutes': 'minutes',\n 's': 'seconds',\n 'sec': 'seconds',\n 'second': 'seconds',\n 'seconds': 'seconds',\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, type=\"interval\", **kwargs)\n\n @classmethod\n def from_db(cls, s):\n if not s:\n return\n kwargs = {}\n parts = [x.strip().split() for x in s.split(',')]\n try:\n for p in parts:\n num = int(p[0])\n kwargs[cls._units[p[1]]] = num\n except (ValueError, IndexError, KeyError):\n return\n return datetime.timedelta(**kwargs)\n\n @classmethod\n def to_db(cls, v):\n if v is None:\n return \"\"\n return f\"{v.days} days, {v.seconds} seconds\"\n\n\nclass MoneyConfigItem(ConfigItem):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, type=\"money\", **kwargs)\n\n @classmethod\n def from_db(cls, s):\n try:\n return Decimal(s).quantize(penny)\n except Exception:\n return\n\n @classmethod\n def to_db(cls, v):\n return str(v)\n\n\nclass config_cmd(cmdline.command):\n command = \"config\"\n help = \"view or modify till configuration\"\n\n @staticmethod\n def add_arguments(parser):\n parser.add_argument(\n \"-s\", \"--set\", help=\"set a configuration key; if value is not \"\n \"provided on the command line, will read from stdin\",\n action=\"store_true\")\n parser.add_argument(\n \"key\", nargs=\"?\", help=\"configuration key to view or modify\")\n parser.add_argument(\n \"value\", nargs=\"?\", help=\"value for configuration key\")\n\n @staticmethod\n def run(args):\n if args.set and not args.key:\n print(\"The --set option requires a key to be specified\")\n return 1\n\n with td.orm_session():\n if not args.key:\n for ci in td.s.query(Config).order_by(Config.key).all():\n print(f\"{ci.key}: {ci.display_name}: {ci.value}\")\n return\n ci = td.s.query(Config).get(args.key)\n if not ci:\n print(f\"Config key {args.key} does not exist\")\n return 1\n if args.set or args.value:\n ci.value = args.value or sys.stdin.read().strip()\n print(f\"Key: {ci.key}\")\n if args.key in ConfigItem._keys:\n cf = ConfigItem._keys[args.key]\n print(f\"Name: {cf.display_name}\")\n print(f\"Description: {cf.description}\")\n print(f\"Type: {cf.type}\")\n print(f\"Default value: {cf.to_db(cf.default)}\")\n print(f\"{'New' if args.set or args.value else 'Current'} \"\n f\"value: {ci.value}\")\n","repo_name":"sde1000/quicktill","sub_path":"quicktill/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7802,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"} +{"seq_id":"70204269635","text":"class Solution:\n def winnerSquareGame(self, n: int) -> bool:\n dp=[False for i in range(n+1)]\n dp[0]=False\n dp[1]=True\n dp[2]=False\n for i in range(3,n+1):\n flag=False\n for j in range(1,i+1):\n if j*j>i:\n break\n flag=flag or not dp[i-j*j] \n dp[i]=flag\n return dp[n]\na = Solution()\nprint(a.winnerSquareGame(4))","repo_name":"z369437558/Leetcode","sub_path":"1510.py","file_name":"1510.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72330804355","text":"from django.core.validators import URLValidator\nfrom django.db import models\nfrom django.db.models.expressions import NoneType\n\n\n# Create your models here.\n\nclass Site(models.Model):\n title = models.CharField(max_length=255, 
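# The winnerSquareGame table above implements the recurrence for LeetCode 1510:
# position i is winning iff some square move j*j <= i leaves the opponent in a
# losing position, i.e. dp[i] = any(not dp[i - j*j]). An equivalent memoized
# form of the same recurrence:
from functools import lru_cache

@lru_cache(maxsize=None)
def wins(n: int) -> bool:
    j = 1
    while j * j <= n:
        if not wins(n - j * j):
            return True
        j += 1
    return False

assert wins(1) and not wins(2) and wins(4)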
unique=True)\n base_url = models.TextField(validators=[URLValidator()], null=True, blank=True)\n icon_image = models.ImageField(upload_to=\"images/icons\",null=True, blank=True )\n last_update = models.DateTimeField()\n created_at = models.DateField(auto_now_add=True)\n\n\nclass JobManager(models.Manager):\n def get_last_job_link(self, site_id):\n if site_id is not None:\n try:\n link = self.filter(is_last=True, site_id=site_id).last().link\n except:\n link = \"\"\n else:\n try:\n link = self.filter(is_last=True).last().link\n except:\n link = \"\"\n return link\n\n\nclass Job(models.Model):\n site = models.ForeignKey(Site, on_delete=models.CASCADE)\n title = models.CharField(max_length=255, verbose_name=\"عنوان آگهی\")\n published_at = models.DateField(verbose_name=\"زمان انتشار آگهی\")\n # image = models.ImageField(upload_to='images/logo', verbose_name='تصویر لوگو', null=True, blank=True)\n image = models.TextField(validators=[URLValidator()], null=True, blank=True, verbose_name=\"آدرس تصویر\")\n link = models.TextField(validators=[URLValidator()], null=True, blank=True, verbose_name=\"لینک صفحه آگهی\")\n is_last = models.BooleanField(default=False, verbose_name=\"آخرین آیتم\")\n objects = JobManager()\n\n class Meta:\n verbose_name = \"شغل\"\n verbose_name_plural = \"شغل ها\"\n ordering = [\"-published_at\"]\n\n def __str__(self):\n return self.title\n\n\nclass JobSeeker(models.Model):\n site = models.ForeignKey(\"Site\", on_delete=models.SET_NULL, null=True)\n full_name = models.CharField(max_length=255)\n description = models.TextField(null=True, blank=True)\n avatar = models.ImageField(\"images\", null=True, blank=True)\n image = models.TextField(validators=[URLValidator()], null=True, blank=True, verbose_name=\"آدرس تصویر\")\n link = models.TextField(validators=[URLValidator()], null=True, blank=True, verbose_name=\"لینک صفحه آگهی\")\n is_last = models.BooleanField(default=False, verbose_name=\"آخرین آیتم\")\n created_at = models.DateTimeField(auto_now_add=True)\n objects = JobManager()\n\n class Meta:\n verbose_name = \"کارجو\"\n verbose_name_plural = \"کارجو ها\"\n ordering = [\"-created_at\"]\n\n def __str__(self):\n return self.full_name\n\n\nclass ScrapingSetting(models.Model):\n site = models.ForeignKey(\"Site\", on_delete=models.CASCADE, verbose_name=\"سایت\")\n number = models.PositiveIntegerField(default=20, verbose_name=\"تعداد\")\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f\"{self.site.title} ==> {self.number}\"\n\n class Meta:\n verbose_name = 'تنظیمات اسکرپینگ'\n verbose_name_plural = 'تنظیمات اسکرپینگ'\n","repo_name":"amirdks/web_scraping_project","sub_path":"main_module/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44385327081","text":"import logging\nfrom Base.base_driver import BaseDriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import *\nfrom selenium.webdriver.support.select import Select\nfrom Utilities.utils import custom_logger\n\n\nclass PractisePage(BaseDriver):\n log = custom_logger(logLevel=logging.INFO)\n def __init__(self, driver):\n super().__init__(driver)\n self.driver = driver\n\n # Locators\n BMW_RADIO_BTN = (By.XPATH, \"//input[@id='bmwradio']\")\n BENZ_RADIO_BTN = (By.XPATH, \"//input[@id='benzradio']\")\n HONDA_RADIO_BTN = (By.XPATH, \"//input[@id='hondaradio']\")\n CAR_DROPDOWN = (By.XPATH, \"//select[@id='carselect']\")\n 
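# JobManager.get_last_job_link above wraps `.last().link` in bare excepts to
# cover empty querysets. Django's QuerySet.last() returns None when there are
# no rows, so the same lookup can be written without exceptions (a sketch
# against the Job model above, not a drop-in replacement):
def get_last_job_link(manager, site_id=None):
    qs = manager.filter(is_last=True)
    if site_id is not None:
        qs = qs.filter(site_id=site_id)
    last = qs.last()
    return last.link if last is not None else ""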
BMW_CHECKBOX = (By.XPATH, \"//input[@id='bmwcheck']\")\n BENZ_CHECKBOX = (By.XPATH, \"//input[@id='benzcheck']\")\n HONDA_CHECKBOX = (By.XPATH, \"//input[@id='hondacheck']\")\n SWITCH_WINDOW_BTN = (By.XPATH, \"//button[@id='openwindow']\")\n\n\n def target_url(self):\n self.driver.get(self.practisepage)\n\n\n def radio_checkbox_btn_selection(self, *args):\n radio_list = [*args]\n condition = False\n for btn in radio_list:\n btn = self.driver.find_element(*btn)\n btn.click()\n if not btn.is_selected():\n self.log.error(f\"*** Radio button error. Not able to selected {btn}\")\n condition = False\n break\n else:\n condition = True\n return condition\n\n\n def dropdown_selection(self, dropdown):\n element = self.driver.find_element(*dropdown)\n select = Select(element)\n length_of_select = len(select.options)\n try:\n for num in range(length_of_select):\n select.select_by_index(num)\n except NoSuchElementException:\n self.log.error(f\"*** Option in dropdown has not been found ***\")\n return False\n else:\n return True\n\n\n\n\n\n\n\n\n\n","repo_name":"youssefjohn/selenium-website-1-python","sub_path":"Pages/practise_page.py","file_name":"practise_page.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16919547448","text":"from operator import itemgetter\nimport os\nimport functools\nimport numpy as np\nimport pickle\nimport shutil\n\nfrom constants import *\nimport variables as var\n\ndef listsSetDifference(list1, list2):\n return list(set(list1) - set(list2))\n\n####################################################################################\n# PROGRAM SPECIFICS\n\ndef codebaseNetsPath():\n return \"DPN_{};DBN_{};MC_{}\"\\\n .format(var.HOW_MANY_DAYS_IN_A_NET, var.DAYS_BETWEEN_NETS, var.MAX_MONTHS_COLLAB)\n\ndef datetimeIntoStr(date, present=False):\n if date == ALL_TIME:\n return ALL_TIME\n \n if present:\n if var.DAYS_BETWEEN_NETS % 365 == 0 and var.HOW_MANY_DAYS_IN_A_NET % 365 == 0:\n return str(date.year)\n else:\n if date.month < 10:\n month = '0' + str(date.month)\n else:\n month = str(date.month)\n return '{}/{}'.format(month, date.year)\n else:\n return '{},{}'.format(date.month, date.year)\n\n####################################################################################\n# FILE SYSTEM INTERACTIONS\n\ndef createDirRec(path):\n for i in range(len(path.split(\"/\"))):\n createDir(\"/\".join(path.split(\"/\")[:i]))\n createDir(path)\n\ndef createDir(path):\n currentPath = os.getcwd()\n currentPath = currentPath + \"/\" + path\n try:\n os.mkdir(currentPath)\n except OSError: # path is already created\n return -1\n\ndef removeDir(relativePath):\n currentPath = os.getcwd()\n currentPath = currentPath + \"/\" + relativePath\n if os.path.exists(currentPath):\n try:\n shutil.rmtree(currentPath)\n except OSError as e:\n print (\"Error: %s - %s.\" % (e.filename, e.strerror))\n\ndef getStructInFile(fileName):\n try:\n with open(fileName, 'rb') as f:\n return pickle.load(f)\n except FileNotFoundError as e: \n return {}\n\ndef saveStructInFile(d, fileName):\n with open(fileName, 'wb') as f:\n pickle.dump(d, f)\n\ndef doesPathExist(path):\n return os.path.exists(path)\n\ndef getItemsInDir(path):\n return os.listdir(path)\n\ndef getGraphData(graphPath):\n sPath = graphPath.split(\"/\")\n figName = sPath[-1].split(\".\")[0]\n path = '{}/{}/{}.pickle'.format(\"/\".join(sPath[:-1]), 'pickledData', figName)\n return getStructInFile(path)\n\ndef getFileOfSubdirsInDir(pathToDir, 
fileInfoName):\n dirComponents = sorted(os.listdir(pathToDir))\n\n fileDictList = []\n for d in dirComponents:\n createDirRec(pathToDir)\n f = open('{}/{}/{}'.format(pathToDir, d, fileInfoName), encoding=\"utf-8\")\n\n # txt to dict\n fileDict = {}\n for line in f:\n sLine = line.split(\":\")\n sLine[1] = sLine[1][1:-1]\n try:\n if sLine[0] == PARAMS_ID:\n fileDict[sLine[0]] = int(sLine[1])\n else:\n fileDict[sLine[0]] = float(sLine[1])\n except ValueError:\n fileDict[sLine[0]] = sLine[1]\n \n fileDictList += [fileDict]\n \n return fileDictList\n\n#################################################################################### \n# MATHS\n\ndef normalize(struct, xDist = \"uni\", alpha=None, normalizeDictValues=False):\n if normalizeDictValues: # dict\n l = struct.values()\n else: # list\n l = struct\n\n if list(filter(lambda x: x != 0, l)) == []:\n return [1 / len(l)] * len(l)\n \n if xDist == \"uni\":\n total = functools.reduce(lambda x, y: x + y, filter(lambda x: x != 0, l))\n if normalizeDictValues:\n return {k: v / total for k, v in struct.items()}\n else:\n return list(map(lambda x: x / total, l))\n\ndef histogram(values):\n hDict = {}\n for v in values:\n if v in hDict:\n hDict[v] += 1\n else:\n hDict[v] = 1\n return hDict\n\ndef cumulativeProbabilityDistribution(values, relativeProbability=True):\n hDict = histogram(values)\n x, y = [], []\n for kS in sorted(hDict.keys()):\n x += [kS]\n y += [hDict[kS]]\n y = np.cumsum(y[::-1])[::-1]\n if relativeProbability:\n y = list(map(lambda x: x / len(values), y))\n return x, y\n\ndef kolmogorovSmirnovCumDists(cumDist1, cumDist2, inverseCumDists=True, topKS=False):\n x1, y1 = cumDist1\n x2, y2 = cumDist2\n\n cd1Values, cd2Values = [], []\n infoAboutVal = {}\n for iX in range(len(x1)):\n x = x1[iX]\n if x in x2: \n xIndexInX2 = x2.index(x)\n cd1Values += [y1[iX]]\n cd2Values += [y2[xIndexInX2]]\n infoAboutVal[x] = (\"BOTH\", len(cd1Values) - 1)\n else:\n x2Iterator = 0\n while (x2Iterator < len(x2) and x > x2[x2Iterator]):\n if x < x2[x2Iterator]:\n break\n x2Iterator += 1\n \n if x2Iterator == len(x2):\n cd1Values += [y1[iX]]\n if inverseCumDists:\n cd2Values += [0]\n else:\n cd2Values += [1]\n else: \n cd1Values += [y1[iX]]\n cd2Values += [y2[x2Iterator]]\n infoAboutVal[x] = (\"ONLY_1\", len(cd1Values) - 1)\n \n for iX in range(len(x2)):\n x = x2[iX]\n if x not in x1:\n x1Iterator = 0\n while (x1Iterator < len(x1) and x > x1[x1Iterator]):\n\n if x < x1[x1Iterator]:\n break\n x1Iterator += 1\n \n if x1Iterator == len(x1):\n if inverseCumDists:\n cd1Values += [0]\n else:\n cd1Values += [1]\n cd2Values += [y2[iX]]\n else: \n cd1Values += [y1[x1Iterator]]\n cd2Values += [y2[iX]]\n infoAboutVal[x] = (\"ONLY_2\", len(cd1Values) - 1)\n\n maxDiff = 0\n if topKS:\n numberOfValues1, numberOfValues2 = 0, 0\n for val, info in sorted(infoAboutVal.items(), key=itemgetter(0), reverse=True):\n if numberOfValues1 >= 10 and numberOfValues2 >= 10:\n indexToStartFrom = info[1]\n break\n if info[0] == \"BOTH\":\n numberOfValues1 += 1\n numberOfValues2 += 1\n elif info[0] == \"ONLY_1\":\n numberOfValues1 += 1\n elif info[0] == \"ONLY_2\":\n numberOfValues2 += 1\n\n indexesToEvaluate = range(indexToStartFrom, len(cd1Values))\n else:\n indexesToEvaluate = range(len(cd1Values))\n\n for i in indexesToEvaluate:\n valDiff = abs(cd1Values[i] - cd2Values[i])\n if valDiff > maxDiff:\n maxDiff = valDiff\n\n return 
maxDiff\n","repo_name":"josemiguelpgomes/MasterThesis","sub_path":"code/utils/auxiliaryFunctions.py","file_name":"auxiliaryFunctions.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10890111450","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom webdriver_manager.chrome import ChromeDriverManager\n\ndef clear(text,element):\n for char in text:\n element.send_keys(Keys.BACK_SPACE)\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\n#driver = webdriver.Firefox(executable_path=\"\")\n\nsearchtext = \"helloworld\"\ndriver.get(\"https://www.google.com/\")\ns = driver.find_element_by_name(\"q\")\ns.send_keys(searchtext)\n# s.clear()\n\n# for char in 'selenium':\n# s.send_keys(Keys.BACK_SPACE)\n\n\n# def cleartext(text):\n# for char in text:\n# s.send_keys(Keys.BACK_SPACE)\n#\n# cleartext(searchtext)\n\nclear(searchtext,s)","repo_name":"muktabehera/DjangoSeleniumPython","sub_path":"ClearGoogleSearch.py","file_name":"ClearGoogleSearch.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16104380548","text":"import geopandas\nimport os\n\ndef shp_to_gjson():\n shp_path = 'data/shp_name/bremervoerde/'\n \n for file_name in [file for file in os.listdir(shp_path) if file.endswith('.shp')]:\n shape_name, _ = os.path.splitext(file_name)\n print(shape_name)\n \n shpfile = geopandas.read_file(os.path.join(shp_path,file_name))\n shpfile.to_file('data/geojson/bremervoerde/{}.geojson'.format(shape_name), driver='GeoJSON', encoding='utf-8')\n \ndef main():\n geojson = shp_to_gjson()\n \nif __name__ == \"__main__\":\n main() \n \n","repo_name":"kriti115/Dataset-Preparation-for-Frame-Field-Learning-using-Orthophotos","sub_path":"shp_to_geojson.py","file_name":"shp_to_geojson.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7605860982","text":"from kqcircuits.pya_resolver import pya\nfrom kqcircuits.util.parameters import Param, pdt\n\nfrom kqcircuits.test_structures.test_structure import TestStructure\n\n\nclass CrossTest(TestStructure):\n \"\"\"PCell declaration for optical lithography test alignment cross markers.\n\n Contains a given number of alignment cross markers in `base_metal_gap_wo_grid`-layer with given width, length and\n spacing.\n \"\"\"\n\n num_crosses = Param(pdt.TypeInt, \"Number of crosses\", 10)\n cross_width = Param(pdt.TypeDouble, \"Width of the crosses arms\", 4, unit=\"μm\")\n cross_length = Param(pdt.TypeDouble, \"Length of the crosses\", 15, unit=\"μm\")\n cross_spacing = Param(pdt.TypeDouble, \"Spacing between the crosses\", 100, unit=\"μm\")\n cross_box_distance = Param(pdt.TypeDouble, \"Distance between crosses and respective boxes\", 4, unit=\"μm\")\n\n def build(self):\n\n layer_base_metal = self.get_layer(\"base_metal_gap_wo_grid\")\n\n box = pya.DBox(pya.DPoint(-self.cross_width/2, -self.cross_box_distance), pya.DPoint(self.cross_width/2,\n -self.cross_box_distance - self.cross_width))\n vertical_tick = pya.DBox(pya.DPoint(-self.cross_width/2, 0), pya.DPoint(self.cross_width/2, self.cross_length))\n horizontal_tick = pya.DBox(pya.DPoint(-self.cross_length/2, self.cross_length/2 - self.cross_width/2),\n pya.DPoint(self.cross_length/2, self.cross_length/2 + self.cross_width/2))\n\n for i in range(self.num_crosses):\n 
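# kolmogorovSmirnovCumDists above aligns two cumulative distributions on a
# merged support and takes the sup of |F1 - F2|. With numpy, the two-sample KS
# distance can be computed directly from the raw samples by evaluating both
# empirical CDFs at every observed value:
import numpy as np

def ks_distance(a, b):
    a, b = np.sort(np.asarray(a)), np.sort(np.asarray(b))
    xs = np.concatenate([a, b])
    cdf_a = np.searchsorted(a, xs, side="right") / len(a)
    cdf_b = np.searchsorted(b, xs, side="right") / len(b)
    return float(np.max(np.abs(cdf_a - cdf_b)))

assert ks_distance([1, 2, 3], [1, 2, 3]) == 0.0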
trans = pya.DTrans(0, False, i*self.cross_spacing, 0).to_itype(self.layout.dbu)\n box_trans = pya.Region(trans * box.to_itype(self.layout.dbu))\n vertical_tick_trans = pya.Region(trans * vertical_tick.to_itype(self.layout.dbu))\n horizontal_tick_trans = pya.Region(trans * horizontal_tick.to_itype(self.layout.dbu))\n\n marker = box_trans + vertical_tick_trans + horizontal_tick_trans\n\n self.cell.shapes(layer_base_metal).insert(marker)\n","repo_name":"iqm-finland/KQCircuits","sub_path":"klayout_package/python/kqcircuits/test_structures/cross_test.py","file_name":"cross_test.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"61"} +{"seq_id":"28183987074","text":"import random\nimport copy\nimport statistics\n\nclass Teeko2Player:\n \"\"\" An object representation for an AI game player for the game Teeko2.\n \"\"\"\n board = [[' ' for j in range(5)] for i in range(5)]\n pieces = ['b', 'r']\n\n def __init__(self):\n \"\"\" Initializes a Teeko2Player object by randomly selecting red or black as its\n piece color.\n \"\"\"\n self.my_piece = random.choice(self.pieces)\n self.opp = self.pieces[0] if self.my_piece == self.pieces[1] else self.pieces[1]\n\n def make_move(self, state):\n \"\"\" Selects a (row, col) space for the next move. You may assume that whenever\n this function is called, it is this player's turn to move.\n\n Args:\n state (list of lists): should be the current state of the game as saved in\n this Teeko2Player object. Note that this is NOT assumed to be a copy of\n the game state and should NOT be modified within this method (use\n place_piece() instead). Any modifications (e.g. to generate successors)\n should be done on a deep copy of the state.\n\n In the \"drop phase\", the state will contain less than 8 elements which\n are not ' ' (a single space character).\n\n Return:\n move (list): a list of move tuples such that its format is\n [(row, col), (source_row, source_col)]\n where the (row, col) tuple is the location to place a piece and the\n optional (source_row, source_col) tuple contains the location of the\n piece the AI plans to relocate (for moves after the drop phase). In\n the drop phase, this list should contain ONLY THE FIRST tuple.\n\n Note that without drop phase behavior, the AI will just keep placing new markers\n and will eventually take over the board. 
This is not a valid strategy and\n will earn you no points.\n \"\"\"\n\n # drop_phase = True # TODO: detect drop phase\n\n # if not drop_phase:\n # # TODO: choose a piece to move and remove it from the board\n # # (You may move this condition anywhere, just be sure to handle it)\n # #\n # # Until this part is implemented and the move list is updated\n # # accordingly, the AI will not follow the rules after the drop phase!\n # pass\n\n # # select an unoccupied space randomly\n # # TODO: implement a minimax algorithm to play better\n # move = []\n # (row, col) = (random.randint(0,4), random.randint(0,4))\n # while not state[row][col] == ' ':\n # (row, col) = (random.randint(0,4), random.randint(0,4))\n\n # # ensure the destination (row,col) tuple is at the beginning of the move list\n # move.insert(0, (row, col))\n # return move\n\n def minimax(move, state, depth, isAITurn):\n \"\"\"\n arg move = the `move` that led to `state`\n\n Returns (move, h)\n \"\"\"\n h = self.heuristic(state)\n\n if depth == 0 or abs(h) == 1:\n return (move, h)\n\n if isAITurn:\n bestVal = -2\n bestMove = None\n for (succMove, succ) in self.successors(state, True):\n _, mmxVal = minimax(succMove, succ, depth - 1, False)\n if mmxVal > bestVal:\n bestVal = mmxVal\n bestMove = succMove\n return (bestMove, bestVal)\n\n else:\n bestVal = 2\n bestMove = None\n for (succMove, succ) in self.successors(state, False):\n _, mmxVal = minimax(succMove, succ, depth - 1, True)\n if mmxVal < bestVal:\n bestVal = mmxVal\n bestMove = succMove\n return (bestMove, bestVal)\n\n \n bestMove, _ = minimax(None, state, 3, True)\n\n return bestMove\n\n\n\n def successors(self, state, isAITurn):\n \"\"\"\n Returns a list of successors for a state\n\n Returns: [(move, newState)]\n \"\"\"\n isDropPhase = 8 != sum([sum([1 if cell in self.pieces else 0 for cell in row]) for row in state])\n\n piece = self.my_piece if isAITurn else self.opp\n\n if isDropPhase:\n succs = []\n\n # Iterate over drop points\n for rowI in range(5):\n for colI in range(5):\n if state[rowI][colI] == ' ':\n newSucc = ([(rowI, colI)], copy.deepcopy(state))\n newSucc[1][rowI][colI] = piece\n succs.append(newSucc)\n \n return succs\n \n else:\n succs = []\n\n # Iterate over source points\n for rowI in range(5):\n for colI in range(5):\n if state[rowI][colI] == piece:\n # Iterate over destination points\n for (rowOff, colOff) in [(1,1),(1,0),(1,-1),(0,1),(0,-1),(-1,1),(-1,0),(-1,-1)]:\n if 0 <= rowI+rowOff < 5 and 0 <= colI+colOff < 5:\n if state[rowI+rowOff][colI+colOff] == ' ':\n newSucc = ([(rowI+rowOff, colI+colOff), (rowI, colI)], copy.deepcopy(state))\n newSucc[1][rowI][colI] = ' '\n newSucc[1][rowI+rowOff][colI+colOff] = piece\n succs.append(newSucc)\n \n return succs\n\n def opponent_move(self, move):\n \"\"\" Validates the opponent's next move against the internal board representation.\n You don't need to touch this code.\n\n Args:\n move (list): a list of move tuples such that its format is\n [(row, col), (source_row, source_col)]\n where the (row, col) tuple is the location to place a piece and the\n optional (source_row, source_col) tuple contains the location of the\n piece the AI plans to relocate (for moves after the drop phase). 
In\n the drop phase, this list should contain ONLY THE FIRST tuple.\n \"\"\"\n # validate input\n if len(move) > 1:\n source_row = move[1][0]\n source_col = move[1][1]\n if source_row != None and self.board[source_row][source_col] != self.opp:\n raise Exception(\"You don't have a piece there!\")\n if self.board[move[0][0]][move[0][1]] != ' ':\n raise Exception(\"Illegal move detected\")\n # make move\n self.place_piece(move, self.opp)\n\n def place_piece(self, move, piece):\n \"\"\" Modifies the board representation using the specified move and piece\n\n Args:\n move (list): a list of move tuples such that its format is\n [(row, col), (source_row, source_col)]\n where the (row, col) tuple is the location to place a piece and the\n optional (source_row, source_col) tuple contains the location of the\n piece the AI plans to relocate (for moves after the drop phase). In\n the drop phase, this list should contain ONLY THE FIRST tuple.\n\n This argument is assumed to have been validated before this method\n is called.\n piece (str): the piece ('b' or 'r') to place on the board\n \"\"\"\n if len(move) > 1:\n self.board[move[1][0]][move[1][1]] = ' '\n self.board[move[0][0]][move[0][1]] = piece\n\n def print_board(self):\n \"\"\" Formatted printing for the board \"\"\"\n for row in range(len(self.board)):\n line = str(row)+\": \"\n for cell in self.board[row]:\n line += cell + \" \"\n print(line)\n print(\" A B C D E\")\n # print(\"heuristic values =\" + str(self.heuristic_values(self.board)))\n # print(\"heuristic =\" + str(self.heuristic(self.board)))\n\n def game_value(self, state):\n \"\"\" Checks the current board status for a win condition\n\n Args:\n state (list of lists): either the current state of the game as saved in\n this Teeko2Player object, or a generated successor state.\n\n Returns:\n int: 1 if this Teeko2Player wins, -1 if the opponent wins, 0 if no winner\n \"\"\"\n\n h = self.heuristic(state)\n if abs(h) == 1:\n return h\n \n return 0\n\n def heuristic(self, state):\n \"\"\" A heuristic of the state argument that:\n = 1 if The AI won\n = -1 if the player won\n otherwise = the average of all \"significant\" heuristic values from the function below\n Significant in this case means |value| >= 0.50\"\"\"\n CUTOFF = 0.50\n\n h_values = self.heuristic_values(state)\n\n if max(h_values) == 1:\n return 1\n elif min(h_values) == -1:\n return -1\n\n sig_values = list(filter(lambda value: abs(value) >= CUTOFF, h_values))\n \n return statistics.mean(sig_values) if len(sig_values) != 0 else 0\n \n\n\n def heuristic_values(self, state):\n \"\"\" Iterates over all possible win configurations (each row/column/diagonal of four, and the diamonds),\n and returns a \"score\" for each. The score, from -1 to 1, represents how close to a winning configuration\n each configuration is. 
Closer to 1 is better for AI, closer to -1 is better for player.\n Any score of 1 or -1 means the respective player won.\n This returns a list, for further processing \"\"\"\n\n scores = []\n\n # Row check\n for row in state:\n for i in range(2):\n score = 0.0\n for col in range(i, i+4):\n if row[col] == self.my_piece:\n score += 0.25\n elif row[col] == self.opp:\n score -= 0.25\n scores.append(score)\n\n # Column check\n for col in range(5):\n for i in range(2):\n score = 0.0\n for row in range(i, i+4):\n if state[row][col] == self.my_piece:\n score += 0.25\n elif state[row][col] == self.opp:\n score -= 0.25\n scores.append(score)\n\n # Diagonal checks\n for rowI in range(2):\n for colI in range(2):\n # UpperLeft-LowerRight\n score = 0.0\n for i in range(4):\n if state[rowI+i][colI+i] == self.my_piece:\n score += 0.25\n elif state[rowI+i][colI+i] == self.opp:\n score -= 0.25\n scores.append(score)\n\n # LowerLeft-UpperRight\n score = 0.0\n for i in range(4):\n if state[4-rowI-i][colI+i] == self.my_piece:\n score += 0.25\n elif state[4-rowI-i][colI+i] == self.opp:\n score -= 0.25\n scores.append(score)\n\n # Diamond checks\n for rowI in range(1,4):\n for colI in range(1,4):\n score = 0.0\n for (rowOff, colOff) in [(0,1),(1,0),(0,-1),(-1,0)]:\n if state[rowI+rowOff][colI+colOff] == self.my_piece:\n score += 0.25\n elif state[rowI+rowOff][colI+colOff] == self.opp:\n score -= 0.25\n\n # Penalize by half if the middle isnt clear\n if state[rowI][colI] != ' ':\n score /= 2\n scores.append(score)\n\n return scores\n\n############################################################################\n#\n# THE FOLLOWING CODE IS FOR SAMPLE GAMEPLAY ONLY\n#\n############################################################################\n\nai = Teeko2Player()\npiece_count = 0\nturn = 0\n\n# drop phase\nwhile piece_count < 8 and abs(ai.game_value(ai.board)) != 1:\n\n # get the player or AI's move\n if ai.my_piece == ai.pieces[turn]:\n ai.print_board()\n move = ai.make_move(ai.board)\n ai.place_piece(move, ai.my_piece)\n print(ai.my_piece+\" moved at \"+chr(move[0][1]+ord(\"A\"))+str(move[0][0]))\n else:\n move_made = False\n ai.print_board()\n print(ai.opp+\"'s turn\")\n while not move_made:\n player_move = input(\"Move (e.g. B3): \")\n while player_move[0] not in \"ABCDE\" or player_move[1] not in \"01234\":\n player_move = input(\"Move (e.g. B3): \")\n try:\n ai.opponent_move([(int(player_move[1]), ord(player_move[0])-ord(\"A\"))])\n move_made = True\n except Exception as e:\n print(e)\n\n # update the game variables\n piece_count += 1\n turn += 1\n turn %= 2\n\n# move phase - can't have a winner until all 8 pieces are on the board\nwhile ai.game_value(ai.board) == 0:\n\n # get the player or AI's move\n if ai.my_piece == ai.pieces[turn]:\n ai.print_board()\n move = ai.make_move(ai.board)\n ai.place_piece(move, ai.my_piece)\n print(ai.my_piece+\" moved from \"+chr(move[1][1]+ord(\"A\"))+str(move[1][0]))\n print(\" to \"+chr(move[0][1]+ord(\"A\"))+str(move[0][0]))\n else:\n move_made = False\n ai.print_board()\n print(ai.opp+\"'s turn\")\n while not move_made:\n move_from = input(\"Move from (e.g. B3): \")\n while move_from[0] not in \"ABCDE\" or move_from[1] not in \"01234\":\n move_from = input(\"Move from (e.g. B3): \")\n move_to = input(\"Move to (e.g. B3): \")\n while move_to[0] not in \"ABCDE\" or move_to[1] not in \"01234\":\n move_to = input(\"Move to (e.g. 
B3): \")\n try:\n ai.opponent_move([(int(move_to[1]), ord(move_to[0])-ord(\"A\")),\n (int(move_from[1]), ord(move_from[0])-ord(\"A\"))])\n move_made = True\n except Exception as e:\n print(e)\n\n # update the game variables\n turn += 1\n turn %= 2\n\nai.print_board()\nif ai.game_value(ai.board) == 1:\n print(\"AI wins! Game over.\")\nelse:\n print(\"You win! Game over.\")\n","repo_name":"DirkyJerky/Uni","sub_path":"UWM/540/PA6/teeko2_player.py","file_name":"teeko2_player.py","file_ext":"py","file_size_in_byte":14426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20640933736","text":"#A script to plot volcano plots\r\n\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom adjustText import adjust_text\r\nimport random\r\n\r\n#read the csv file\r\ndeseq_results = pd.read_csv('ctnnb_TPMdeseq_analysis.csv').dropna()\r\ndeseq_results['nlog10'] = -np.log10(deseq_results.padj)\r\n\r\n\r\n\r\n#Determine the upregulated and downregulated genes based on a specified threshold for log2 fold change and p-adj\r\n\r\nupregulated_genes = deseq_results[(deseq_results['log2FoldChange'] > 0.5) & (deseq_results['padj'] < 0.05)].symbol.tolist()\r\n\r\nupcount = len(upregulated_genes)\r\n\r\n#print(\"Number of upregulated genes\", upcount)\r\n#not_diff = deseq_results[deseq_results['padj'] > 0.05]\r\n\r\ndownregulated_genes = deseq_results[(deseq_results['log2FoldChange'] < -0.5) & (deseq_results['padj'] < 0.05)].symbol.tolist()\r\n\r\ndowncount = len(downregulated_genes)\r\n\r\n#print(\"Number of downregulated genes\", downcount)\r\n\r\nnotdiff_genes = deseq_results[((deseq_results['log2FoldChange'] < 0.5) & (deseq_results['log2FoldChange'] > -0.5) & (deseq_results['padj'] < 0.05))|(deseq_results['padj'] > 0.05)].symbol.tolist()\r\n\r\nnotdiff = len(notdiff_genes)\r\n#print(notdiff)\r\n\r\n\r\n#print(\"Number of not differentially expressed genes\", notdiff)\r\n#print(upregulated_genes)\r\n\r\n\r\n\r\ndef map_color(a):\r\n log2FoldChange, symbol, padj, nlog10 = a\r\n\r\n if symbol in notdiff_genes:\r\n return 'Not_diff_expr'+' '+str(notdiff)\r\n elif symbol in upregulated_genes:\r\n return 'upregulated'+' '+str(upcount)\r\n elif symbol in downregulated_genes:\r\n return 'downregulated'+' '+str(downcount)\r\n\r\n\r\n\r\n\r\ndeseq_results['key'] = deseq_results[['log2FoldChange', 'symbol', 'padj', 'nlog10']].apply(map_color, axis = 1)\r\n\r\n\r\n#print(deseq_results)\r\n#make the plot\r\n\r\nplt.figure(figsize = (6,6))\r\n\r\nax = sns.scatterplot(data = deseq_results, x = 'log2FoldChange', y = 'nlog10',\r\n hue = 'key', hue_order = [ 'upregulated'+' '+str(upcount),'downregulated'+' '+str(downcount), 'Not_diff_expr'+' '+str(notdiff)],\r\n palette = ['red','green', 'grey'])\r\n\r\n\r\nax.axhline(1, zorder = 0, c = 'k', lw = 1, ls = '--')\r\nax.axvline(-1, zorder = 0, c = 'k', lw = 1, ls = '--')\r\nax.axvline(1, zorder = 0, c = 'k', lw = 1, ls = '--')\r\n\r\n#mark selected genes\r\n\r\nmark_list=['Cdkn2A','Bst2','Grem1','Bst2','Foxj1','Gabrb3','Cers1']\r\ntexts = []\r\ntexts = [plt.text(row['log2FoldChange'], row['nlog10'], row['symbol'], fontsize = 12, weight = 'bold') for index, row in deseq_results.iterrows() if row['symbol'] in mark_list]\r\n\r\n# Adjust the text\r\nadjust_text(texts, arrowprops=dict(arrowstyle='-', color='k'))\r\n\r\n\r\n#for index,row in deseq_results.iterrows():\r\n# if row['symbol'] in mark_list:\r\n# plt.annotate(row['symbol'],(row['log2FoldChange'],row['nlog10']), 
textcoords=\"offset points\", xytext=(5, 5),\r\n # ha='left', bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.5))\r\n\r\n\r\n\r\nplt.legend(loc = 1, bbox_to_anchor = (1.5,1), frameon = True, prop = {'weight':'bold'})\r\n\r\nfor axis in ['bottom', 'left']:\r\n ax.spines[axis].set_linewidth(2)\r\n\r\nfor axis in ['top', 'right']:\r\n ax.spines[axis].set_linewidth(2)\r\n\r\n#ax.spines['top'].set_visible(True)\r\n#ax.spines['right'].set_visible(False)\r\n\r\nax.tick_params(width = 2)\r\n\r\nplt.xticks(size = 12, weight = 'bold')\r\nplt.yticks(size = 12, weight = 'bold')\r\n\r\nplt.xlabel(\"$log_{2}$ fold change\", size = 15)\r\nplt.ylabel(\"-$log_{10}$ FDR\", size = 15)\r\n\r\nplt.savefig('volcano_ctnnb_labeled_TPM.png', dpi = 300, bbox_inches = 'tight', facecolor = 'white')\r\n\r\n\r\nplt.show()\r\n\r\n\r\n","repo_name":"Dorothynyamai/Python-project","sub_path":"volcano_plot.py","file_name":"volcano_plot.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35021039022","text":"#Faça um Programa que leia lista de 10 caracteres, e diga quantas consoantes foram lidas. Imprima as consoantes.\n\nlista = ['a', 'b', 't', 'z', 'r', 'i', 'q', 'u']\nvogais = ['a', 'e', 'i', 'o', 'u']\n\n# print('Digite 10 caracteres para a lista')\n# for i in range(10):\n# c = input()\n# lista.append(c[0])\n\nconsoantes = [letra for letra in lista if letra not in vogais]\nqtd = len(consoantes)\n \nprint('Consoantes =', consoantes)\nprint('Quantidade =', qtd)","repo_name":"eullergomes/POO","sub_path":"estrutura-dados/lista-1/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71435185474","text":"# ! 
-*- coding: utf-8 -*-\n\"\"\"\n@Author: Gump\n@Create Time: 20220824\n@Info:\n给你一个整数数组 nums 和一个整数 k ,请你返回其中出现频率前 k 高的元素。你可以按 任意顺序 返回答案。\n\"\"\"\nimport collections\n\n\n# 进阶:你所设计算法的时间复杂度 必须 优于 O(n log n) ,其中 n 是数组大小。\ndef topKFrequent(nums, k):\n count = collections.Counter(nums)\n res = sorted(count.items(), key=lambda x: x[1], reverse=True)\n\n return [value[0] for value in res][:k]\n\n\nif __name__ == '__main__':\n test_input = [1, 1, 1, 2, 2, 3]\n test_k = 2\n print(topKFrequent(test_input, test_k))\n","repo_name":"gump1368/leetcode-python","sub_path":"中级算法/排序和搜索/前K个高频元素.py","file_name":"前K个高频元素.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16923996645","text":"from PyQt5.QtWidgets import QDialog, QGridLayout, QPushButton, QApplication, QMessageBox\n\nfrom DyCommon.Ui.DyTreeWidget import *\n\n\nclass DyStockDataJaccardIndexPlotDlg(QDialog):\n \"\"\" 选择哪些杰卡德指数可视化\n \"\"\"\n def __init__(self, data, columns, parent=None):\n super().__init__(parent)\n\n self._data = data\n self._columns = columns\n\n self._initUi()\n\n def _initUi(self):\n self.setWindowTitle('选择哪些杰卡德指数可视化')\n \n # 控件\n cancelPushButton = QPushButton('Cancel')\n okPushButton = QPushButton('OK')\n cancelPushButton.clicked.connect(self._cancel)\n okPushButton.clicked.connect(self._ok)\n\n self._jaccardIndexWidget = DyTreeWidget([[x] for x in self._columns])\n\n # 布局\n grid = QGridLayout()\n grid.setSpacing(10)\n \n grid.addWidget(self._jaccardIndexWidget, 0, 0, 20, 2)\n \n grid.addWidget(okPushButton, 20, 1)\n grid.addWidget(cancelPushButton, 20, 0)\n \n self.setLayout(grid)\n self.resize(QApplication.desktop().size().width()//6, QApplication.desktop().size().height()//2)\n\n def _ok(self):\n names = self._jaccardIndexWidget.getCheckedTexts()\n\n if not names:\n QMessageBox.warning(self, '错误', '没有选择杰卡德指数!')\n return\n\n self._data['data'] = names\n\n self.accept()\n\n def _cancel(self):\n self.reject()\n","repo_name":"MicroEngine/DevilYuan","sub_path":"Stock/Data/Viewer/JaccardIndex/DyStockDataJaccardIndexPlotDlg.py","file_name":"DyStockDataJaccardIndexPlotDlg.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"61"} +{"seq_id":"32843954646","text":"def readFile(filename):\n with open(filename) as f:\n return [[int(y) for y in list(x)] for x in f.read().splitlines()]\n\n\ndef flatten(t):\n return [item for sublist in t for item in sublist]\n\n\ndef countAdjacent(x, y, grid):\n maxY = len(grid)\n maxX = len(grid[0])\n # check all adjacent cells\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n if i == 0 and j == 0:\n continue\n if x + i < 0 or x + i >= maxX:\n continue\n if y + j < 0 or y + j >= maxY:\n continue\n if grid[y + j][x + i] > 9:\n count += 1\n return count\n\n\ndef printGrid(grid):\n for y in grid:\n print(y)\n\n\ndef part1(grid, steps):\n maxY = len(grid)\n maxX = len(grid[0])\n countFlashes = 0\n # check all cells\n for i in range(steps):\n # First increment all values by one\n grid = [[min(a+1, 10) for a in b] for b in grid]\n newGrid = list(map(list, grid))\n # while there exists any values > 9 - flash + increment adjacent cells\n loop = True\n while(loop):\n newFlashCreated = False\n for y in range(maxY):\n for x in range(maxX):\n if grid[y][x] > 9:\n countFlashes += 1\n newGrid[y][x] = -100\n newGrid[y][x] += countAdjacent(x, y, grid)\n if newGrid[y][x] > 9:\n newFlashCreated = True\n grid = 
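The docstring of the top-K record above asks for a complexity better than O(n log n), but the submitted solution sorts every count. A size-k heap meets the requirement at O(n log k); a sketch on the same sample input:

```python
import collections
import heapq

def top_k_frequent(nums, k):
    # Count once, then take the k most frequent keys with a size-k heap.
    count = collections.Counter(nums)
    return heapq.nlargest(k, count, key=count.get)

print(top_k_frequent([1, 1, 1, 2, 2, 3], 2))  # [1, 2]
```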
list(map(list, newGrid))\n\n if not newFlashCreated:\n loop = False\n grid = [[max(0, a) for a in b] for b in grid]\n\n print(' END OF STEP:')\n printGrid(grid)\n\n return countFlashes\n\n###############################################################################\n\n\ndef part2(grid, steps):\n maxY = len(grid)\n maxX = len(grid[0])\n countFlashes = 0\n # check all cells\n for i in range(steps):\n # First increment all values by one\n grid = [[min(a+1, 10) for a in b] for b in grid]\n newGrid = list(map(list, grid))\n # while there exists any values > 9 - flash + increment adjacent cells\n loop = True\n while(loop):\n newFlashCreated = False\n for y in range(maxY):\n for x in range(maxX):\n if grid[y][x] > 9:\n countFlashes += 1\n newGrid[y][x] = -100\n newGrid[y][x] += countAdjacent(x, y, grid)\n if newGrid[y][x] > 9:\n newFlashCreated = True\n\n if(all(i < 0 for i in flatten(newGrid))):\n print('ALL VALUES FLASHED! {}'.format(i+1))\n return i + 1\n grid = list(map(list, newGrid))\n\n if not newFlashCreated:\n loop = False\n grid = [[max(0, a) for a in b] for b in grid]\n\n return 0\n\n\n###############################################################################\ntestInputSmall = readFile('./inputs/day11-test-small.in')\ntestInput = readFile('./inputs/day11-test.in')\ninput = readFile('./inputs/day11.in')\n\nprint(f\" Day 11 - part 1 (Test) is: {part1(testInputSmall, 2)} \")\nprint(f\" Day 11 - part 1 (Test) is: {part1(testInput, 100)} \")\nprint(f\" Day 11 - part 1 is: {part1(input, 100)} \")\nprint(f\" Day 11 - part 2 (Test) is: {part2(testInput, 200)} \")\nprint(f\" Day 11 - part 2 is: {part2(input, 500)} \")\n","repo_name":"IrfyNaz/advent-of-code-2021","sub_path":"python/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23546862851","text":"def flip(b, K, cakes, nb_hf):\n for i in range(b, b+K):\n if cakes[i] == '+':\n cakes[i] = '-'\n nb_hf -= 1\n else:\n cakes[i] = '+'\n nb_hf += 1\n\n return nb_hf\n\ndef isProblemDone(nb_hf, cakes):\n return nb_hf == len(cakes)\n\ndef getNbHf(cakes):\n nb_hf = 0\n for c in cakes:\n nb_hf += (c == '+')\n\n return nb_hf\n\ndef computeCakes(K, cakes):\n nb_hf = getNbHf(cakes)\n rounds = 0\n\n for i in range(len(cakes) - K + 1):\n if cakes[i] == '-':\n nb_hf = flip(i, K, cakes, nb_hf)\n rounds += 1\n\n if isProblemDone(nb_hf, cakes):\n return rounds\n else:\n return -1\n\nt = int(raw_input())\nfor i in xrange(1, t + 1):\n uin = raw_input().split(\" \")\n cakes = list(uin[0])\n K = int(uin[1])\n\n res = computeCakes(K, cakes)\n if res != -1:\n print(\"Case #\" + str(i) + \": \" + str(res))\n else:\n print(\"Case #\" + str(i) + \": IMPOSSIBLE\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3583.py","file_name":"3583.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26046155242","text":"r\"\"\"\n:mod:`mirgecom.transport` provides methods/utils for transport properties.\n\nTransport Models\n^^^^^^^^^^^^^^^^\nThis module is designed provide Transport Model objects used to compute and\nmanage the transport properties in viscous flows. The transport properties\ncurrently implemented are the dynamic viscosity ($\\mu$), the bulk viscosity\n($\\mu_{B}$), the thermal conductivity ($\\kappa$), and the species diffusivities\n($d_{\\alpha}$).\n\n.. autoclass:: GasTransportVars\n.. 
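On the pancake-flipping record just above: the left-to-right greedy is optimal because the leftmost face-down cake can only be fixed by a window starting exactly there. The O(n·K) cost of re-flipping each window can be reduced to O(n) with a parity difference array; a sketch (function name hypothetical, sample values from the record's input format):

```python
def min_flips(cakes, K):
    n = len(cakes)
    flip_end = [0] * (n + 1)  # marks indices where a flip window stops
    parity = 0                # number of windows covering index i, mod 2
    rounds = 0
    for i, c in enumerate(cakes):
        parity ^= flip_end[i]
        if (c == '-') ^ parity:       # effectively face-down at i
            if i + K > n:
                return -1             # no room left for a full window
            rounds += 1
            parity ^= 1
            flip_end[i + K] ^= 1
    return rounds

print(min_flips('---+-++-', 3))  # 3
print(min_flips('+++++', 4))     # 0
```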
autoclass:: TransportModel\n.. autoclass:: SimpleTransport\n.. autoclass:: PowerLawTransport\n\nExceptions\n^^^^^^^^^^\n.. autoexception:: TransportModelError\n\"\"\"\n\n__copyright__ = \"\"\"\nCopyright (C) 2021 University of Illinois Board of Trustees\n\"\"\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom typing import Optional\nfrom dataclasses import dataclass\nfrom arraycontext import dataclass_array_container\nimport numpy as np\nfrom meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa\nfrom meshmode.dof_array import DOFArray\nfrom mirgecom.fluid import ConservedVars\nfrom mirgecom.eos import GasEOS, GasDependentVars\n\n\nclass TransportModelError(Exception):\n \"\"\"Indicate that transport model is required for model evaluation.\"\"\"\n\n pass\n\n\n@dataclass_array_container\n@dataclass(frozen=True)\nclass GasTransportVars:\n \"\"\"State-dependent quantities for :class:`TransportModel`.\n\n Prefer individual methods for model use, use this\n structure for visualization or probing.\n\n .. attribute:: bulk_viscosity\n .. attribute:: viscosity\n .. attribute:: thermal_conductivity\n .. attribute:: species_diffusivity\n \"\"\"\n\n bulk_viscosity: np.ndarray\n viscosity: np.ndarray\n thermal_conductivity: np.ndarray\n species_diffusivity: np.ndarray\n\n\nclass TransportModel:\n r\"\"\"Abstract interface to thermo-diffusive transport model class.\n\n Transport model classes are responsible for\n computing relations between fluid or gas state variables and\n thermo-diffusive transport properties for those fluids.\n\n .. automethod:: bulk_viscosity\n .. automethod:: viscosity\n .. automethod:: thermal_conductivity\n .. automethod:: species_diffusivity\n .. automethod:: volume_viscosity\n .. 
automethod:: transport_vars\n \"\"\"\n\n def bulk_viscosity(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None) -> DOFArray:\n r\"\"\"Get the bulk viscosity for the gas (${\\mu}_{B}$).\"\"\"\n raise NotImplementedError()\n\n def viscosity(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None) -> DOFArray:\n r\"\"\"Get the gas dynamic viscosity, $\\mu$.\"\"\"\n raise NotImplementedError()\n\n def volume_viscosity(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None) -> DOFArray:\n r\"\"\"Get the 2nd coefficent of viscosity, $\\lambda$.\"\"\"\n raise NotImplementedError()\n\n def thermal_conductivity(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None,\n eos: Optional[GasEOS] = None) -> DOFArray:\n r\"\"\"Get the gas thermal_conductivity, $\\kappa$.\"\"\"\n raise NotImplementedError()\n\n def species_diffusivity(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None,\n eos: Optional[GasEOS] = None) -> DOFArray:\n r\"\"\"Get the vector of species diffusivities, ${d}_{\\alpha}$.\"\"\"\n raise NotImplementedError()\n\n def transport_vars(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None,\n eos: Optional[GasEOS] = None) -> GasTransportVars:\n r\"\"\"Compute the transport properties from the conserved state.\"\"\"\n return GasTransportVars(\n bulk_viscosity=self.bulk_viscosity(cv=cv, dv=dv),\n viscosity=self.viscosity(cv=cv, dv=dv),\n thermal_conductivity=self.thermal_conductivity(cv=cv, dv=dv, eos=eos),\n species_diffusivity=self.species_diffusivity(cv=cv, dv=dv, eos=eos)\n )\n\n\nclass SimpleTransport(TransportModel):\n r\"\"\"Transport model with uniform, constant properties.\n\n Inherits from (and implements) :class:`TransportModel`.\n\n .. automethod:: __init__\n .. automethod:: bulk_viscosity\n .. automethod:: viscosity\n .. automethod:: volume_viscosity\n .. automethod:: species_diffusivity\n .. 
automethod:: thermal_conductivity\n \"\"\"\n\n def __init__(self, bulk_viscosity=0, viscosity=0, thermal_conductivity=0,\n species_diffusivity=None):\n \"\"\"Initialize uniform, constant transport properties.\"\"\"\n if species_diffusivity is None:\n species_diffusivity = np.empty((0,), dtype=object)\n self._mu_bulk = bulk_viscosity\n self._mu = viscosity\n self._kappa = thermal_conductivity\n self._d_alpha = species_diffusivity\n\n def bulk_viscosity(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None) -> DOFArray:\n r\"\"\"Get the bulk viscosity for the gas, $\\mu_{B}$.\"\"\"\n return self._mu_bulk*(0*cv.mass + 1.0)\n\n def viscosity(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None) -> DOFArray:\n r\"\"\"Get the gas dynamic viscosity, $\\mu$.\"\"\"\n return self._mu*(0*cv.mass + 1.0)\n\n def volume_viscosity(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None) -> DOFArray:\n r\"\"\"Get the 2nd viscosity coefficent, $\\lambda$.\n\n In this transport model, the second coefficient of viscosity is defined as:\n\n $\\lambda = \\left(\\mu_{B} - \\frac{2\\mu}{3}\\right)$\n \"\"\"\n return (self._mu_bulk - 2 * self._mu / 3)*(0*cv.mass + 1.0)\n\n def thermal_conductivity(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None,\n eos: Optional[GasEOS] = None) -> DOFArray:\n r\"\"\"Get the gas thermal_conductivity, $\\kappa$.\"\"\"\n return self._kappa*(0*cv.mass + 1.0)\n\n def species_diffusivity(self, cv: ConservedVars,\n dv: Optional[GasDependentVars] = None,\n eos: Optional[GasEOS] = None) -> DOFArray:\n r\"\"\"Get the vector of species diffusivities, ${d}_{\\alpha}$.\"\"\"\n return self._d_alpha*(0*cv.mass + 1.0)\n\n\nclass PowerLawTransport(TransportModel):\n r\"\"\"Transport model with simple power law properties.\n\n Inherits from (and implements) :class:`TransportModel` based on a\n temperature-dependent power law.\n\n .. automethod:: __init__\n .. automethod:: bulk_viscosity\n .. automethod:: viscosity\n .. automethod:: volume_viscosity\n .. automethod:: species_diffusivity\n .. automethod:: thermal_conductivity\n \"\"\"\n\n # air-like defaults here\n def __init__(self, alpha=0.6, beta=4.093e-7, sigma=2.5, n=.666,\n species_diffusivity=None, lewis=None):\n \"\"\"Initialize power law coefficients and parameters.\n\n Parameters\n ----------\n alpha: float\n The bulk viscosity parameter. The default value is \"air\".\n\n beta: float\n The dynamic viscosity linear parameter. The default value is \"air\".\n\n n: float\n The temperature exponent for dynamic viscosity. The default value\n is \"air\".\n\n sigma: float\n The heat conductivity linear parameter. The default value is \"air\".\n\n lewis: numpy.ndarray\n If required, the Lewis number specify the relation between the\n thermal conductivity and the species diffusivities. The input array\n must have a shape of \"nspecies\".\n \"\"\"\n if species_diffusivity is None and lewis is None:\n species_diffusivity = np.empty((0,), dtype=object)\n self._alpha = alpha\n self._beta = beta\n self._sigma = sigma\n self._n = n\n self._d_alpha = species_diffusivity\n self._lewis = lewis\n\n def bulk_viscosity(self, cv: ConservedVars, # type: ignore[override]\n dv: GasDependentVars) -> DOFArray:\n r\"\"\"Get the bulk viscosity for the gas, $\\mu_{B}$.\n\n .. math::\n\n \\mu_{B} = \\alpha\\mu\n\n \"\"\"\n return self._alpha * self.viscosity(cv, dv)\n\n # TODO: Should this be memoized? 
Avoid multiple calls?\n def viscosity(self, cv: ConservedVars, # type: ignore[override]\n dv: GasDependentVars) -> DOFArray:\n r\"\"\"Get the gas dynamic viscosity, $\\mu$.\n\n $\\mu = \\beta{T}^n$\n \"\"\"\n return self._beta * dv.temperature**self._n\n\n def volume_viscosity(self, cv: ConservedVars, # type: ignore[override]\n dv: GasDependentVars) -> DOFArray:\n r\"\"\"Get the 2nd viscosity coefficent, $\\lambda$.\n\n In this transport model, the second coefficient of viscosity is defined as:\n\n .. math::\n\n \\lambda = \\left(\\alpha - \\frac{2}{3}\\right)\\mu\n\n \"\"\"\n return (self._alpha - 2.0/3.0) * self.viscosity(cv, dv)\n\n def thermal_conductivity(self, cv: ConservedVars, # type: ignore[override]\n dv: GasDependentVars, eos: GasEOS) -> DOFArray:\n r\"\"\"Get the gas thermal conductivity, $\\kappa$.\n\n .. math::\n\n \\kappa = \\sigma\\mu{C}_{v}\n\n \"\"\"\n return (\n self._sigma * self.viscosity(cv, dv)\n * eos.heat_capacity_cv(cv, dv.temperature)\n )\n\n def species_diffusivity(self, cv: ConservedVars, # type: ignore[override]\n dv: GasDependentVars, eos: GasEOS) -> DOFArray:\n r\"\"\"Get the vector of species diffusivities, ${d}_{\\alpha}$.\n\n The species diffusivities can be either\n (1) specified directly or\n (2) using user-imposed Lewis number $Le$ w/shape \"nspecies\"\n\n In the latter, it is then evaluate based on the heat capacity at\n constant pressure $C_p$ and the thermal conductivity $\\kappa$ as:\n\n .. math::\n\n d_{\\alpha} = \\frac{\\kappa}{\\rho \\; Le \\; C_p}\n \"\"\"\n if self._lewis is not None:\n return (self._sigma * self.viscosity(cv, dv)/(\n cv.mass*self._lewis*eos.gamma(cv, dv.temperature))\n )\n return self._d_alpha*(0*cv.mass + 1.)\n","repo_name":"majosm/mirgecom","sub_path":"mirgecom/transport.py","file_name":"transport.py","file_ext":"py","file_size_in_byte":11287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"14534671523","text":"#!/usr/bin/python3\n\"\"\"\nClass with size and position\n- area method\n- my_print method to print the area\n- setter and getter\n\"\"\"\n\n\nclass Square:\n \"\"\"create size of square and position\"\"\"\n\n def __init__(self, size=0, position=(0, 0)):\n \"\"\"constractor of square and its position\"\"\"\n self.size = size\n self.position = position\n\n def area(self):\n \"\"\"return the area\"\"\"\n return (self.__size ** 2)\n\n def my_print(self):\n if self.__size == 0:\n print()\n else:\n for i in range(self.position[1]):\n print()\n for j in range(self.__size):\n print(\" \" * self.position[0], end=\"\")\n print(\"#\" * self.__size)\n\n @property\n def size(self):\n \"\"\"getter for size\"\"\"\n return (self.__size)\n\n @size.setter\n def size(self, value):\n \"\"\"setter for size\"\"\"\n if type(value) is not int:\n raise TypeError(\"size must be an integer\")\n elif value < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = value\n\n @property\n def position(self):\n \"\"\"return position\"\"\"\n return (self.__position)\n\n @position.setter\n def position(self, value):\n \"\"\"setter for position\"\"\"\n if len(value) != 2 or type(value) is not tuple \\\n or type(value[0]) is not int \\\n or type(value[1]) is not int \\\n or (value[0] < 0 or value[1] < 0):\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = 
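To make the PowerLawTransport relations above concrete, here is a standalone numerical sketch (plain NumPy, not mirgecom objects) using the module's stated air-like defaults:

```python
import numpy as np

alpha, beta, n = 0.6, 4.093e-7, 0.666
T = np.array([300.0, 1000.0])      # toy temperature field

mu = beta * T**n                   # dynamic viscosity, mu = beta * T^n
mu_bulk = alpha * mu               # bulk viscosity, mu_B = alpha * mu
lam = (alpha - 2.0 / 3.0) * mu     # second coefficient of viscosity
print(mu, mu_bulk, lam)
```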
value\n","repo_name":"Makbelhailu/alx-higher_level_programming","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5699729546","text":"# Baekjoon Online Judge - 4256번. 트리\n\n\ndef postorder(left, right, root):\n\n for i in range(left, right):\n if preorder[root] == inorder[i]:\n # 왼쪽 서브트리\n postorder(left, i, root + 1)\n # 오른쪽 서브트리\n postorder(i + 1, right, root + i + 1 - left)\n print(preorder[root], end=' ')\n\n\nT = int(input())\nfor _ in range(T):\n N = int(input())\n preorder = list(map(int, input().split()))\n inorder = list(map(int, input().split()))\n postorder(0, N, 0)\n print()\n","repo_name":"wnstj-yang/Algorithm","sub_path":"BOJ/BOJ_4256.py","file_name":"BOJ_4256.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3449590274","text":"\nimport random\n\n\n\ndef miller_rabin ( n, a ): # True implies n is composite\n\n\tfrom gcd import euclids_gcd\n\n\t# Test if n is even\n\tif not( n & 1 ):\n\t\treturn True\n\n\t# Test if a divides n\n\tgcd = euclids_gcd( n, a )\n\n\tif (1 < gcd ) and ( gcd < n ):\n\t\treturn True\n\n\t# Find q such that n - 1 = 2^k q\n\n\tq = n - 1\n\tk = 0\n\twhile not( q & 1 ):\n\t\tq >>= 1\n\t\tk += 1\n\n\t# Search for the existence of i such that a^( (2^i) q ) = -1 (mod p), i < k\n\ta = pow( a, q, n )\n\n\tif a == 1:\n\t\treturn False\n\n\tfor i in range( k ):\n\n\t\tif a == n-1:\n\n\t\t\treturn False\n\n\t\ta = pow( a, 2, n )\n\n\treturn True\n\n\n\ndef is_prime( n, rounds = 100 ):\n\n\tif n == 1: return False\n\telif n == 2: return True\n\n\tfor _ in range( rounds ):\n\n\t\ta = random.randrange( 2, n )\n\n\t\tif miller_rabin( n, a ):\n\t\t\treturn False\n\n\treturn True\n\n","repo_name":"rpbeltran/The-MATH-470s","sub_path":"primality.py","file_name":"primality.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71305152516","text":"\"\"\"\nHash commands for HANA Bot\n\"\"\"\n\nimport base64\nfrom discord.ext import commands\nfrom handlers import hash_generator\n\n\nclass Hash(commands.Cog):\n '''\n Hash Commands\n '''\n @commands.command()\n async def sha256(self, ctx, *args):\n '''\n Hashes a string using sha256\n EX: /sha256 hello world\n '''\n result = hash_generator.sha256_hash(' '.join(args))\n msg = f\"The sha256 Hash of \\'{' '.join(args)}\\' is:\\n{result}\"\n await ctx.reply(msg)\n\n @commands.command()\n async def md5(self, ctx, *args):\n '''\n Hashes a string using md5\n EX: /md5 hello world\n '''\n result = hash_generator.md5_hash(' '.join(args))\n msg = f\"The MD5 Hash of \\'{' '.join(args)}\\' is:\\n{result}\"\n await ctx.reply(msg)\n\n @commands.command()\n async def encode_b64(self, ctx, *args):\n '''\n Encodes a string in base64\n EX: /encode_b64 hello world\n '''\n result = base64.b64encode(bytes(' '.join(args), encoding='utf-8'), altchars=None)\n msg = f\"The Base64 Encoded string of \\'{' '.join(args)}\\' is:\\n{result.decode()}\"\n await ctx.reply(msg)\n\n @commands.command()\n async def decode_b64(self, ctx, encode_string: str):\n '''\n Decodes a base64 string\n EX: /decode_b64 aGVsbG8gd29ybGQ=\n '''\n result = base64.b64decode(encode_string)\n msg = f\"The decoded Base64 string of \\'{encode_string}\\' is:\\n{result.decode()}\"\n await 
ctx.reply(msg)\n","repo_name":"kyleranous/hana_bot","sub_path":"modules/hash_commands.py","file_name":"hash_commands.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10459972445","text":"import re\n\n\ndef dance(steps):\n programs = list('abcdefghijklmnop')\n possible_states = []\n cycle = 0\n while True:\n for step in steps:\n op, n1, n2 = re.findall(r'(.)(\\w+)\\/?(\\w+)?', step)[0]\n if op == 's':\n n1 = int(n1)\n programs = programs[16 - n1:] + programs[:16 - n1]\n if op == 'x':\n n1 = int(n1)\n n2 = int(n2)\n programs[n1], programs[n2] = programs[n2], programs[n1]\n if op == 'p':\n n1 = programs.index(n1)\n n2 = programs.index(n2)\n programs[n1], programs[n2] = programs[n2], programs[n1]\n prog = ''.join(programs)\n if prog not in possible_states:\n possible_states.append(prog)\n else:\n break\n cycle += 1\n idx = 1000000000 % cycle\n return possible_states[0], possible_states[idx - 1]\n\n\nwith open('data/day16') as f:\n steps = [x for x in f.read().split(',')]\n print(dance(steps))\n","repo_name":"kamilwu/aoc2017","sub_path":"day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71446462915","text":"#!/usr/bin/env python\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Conv2D, Input, ReLU, Lambda, Add\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.initializers import glorot_normal\n\nimport tensorflow.keras.backend as K\n\nimport tensorflow_datasets as tfds\nds_div2k = tfds.load('div2k', shuffle_files=True)\n\nds_train = ds_div2k['train']\nds_test = ds_div2k['validation']\n\n\ndef abpn(scale=3, in_channels=3, num_fea=28, m=4, out_channels=3):\n inp = Input(shape=(None, None, 3)) \n upsample_func = Lambda(lambda x_list: tf.concat(x_list, axis=3))\n upsampled_inp = upsample_func([inp for x in range(scale**2)])\n\n # Feature extraction\n x = Conv2D(num_fea, 3, padding='same', activation='relu', kernel_initializer=glorot_normal(), bias_initializer='zeros')(inp)\n\n for i in range(m):\n x = Conv2D(num_fea, 3, padding='same', activation='relu', kernel_initializer=glorot_normal(), bias_initializer='zeros')(x)\n\n # Pixel-Shuffle\n x = Conv2D(out_channels*(scale**2), 3, padding='same', activation='relu', kernel_initializer=glorot_normal(), bias_initializer='zeros')(x)\n x = Conv2D(out_channels*(scale**2), 3, padding='same', kernel_initializer=glorot_normal(), bias_initializer='zeros')(x)\n x = Add()([upsampled_inp, x])\n \n depth_to_space = Lambda(lambda x: tf.nn.depth_to_space(x, scale))\n out = depth_to_space(x)\n clip_func = Lambda(lambda x: K.clip(x, 0., 255.))\n out = clip_func(out)\n \n return Model(inputs=inp, outputs=out, name='abpn')\n\nmodel = abpn(2) # ABPN super resolution scale = 2\nmodel.load_weights('models/checkpoint/abpn_x2/checkpoint').expect_partial()\n\npsnrs = []\nssims = []\n\nfor i in ds_test:\n sr = tf.cast(model(tf.expand_dims(i['lr'], 0)), tf.uint8)\n psnrs.append(tf.image.psnr(sr, i['hr'], max_val = 255.0))\n ssims.append(tf.image.ssim(sr, i['hr'], max_val = 255.0))\n\nprint(\"DIV2K (PSNR, SSIM) trained with DIV2K:\", tf.reduce_mean(psnrs), tf.reduce_mean(ssims))\n\nimport imagepairs\nip_test = tfds.load('imagepairs', split='test')\n\npsnrs = []\nssims = []\n\nfor a in ip_test:\n sr = tf.cast(model(tf.expand_dims(a['image'], 0)), tf.uint8)\n sr_q = tf.image.central_crop(sr, 0.25)\n hr_q = 
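The day16 dance above works because the permutation state sequence is periodic, so only `10**9 % cycle` applications matter. The same trick in isolation, with a toy step function (helper name hypothetical):

```python
def apply_many(state, step, total):
    seen = [state]
    while True:
        state = step(state)
        if state == seen[0]:      # cycle closed
            break
        seen.append(state)
    return seen[total % len(seen)]

# Toy step: rotate a string left by one; the cycle length is 4.
print(apply_many("abcd", lambda s: s[1:] + s[0], 10**9))  # 'abcd' (10**9 % 4 == 0)
```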
tf.image.central_crop(a['image_gt'], 0.25)\n psnrs.append(tf.image.psnr(sr_q, hr_q, max_val=255.0))\n ssims.append(tf.image.ssim(sr_q, hr_q, max_val=255.0))\n\nprint(\"ImagePairs (PSNR, SSIM) trained with DIV2K:\", tf.reduce_mean(psnrs), tf.reduce_mean(ssims))\n\nmodel.load_weights('models/checkpoint/abpn_x2_ip/checkpoint').expect_partial()\npsnrs = []\nssims = []\n\nfor a in ip_test:\n sr = tf.cast(model(tf.expand_dims(a['image'], 0)), tf.uint8)\n sr_q = tf.image.central_crop(sr, 0.25)\n hr_q = tf.image.central_crop(a['image_gt'], 0.25)\n psnrs.append(tf.image.psnr(sr_q, hr_q, max_val=255.0))\n ssims.append(tf.image.ssim(sr_q, hr_q, max_val=255.0))\n\nprint(\"ImagePairs (PSNR, SSIM) trained with ImagePairs:\", tf.reduce_mean(psnrs), tf.reduce_mean(ssims))\n","repo_name":"freedomtan/some_super_resolution_tflite_models","sub_path":"calculate_abpn_psrn_ssim.py","file_name":"calculate_abpn_psrn_ssim.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"15888126133","text":"import os\nfrom typing import List\n\nimport pandas as pd\nimport pytest\n\n\"\"\"\nNote that this is executed as part of the more global conbench test suite and\ntherefore executes heavy fixture init/teardown logic between tests. That's not\nneeded conceptually, but probably OK for now. (you might get the \"WARNING:\nempty_db_tables() called in non-testing mode, skip\" warnings in local test\nexecution).\n\"\"\"\n\nfrom conbench.outlier import remove_outliers_by_iqrdist\n\nthis_module_dirpath = os.path.dirname(os.path.abspath(__file__))\n\n\ndef path_to_datafile(fn: str) -> str:\n return os.path.join(this_module_dirpath, \"data\", fn)\n\n\ndef df_from_datafile(fn: str) -> pd.DataFrame:\n return pd.read_csv(path_to_datafile(fn), comment=\"#\")\n\n\n@pytest.mark.parametrize(\n \"filename, expected_outliers\",\n [\n (\"outlier_A.csv\", [269.2997, 2995.5260]),\n (\"outlier_B.csv\", [785.10015]),\n (\"outlier_C.csv\", [21.038903]),\n (\"outlier_D.csv\", []),\n (\"outlier_E.csv\", []),\n ],\n)\ndef test_scenarios(filename: str, expected_outliers: List):\n df = df_from_datafile(filename)\n df_outliers = remove_outliers_by_iqrdist(df, \"svs\")\n assert list(df_outliers[\"svs\"].values) == expected_outliers\n # print(df_outliers.index)\n # print(dfa[\"svs\"].loc[df_outliers.index])\n # print(dfa[\"svs\"])\n assert df[\"svs\"].loc[df_outliers.index].isna().sum() == len((expected_outliers))\n","repo_name":"conbench/conbench","sub_path":"conbench/tests/analysis/test_outlier_detect.py","file_name":"test_outlier_detect.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"61"} +{"seq_id":"41451602321","text":"from rest_framework import serializers\nfrom backend.models.clientes import Clientes\n\nclass ClienteSerializers(serializers.ModelSerializer):\n class Meta:\n model = Clientes\n fields = '__all__'\n def to_representation(self, obj):\n clientes = Clientes.objects.get(id_cliente=obj.id_cliente)\n return{\n \"id_cliente\": clientes.id_cliente,\n \"nombre\": clientes.nombre,\n \"documento\": format(clientes.documento,',.0f'),\n \"email\": clientes.email,\n \"telefono\": clientes.telefono,\n \"direccion\": clientes.direccion,\n \"fecha_nacimiento\": clientes.fecha_nacimiento,\n \"compras\": clientes.compras,\n \"Ultima_compra\":format(clientes.Ultima_compra,'%Y-%m-%d %H:%M:%S')\n 
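A small sketch of the pixel-shuffle step in `abpn()` above: `tf.nn.depth_to_space` trades `scale**2` channel groups for a `scale`-times larger spatial grid, which is how the network upsamples without transposed convolutions:

```python
import tensorflow as tf

scale = 2
x = tf.zeros([1, 8, 8, 3 * scale**2])   # (batch, H, W, C * scale^2)
y = tf.nn.depth_to_space(x, scale)
print(y.shape)                           # (1, 16, 16, 3)
```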
}","repo_name":"MaicolRojas/djangorestFramework","sub_path":"backend/serializers/ClienteSerializers.py","file_name":"ClienteSerializers.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28233203313","text":"from django.shortcuts import render\r\n\r\nfrom joblib import load\r\nmodel = load('./savedModels/model.joblib')\r\n\r\n\r\ndef predictor(request):\r\n return render(request, 'index.html') \r\n\r\n\r\ndef formInfo(request):\r\n sepal_lengh = request.GET['Sepal_lenght']\r\n Sepal_Width = request.GET['Sepal_Width']\r\n pedal_Lenth = request.GET['pedal_Lenth']\r\n pedal_Width = request.GET['pedal_Width']\r\n \r\n y_pred = model.predict([[sepal_lengh, Sepal_Width, pedal_Lenth, pedal_Width]])\r\n if y_pred[0] == 0:\r\n y_pred = 'Setosa'\r\n elif y_pred[0] == 1:\r\n y_pred = 'Versicolor'\r\n else:\r\n y_pred = 'Verginica'\r\n return render(request, 'result.html', {'result' : y_pred})","repo_name":"abhiyash50/Django_ML","sub_path":"irisApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37785484651","text":"#Írjon programot, amely azt vizsgálja, hogy egy felhasználó helyesen adja-e meg a jelszavát! A program addig kérdezi újra a felhasználónév-jelszó párost, amíg a felhasználó mindkettőt hibátlanul meg nem adja. A program egyetlen felhasználó (bori99 ) jelszavát (Szivecske<3) ismeri, csak ezt a párost fogadja el helyesként. Mind a sikertelen, mind a sikeres bejelentkezési kísérletekről üzenetet ír a képernyőre.\n\nfelhasznalo = \"bori99\"\njelszo = \"Szivecske<3\"\nhelyes = False\n\nwhile not helyes:\n tipp_felhasznalo = input(\"Kérem a felhasználó nevet! \")\n tipp_jelszo = input(\"Kérem a jelszót! 
\")\n if tipp_felhasznalo == felhasznalo and tipp_jelszo == jelszo:\n helyes = True\n print(\"Sikeres Bejelentkezés!\")\n else:\n print(\"probálkozzon újra!\")","repo_name":"hadaszibalazs205/dogozat1","sub_path":"jelszo.py","file_name":"jelszo.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13248211174","text":"def bubble_sort(array):\n for i in range(1, len(array)):\n sorted = True\n for j in range(len(array)-1):\n if array[j] > array[i]:\n sorted = False\n array[j], array[i] = array[i], array[j]\n if sorted:\n break\n return array\n\nprint(bubble_sort([4,3,78,2,0,2]))\n#[0,2,2,3,4,78]","repo_name":"Lemonns/Bubble-Sort","sub_path":"bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23389527441","text":"import sys\nT = int(sys.stdin.readline())\n\nfor t in range(T):\n\tN, M = map(int, sys.stdin.readline().strip().split())\n\tG = [None]*N\n\tfor n in range(N):\n\t\tG[n] = [h for h in map(int, sys.stdin.readline().strip().split())]\n\tV = [[100]*M for n in range(N)]\n\n\tfor n in range(N):\n\t\tfor m in range(M):\n\t\t\tif G[n][m] == max(G[n]):\n\t\t\t\tfor i in range(M):\n\t\t\t\t\tif V[n][i]>G[n][m]:\n\t\t\t\t\t\tV[n][i]=G[n][m]\n\t\t\tif G[n][m] == max([G[i][m] for i in range(N)]):\n\t\t\t\tfor i in range(N):\n\t\t\t\t\tif V[i][m]>G[n][m]:\n\t\t\t\t\t\tV[i][m]=G[n][m]\n\n\tfound = False\n\tfor n in range(N):\n\t\tfor m in range(M):\n\t\t\tif V[n][m]!=G[n][m]:\n\t\t\t\tfound=True\n\t\t\t\tprint(\"Case #%s: NO\" % (str(t+1)))\n\t\t\t\tbreak\n\t\tif found: break\n\tif not found:\n\t\tprint(\"Case #%s: YES\" % (str(t+1)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_117/701.py","file_name":"701.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4611535840","text":"# https://docs.python.org/3/library/threading.html\n\nimport threading\nimport time\n\n\ndef show_threads_info():\n print(f\"active threads: {threading.active_count()}\")\n print(f\"main thread: {threading.main_thread()}\")\n print(\"threads list: \")\n for thread in threading.enumerate():\n print(\"--\", thread)\n print()\n print(\"stack_size: \", threading.stack_size())\n\n\ndef show_cur_thread_info():\n print(f\"current_thread: {threading.current_thread()}\")\n print(f\"cur_thread_id: {threading.get_ident()}\")\n\n\nmy_locker = threading.Lock()\nmy_rec_locker = threading.RLock()\n# create local thread's variable\nthread_local_data = threading.local()\n\ncommon_data = \"common\"\n\n\ndef l_print_blocking(message: str):\n my_locker.acquire()\n print(message)\n my_locker.release()\n\n\ndef l_print_nonblocking(message: str):\n time.sleep(0.2)\n if my_locker.acquire(blocking=False) is True:\n print(message)\n my_locker.release()\n else:\n return False\n return True\n\n\ndef l_print_timeout(message: str):\n time.sleep(0.2)\n if my_locker.acquire(blocking=True, timeout=0.1) is True:\n print(message)\n\n my_locker.release()\n else:\n return False\n return True\n\n\ndef l_print_common_data(message: str):\n global common_data\n my_locker.acquire()\n time.sleep(1)\n common_data += ' ' + message\n print(common_data)\n my_locker.release()\n time.sleep(0.01)\n\n\ndef l_print_noncommon_data(message: str):\n global thread_local_data\n time.sleep(1)\n if 'x' 
not in thread_local_data.__dict__:\n thread_local_data.x = \"non common\"\n thread_local_data.x += ' ' + message\n local_var = 42\n my_locker.acquire()\n print(thread_local_data.x)\n my_locker.release()\n\n\ndef r_print_common_data(message: str):\n global common_data\n\n my_rec_locker.acquire()\n\n time.sleep(1)\n common_data += ' ' + message\n print(common_data)\n\n my_rec_locker.acquire()\n print(\"internal lock!\")\n my_rec_locker.release()\n\n my_rec_locker.release()\n time.sleep(0.01)\n\n\ndef thread_func(func, iter_count, lol: str):\n for i in range(iter_count):\n # time.sleep(1)\n func(f\"{i}: {threading.get_ident()}\")\n\n\ndef create_new_thread(func, count, thread_name=None):\n new_thread = threading.Thread(\n target=thread_func,\n name=thread_name,\n args=(func, count),\n kwargs={\"lol\": \"asd\"}\n )\n return new_thread\n\n\ndef ex_2():\n print(\"ex_2\")\n # show_cur_thread_info()\n thread_lol = create_new_thread(l_print_noncommon_data, 10, \"lol\")\n thread_kek = create_new_thread(l_print_noncommon_data, 10, \"kek\")\n # print(f\"thread_lol is started: {thread_lol.is_alive()}\")\n thread_lol.start()\n thread_kek.start()\n # print(f\"thread_lol is started: {thread_lol.is_alive()}\")\n # print(f\"thread_lol native_id is: {thread_lol.native_id}\")\n # show_threads_info()\n thread_lol.join()\n thread_kek.join()\n\n\nif __name__ == \"__main__\":\n ex_2()\n","repo_name":"1xdeadman/examples","sub_path":"5_parallelism/threading/ex_2.py","file_name":"ex_2.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"15328366786","text":"def bagi(a, b):\r\n try:\r\n hasil = a / b\r\n return hasil\r\n except ZeroDivisionError:\r\n print(\"Error: Pembagian oleh nol tidak diizinkan.\")\r\n return None\r\n except TypeError as e:\r\n print(f\"Error: {e}\")\r\n return None\r\n except Exception as e:\r\n print(f\"Error umum: {e}\")\r\n return None\r\n\r\n# Contoh pemanggilan fungsi dengan penanganan exception\r\nangka1 = 10\r\nangka2 = 0\r\n\r\nhasil_pembagian = bagi(angka1, angka2)\r\n\r\nif hasil_pembagian is not None:\r\n print(f\"Hasil pembagian {angka1} / {angka2} adalah {hasil_pembagian}\")\r\nelse:\r\n print(\"Gagal melakukan pembagian.\")\r\n","repo_name":"RafiAnanda912/UTS-PBO","sub_path":"jawaban 4.py","file_name":"jawaban 4.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38745048063","text":"import luigi\n\nfrom pipeline.tasks.data_handler import (\n CoOccurrenceMatrixCreationTask,\n EdgeLabelEncodingTask,\n EdgeListCreationTask,\n FitEdgeLabelEncoderTask,\n PredictSimilarNodeTask,\n TrainNodeEmbeddingTask,\n)\nfrom pipeline.tasks.data_loader import LoadCardDataTask\nfrom pipeline.tasks.data_uploader import UploadPredictedDataTask\nfrom pipeline.utils.template import GokartTask\n\n\nclass Main(GokartTask):\n output_athena_query_s3_url_base = luigi.Parameter()\n output_result_data_s3_url_base = luigi.Parameter()\n\n def requires(self) -> UploadPredictedDataTask:\n data_load_task = LoadCardDataTask(output_athena_query_s3_url_base=self.output_athena_query_s3_url_base)\n co_occurrence_matrix_creation_task = CoOccurrenceMatrixCreationTask(data_task=data_load_task)\n edge_list_creation_task = EdgeListCreationTask(data_task=co_occurrence_matrix_creation_task)\n fit_edge_label_encoder_task = FitEdgeLabelEncoderTask(data_task=edge_list_creation_task)\n edge_label_encoding_task = 
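One note on the threading demo above: `Lock` and `RLock` support the context-manager protocol, which releases the lock even when an exception is raised and is usually preferable to paired `acquire()`/`release()` calls. A minimal self-contained sketch:

```python
import threading

lock = threading.Lock()
counter = 0

def work():
    global counter
    for _ in range(10_000):
        with lock:            # acquired on entry, released on exit or error
            counter += 1

threads = [threading.Thread(target=work) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter)  # 40000
```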
EdgeLabelEncodingTask(\n data_task=edge_list_creation_task,\n label_encoder_task=fit_edge_label_encoder_task,\n )\n train_node_embedding_task = TrainNodeEmbeddingTask(data_task=edge_label_encoding_task)\n predict_similar_node_task = PredictSimilarNodeTask(\n data_task=edge_list_creation_task,\n model_task=train_node_embedding_task,\n label_encoder_task=fit_edge_label_encoder_task,\n )\n upload_predict_data_task = UploadPredictedDataTask(\n predict_similar_node_task=predict_similar_node_task,\n output_result_data_s3_url_base=self.output_result_data_s3_url_base,\n )\n return upload_predict_data_task\n\n def run(self) -> None:\n self.dump(\"Finished !\")\n","repo_name":"sansan-inc/randd-engineering-training","sub_path":"python_training/batch/pipeline/tasks/main_task.py","file_name":"main_task.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"71092176833","text":"def initFlags():\n \"\"\"\n Creates a flag container (AthConfigFlags) appropriate for typical\n Analysis athena applications.\n :return: flags\n \"\"\"\n from AthenaConfiguration import AthConfigFlags\n from AthenaConfiguration.AutoConfigFlags import GetFileMD\n from Campaigns.Utils import Campaign\n acf=AthConfigFlags.AthConfigFlags()\n\n #Flags steering the job execution:\n from AthenaCommon.Constants import INFO\n from AthenaConfiguration.Enums import ProductionStep\n import argparse\n acf.addFlag('Exec.OutputLevel',INFO) #Global Output Level\n acf.addFlag('Exec.MaxEvents',-1)\n acf.addFlag('Exec.SkipEvents',0)\n acf.addFlag('Exec.DebugStage','', help=argparse.SUPPRESS)\n acf.addFlag('Exec.FPE',-2) #-2: No FPE check at all, -1: Abort with core-dump, 0: FPE Auditor w/o stack-tace (default) , >0: number of stack-trace printed by the job\n\n\n #Custom messaging for components, see Utils.setupLoggingLevels\n acf.addFlag('Exec.VerboseMessageComponents',[])\n acf.addFlag('Exec.DebugMessageComponents',[])\n acf.addFlag('Exec.InfoMessageComponents',[])\n acf.addFlag('Exec.WarningMessageComponents',[])\n acf.addFlag('Exec.ErrorMessageComponents',[])\n\n acf.addFlag('Common.MsgSourceLength',50) #Length of the source-field in the format str of MessageSvc\n acf.addFlag('Common.ProductionStep', ProductionStep.Default, enum=ProductionStep, help=argparse.SUPPRESS)\n acf.addFlag('Common.isOverlay', False, help=argparse.SUPPRESS)\n\n #Flags describing the input data\n acf.addFlag('Input.Files', [\"_ATHENA_GENERIC_INPUTFILE_NAME_\",]) # former global.InputFiles\n acf.addFlag('Input.OverrideRunNumber', False, help=argparse.SUPPRESS )\n acf.addFlag('Input.SecondaryFiles', [], help=argparse.SUPPRESS) # secondary input files for DoubleEventSelector\n acf.addFlag('Input.ProcessingTags', lambda prevFlags : GetFileMD(prevFlags.Input.Files).get(\"processingTags\", []), help=\"expert flag, do not override\" ) # list of names of streams written to this file\n acf.addFlag('Input.ProjectName', lambda prevFlags : GetFileMD(prevFlags.Input.Files).get(\"project_name\", \"data17_13TeV\"), help=\"expert flag, do not override\") # former global.ProjectName\n acf.addFlag('Input.MCCampaign', lambda prevFlags : Campaign(GetFileMD(prevFlags.Input.Files).get(\"mc_campaign\", \"\")), enum=Campaign, help=\"expert flag, do not override\")\n\n\n acf.addFlag('Concurrency.NumProcs', 0, help=\"0 = disables MP, otherwise is # of processes to use in MP mode\")\n acf.addFlag('Concurrency.NumThreads', 0, help=\"0 = disables MT, otherwise is # of threads to use in MT mode\" )\n 
acf.addFlag('Concurrency.NumConcurrentEvents', lambda prevFlags : prevFlags.Concurrency.NumThreads)\n acf.addFlag('Concurrency.DebugWorkers', False )\n\n # output\n acf.addFlag('Output.HISTOutputs', [],help=\"ROOT output files. Specify in form of 'STREAM:filename.root'\")\n acf.addFlag('Output.TreeAutoFlush', {}, help=\"{} = automatic for all streams, otherwise {'STREAM': 123}\")\n\n acf.addFlag(\"PoolSvc.MaxFilesOpen\", 0, help=argparse.SUPPRESS)\n\n # analysis-specific arguments\n acf.parser().add_argument('--accessMode',default=\"POOLAccess\",choices={\"POOLAccess\",\"ClassAccess\"},help=\"Input file reading mode\") # named arg\n acf.parser().add_argument('--postExec',default=None,help=\"Any postconfig execution required\")\n\n\n return acf\n\ndef initCfg(flags):\n \"\"\"\n Creates a ComponentAccumulator appropriate for typical Analysis\n type jobs.\n :param flags:\n :return:\n \"\"\"\n flags.lock()\n from AthenaConfiguration.MainServicesConfig import MainServicesCfg\n from AthenaConfiguration.ComponentFactory import CompFactory\n\n ca = MainServicesCfg(flags)\n\n if flags.args().accessMode == \"POOLAccess\":\n from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg\n ca.merge(PoolReadCfg(flags))\n else:\n from AthenaRootComps.xAODEventSelectorConfig import xAODReadCfg,xAODAccessMode\n ca.merge(xAODReadCfg(flags, AccessMode = xAODAccessMode.CLASS_ACCESS))\n\n outputs = [\"{} DATAFILE='{}' OPT='RECREATE'\".format(*file.split(\":\",1)) for file in flags.Output.HISTOutputs]\n if len(outputs): ca.addService(CompFactory.THistSvc(Output = outputs))\n\n ca.getService(\"MessageSvc\").setWarning += [\"ClassIDSvc\",\"PoolSvc\",\"AthDictLoaderSvc\",\"AthenaPoolAddressProviderSvc\",\n \"ProxyProviderSvc\",\"DBReplicaSvc\",\"MetaDataSvc\",\"MetaDataStore\",\"AthenaPoolCnvSvc\",\n \"TagMetaDataStore\",\"EventSelector\",\n #\"ApplicationMgr\", can't silence because otherwise ATN tests fail, see ATLINFR-1235\n \"CoreDumpSvc\",\"AthMasterSeq\",\"EventPersistencySvc\",\"ActiveStoreSvc\",\n \"AthenaEventLoopMgr\",\"AthOutSeq\",\"AthRegSeq\"]\n\n return ca\n\ndef launch(flags,ca):\n \"\"\"\n Launches the job (includes executing any postExec)\n :param flags:\n :param ca:\n :return:\n \"\"\"\n from AthenaConfiguration.Utils import setupLoggingLevels\n setupLoggingLevels(flags,ca)\n if flags.args().postExec: eval(flags.args().postExec)\n if ca.run().isFailure():\n import sys\n sys.exit(1)\n\n","repo_name":"Yusuf-Manjra/athena","sub_path":"Control/AthenaConfiguration/python/AnalysisApp.py","file_name":"AnalysisApp.py","file_ext":"py","file_size_in_byte":5272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73510727554","text":"# libs\nimport os\nfrom src import main_logics, file_loader\n\n# global variables\nconfig_file_path = \"./config.yml\"\n\n\ndef main():\n # load settings\n settings_obj = main_logics.config_loader(config_file_path).unwrap()\n datasets_path = settings_obj['datasets_path']\n\n # print the prediction\n input_doc = input()\n print(main_logics.predict_test_from_text(datasets_path, input_doc))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jumang4423/authorship-atribution","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22854050597","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/5/112 15:20\n# @Author : 罗小贱\n# @email: ljq906416@gmail.com\n# @File : 冒泡排序\n# 
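The `lambda prevFlags: ...` defaults in the AthConfigFlags setup above are evaluated lazily against the flag container, so one flag can default to another's final value. A minimal pure-Python sketch of that pattern (not the ATLAS class; all names here are hypothetical):

```python
class Flags:
    def __init__(self):
        self._vals = {}

    def add(self, name, default):
        self._vals[name] = default

    def get(self, name):
        v = self._vals[name]
        return v(self) if callable(v) else v  # resolve deferred defaults on read

f = Flags()
f.add("NumThreads", 4)
f.add("NumConcurrentEvents", lambda prev: prev.get("NumThreads"))
print(f.get("NumConcurrentEvents"))  # 4
```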
@Software: PyCharm\nlist = [3, 7, 2, 5, 20, 11]\nfor i in range(len(list)):\n for j in range(i + 1,len(list)):\n if list[i] > list[j]:\n list[i],list[j] = list[j],list[i]\n\nprint(list)\n","repo_name":"soulgo/python_project","sub_path":"Bubble_Sort.py","file_name":"Bubble_Sort.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12443952290","text":"\"\"\"\nThis script collates the near-splice and coding SNVs.\nIt gives one definitive consequence to each SNV.\nVariants are labelled as \"nonsense\", \"missense\", \"synonymous\", or a given a\nnear-splice annotation.\n\"\"\"\n\n# Import the relevant modules\nimport numpy as np\nimport pandas as pd\nimport os\n\ndef read_allele_counts(region):\n \"\"\" Read the allele counts for SNVs in the unaffected parents.\n \"\"\"\n\n names = [\"chrom\", \"pos\", \"ref\", \"alt\", \"filter\", \"an\", \"ac\", \"sample\"]\n\n df = pd.read_csv(\n f\"../outputs/unaff_parents_{region}_snvs.tsv\",\n sep=\"\\t\",\n header=None,\n names=names,\n usecols=[\"chrom\", \"pos\", \"ref\", \"alt\", \"ac\"]\n )\\\n .drop_duplicates(subset=[\"chrom\", \"pos\", \"ref\", \"alt\"], keep=False)\n\n return df\n\ndef read_coding_csqs():\n \"\"\" Get the VEP consequence annotations for each coding variant\n \"\"\"\n\n names = ([\"variant\", \"a\", \"b\", \"ensg\", \"enst\", \"d\", \"csq\", \"e\", \"f\", \"g\", \"h\", \"i\",\n \"j\", \"k\", \"l\", \"m\", \"n\"])\n\n df = pd.read_csv(\n \"../outputs/unaff_parents_coding_snvs_vep_out.tsv\",\n sep=\"\\t\",\n comment=\"#\",\n header=None,\n names=names,\n usecols=[\"variant\", \"csq\"]\n )\n\n # Reformat the data\n var = df.variant.str.split(\"_\", expand=True)\n\n chrom = var[0].rename(\"chrom\")\n pos = var[1].astype(int).rename(\"pos\")\n ref = var[2].str.split(\"/\").str[0].rename(\"ref\")\n alt = var[2].str.split(\"/\").str[1].rename(\"alt\")\n\n # Keep only SNVs with the following selected consequences\n consequences = ['synonymous_variant', 'missense_variant', 'stop_gained']\n csq = df.csq.str.split(\",\").explode().rename(\"csq\")\n csq = csq[csq.isin(consequences)]\n\n df = pd.concat([chrom, pos, ref, alt, csq], axis=1, join=\"inner\")\\\n .drop_duplicates() # 3,039,395 variants (78 duplicated variants)\n\n return df\n\ndef read_near_splice_csqs():\n \"\"\" Get the consequence annotations for each near-splice variant.\n \"\"\"\n\n df = pd.read_csv(\"../outputs/near_splice_positions.tsv\", sep=\"\\t\")\n df[\"csq\"] = df.region + \"_\" + df.site.astype(str)\n df = df[[\"chrom\", \"pos\", \"csq\"]]\n\n return df\n\ndef read_contexts(region):\n \"\"\" Get the sequence context for each genomic position.\n \"\"\"\n\n df = pd.read_csv(\n f\"../outputs/{region}_contexts.tsv\",\n sep=\"\\t\",\n header=None,\n names=[\"span\",\"context\"]\n )\n\n # List comprehensions are faster than string methods here:\n a = pd.DataFrame([x.split(\":\") for x in df.span])\n b = pd.DataFrame([x.split(\"-\") for x in a[1]]).astype(int)\n\n chrom = a[0].rename(\"chrom\")\n pos = (b[0] + 2).rename(\"pos\")\n\n df = pd.concat([chrom, pos, df.context], axis=1)\\\n .drop_duplicates()\\\n .drop_duplicates(subset=[\"chrom\",\"pos\"], keep=False)\n\n return df\n\ndef combine_annotations():\n \"\"\" Run the functions above to retrieve the annotations.\n Merge these annotations together.\n \"\"\"\n cd_snvs = read_allele_counts(\"coding\")\n ns_snvs = read_allele_counts(\"near_splice\")\n\n cd_csqs = read_coding_csqs()\n ns_csqs = read_near_splice_csqs()\n\n 
cd_ctxt = read_contexts(\"coding\")\n ns_ctxt = read_contexts(\"near_splice\")\n\n # Merge the data, to retrieve consequences and context annotations for each\n # allele\n cd = cd_snvs.merge(cd_csqs).merge(cd_ctxt)\n ns = ns_snvs.merge(ns_csqs).merge(ns_ctxt)\n\n # Concatenate the coding and near-splice variants\n df = pd.concat([cd, ns])\n\n return df\n\ndef reduce_annotations(df):\n \"\"\" Reduce the data so that each allele has one definitive annotation.\n Where a synonymous variant is found in a near-splice position, label it as\n near-splice.\n Where a missense/nonsense variant is found in a near-splice position, label\n it as missense/nonsense.\n \"\"\"\n # Keep those variants which are not (duplicated and synonymous) and those\n # variants which are not (duplicated and (not missense and not stop_gained)\n mask1 = df.duplicated(subset=[\"chrom\",\"pos\",\"ref\",\"alt\"], keep=False)\n mask2 = df.csq == \"synonymous_variant\"\n df = df[~(mask1 & mask2)]\n\n mask3 = df.duplicated(subset=[\"chrom\",\"pos\",\"ref\",\"alt\"], keep=False)\n mask4 = df.csq != \"missense_variant\"\n mask5 = df.csq != \"stop_gained\"\n df = df[~(mask3 & (mask4 & mask5))]\n\n return df\n\nif __name__ == '__main__':\n output = \"../outputs/unaff_parents_allele_counts.tsv\"\n\n if os.path.exists(output):\n df = pd.read_csv(output, sep=\"\\t\")\n else:\n df = combine_annotations()\\\n .pipe(reduce_annotations)\n df.to_csv(output, sep=\"\\t\", index=False)\n","repo_name":"alexblakes/100KGP_splicing","sub_path":"near_splice/scripts/tidy_near_splice_and_coding_snvs.py","file_name":"tidy_near_splice_and_coding_snvs.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"20547945942","text":"import math\nfrom typing import Callable, Optional\n\nimport torch\n\nfrom sbi.types import Shape\n\n\n# Mocking a neural posterior\nclass BiasedPosterior:\n def __init__(self, posterior: Callable, shift: float = 5.0):\n \"\"\"give me a prior/posterior and I'll shift it by a scalar `shift`. All calls to `sample` are wrapped.\"\"\"\n\n self.shift = shift\n self.posterior = posterior\n\n def set_default_x(self, x):\n\n self.posterior.set_default_x(x)\n\n def sample(\n self,\n sample_shape: Shape = torch.Size(),\n x: Optional[torch.Tensor] = None,\n show_progress_bars: bool = False,\n ):\n return (\n self.posterior.sample(\n sample_shape, x, show_progress_bars=show_progress_bars\n )\n + self.shift\n )\n\n\nclass DispersedPosterior:\n def __init__(self, posterior: Callable, dispersion: float = 1.05):\n \"\"\"give me a posterior and I'll disperse it. All calls to `sample` are wrapped.\n This class exploits: Var(aX) = a**2 * Var(X) for any random variable X\n while retaining the expectation value E[X] of all samples.\n\n Parameters:\n posterior: posterior distribution modelled like NeuralPosterior\n dispersion: choose values <1. to make the variance smaller,\n choose values >1. 
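The splicing script above leans on `drop_duplicates(..., keep=False)`, which removes *every* member of a duplicated group rather than keeping the first occurrence; that is what guarantees one definitive annotation per allele. A toy illustration:

```python
import pandas as pd

df = pd.DataFrame({"pos": [1, 1, 2, 3, 3, 3], "alt": list("AATCCC")})
print(df.drop_duplicates(subset=["pos"], keep=False))
#    pos alt
# 2    2   T
```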
to make the variance larger (distribution more wide)\n \"\"\"\n\n self.dispersion = math.sqrt(dispersion)\n self.posterior = posterior\n\n def set_default_x(self, x):\n\n self.posterior.set_default_x(x)\n\n def sample(\n self,\n sample_shape: Shape = torch.Size(),\n x: Optional[torch.Tensor] = None,\n show_progress_bars: bool = False,\n ):\n\n value = self.posterior.sample(\n sample_shape, x, show_progress_bars=show_progress_bars\n )\n\n # obtain the median of all samples before applying\n # the dispersion to them (use median for more robust estimate)\n median = torch.median(value, dim=0) # dim 0 is the batch dimension\n\n # disperse the samples\n dispersed = value * self.dispersion\n\n # obtain the new median after the dispersion\n median_ = torch.median(dispersed, dim=0)\n\n # shift to obtain the original expectation values\n # (we only want to disperse the samples, not offset)\n shift = median.values - median_.values\n\n return dispersed + shift\n","repo_name":"amortizedgbi/amortizedgbi","sub_path":"packages/sbi/tutorials/utils_13_diagnosis_sbc.py","file_name":"utils_13_diagnosis_sbc.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"17795166728","text":"from board import Board\r\nimport pygame\r\n\r\nclass Game:\r\n\r\n # Initializes Game Object\r\n def __init__(self, b, team1, team2):\r\n self.board = b\r\n self.team1 = team1\r\n self.team2 = team2\r\n\r\n # Checks if the game is over using the is_4 function, True if over, and False if not\r\n def is_over(self):\r\n if(self.board.is_4(self.team1) == True or self.board.is_4(self.team2) == True):\r\n return True\r\n else:\r\n return False\r\n\r\n # Allows a player to make a move by getting the row_idx, and using the update_board function\r\n def make_move(self, col_num):\r\n col_idx = col_num - 1\r\n row_idx = self.board.row_pos(self.board, col_num)\r\n self.board.update_board(self.board, row_idx, col_idx)\r\n\r\n # Uses pygame to draw the board, setting a background color, and creating the actual board\r\n def draw_board(self, screen, board_color, e_circle_color, r_circle_color, y_circle_color):\r\n for col in range(7):\r\n for row in range(6):\r\n pygame.draw.rect(screen, board_color, (col*100, row*100 + 100, 100, 100))\r\n if(self.board.board[row][col] == \"O\"):\r\n pygame.draw.circle(screen, e_circle_color, (col*100 + 50, row*100 + 100 + 50), 45)\r\n elif (self.board.board[row][col] == \"R\"):\r\n pygame.draw.circle(screen, r_circle_color, (col * 100 + 50, row * 100 + 100 + 50), 45)\r\n elif (self.board.board[row][col] == \"Y\"):\r\n pygame.draw.circle(screen, y_circle_color, (col * 100 + 50, row * 100 + 100 + 50), 45)\r\n pygame.display.update()\r\n\r\n\r\n","repo_name":"smerkle/Connect4","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42976929159","text":"while True:\n n = int(input())\n if n == 0:\n break\n L = []\n for _ in range(n):\n s = int(input())\n L.append(s)\n L.sort()\n L.pop(0)\n L.pop()\n print(int(sum(L) / len(L)))","repo_name":"Okabe-Junya/AtCoderArchive","sub_path":"Other/ICPC/001.py","file_name":"001.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43419369597","text":"from util.data import auxillary as aux\nfrom util.toolkits.database import *\nfrom 
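A quick numerical check of the identity behind `DispersedPosterior` above, Var(aX) = a²·Var(X): scaling samples by sqrt(d) multiplies the variance by d, and the median shift restores the original centre. A toy sketch:

```python
import torch

torch.manual_seed(0)
x = torch.randn(100_000) * 2.0 + 5.0
d = 1.05
y = x * d**0.5
y = y + (x.median() - y.median())            # re-centre, as the class does

print(x.var().item(), y.var().item() / d)     # approximately equal
print(x.median().item(), y.median().item())   # approximately equal
```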
util.toolkits.midi import *\nfrom music21 import analysis, converter\nimport pretty_midi\nimport numpy as np\nimport scipy.stats as stats\nimport math\n\n\ndef test_tonal_analysis():\n root_note_names = ['C', '♭D', 'D', '♭E', 'E', 'F', '♭G', 'G', '♭A', 'A', '♭B', 'B']\n\n scales = []\n for i in range(12):\n root_name = root_note_names[i]\n root_note = i\n\n for scale_name, scale in get_mode_dict()['Heptatonic'].items():\n name = root_name + '_' + scale_name\n scales.append({name: [root_note + note for note in scale]})\n\n for scale_name, scale in get_mode_dict()['Pentatonic'].items():\n name = root_name + '_' + scale_name\n scales.append({name: [root_note + note for note in scale]})\n\n print(scales)\n\n\ndef get_note_lengths(path):\n notes_length = [0 for _ in range(12)]\n pm = pretty_midi.PrettyMIDI(path)\n for instr in pm.instruments:\n if not instr.is_drum:\n for note in instr.notes:\n length = note.end - note.start\n pitch = note.pitch\n notes_length[pitch % 12] += length\n\n return notes_length\n\n\ndef get_weights(mode, name='ks'):\n if name == 'kk':\n a = analysis.discrete.KrumhanslKessler()\n # Strong tendancy to identify the dominant key as the tonic.\n elif name == 'ks':\n a = analysis.discrete.KrumhanslSchmuckler()\n elif name == 'ae':\n a = analysis.discrete.AardenEssen()\n # Weak tendancy to identify the subdominant key as the tonic.\n elif name == 'bb':\n a = analysis.discrete.BellmanBudge()\n # No particular tendancies for confusions with neighboring keys.\n elif name == 'tkp':\n a = analysis.discrete.TemperleyKostkaPayne()\n # Strong tendancy to identify the relative major as the tonic in minor keys. Well-balanced for major keys.\n else:\n assert name == 's'\n a = analysis.discrete.SimpleWeights()\n # Performs most consistently with large regions of music, becomes noiser with smaller regions of music.\n return a.getWeights(mode)\n\n\ndef get_key_name(index):\n if index // 12 == 0:\n mode = 'major'\n else:\n mode = 'minor'\n\n tonic_list = ['C', '♭D', 'D', '♭E', 'E', 'F', '♭g', 'G', '♭A', 'A', '♭B', 'B']\n tonic = tonic_list[index % 12]\n return tonic + ' ' + mode\n\n\ndef krumhansl_schmuckler(path):\n note_lengths = get_note_lengths(path)\n key_profiles = [0 for _ in range(24)]\n\n for key_index in range(24):\n\n if key_index // 12 == 0:\n mode = 'major'\n else:\n mode = 'minor'\n weights = get_weights(mode, 'kk')\n\n current_note_length = note_lengths[key_index:] + note_lengths[:key_index]\n\n pearson = stats.pearsonr(current_note_length, weights)[0]\n\n key_profiles[key_index] = math.fabs(pearson)\n\n key_name = get_key_name(np.argmax(key_profiles))\n print(key_name)\n # print(key_profiles, '\\n', note_lengths)\n\n\ndef find_meta(path):\n pm = pretty_midi.PrettyMIDI(path)\n for ks in pm.key_signature_changes:\n print(ks)\n\n\ndef test():\n path = '../data/midi/read/All You Need Is Love - Beatles.mid'\n\n find_meta(path)\n\n s = converter.parse(path)\n s.plot('histogram', 'pitch')\n p = analysis.discrete.KrumhanslKessler()\n print(p.getSolution(s))\n\n krumhansl_schmuckler(path)\n\n\nif __name__ == '__main__':\n test()\n","repo_name":"josephding23/MusicCritique","sub_path":"algorithms/tonal.py","file_name":"tonal.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"61"} +{"seq_id":"10377787565","text":"import sqlalchemy\nimport numpy as np\nimport pandas as pd\nimport requests\nimport os\n\nfrom alg.population import Population\nfrom alg.student_wishes import SWishesConnector\nfrom 
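The Krumhansl–Schmuckler routine above boils down to: rotate the 12 pitch-class durations to each candidate tonic and keep the key whose rotation best correlates with a weight template, exactly as `krumhansl_schmuckler()` does with Pearson correlation. A self-contained sketch using the standard Krumhansl major profile and toy durations:

```python
import scipy.stats as stats

# Standard Krumhansl major-key profile; durations below are toy, C-major-ish data.
major = [6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88]
durations = [10, 1, 5, 1, 7, 4, 1, 8, 1, 4, 1, 3]

best = max(range(12),
           key=lambda k: stats.pearsonr(durations[k:] + durations[:k], major)[0])
print(best)  # expected 0 -> tonic C
```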
alg.teacher_wishes import TWishesConnector\nfrom alg.collection_cards import CollectionCards\nfrom alg.genetic_algorithm import GeneticAlgorithm\n\nfrom db.tasks import LoadDaysTask, LoadDepartmentTask, \\\n\t\t\t\t\t\tLoadFacultyTask, LoadGroupsTask, \\\n\t\t\t\t\t\tLoadLessonTask, LoadTeachersTask, \\\n\t\t\t\t\t\tLoadPairsTask, LoadCardTask,\\\n\t\t\t\t\t\tLoadEmailStudents, LoadEmailTeachers,\\\n\t\t\t\t\t\tLoadRandomTeacherScheduleTask,\\\n\t\t\t\t\t\tLoadRandomStudentScheduleTask,\\\n\t\t\t\t\t\tLoadFullInfo, LoadFullSearchInfo\nfrom db.orm.tables import *\nimport csv\n\n\ndef send_messages(db):\n\turls = create_urls(db)\n\tfor url in list(urls.keys())[:1]:\n\t\trequests.post(\n    \tos.environ.get('MAIL_URL'),\n    \tauth=(\"api\", os.environ.get(\"MAIL_API\")),\n    \tdata={\"from\": os.environ.get(\"MAIL_FROM\"),\n    \t\t\"to\": \"test \",\n\t\t\t\t\"subject\": \"Hello\",\n\t\t\t\t\"text\": \"Email to edit schedule: %s\"%urls[url]})\n\ndef create_urls(db):\n\n\tst_verif_query = \"select * from verif_student;\"\n\ttchr_verif_query = \"select * from verif_teacher;\"\n\n\tst_verif_df = pd.read_sql(st_verif_query, con=db.engine)\n\ttchr_verif_df = pd.read_sql(tchr_verif_query, con=db.engine)\n\n\tst_link = 'https://generateschedule.herokuapp.com/scheduledesign/student/' + st_verif_df.st_secret_key\n\ttchr_link = 'https://generateschedule.herokuapp.com/scheduledesign/teacher/' + tchr_verif_df.tchr_secret_key\n\n\tverif_teacher_links = {x: y for x,y in zip(tchr_verif_df.tchr_email, tchr_link)}\n\tverif_students_links = {x: y for x,y in zip(st_verif_df.st_email, st_link)}\n\n\treturn {**verif_students_links, **verif_teacher_links}\n\ndef load_db(engine):\n\tLoadDaysTask.LoadDaysTask(engine=engine).load_to_db()\n\tLoadFacultyTask.LoadFacultyTask(engine=engine).load_to_db()\n\tLoadDepartmentTask.LoadDepartmentTask(engine=engine).load_to_db()\n\tLoadGroupsTask.LoadGroupsTask(engine=engine).load_to_db()\n\tLoadLessonTask.LoadLessonTask(engine=engine).load_to_db()\n\tLoadTeachersTask.LoadTeachersTask(engine=engine).load_to_db()\n\tLoadPairsTask.LoadPairsTask(engine=engine).load_to_db()\n\tLoadCardTask.LoadCardTask(engine=engine).load_to_db()\n\tLoadEmailStudents.LoadEmailStudents(engine=engine).load_to_db()\n\tLoadEmailTeachers.LoadEmailTeachers(engine=engine).load_to_db()\n\t\n\ndef prepare_random_schedule(db):\n\t#prepare for teacher\n\tLoadRandomTeacherScheduleTask.LoadRandomTeacherScheduleTask(db).load_to_db()\n\t#prepare for student\n\tLoadRandomStudentScheduleTask.LoadRandomStudentScheduleTask(db).load_to_db()\n\t\n\ndef prepare_schedule_interface(db, user_status, user_key):\n\treturn LoadFullInfo.LoadFullInfo(db=db, user_status=user_status, user_key=user_key).create_schedule()\n\ndef check_all_sended(db):\n\tall_keys_wish_student = pd.read_sql('select st_secret_key from student_wish_schedule', con=db.engine).drop_duplicates(keep='first')\n\tall_keys_wish_teacher = pd.read_sql('select tchr_secret_key from teacher_wish_schedule', con=db.engine).drop_duplicates(keep='first')\n\t\n\tteachers_keys = pd.read_sql('select tchr_secret_key from verif_teacher', con=db.engine)\n\tstudent_keys = pd.read_sql('select st_secret_key from verif_student', con=db.engine)\n\t# pd.concat takes a list of frames as its first argument\n\tif len(pd.concat([all_keys_wish_teacher, teachers_keys], ignore_index=True).drop_duplicates(keep=False)) and \\\n\t\tlen(pd.concat([all_keys_wish_student, student_keys], ignore_index=True).drop_duplicates(keep=False)):\n\t\treturn True\n\t\n\treturn False\n\n\ndef prepare_data_db(data, user_status, user_key):\n\tdata_db = []\n\tif 
user_status=='student':\n\t\ttable = 'student_wish_schedule'\n\t\tcolumn_key = 'st_secret_key'\n\t\tschedule_key = 'st_schedule_id'\n\telse:\n\t\ttable = 'teacher_wish_schedule'\n\t\tcolumn_key = 'tchr_secret_key'\n\t\tschedule_key = 'tchr_schedule_id'  # assumption: id column name on teacher_wish_schedule\n\n\tfor lesson in data:\n\t\tdata_db.append({\n\t\t\t\t\t\tcolumn_key: user_key,\n\t\t\t\t\t\tschedule_key: int(lesson['les_id']),\n\t\t\t\t\t\t'days_id': lesson['week']*6+lesson['day']+1,\n\t\t\t\t\t\t'pairs_id': lesson['les_num']+1})\n\treturn data_db\n\ndef load_schedule_db(data,\n\t\t\t\t\tdb,\n\t\t\t\t\tuser_status,\n\t\t\t\t\tuser_key):\n\n\tif user_status=='student':\n\t\ttable = Student_Wish_Schedule\n\t\tcolumn_key = 'st_schedule_id'\n\telse:\n\t\ttable = Teacher_Wish_Schedule  # assumption: ORM model for teacher_wish_schedule in db.orm.tables\n\t\tcolumn_key = 'tchr_schedule_id'\n\tdata = prepare_data_db(data=data,user_status=user_status,user_key=user_key)\n\tprint(data)\n\tfor d in data:\n\t\tdb.session.query(table).filter_by(**{column_key:d[column_key]}).update(d)\n\n\tdb.session.commit()\n\ndef search_schedule(db, search_query):\n\tstudent = find_in_student(db, search_query)\n\tteacher = find_in_teacher(db, search_query)\n\n\tif student:\n\t\t#format_data\n\t\treturn 'stud', LoadFullSearchInfo.LoadFullSearchInfo(db, user_status='student', ids=student).create_schedule()\n\telif teacher:\n\t\t#format_data\n\t\treturn 'teach', LoadFullSearchInfo.LoadFullSearchInfo(db, user_status='teacher', ids=teacher).create_schedule()\n\n\treturn False\n\t\ndef check_schedule(db, search_query):\n\tstudent = find_in_student(db, search_query)\n\tteacher = find_in_teacher(db, search_query)\n\n\treturn student or teacher\n\n\ndef find_in_student(db, search_query):\n\ttry:\n\t\tgroup_id = db.session.query(Groups).filter_by(group_name=search_query).first().group_id\n\texcept:\n\t\treturn False\n\tcards_id = db.session.query(Card.card_id).filter_by(group_id=group_id).all()\n\n\treturn db.session.query(Class.class_id).filter(Class.card_id.in_(cards_id)).all()\n\n\ndef find_in_teacher(db, search_query):\n\ttry:\n\t\tteacher_id = db.session.query(Teacher).filter_by(teacher_short_name=search_query).first().teacher_id\n\texcept:\n\t\treturn False\n\tcards_id = db.session.query(Card.card_id).filter_by(teacher_id=teacher_id).all()\n\t\n\treturn db.session.query(Class.class_id).filter(Class.card_id.in_(cards_id)).all()\n\n\ndef genetic_algorithm(db):\n\tprint('Start work of genetic_algorithm')\n\tFACULTY_ID = 2\n\trooms = np.array(range(1, 30))\n\n\tclc = CollectionCards(FACULTY_ID, db.session)\n\tprint('Done CLS')\n\tswc = SWishesConnector(FACULTY_ID, db.session)\n\tprint('Done SWC')\n\ttwc = TWishesConnector(FACULTY_ID, db.session)\n\tprint('Done TWC')\n\n\tppl = Population(rooms, 100, FACULTY_ID, db.session)\n\tppl.create_chromosomes()\n\tprint('Done PPL')\n\tga = GeneticAlgorithm(ppl.chromosomes, clc, swc, twc)\n\tga.fit(n_iter = 10)\n\n\tclassesL = []\n\tTB_chromosome = ga.chromosomes[0][0]\n\tfor lesson in TB_chromosome.lessons:\n\t\tfor card in lesson.cards:\n\n\t\t\troom, wdc = TB_chromosome.get_wdcByLessonNum(lesson.unNum)\n\t\t\tdays_id = (wdc[0] + 1) * wdc[1] + 1\n\t\t\tpairs_id = wdc[2] + 1\n\n\t\t\tclassesL.append(Class(card_id = int(card), days_id = int(days_id), pairs_id = int(pairs_id)))\n\t\t\tprint('Card_id: {}, days_id: {}, pairs_id: {}'.format(card, days_id, pairs_id))\n\n\tdb.session.add_all(classesL)\n\tdb.session.commit()\n\tprint('genetic_algorithm done')\n\ndef find_all_teachers(db):\n\treturn [dict((col, getattr(row, col)) for col in row.__table__.columns.keys()) for row in 
db.session.query(Teacher).all()]\n\n","repo_name":"Yelue/GenerateSchedule","sub_path":"app/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":6791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8904695139","text":"import os\nimport sys\nsys.path.append(os.curdir)\nfrom pdfgen import (NAME, TAGLINE, PIC, EMAIL, LINKEDIN, GITHUB, TELEGRAM,\n EDUCATIONS, LANGUAGES, INTERESTS, EXPERIENCES, SKILLS, THEME)\n\nAUTHOR = 'wastelandeer'\nSITENAME = 'Resume project'\nSITEURL = ''\n\nPATH = 'content'\nSTATIC_PATHS = ['images']\nEXTRA_PATH_METADATA = {\n 'images/resume.pdf': {'path': 'static/resume.pdf'},\n}\n\nTIMEZONE = 'Europe/Moscow'\n\nDEFAULT_LANG = 'ru'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('Pelican', 'https://getpelican.com/'),\n ('Python.org', 'https://www.python.org/'),\n ('Jinja2', 'https://palletsprojects.com/p/jinja/'),\n ('You can modify those links in your config file', '#'),) \n\n# Social widget\nSOCIAL = (('You can add links in your config file', '#'),\n ('Another social link', '#'),)\n\nDEFAULT_PAGINATION = False\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\n# Hold theme folder inside \"content\" folder\nIGNORE_FILES = ['theme']\n\nPDF = 'resume.pdf'\nOPEN_TO_WORK = False","repo_name":"wastelandeer/wastelandeer.github.io","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71812783235","text":"\"\"\"\nRun antsCorticalThickness.sh on Mindboggle-101 brains\n\"\"\"\nimport os\n\nrun_all = True\n\nif run_all:\n\n names = ['OASIS-TRT-20', 'MMRR-21', 'NKI-RS-22', 'NKI-TRT-20',\n 'Afterthought', 'Colin27', 'Twins-2', 'MMRR-3T7T-2', 'HLN-12']\n numbers = [20,21,22,20,1,1,2,2,12]\n\n i1 = 0\n names = [names[i1]]\n numbers = [numbers[i1]]\n\n path1 = '/homedir/Data/Brains/Mindboggle101/subjects/'\n end1a = '/mri/orig/001.mgz'\n end1b = '/mri/orig/001.nii.gz'\n path2 = '/data/Brains/Atropos_templates/OASIS-30_Atropos_template/'\n end2a = 'T_template0.nii.gz'\n end2b = 'T_template0_BrainCerebellum.nii.gz'\n end2c = 'T_template0_BrainCerebellumProbabilityMask.nii.gz'\n end2d = 'T_template0_BrainCerebellumExtractionMask.nii.gz'\n end2e = 'Priors2/priors%d.nii.gz'\n convert = False\n\n for i,name in enumerate(names):\n number = numbers[i]\n for n in range(1,number+1):\n if convert:\n s = 'mri_convert {0}{1}-{2}{3} {0}{1}-{2}{4} ' \\\n .format(path1, name, n, end1a, end1b)\n print(s)\n os.system(s)\n\n prefix = 'antsCorticalThickness/{0}-{1}/ants'.format(name, n)\n\n s = 'antsCorticalThickness.sh -d 3 -n 3 -w 0.25 ' \\\n '-a {0}{1}-{2}{3} ' \\\n '-o {4} ' \\\n '-e {5}/{6} ' \\\n '-t {5}/{7} ' \\\n '-m {5}/{8} ' \\\n '-f {5}/{9} ' \\\n '-p {5}/{10} ' \\\n .format(path1, name, n, end1b, prefix, path2, end2a, end2b,\n end2c, end2d, end2e)\n print(s)\n os.system(s)\n","repo_name":"binarybottle/mindboggle_sidelined","sub_path":"run_ants.py","file_name":"run_ants.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"70599588356","text":"print(\"Digite as idades: \")\r\nx = int(input())\r\n\r\ntotal = 0\r\ncont = 0\r\n\r\nif x < 0:\r\n 
print(\"IMPOSSIVEL CALCULAR\")\r\nelse:\r\n while x > 0:\r\n total += x\r\n cont += 1\r\n x = int(input())\r\n\r\n media = total / cont\r\n print(f\"MEDIA = {media:.2f}\")\r\n","repo_name":"murilobarbosaa/Algoritmos_Udemy","sub_path":"Python/media_idades.py","file_name":"media_idades.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"19291352793","text":"import subprocess as sp \nimport time \nnet = input(\"Enter the IP address \")\n\n\ndef scan(IP):\n\tlist1 = ['ping', '-c', '1']\n\tlist1.append(IP)\n\n\ttry:\n\t\tr = sp.check_output(list1, timeout=.5).decode()\n\t\tif \"ttl\" in r.lower():\n\t\t\tprint (IP,\"-->Live\")\n\texcept sp.TimeoutExpired:\n\t\tpass\n\n\texcept Exception as e :\n\t\tprint (e)\n\nfirstip = net.rsplit(\".\",1)[0]\n\nr1 = input(\"Enter the range, use '-' to seprate \")\n\na,b = r1.split(\"-\")\na = int(a)\nb = int(b)\n\nt1 = time.time()\nfor each in range(a,b+1):\n\tip = firstip+\".\"+str(each)\n\tscan(ip)\n\nt2 = time.time()\nprint (\"time taken \", t2-t1)","repo_name":"mohitraj/Network-Penetration-testing","sub_path":"tcp1/simple_scan.py","file_name":"simple_scan.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31452576416","text":"from django.shortcuts import render\nimport requests\n\ndef index(request):\n url = \"https://api.themoviedb.org/3/movie/popular?language=en-US&page=1\"\n\n headers = {\n \"accept\": \"application/json\",\n \"Authorization\": \"Bearer {API_KEY}\"\n }\n\n response = requests.get(url, headers=headers)\n posts = response.json()\n data = posts['results']\n context = {\n \"data\":data\n }\n return render(request,\"index.html\",context)","repo_name":"sameermkd/youtube-clone","sub_path":"video/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41192280943","text":"import FWCore.ParameterSet.Config as cms\n\npfDeepDoubleXTagInfos = cms.EDProducer('DeepDoubleXTagInfoProducer',\n shallow_tag_infos = cms.InputTag('pfBoostedDoubleSVAK8TagInfos'),\n jet_radius = cms.double(0.8),\n min_jet_pt = cms.double(150),\n min_candidate_pt = cms.double(0.95),\n vertices = cms.InputTag('offlinePrimaryVertices'),\n secondary_vertices = cms.InputTag('inclusiveCandidateSecondaryVertices'),\n jets = cms.InputTag('ak8PFJetsPuppi'),\n mightGet = cms.optional.untracked.vstring\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"RecoBTag/FeatureTools/pfDeepDoubleXTagInfos_cfi.py","file_name":"pfDeepDoubleXTagInfos_cfi.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71830200193","text":"from bennedetto.settings.common import *\n\nSECRET_KEY = 'neva-forget'\nDOMAIN = 'localhost:8000'\n\nDEBUG = True\nALLOWED_HOSTS = []\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'temp', 'db.sqlite3'),\n }\n}\n\nEMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'\nEMAIL_FILE_PATH = os.path.join(BASE_DIR, 'temp', 'email')\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format' : \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\",\n 'datefmt' : \"%d/%b/%Y %H:%M:%S\"\n },\n 'simple': {\n 'format': 
'%(levelname)s %(message)s'\n },\n },\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(BASE_DIR, 'temp', 'debug.log'),\n 'formatter': 'verbose'\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['file'],\n 'propagate': True,\n 'level': 'DEBUG'\n },\n }\n}\n","repo_name":"arecker/bennedetto","sub_path":"bennedetto/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":308,"dataset":"github-code","pt":"61"} +{"seq_id":"2148970029","text":"import ufl\nimport ufl.argument\nfrom ufl.assertions import ufl_assert\nfrom ufl.finiteelement import FiniteElementBase\nfrom ufl.split_functions import split\nfrom ufl.algorithms.analysis import extract_arguments\n\nimport function\n\n\n__all__ = ['Argument', 'TestFunction', 'TrialFunction',\n 'TestFunctions', 'TrialFunctions',\n 'derivative', 'adjoint',\n 'CellSize', 'FacetNormal']\n\n\nclass Argument(ufl.argument.Argument):\n \"\"\"Representation of the argument to a form,\"\"\"\n def __init__(self, element, function_space, count):\n \"\"\"\n :arg element: the :class:`ufl.element.FiniteElementBase` this\n argument corresponds to.\n :arg function_space: the :class:`.FunctionSpace` the argument\n corresponds to.\n :arg count: the number of the argument being constructed.\n\n .. note::\n\n an :class:`Argument` with a count of ``0`` is used as a\n :class:`TestFunction`, with a count of ``1`` it is used as\n a :class:`TrialFunction`.\n\n \"\"\"\n super(Argument, self).__init__(element, count)\n self._function_space = function_space\n\n @property\n def cell_node_map(self):\n return self._function_space.cell_node_map\n\n @property\n def interior_facet_node_map(self):\n return self._function_space.interior_facet_node_map\n\n @property\n def exterior_facet_node_map(self):\n return self._function_space.exterior_facet_node_map\n\n def function_space(self):\n return self._function_space\n\n def make_dat(self):\n return self._function_space.make_dat()\n\n def reconstruct(self, element=None, function_space=None, count=None):\n if function_space is None or function_space == self._function_space:\n function_space = self._function_space\n if element is None or element == self._element:\n element = self._element\n if count is None or count == self._count:\n count = self._count\n if count is self._count and element is self._element:\n return self\n ufl_assert(isinstance(element, FiniteElementBase),\n \"Expecting an element, not %s\" % element)\n ufl_assert(isinstance(count, int),\n \"Expecting an int, not %s\" % count)\n ufl_assert(element.value_shape() == self._element.value_shape(),\n \"Cannot reconstruct an Argument with a different value shape.\")\n return Argument(element, function_space, count)\n\n\ndef TestFunction(function_space):\n \"\"\"Build a test function on the specified function space.\n\n :arg function_space: the :class:`.FunctionSpaceBase` to build the test\n function on.\"\"\"\n return Argument(function_space.ufl_element(), function_space, 0)\n\n\ndef TrialFunction(function_space):\n \"\"\"Build a trial function on the specified function space.\n\n :arg function_space: the :class:`.FunctionSpaceBase` to build the trial\n function on.\"\"\"\n return Argument(function_space.ufl_element(), function_space, 1)\n\n\ndef TestFunctions(function_space):\n \"\"\"Return a tuple of test functions on the specified function space.\n\n :arg function_space: the :class:`.FunctionSpaceBase` to build the test\n functions on.\n\n This 
returns ``len(function_space)`` test functions, which, if the\n function space is a :class:`.MixedFunctionSpace`, are indexed\n appropriately.\n \"\"\"\n return split(TestFunction(function_space))\n\n\ndef TrialFunctions(function_space):\n \"\"\"Return a tuple of trial functions on the specified function space.\n\n :arg function_space: the :class:`.FunctionSpaceBase` to build the trial\n functions on.\n\n This returns ``len(function_space)`` trial functions, which, if the\n function space is a :class:`.MixedFunctionSpace`, are indexed\n appropriately.\n \"\"\"\n return split(TrialFunction(function_space))\n\n\ndef derivative(form, u, du=None):\n \"\"\"Compute the derivative of a form.\n\n Given a form, this computes its linearization with respect to the\n provided :class:`.Function`. The resulting form has one\n additional :class:`Argument` in the same finite element space as\n the Function.\n\n :arg form: a :class:`ufl.Form` to compute the derivative of.\n :arg u: a :class:`.Function` to compute the derivative with\n respect to.\n :arg du: an optional :class:`Argument` to use as the replacement\n in the new form (constructed automatically if not provided).\n\n See also :func:`ufl.derivative`.\n \"\"\"\n if du is None:\n if isinstance(u, function.Function):\n V = u.function_space()\n args = form.arguments()\n number = max(a.number() for a in args) if args else -1\n du = Argument(V.ufl_element(), V, number + 1)\n else:\n raise RuntimeError(\"Can't compute derivative for form\")\n return ufl.derivative(form, u, du)\n\n\ndef adjoint(form, reordered_arguments=None):\n \"\"\"UFL form operator:\n Given a combined bilinear form, compute the adjoint form by\n changing the ordering (count) of the test and trial functions.\n\n By default, new Argument objects will be created with\n opposite ordering. However, if the adjoint form is to\n be added to other forms later, their arguments must match.\n In that case, the user must provide a tuple reordered_arguments=(u2,v2).\n \"\"\"\n\n # ufl.adjoint creates new Arguments if no reordered_arguments is\n # given. 
To avoid that, always pass reordered_arguments with\n # firedrake.Argument objects.\n if reordered_arguments is None:\n v, u = extract_arguments(form)\n reordered_arguments = (Argument(u.element(), u.function_space(),\n count=v.count()),\n Argument(v.element(), v.function_space(),\n count=u.count()))\n return ufl.adjoint(form, reordered_arguments)\n\n\ndef CellSize(mesh):\n \"\"\"A symbolic representation of the cell size of a mesh.\n\n :arg mesh: the mesh for which to calculate the cell size.\n \"\"\"\n mesh.init()\n return 2.0 * ufl.Circumradius(mesh.ufl_domain())\n\n\ndef FacetNormal(mesh):\n \"\"\"A symbolic representation of the facet normal on a cell in a mesh.\n\n :arg mesh: the mesh over which the normal should be represented.\n \"\"\"\n mesh.init()\n return ufl.FacetNormal(mesh.ufl_domain())\n","repo_name":"gmarkall/firedrake","sub_path":"firedrake/ufl_expr.py","file_name":"ufl_expr.py","file_ext":"py","file_size_in_byte":6386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"34836007373","text":"import array\nimport csv\nimport math\nimport wave\n\nfrom pprint import pprint\n\nSAMPLE_RATE = 44100.0\n\n################################################################################\n# Read in the functions.\nparams = {}\nwith open('linear_parameters.csv') as file:\n csv_file = csv.reader(file, delimiter=',')\n for row in csv_file:\n if row[0] == '':\n continue\n name = row[0].split(' ')[0].lower()\n values = [float(v) for v in row[1:]]\n paired_values = zip(values[:8], values[8:])\n params[name] = paired_values\n\n################################################################################\n# Generate noise.\n\ndef read_wav(path):\n wav_file = wave.open(path)\n\n assert wav_file.getnchannels() == 1, 'Expect monochannel audio'\n assert wav_file.getframerate() == SAMPLE_RATE, 'Expect 44.1k audio'\n assert wav_file.getsampwidth() == 2, 'Expected signed 16 bit audio'\n\n data_string = wav_file.readframes(wav_file.getnframes())\n\n # Convert the data from string to byte(s) array\n data = array.array('h')\n data.fromstring(data_string)\n\n return list(data)\n\ndata = read_wav('white_noise.wav')\n\n################################################################################\n# Filter the noise.\n\ndef slope_to_coefficients(params, slope):\n fc_gain = [[2000000, None], [16.5, None], [270.0, None], [5300.0, None]]\n index_dict = {\n 'constant': 0,\n 'low': 1,\n 'medium': 2,\n 'high': 3,\n }\n\n # Convert the slope to a target gain.\n for name, linear_fn in params.iteritems():\n max_gain = max([g for _,g in linear_fn])\n gain = None\n for i in range(len(linear_fn)-1):\n if linear_fn[i][0] <= slope < linear_fn[i+1][0]:\n frac = (slope - linear_fn[i][0])/(linear_fn[i+1][0] - linear_fn[i][0])\n gain = linear_fn[i][1] * (1 - frac) + linear_fn[i+1][1] * frac\n break\n if gain is None:\n gain = linear_fn[-1][1]\n # Snap to the nearest potentiometer step.\n if gain < 0:\n gain = 0\n steps = 1024\n gain = math.floor(steps*gain/max_gain)/steps * max_gain\n fc_gain[index_dict[name]][1] = gain\n\n # Convert the fc/gain params to a function\n tmp_coefs = [(1-(1/SAMPLE_RATE)/(1/(2*math.pi*fc) + 1/SAMPLE_RATE), gain)\n for fc, gain in fc_gain]\n coefs = [(A, gain*(1-A)) for A, gain in tmp_coefs]\n return coefs\n\ndef apply_continuous_filter(params, slope_spec, data):\n # Prep coefficients\n fc_gain = [[2000000, None], [16.5, None], [270.0, None], [5300.0, None]]\n index_dict = {\n 'constant': 0,\n 'low': 1,\n 'medium': 2,\n 'high': 3,\n }\n 
max_slope, min_slope = slope_spec\n\n # Apply to the data.\n output_data = []\n filtered = [0, 0, 0, 0]\n for i,d in enumerate(data):\n slope = float(max_slope - min_slope)*(float(i)/len(data)) + min_slope\n coefs = slope_to_coefficients(params, slope)\n filtered = [cf[0] * prev + cf[1] * d for prev,cf in zip(filtered, coefs)]\n raw_output = sum(filtered)\n trim_output = min(max(int(raw_output), -2**15), 2**15 - 1)\n output_data.append(trim_output)\n return output_data\n\noutput_data = apply_continuous_filter(params, [-20, 0], data)\n\n################################################################################\n# Write the noise to a WAV.\n\ndef write_wav(data):\n wav_file = wave.open('filtered_noise.wav', 'w')\n wav_file.setnchannels(1)\n wav_file.setframerate(SAMPLE_RATE)\n wav_file.setsampwidth(2)\n\n # Convert from byte(s) array to string\n arr = array.array('h')\n arr.fromlist(data)\n wav_file.writeframes(arr.tostring())\n\nwrite_wav(output_data)\n","repo_name":"thenoviceoof/noisEE","sub_path":"filter/generate_audio.py","file_name":"generate_audio.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13629916856","text":"#ealmonte32\n#CSIT 104 - In Class Programming Q2\n\ndef check_neg(numbers_list):\n all_negs = []\n for negative in numbers_list:\n if negative < 0:\n all_negs.append(negative)\n return all_negs\n\ndef check_pos(numbers_list):\n all_pos = []\n for positive in numbers_list:\n if positive > 0:\n all_pos.append(positive)\n return all_pos\n\ndef main():\n numbers_list = [-1,2,-3,4,-5,6,-7,8,9,10,-11]\n\n print(\"\\n-----------------------------------------\")\n print(\"Positive or Negative Number Checker Program\\n\")\n print(\"Available functions to choose:\\n\")\n print(\" (a) Check Negative \\\n \\n (b) Check Positive \\\n \\n (c) Quit\\n\")\n\n choice = input(\"Type the letter corresponding to the program and press enter: \")\n \n if choice not in (\"a\",\"b\",\"c\"):\n print(\"\\n (Error): You entered an incorrect letter as choice. 
Please try again.\")\n main()\n \n if (choice == \"a\"):\n print(\"Negative numbers in list are:\",check_neg(numbers_list))\n \n elif (choice == \"b\"):\n print(\"Positive numbers in list are:\",check_pos(numbers_list))\n \n elif (choice == \"c\"):\n print(numbers_list)\n exit()\n\n#main function initializes\nmain()\n","repo_name":"ealmonte32/python","sub_path":"positive_negative_checker_menu.py","file_name":"positive_negative_checker_menu.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34900322460","text":"import math\n\ntest_inputs = [\n \"inputs/day12_sample\",\n \"inputs/day12\"\n]\n\n\nclass WaypointNavigation:\n def __init__(self):\n self.ship = [0, 0]\n self.waypoint = [10, 1]\n\n def rotate_counterclockwise(self, amt):\n cos = math.cos(math.radians(amt))\n sin = math.sin(math.radians(amt))\n relx = self.waypoint[0] - self.ship[0]\n rely = self.waypoint[1] - self.ship[1]\n rotx = cos * relx + -1 * sin * rely\n roty = sin * relx + cos * rely\n self.waypoint[0] = int(self.ship[0] + rotx)\n self.waypoint[1] = int(self.ship[1] + roty)\n\n def navigate(self, op, amount):\n if op == \"N\":\n self.waypoint[1] += amount\n elif op == \"S\":\n self.waypoint[1] -= amount\n elif op == \"E\":\n self.waypoint[0] += amount\n elif op == \"W\":\n self.waypoint[0] -= amount\n elif op == \"L\":\n self.rotate_counterclockwise(amount)\n elif op == \"R\":\n self.rotate_counterclockwise(amount * -1)\n elif op == \"F\":\n dx = (self.waypoint[0] - self.ship[0]) * amount\n dy = (self.waypoint[1] - self.ship[1]) * amount\n self.ship[0] += dx\n self.ship[1] += dy\n self.waypoint[0] += dx\n self.waypoint[1] += dy\n\n\nclass Ship:\n def __init__(self, direction):\n self.direction_map = {\n \"N\": 0,\n \"E\": 1,\n \"S\": 2,\n \"W\": 3\n }\n\n self.direction = self.direction_map[direction]\n self.x = 0\n self.y = 0\n\n def turn(self, deg):\n deg = deg % 360\n assert deg % 90 == 0\n quads = int(deg / 90)\n self.direction = (self.direction + quads) % 4\n\n def move(self, op, amount):\n if op == \"N\":\n self.y += amount\n elif op == \"S\":\n self.y -= amount\n elif op == \"E\":\n self.x += amount\n elif op == \"W\":\n self.x -= amount\n elif op == \"L\":\n self.turn(-1 * amount)\n elif op == \"R\":\n self.turn(amount)\n elif op == \"F\":\n if self.direction == 0:\n self.move(\"N\", amount)\n elif self.direction == 1:\n self.move(\"E\", amount)\n elif self.direction == 2:\n self.move(\"S\", amount)\n elif self.direction == 3:\n self.move(\"W\", amount)\n\n\ndef process(path):\n print(\"Input:\", path)\n instructions = []\n with open(path) as f:\n for line in f:\n op = line[0]\n amt = int(line[1:])\n instructions.append((op, amt))\n\n ship = Ship(\"E\")\n for op, amt in instructions:\n ship.move(op, amt)\n print(\"\\tDistance 1:\", abs(ship.x) + abs(ship.y))\n\n nav = WaypointNavigation()\n for op, amt in instructions:\n nav.navigate(op, amt)\n print(op, amt, nav.ship, nav.waypoint)\n print(\"\\tDistance 2:\", abs(nav.ship[0]) + abs(nav.ship[1]))\n\n\ndef main():\n for path in test_inputs:\n process(path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"will-snavely/adventofcode","sub_path":"aoc2020/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72850940033","text":"\"\"\"\nreturn : Represents The leaderboard object for the client side of the game.\n\"\"\"\n\nimport 
pygame\n\nfrom client import player\n\n\nclass Leaderboard(object):\n def __init__(self,x,y):\n self.x = x\n self.y = y\n self.WIDTH = 200\n self.HEIGHT_ENTRY = 80\n self.players = []\n self.name_font = pygame.font.SysFont(\"comicsans\",25,bold = True)\n self.score_font = pygame.font.SysFont(\"comicsans\",20)\n self.rank_font = pygame.font.SysFont(\"comicsans\",60)\n self.BORDER_THICKNESS = 5\n\n def draw(self,win):\n score = [(player.name, player.score) for player in self.players]\n score.sort(key=lambda x: x[1],reverse=True)","repo_name":"danonymous856/Major","sub_path":"client/leaderboard.py","file_name":"leaderboard.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41762205797","text":"# '등급' 총 5가지로 나눔 (전체관람가(G), 12세이상관람가(pg), 15세이상관람가(pg13), 청소년관람불가(r), 제한상영가(nc17))\ndef rating (row):\n G = ['전체관람가','연소자관람가,전체관람가', '연소자관람가', '모든 관람객이 관람할 수 있는 등급', \"모든 관람객이 관람할 수 있는 등급,전체관람가\"]\n pg = ['12세이상관람가', '12세관람가', '중학생이상관람가', '국민학생관람불가', '12세이상관람가,12세관람가', '12세 미만인 자는 관람할 수 없는 등급', '12세이상관람가,전체관람가', '12세이상관람가,중학생이상관람가', \n '12세이상관람가,연소자관람가', '12세이상관람가,국민학생관람불가', '12세이상관람가,연소자관람가,전체관람가']\n pg13 = ['15세이상관람가', '15세관람가', '고등학생이상관람가', '15세 미만인 자는 관람할 수 없는 등급', '15세관람가,15세이상관람가', '15세이상관람가,중학생이상관람가', '연소자관람불가,15세이상관람가', '국민학생관람불가,15세이상관람가',\n '15세이상관람가,전체관람가', '연소자관람가,15세이상관람가', '12세이상관람가,15세이상관람가','15세 미만인 자는 관람할 수 없는 등급 ,15세이상관람가',\n '12세이상관람가,국민학생관람불가,15세이상관람가','12세이상관람가,고등학생이상관람가', '15세관람가,12세이상관람가', '12세이상관람가,15세 미만인 자는 관람할 수 없는 등급']\n r = ['청소년관람불가', '18세관람가', '연소자관람불가', '18세 미만인 자는 관람할 수 없는 등급', '고등학생이상관람가,15세이상관람가', \n '연소자관람불가,청소년관람불가', '미성년자관람불가', '18세관람가,청소년관람불가', '청소년관람불가,15세이상관람가', '15세이상관람가,18세 미만인 자는 관람할 수 없는 등급','미성년자관람가',\n '청소년관람불가,전체관람가', '청소년관람불가,12세관람가', '15세이상관람가,미성년자관람불가', '국민학생관람불가,청소년관람불가',\n '18세관람가,15세이상관람가', '청소년관람불가,고등학생이상관람가']\n nc17 = ['제한상영가']\n none = ['정보없음']\n \n if row in G :\n return '전체관람가'\n if row in pg :\n return '12세이상'\n if row in pg13 :\n return '15세이상'\n if row in r :\n return '청소년관람불가'\n if row in nc17:\n return '제한상영가'\n if row in none:\n return '정보없음'","repo_name":"FLY-CODE77/EDA-project","sub_path":"Korea-movie-eda/module/rate.py","file_name":"rate.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15830613864","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'magic'\n\nurlpatterns = [\n\t#home page\n\turl(r'^$', views.IndexView.as_view(), name='index'),\n\t#registration page\n\turl(r'^register/$', views.UserFormView.as_view(), name='register'),\n\t#logout\n\turl(r'^logout_user/$', views.logout_user, name='logout_user'),\n\t#login\n url(r'^login_user/$', views.login_user, name='login_user'),\n\t#Post detail , /1/ ,/123/\n\turl(r'^(?P[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n]","repo_name":"voidmeistr/mtgapp","sub_path":"magic/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22461480007","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\n\nsys.path.append(\"py\")\n\nimport countdowntourney\n\nverbose = False\n\ntests = [\n {\n \"name\" : \"ranktest_wins_points\",\n \"player_names\" : [ \"Alpha\", \"Bravo\", \"Charlie\", \"Delta\", \"Echo\", \"Foxtrot\", \"Golf\", \"Hotel\", \"India\", \"Juliet\", \"Kilo\" ],\n \"games_per_round\" : {\n 1 : [\n (1, \"Alpha\", 60, \"Echo\", 40),\n (1, \"Echo\", 41, \"India\", 31),\n (1, \"India\", 58, \"Alpha\", 68, True),\n (2, \"Bravo\", 46, \"Foxtrot\", 31),\n (2, \"Foxtrot\", 37, \"Juliet\", 30),\n (2, \"Juliet\", 60, \"Bravo\", 55),\n (3, \"Charlie\", 45, \"Golf\", 59),\n (3, \"Golf\", 72, \"Kilo\", 52),\n (3, \"Kilo\", 50, \"Charlie\", 51),\n (4, \"Delta\", 60, \"Hotel\", 30),\n (4, \"Hotel\", 66, \"Prune\", 0),\n (4, \"Prune\", 0, \"Delta\", 70)\n ],\n 2 : [\n (1, \"Golf\", 50, \"Delta\", 60),\n (1, \"Delta\", 65, \"Alpha\", 45),\n (1, \"Alpha\", 49, \"Golf\", 41),\n (2, \"Bravo\", 56, \"Charlie\", 52),\n (2, \"Charlie\", 61, \"Hotel\", 41),\n (2, \"Hotel\", 49, \"Bravo\", 37),\n (3, \"Juliet\", 50, \"Echo\", 60),\n (3, \"Echo\", 53, \"Foxtrot\", 44),\n (3, \"Foxtrot\", 25, \"Juliet\", 57),\n (4, \"Kilo\", 49, \"India\", 47),\n (4, \"India\", 56, \"Prune\", 0),\n (4, \"Prune\", 0, \"Kilo\", 58),\n ],\n 3 : [\n (1, \"Delta\", 72, \"Alpha\", 62),\n (1, \"Alpha\", 58, \"Echo\", 47),\n (1, \"Echo\", 41, \"Delta\", 68),\n (2, \"Golf\", 50, \"Charlie\", 60, True),\n (2, \"Charlie\", 56, \"Kilo\", 30),\n (2, \"Kilo\", 41, \"Golf\", 47),\n (3, \"Juliet\", 41, \"Bravo\", 49),\n (3, \"Bravo\", 40, \"Hotel\", 50),\n (3, \"Hotel\", 54, \"Juliet\", 55),\n (4, \"India\", 40, \"Foxtrot\", 53),\n (4, \"Foxtrot\", 60, \"Prune\", 0),\n (4, \"Prune\", 0, \"India\", 70),\n ]\n },\n \"standings_per_round\" : {\n 1 : [\n ( 1, \"Golf\", 2, 2, 131),\n ( 2, \"Delta\", 2, 2, 130),\n ( 3, \"Alpha\", 2, 2, 118),\n ( 4, \"Bravo\", 2, 1, 101),\n ( 5, \"Charlie\", 2, 1, 96),\n ( 5, \"Hotel\", 2, 1, 96),\n ( 7, \"Juliet\", 2, 1, 90),\n ( 8, \"Echo\", 2, 1, 81),\n ( 9, \"Foxtrot\", 2, 1, 68),\n (10, \"Kilo\", 2, 0, 102),\n (11, \"India\", 2, 0, 89),\n ],\n 2 : [\n ( 1, \"Delta\", 4, 4, 255),\n ( 2, \"Alpha\", 4, 3, 212),\n ( 3, \"Echo\", 4, 3, 194),\n ( 4, \"Golf\", 4, 2, 222),\n ( 5, \"Charlie\", 4, 2, 209),\n ( 5, \"Kilo\", 4, 2, 209),\n ( 7, \"Juliet\", 4, 2, 197),\n ( 8, \"Bravo\", 4, 2, 194),\n ( 9, \"Hotel\", 4, 2, 186),\n (10, \"India\", 4, 1, 192),\n (11, \"Foxtrot\", 4, 1, 137),\n ],\n 3 : [\n ( 1, \"Delta\", 6, 6, 395),\n ( 2, \"Alpha\", 6, 4, 332),\n ( 3, \"Charlie\", 6, 4, 315),\n ( 4, \"Golf\", 6, 3, 319),\n ( 5, \"Juliet\", 6, 3, 293),\n ( 6, \"Hotel\", 6, 3, 290),\n ( 7, \"Bravo\", 6, 3, 283),\n ( 8, \"Echo\", 6, 3, 282),\n ( 9, \"Foxtrot\", 6, 3, 250),\n (10, \"India\", 6, 2, 302),\n (11, \"Kilo\", 6, 2, 280),\n ]\n },\n \"rank\" : 
countdowntourney.RANK_WINS_POINTS\n },\n {\n \"name\" : \"ranktest_neustadtl\",\n \"player_names\" : [ \"Alpha\", \"Bravo\", \"Charlie\", \"Delta\", \"Echo\", \"Foxtrot\", \"Golf\", \"Hotel\", \"India\", \"Juliet\", \"Kilo\" ],\n \"rank\" : countdowntourney.RANK_WINS_NEUSTADTL,\n \"games_per_round\" : {\n 1 : [\n (1, \"Alpha\", 60, \"Echo\", 40),\n (1, \"Echo\", 41, \"India\", 31),\n (1, \"India\", 75, \"Alpha\", 75),\n (2, \"Bravo\", 46, \"Foxtrot\", 31),\n (2, \"Foxtrot\", 37, \"Juliet\", 30),\n (2, \"Juliet\", 60, \"Bravo\", 55),\n (3, \"Charlie\", 45, \"Golf\", 59),\n (3, \"Golf\", 72, \"Kilo\", 52),\n (3, \"Kilo\", 50, \"Charlie\", 51),\n (4, \"Delta\", 60, \"Hotel\", 30),\n (4, \"Hotel\", 66, \"Prune\", 0),\n (4, \"Prune\", 0, \"Delta\", 70)\n ],\n # Defeated opponents after round 1:\n # Alpha: Echo, India(D)\n # Bravo: Foxtrot\n # Charlie: Kilo\n # Delta: Hotel, Prune\n # Echo: India\n # Foxtrot: Juliet\n # Golf: Charlie, Kilo\n # Hotel: Prune\n # India: Alpha(D)\n # Juliet: Bravo\n # Kilo:\n # Prune:\n # Hotel withdraws.\n # Anyone who beat Hotel in the first round has their Neustadtl\n # score calculated as if Hotel drew their missing games.\n 2 : [\n (1, \"Golf\", 56, \"Delta\", 72),\n (1, \"Delta\", 61, \"Alpha\", 50),\n (1, \"Alpha\", 54, \"Golf\", 39),\n (2, \"Bravo\", 49, \"Juliet\", 32),\n (2, \"Juliet\", 44, \"Foxtrot\", 40),\n (2, \"Foxtrot\", 40, \"Bravo\", 66),\n (3, \"Echo\", 62, \"Charlie\", 52),\n (3, \"Charlie\", 65, \"Prune\", 0),\n (3, \"Prune\", 0, \"Echo\", 59),\n (4, \"India\", 52, \"Kilo\", 39),\n (4, \"Kilo\", 70, \"Prune\", 0),\n (4, \"Prune\", 0, \"India\", 69)\n ],\n # Defeated opponents after round 2:\n # R1 R2\n # Alpha: Echo, India(D), Golf\n # Bravo: Foxtrot, Juliet, Foxtrot\n # Charlie: Kilo, Prune\n # Delta: Hotel, Prune, Alpha, Golf\n # Echo: India, Charlie, Prune\n # Foxtrot: Juliet\n # Golf: Charlie, Kilo\n # Hotel: Prune\n # India: Alpha(D), Kilo, Prune II\n # Juliet: Bravo, Foxtrot\n # Kilo: Prune II\n # Prune:\n # Prune II:\n 3 : [\n (1, \"Delta\", 60, \"Echo\", 55),\n (1, \"Echo\", 65, \"Alpha\", 50),\n (1, \"Alpha\", 59, \"Delta\", 69),\n (2, \"India\", 32, \"Bravo\", 48),\n (2, \"Bravo\", 50, \"Golf\", 31),\n (2, \"Golf\", 48, \"India\", 30),\n (3, \"Juliet\", 40, \"Charlie\", 77),\n (3, \"Charlie\", 56, \"Foxtrot\", 51),\n (3, \"Foxtrot\", 41, \"Juliet\", 59),\n (4, \"Kilo\", 40, \"Late\", 30),\n (4, \"Late\", 59 , \"Prune\", 0),\n (4, \"Prune\", 0, \"Kilo\", 79)\n ]\n # Defeated opponents after round 3:\n # R1 R2 R3\n # Alpha: Echo, India(D), Golf\n # Bravo: Foxtrot, Foxtrot, Juliet Golf, India\n # Charlie: Kilo, Prune, Foxtrot, Juliet\n # Delta: Hotel, Prune, Alpha, Golf, Echo, Alpha\n # Echo: India, Charlie, Prune, Alpha\n # Foxtrot: Juliet\n # Golf: Charlie, Kilo, India\n # Hotel: Prune\n # India: Alpha(D), Kilo, Prune II\n # Juliet: Bravo, Foxtrot, Foxtrot\n # Kilo: Prune II, Late, Prune\n # Late: Prune\n # Prune:\n # Prune II:\n },\n \"standings_per_round\" : {\n 1 : [\n ( 1, \"Golf\", 2, 2, 1, 131),\n ( 2, \"Delta\", 2, 2, 1, 130),\n ( 3, \"Alpha\", 2, 1.5, 1.25, 135),\n ( 4, \"Bravo\", 2, 1, 1, 101),\n ( 5, \"Juliet\", 2, 1, 1, 90),\n ( 6, \"Foxtrot\", 2, 1, 1, 68),\n ( 7, \"Echo\", 2, 1, 0.5, 81),\n ( 8, \"Charlie\", 2, 1, 0, 96),\n ( 8, \"Hotel\", 2, 1, 0, 96),\n (10, \"India\", 2, 0.5, 0.75, 106),\n (11, \"Kilo\", 2, 0, 0, 102),\n ],\n 2 : [\n ( 1, \"Delta\", 4, 4, 6.5, 263),\n ( 2, \"Echo\", 4, 3, 4.5, 202),\n ( 3, \"Bravo\", 4, 3, 4, 216),\n ( 4, \"Alpha\", 4, 2.5, 6.25, 239),\n ( 5, \"India\", 4, 2.5, 2.25, 227),\n ( 6, 
\"Juliet\", 4, 2, 4, 166),\n ( 7, \"Golf\", 4, 2, 3, 226),\n ( 8, \"Charlie\", 4, 2, 1, 213),\n ( 9, \"Foxtrot\", 4, 1, 2, 148),\n (10, \"Kilo\", 4, 1, 0, 211),\n (11, \"Hotel\", 2, 1, 0, 96),\n ],\n 3 : [\n ( 1, \"Delta\", 6, 6, 15, 392),\n ( 2, \"Bravo\", 6, 5, 10.5, 314),\n ( 3, \"Echo\", 6, 4, 9, 322),\n ( 4, \"Charlie\", 6, 4, 7, 346),\n ( 5, \"Golf\", 6, 3, 9.5, 305),\n ( 6, \"Juliet\", 6, 3, 7, 265),\n ( 7, \"Kilo\", 6, 3, 3, 330),\n ( 8, \"Alpha\", 6, 2.5, 8.25, 348),\n ( 9, \"India\", 6, 2.5, 4.25, 289),\n (10, \"Foxtrot\", 6, 1, 3, 240),\n (11, \"Hotel\", 2, 1, 0, 96),\n (12, \"Late\", 2, 1, 0, 89),\n ],\n },\n \"withdrawals_after_round\" : {\n 1 : [ \"Hotel\" ],\n },\n \"additions_after_round\" : {\n 2 : [ \"Late\" ]\n }\n },\n {\n \"name\" : \"ranktest_solkoff\",\n \"player_names\" : [ \"Alpha\", \"Bravo\", \"Charlie\", \"Delta\", \"Echo\", \"Foxtrot\", \"Golf\", \"Hotel\", \"India\", \"Juliet\", \"Kilo\" ],\n \"rank\" : countdowntourney.RANK_WINS_SOW,\n \"games_per_round\" : {\n 1 : [\n (1, \"Alpha\", 60, \"Echo\", 40),\n (1, \"Echo\", 41, \"India\", 31),\n (1, \"India\", 75, \"Alpha\", 75),\n (2, \"Bravo\", 46, \"Foxtrot\", 31),\n (2, \"Foxtrot\", 37, \"Juliet\", 30),\n (2, \"Juliet\", 60, \"Bravo\", 55),\n (3, \"Charlie\", 45, \"Golf\", 59),\n (3, \"Golf\", 72, \"Kilo\", 52),\n (3, \"Kilo\", 50, \"Charlie\", 51),\n (4, \"Delta\", 60, \"Hotel\", 30),\n (4, \"Hotel\", 66, \"Prune\", 0),\n (4, \"Prune\", 0, \"Delta\", 70)\n ],\n # Opponents faced after round 1:\n #\n # Alpha: Echo, India\n # Bravo: Foxtrot, Juliet\n # Charlie: Golf, Kilo\n # Delta: Hotel, Prune\n # Echo: Alpha, India\n # Foxtrot: Bravo, Juliet\n # Golf: Charlie, Kilo\n # Hotel: Delta, Prune\n # India: Alpha, Echo\n # Juliet: Bravo, Foxtrot\n # Kilo: Charlie, Golf\n # Prune: Delta, Hotel\n\n # Prune withdraws, Late is added. 
Players who have played Prune\n # do not get a bonus of 0.5 Solkoff points for each of Prune's\n # missed games, but players who play Late DO get a bonus of 0.5\n # Solkoff points for each of Late's two missed games.\n 2 : [\n (1, \"Golf\", 50, \"Delta\", 60),\n (1, \"Delta\", 42, \"Alpha\", 55),\n (1, \"Alpha\", 61, \"Golf\", 30),\n (2, \"Bravo\", 55, \"Hotel\", 50),\n (2, \"Hotel\", 41, \"Charlie\", 57),\n (2, \"Charlie\", 48, \"Bravo\", 38),\n (3, \"Juliet\", 56, \"Echo\", 46),\n (3, \"Echo\", 41, \"Foxtrot\", 35),\n (3, \"Foxtrot\", 65, \"Juliet\", 40),\n (4, \"India\", 50, \"Kilo\", 49 ),\n (4, \"Kilo\", 56, \"Late\", 30),\n (4, \"Late\", 45, \"India\", 41)\n ],\n\n # Opponents faced after round 2:\n #\n # Alpha: Echo, India, Delta, Golf\n # Bravo: Foxtrot, Juliet, Charlie, Hotel\n # Charlie: Golf, Kilo, Bravo, Hotel\n # Delta: Hotel, Prune, Alpha, Golf\n # Echo: Alpha, India, Foxtrot, Juliet\n # Foxtrot: Bravo, Juliet, Echo, Juliet\n # Golf: Charlie, Kilo, Alpha, Delta\n # Hotel: Delta, Prune, Bravo, Charlie\n # India: Alpha, Echo, Kilo, Late\n # Juliet: Bravo, Foxtrot, Echo, Foxtrot\n # Kilo: Charlie, Golf, India, Late\n # Late: India, Kilo\n # Prune: Delta, Hotel\n\n 3 : [\n (1, \"Alpha\", 49, \"Delta\", 62),\n (1, \"Delta\", 55, \"Charlie\", 57),\n (1, \"Charlie\", 41, \"Alpha\", 70),\n (2, \"Golf\", 41, \"Echo\", 59),\n (2, \"Echo\", 37, \"Bravo\", 60),\n (2, \"Bravo\", 72, \"Golf\", 52),\n (3, \"Juliet\", 55, \"Foxtrot\", 62),\n (3, \"Foxtrot\", 51, \"India\", 59),\n (3, \"India\", 58, \"Juliet\", 45),\n (4, \"Kilo\", 50, \"Hotel\", 66),\n (4, \"Hotel\", 52, \"Late\", 50),\n (4, \"Late\", 59, \"Kilo\", 31)\n ]\n # Opponents faced after round 3:\n #\n # Alpha: Echo, India, Delta, Golf, Charlie, Delta\n # Bravo: Foxtrot, Juliet, Charlie, Hotel, Echo, Golf\n # Charlie: Golf, Kilo, Bravo, Hotel, Alpha, Delta\n # Delta: Hotel, Prune, Alpha, Golf, Alpha, Charlie\n # Echo: Alpha, India, Foxtrot, Juliet, Bravo, Golf\n # Foxtrot: Bravo, Juliet, Echo, Juliet, India, Juliet\n # Golf: Charlie, Kilo, Alpha, Delta, Bravo, Echo\n # Hotel: Delta, Prune, Bravo, Charlie, Kilo, Late\n # India: Alpha, Echo, Kilo, Late, Foxtrot, Juliet\n # Juliet: Bravo, Foxtrot, Echo, Foxtrot, Foxtrot, India\n # Kilo: Charlie, Golf, India, Late, Hotel, Late\n # Late: India, Kilo, Hotel, Kilo\n # Prune: Delta, Hotel\n },\n \"standings_per_round\" : {\n 1 : [\n ( 1, \"Golf\", 2, 2, 1, 131),\n ( 2, \"Delta\", 2, 2, 1, 130),\n ( 3, \"Alpha\", 2, 1.5, 1.5, 135),\n ( 4, \"Bravo\", 2, 1, 2, 101),\n ( 5, \"Charlie\", 2, 1, 2, 96),\n ( 5, \"Hotel\", 2, 1, 2, 96),\n ( 7, \"Juliet\", 2, 1, 2, 90),\n ( 8, \"Echo\", 2, 1, 2, 81),\n ( 9, \"Foxtrot\", 2, 1, 2, 68),\n (10, \"India\", 2, 0.5, 2.5, 106),\n (11, \"Kilo\", 2, 0, 3, 102),\n ],\n 2 : [\n ( 1, \"Alpha\", 4, 3.5, 8.5, 251),\n ( 2, \"Delta\", 4, 3, 6.5, 232),\n ( 3, \"Charlie\", 4, 3, 6, 201),\n ( 4, \"Golf\", 4, 2, 10.5, 211),\n ( 5, \"Echo\", 4, 2, 9, 168),\n ( 6, \"Bravo\", 4, 2, 8, 194),\n ( 7, \"Juliet\", 4, 2, 8, 186),\n ( 8, \"Foxtrot\", 4, 2, 8, 168),\n ( 9, \"India\", 4, 1.5, 8.5, 197), # played Late, bonus 2*0.5\n (10, \"Kilo\", 4, 1, 8.5, 207), # played Late, bonus 2*0.5\n (11, \"Hotel\", 4, 1, 8, 187),\n (12, \"Late\", 2, 1, 2.5, 75),\n ],\n 3 : [\n ( 1, \"Alpha\", 6, 4.5, 20.5, 370),\n ( 2, \"Charlie\", 6, 4, 18.5, 299),\n ( 3, \"Delta\", 6, 4, 18, 349),\n ( 4, \"Bravo\", 6, 4, 17, 326),\n ( 5, \"India\", 6, 3.5, 16.5, 314), # played Late, bonus 2*0.5\n ( 6, \"Echo\", 6, 3, 19, 264),\n ( 7, \"Foxtrot\", 6, 3, 16.5, 281),\n ( 8, \"Hotel\", 6, 3, 16, 305), # 
played Late, bonus 2*0.5\n ( 9, \"Golf\", 6, 2, 20.5, 304),\n (10, \"Juliet\", 6, 2, 19.5, 286),\n (11, \"Late\", 4, 2, 8.5, 184),\n (12, \"Kilo\", 6, 1, 18.5, 288), # played Late*2, bonus 2*2*0.5\n ],\n },\n \"withdrawals_after_round\" : {\n },\n \"additions_after_round\" : {\n 1 : [ \"Late\" ]\n }\n },\n {\n \"name\" : \"ranktest_cumulative\",\n \"player_names\" : [ \"Alpha\", \"Bravo\", \"Charlie\", \"Delta\", \"Echo\", \"Foxtrot\", \"Golf\", \"Hotel\", \"India\", \"Juliet\", \"Kilo\", \"Lima\" ],\n \"rank\" : countdowntourney.RANK_WINS_CUMULATIVE,\n \"games_per_round\" : {\n 1 : [\n (1, \"Alpha\", 60, \"Echo\", 40),\n (1, \"Echo\", 41, \"India\", 31),\n (1, \"India\", 75, \"Alpha\", 75),\n (2, \"Bravo\", 46, \"Foxtrot\", 31),\n (2, \"Foxtrot\", 37, \"Juliet\", 30),\n (2, \"Juliet\", 60, \"Bravo\", 55),\n (3, \"Charlie\", 45, \"Golf\", 59),\n (3, \"Golf\", 72, \"Kilo\", 52),\n (3, \"Kilo\", 50, \"Charlie\", 51),\n (4, \"Delta\", 60, \"Hotel\", 30),\n (4, \"Hotel\", 66, \"Lima\", 33),\n (4, \"Lima\", 70, \"Delta\", 50)\n ],\n # Foxtrot withdraws, Prune is added automatically\n 2 : [\n (1, \"Golf\", 55, \"Alpha\", 51),\n (1, \"Alpha\", 61, \"Delta\", 40),\n (1, \"Delta\", 56, \"Golf\", 72),\n (2, \"Lima\", 50, \"Bravo\", 51),\n (2, \"Bravo\", 59, \"Charlie\", 35),\n (2, \"Charlie\", 64, \"Lima\", 60),\n (3, \"Hotel\", 40, \"Juliet\", 65),\n (3, \"Juliet\", 46, \"Echo\", 41),\n (3, \"Echo\", 56, \"Hotel\", 51),\n (4, \"Prune\", 0, \"India\", 68),\n (4, \"India\", 57, \"Kilo\", 41),\n (4, \"Kilo\", 82, \"Prune\", 0)\n ],\n # Foxtrot returns, Prune withdraws. Foxtrot didn't play in round\n # 2 so gets no cumulative effect for that round.\n 3 : [\n (1, \"Golf\", 50, \"Bravo\", 60),\n (1, \"Bravo\", 63, \"Juliet\", 51),\n (1, \"Juliet\", 41, \"Golf\", 49),\n (2, \"Alpha\", 56, \"India\", 55),\n (2, \"India\", 75, \"Charlie\", 40),\n (2, \"Charlie\", 51, \"Alpha\", 30),\n (3, \"Echo\", 59, \"Lima\", 52),\n (3, \"Lima\", 41, \"Delta\", 60),\n (3, \"Delta\", 55, \"Echo\", 45),\n (4, \"Hotel\", 50, \"Kilo\", 66),\n (4, \"Kilo\", 56, \"Foxtrot\", 39),\n (4, \"Foxtrot\", 61, \"Hotel\", 41),\n ]\n },\n \"standings_per_round\" : {\n 1 : [\n ( 1, \"Golf\", 2, 2, 2, 131),\n ( 2, \"Alpha\", 2, 1.5, 1.5, 135),\n ( 3, \"Delta\", 2, 1, 1, 110),\n ( 4, \"Lima\", 2, 1, 1, 103),\n ( 5, \"Bravo\", 2, 1, 1, 101),\n ( 6, \"Charlie\", 2, 1, 1, 96),\n ( 6, \"Hotel\", 2, 1, 1, 96),\n ( 8, \"Juliet\", 2, 1, 1, 90),\n ( 9, \"Echo\", 2, 1, 1, 81),\n (10, \"Foxtrot\", 2, 1, 1, 68),\n (11, \"India\", 2, 0.5, 0.5, 106),\n (12, \"Kilo\", 2, 0, 0, 102)\n ],\n 2 : [\n ( 1, \"Golf\", 4, 4, 6, 258),\n ( 2, \"Bravo\", 4, 3, 4, 211),\n ( 3, \"Juliet\", 4, 3, 4, 201),\n ( 4, \"Alpha\", 4, 2.5, 4, 247),\n ( 5, \"India\", 4, 2.5, 3, 231),\n ( 6, \"Charlie\", 4, 2, 3, 195),\n ( 7, \"Echo\", 4, 2, 3, 178),\n ( 8, \"Lima\", 4, 1, 2, 213),\n ( 9, \"Delta\", 4, 1, 2, 206),\n (10, \"Hotel\", 4, 1, 2, 187),\n (11, \"Kilo\", 4, 1, 1, 225),\n (12, \"Foxtrot\", 2, 1, 1, 68),\n ],\n 3 : [\n ( 1, \"Golf\", 6, 5, 11, 357),\n ( 2, \"Bravo\", 6, 5, 9, 334),\n ( 3, \"Alpha\", 6, 3.5, 7.5, 333),\n ( 4, \"India\", 6, 3.5, 6.5, 361),\n ( 5, \"Juliet\", 6, 3, 7, 293),\n ( 6, \"Charlie\", 6, 3, 6, 286),\n ( 7, \"Echo\", 6, 3, 6, 282),\n ( 8, \"Delta\", 6, 3, 5, 321),\n ( 9, \"Kilo\", 6, 3, 4, 347),\n (10, \"Foxtrot\", 4, 2, 3, 168),\n (11, \"Lima\", 6, 1, 3, 306),\n (12, \"Hotel\", 6, 1, 3, 278),\n ],\n },\n \"withdrawals_after_round\" : {\n 1 : [ \"Foxtrot\" ],\n },\n \"unwithdrawals_after_round\" : {\n 2 : [ \"Foxtrot\" ],\n },\n },\n {\n \"name\" : 
\"ranktest_spread\",\n \"player_names\" : [ \"Alpha\", \"Bravo\", \"Charlie\", \"Delta\", \"Echo\", \"Foxtrot\", \"Golf\", \"Hotel\", \"India\", \"Juliet\", \"Kilo\", \"Lima\" ],\n \"rank\" : countdowntourney.RANK_WINS_SPREAD,\n \"games_per_round\" : {\n 1 : [\n (1, \"Alpha\", 60, \"Echo\", 40),\n (1, \"Echo\", 41, \"India\", 31),\n (1, \"India\", 58, \"Alpha\", 68, True),\n (2, \"Bravo\", 46, \"Foxtrot\", 31),\n (2, \"Foxtrot\", 37, \"Juliet\", 30),\n (2, \"Juliet\", 60, \"Bravo\", 55),\n (3, \"Charlie\", 45, \"Golf\", 59),\n (3, \"Golf\", 72, \"Kilo\", 52),\n (3, \"Kilo\", 50, \"Charlie\", 51),\n (4, \"Delta\", 60, \"Hotel\", 30),\n (4, \"Hotel\", 66, \"Lima\", 51),\n (4, \"Lima\", 69, \"Delta\", 70)\n ],\n 2 : [\n (1, \"Golf\", 56, \"Delta\", 51),\n (1, \"Delta\", 44, \"Alpha\", 38),\n (1, \"Alpha\", 50, \"Golf\", 70),\n (2, \"Bravo\", 52, \"Juliet\", 42),\n (2, \"Juliet\", 49, \"Foxtrot\", 41),\n (2, \"Foxtrot\", 44, \"Bravo\", 50),\n (3, \"Echo\", 46, \"Charlie\", 41),\n (3, \"Charlie\", 55, \"Hotel\", 55),\n (3, \"Hotel\", 56, \"Echo\", 60),\n (4, \"India\", 45, \"Lima\", 41),\n (4, \"Lima\", 44, \"Kilo\", 56),\n (4, \"Kilo\", 52, \"India\", 50)\n ],\n 3 : [\n (1, \"Golf\", 72, \"Delta\", 62),\n (1, \"Delta\", 52, \"Bravo\", 54),\n (1, \"Bravo\", 55, \"Golf\", 63),\n (2, \"Echo\", 57, \"Juliet\", 41),\n (2, \"Juliet\", 52, \"Alpha\", 51),\n (2, \"Alpha\", 60, \"Echo\", 59),\n (3, \"Kilo\", 45, \"Charlie\", 35),\n (3, \"Charlie\", 47, \"Hotel\", 42),\n (3, \"Hotel\", 44, \"Kilo\", 70),\n (4, \"India\", 40, \"Foxtrot\", 52),\n (4, \"Foxtrot\", 48, \"Lima\", 56),\n (4, \"Lima\", 43, \"India\", 60)\n ]\n },\n \"standings_per_round\" : {\n 1 : [\n ( 1, \"Golf\", 2, 2, 34),\n ( 2, \"Delta\", 2, 2, 31),\n ( 3, \"Alpha\", 2, 2, 20),\n ( 4, \"Bravo\", 2, 1, 10),\n ( 5, \"Juliet\", 2, 1, -2),\n ( 6, \"Foxtrot\", 2, 1, -8),\n ( 7, \"Echo\", 2, 1, -10),\n ( 8, \"Charlie\", 2, 1, -13),\n ( 9, \"Hotel\", 2, 1, -15),\n (10, \"India\", 2, 0, -10),\n (11, \"Lima\", 2, 0, -16),\n (12, \"Kilo\", 2, 0, -21),\n ],\n 2 : [\n ( 1, \"Golf\", 4, 4, 59),\n ( 2, \"Delta\", 4, 3, 32),\n ( 3, \"Bravo\", 4, 3, 26),\n ( 4, \"Echo\", 4, 3, -1),\n ( 5, \"Juliet\", 4, 2, -4),\n ( 6, \"Alpha\", 4, 2, -6),\n ( 7, \"Kilo\", 4, 2, -7),\n ( 8, \"Charlie\", 4, 1.5, -18),\n ( 9, \"Hotel\", 4, 1.5, -19),\n (10, \"India\", 4, 1, -8),\n (11, \"Foxtrot\", 4, 1, -22),\n (12, \"Lima\", 4, 0, -32),\n ],\n 3 : [\n ( 1, \"Golf\", 6, 6, 77),\n ( 2, \"Kilo\", 6, 4, 29),\n ( 3, \"Bravo\", 6, 4, 20),\n ( 4, \"Echo\", 6, 4, 14),\n ( 5, \"Delta\", 6, 3, 20),\n ( 6, \"Alpha\", 6, 3, -6),\n ( 7, \"Juliet\", 6, 3, -19),\n ( 8, \"Charlie\", 6, 2.5, -23),\n ( 9, \"India\", 6, 2, -3),\n (10, \"Foxtrot\", 6, 2, -18),\n (11, \"Hotel\", 6, 1.5, -50),\n (12, \"Lima\", 6, 1, -41),\n ],\n },\n }\n]\n\nclass TestFailedException(Exception):\n pass\n\ndef testfail(test_name, s):\n print(test_name + \": TEST FAILED\")\n print(s)\n raise TestFailedException(test_name + \": \" + s)\n\n# tourney_name: name of the test and the tourney\n# player_names: list of names, one for each player. Players whose names start\n# with \"Prune\" will get a rating of 0.\n# games_per_round: dict of round numbers to [ (table_number, p1name, p1score, p2name, p2score, tb) ]\n# expected_standings_per_round: dict of round numbers to [ (pos, name, played, wins, secondary rank values...) 
]\ndef run_rank_test(tourney_name, player_names, games_per_round,\n expected_standings_per_round,\n rank_method_id=countdowntourney.RANK_WINS_POINTS,\n withdrawals_after_round={}, unwithdrawals_after_round={},\n additions_after_round={}):\n if not tourney_name.startswith(\"_\"):\n tourney_name = \"_\" + tourney_name\n dbfilename = \"./tourneys/\" + tourney_name + \".db\"\n if os.path.isfile(dbfilename):\n os.unlink(dbfilename)\n\n tourney = countdowntourney.tourney_create(tourney_name, \"./tourneys\")\n\n player_list = [ countdowntourney.EnteredPlayer(p, 0 if p.startswith(\"Prune\") else None) for p in player_names ]\n tourney.set_players(player_list, countdowntourney.RATINGS_UNIFORM)\n tourney.set_rank_method_id(rank_method_id)\n\n rank_method = tourney.get_rank_method()\n\n players = tourney.get_players()\n if len(players) != len(player_names):\n testfail(tourney_name, \"tourney.get_players() returned wrong number of players: expected %d, got %d\" % (len(player_names), len(players)))\n\n for round_no in games_per_round:\n games = games_per_round[round_no]\n\n tourney.name_round(round_no, \"Round %d\" % (round_no))\n\n # First, set up fixtures for this round\n round_seq = 1\n fixtures = []\n for row in games:\n (table_no, p1name, p1score, p2name, p2score) = row[0:5]\n tb = (len(row) >= 6 and row[5])\n fixtures.append(countdowntourney.Game(round_no, round_seq, table_no, 0, 'P', tourney.get_player_from_name(p1name), tourney.get_player_from_name(p2name)))\n round_seq += 1\n tourney.merge_games(fixtures)\n\n # Now fill in the results\n round_seq = 1\n for row in games:\n (table_no, p1name, p1score, p2name, p2score) = row[0:5]\n tb = (len(row) >= 6 and row[5])\n game = fixtures[round_seq - 1]\n game.set_score(p1score, p2score, tb)\n tourney.merge_games([game])\n round_seq += 1\n\n # Now check the standings\n standings = tourney.get_standings()\n expected_standings = expected_standings_per_round.get(round_no)\n if expected_standings:\n if len(standings) != len(expected_standings):\n testfail(tourney_name, \"round %d: tourney.get_standings() returned wrong number of rows: expected %d, got %d\" % (round_no, len(expected_standings), len(standings)))\n for standings_index in range(len(standings)):\n row_num = standings_index + 1\n obs = standings[standings_index]\n exs = expected_standings[standings_index]\n if verbose:\n print(str(obs))\n exp_pos = exs[0]\n exp_name = exs[1]\n exp_played = exs[2]\n exp_wins = exs[3]\n exp_secondaries = exs[4:]\n obs_secondaries = obs.get_secondary_rank_values()\n if obs.position != exp_pos:\n testfail(tourney_name, \"round %d: row %d: position: expected %d, observed %d\" % (round_no, row_num, exp_pos, obs.position))\n if obs.name != exp_name:\n testfail(tourney_name, \"round %d: row %d: name: expected %s, observed %s\" % (round_no, row_num, exp_name, obs.name))\n if obs.played != exp_played:\n testfail(tourney_name, \"round %d: row %d: games played: expected %d, observed %d\" % (round_no, row_num, exp_played, obs.played))\n if obs.wins + obs.draws * 0.5 != exp_wins:\n testfail(tourney_name, \"round %d: row %d: games won: expected %g, observed %g\" % (round_no, row_num, exp_wins, obs.wins + obs.draws * 0.5))\n sec_headings = rank_method.get_secondary_rank_headings()\n if len(exp_secondaries) != len(obs_secondaries):\n testfail(tourney_name, \"round %d: row %d: expected %d secondary rank values, observed %d\" % (round_no, row_num, len(exp_secondaries), len(obs_secondaries)))\n for i in range(len(exp_secondaries)):\n if obs_secondaries[i] != exp_secondaries[i]:\n 
testfail(tourney_name, \"round %d: row %d: %s: expected %s, observed %s\" % (round_no, row_num, sec_headings[i], str(exp_secondaries[i]), str(obs_secondaries[i])))\n else:\n print(\"Warning: no expected standings for round %d\" % (round_no))\n\n # Any players withdrawing this round?\n for name in withdrawals_after_round.get(round_no, []):\n tourney.withdraw_player(name)\n\n # Any players returning this round?\n for name in unwithdrawals_after_round.get(round_no, []):\n tourney.unwithdraw_player(name)\n\n # Any new players joining in this round?\n for name in additions_after_round.get(round_no, []):\n tourney.add_player(name, 0 if name.startswith(\"Prune\") else 1000)\n\n if verbose:\n print(\"Finished round %d.\" % (round_no))\n\ndef main():\n desired_tests = sys.argv[1:]\n desired_tests = set(desired_tests)\n for test in tests:\n if desired_tests and test[\"name\"] not in desired_tests:\n continue\n print(\"Test %s...\" % (test[\"name\"]))\n run_rank_test(test[\"name\"], test[\"player_names\"],\n test[\"games_per_round\"], test[\"standings_per_round\"],\n test[\"rank\"], test.get(\"withdrawals_after_round\", {}),\n test.get(\"unwithdrawals_after_round\", {}),\n test.get(\"additions_after_round\", {}))\n print(\"Test %s passed.\" % (test[\"name\"]))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"elocemearg/atropine","sub_path":"test/ranktest.py","file_name":"ranktest.py","file_ext":"py","file_size_in_byte":30777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23502448267","text":"\"\"\"Implement a Binary Search Tree with insert, delete, search and other functions\"\"\"\n\nfrom collections import deque\nimport random\n\nclass Node:\n \"\"\"A node in a Binary Search Tree. Each node has info about its key, parent \n (if any), and children (if any).\"\"\"\n\n def __init__(self, key):\n \"\"\"Create a node from a key.\"\"\"\n self.key = key\n self.parent = None\n self.left = None\n self.right = None\n\n def get_children(self):\n \"\"\"Retrieve children (if any) of a node.\"\"\"\n children = []\n if self.left is not None:\n children.append(self.left)\n if self.right is not None:\n children.append(self.right)\n return children\n\n\nclass BinarySearchTree():\n \"\"\"A binary search tree: each node has a key and has two subtrees. The key \n in each node must be greater than or equal to any key stored in the left \n subtree, and less than or equal to any key stored in the right subtree.\"\"\"\n\n def __init__(self, root=None):\n \"\"\"Create a Binary Search Tree.\"\"\"\n self.root = root\n\n def insert(self, key):\n \"\"\"Insert a key into the tree.\"\"\"\n if self.root is None:\n self.root = Node(key)\n else:\n self.__insert_node(key, self.root)\n \n def __insert_node(self, key, current_node, parent=None):\n \"\"\"Private function to be used by insert(). Insert a key by comparing it\n with the key of current_node. 
Assign parent and children (if any) to the\n new node.\"\"\"\n\n if key == current_node.key:\n print (\"Key {} already exists\".format(key))\n return\n elif key < current_node.key:\n if current_node.left:\n self.__insert_node(key, current_node.left, current_node)\n else:\n current_node.left = Node(key)\n current_node.left.parent = current_node\n else:\n if current_node.right:\n self.__insert_node(key, current_node.right, current_node)\n else:\n current_node.right = Node(key)\n current_node.right.parent = current_node\n\n def search(self, key, current_node):\n \"\"\"Search for a key in the tree using Depth First Search.\"\"\"\n if current_node is None or key == current_node.key:\n return current_node\n if key < current_node.key:\n return self.search(key, current_node.left)\n return self.search(key, current_node.right)\n\n def find_min_node(self, current_node):\n \"\"\"Gets the minimum node in a subtree starting with current_node\"\"\"\n while current_node.left:\n current_node = current_node.left\n return current_node\n\n def find_max_node(self, current_node):\n \"\"\"Gets the maximum node in a subtree starting with current_node\"\"\"\n while current_node.right:\n current_node = current_node.right\n return current_node\n\n def replace_child(self, child, parent, new_child=None):\n \"\"\"Replace a child of parent with a new child.\"\"\"\n if parent:\n if child is parent.left:\n parent.left = new_child\n else:\n parent.right = new_child\n if new_child:\n new_child.parent = parent\n\n def delete(self, key, current_node):\n \"\"\"Delete a key from the tree, choosing a replacement randomly\"\"\"\n # Search for the node to delete\n node = self.search(key, current_node)\n if node is None:\n print (\"Key {} is not found in the tree\".format(key))\n # If the node is found, proceed to delete it\n if node is not None:\n parent = node.parent\n if node.left and node.right:\n predecessor = self.find_max_node(node.left)\n successor = self.find_min_node(node.right)\n chosen_node = random.choice([predecessor, successor])\n node.key = chosen_node.key\n self.delete(chosen_node.key, chosen_node)\n elif node.left:\n self.replace_child(node, parent, node.left)\n elif node.right:\n self.replace_child(node, parent, node.right)\n else:\n self.replace_child(node, parent, None)\n \n def get_tree(self, current_node):\n \"\"\"Get the tree with all its nodes in Depth First order.\"\"\"\n tree = []\n tree += [current_node]\n if current_node.left:\n tree += self.get_tree(current_node.left)\n if current_node.right:\n tree += self.get_tree(current_node.right)\n return tree\n\n def print_tree(self):\n \"\"\"Print out the key for each node in the tree in Depth First order.\"\"\"\n print ([node.key for node in self.get_tree(self.root)])\n\n def get_tree_breadth_first(self, current_node):\n \"\"\"Get the tree with all its nodes in Breadth First order.\"\"\"\n tree = []\n queue = deque()\n queue.append(current_node)\n while len(queue) > 0:\n node = queue.popleft()\n tree.append(node)\n if node.left is not None:\n queue.append(node.left)\n if node.right is not None:\n queue.append(node.right)\n return tree\n\n def print_tree_breadth_first(self):\n \"\"\"Print out the key for each node in the tree in Breadth First order.\"\"\"\n print ([node.key for node in self.get_tree_breadth_first(self.root)])\n\n def get_child_parent(self, tree):\n \"\"\"Output a list of tuples; each tuple contains a child and its parent.\"\"\"\n child_parent_pairs = []\n for node in tree:\n if node.parent is not None:\n child_parent_pairs.append((node.key, 
node.parent.key))\n else:\n child_parent_pairs.append((node.key, node.parent))\n return child_parent_pairs\n\n def get_parent_children(self, tree):\n \"\"\"Output a list of tuples; each tuple contain a node, its left child \n and its right child.\"\"\"\n parent_children = []\n for node in tree:\n if node.left is None:\n if node.right is None:\n parent_children.append((node.key, node.left, node.right))\n else:\n parent_children.append((node.key, node.left, node.right.key))\n else:\n if node.right is None:\n parent_children.append((node.key, node.left.key, node.right))\n else:\n parent_children.append((node.key, node.left.key, node.right.key))\n return parent_children\n\n\nif __name__ == \"__main__\":\n print (\"Create a Binary Search Tree\")\n bst = BinarySearchTree()\n\n test_list = [8, 3, 1, 6, 4, 7, 10, 14, 13, 14]\n print (\"Insert the elements from the following list:\", test_list)\n for element in test_list:\n print (\"Insert\", element)\n bst.insert(element)\n\n tree = bst.get_tree(bst.root)\n print (\"\\nPrint the tree in Depth First order\")\n bst.print_tree()\n\n print (\"The root node is\", bst.root.key)\n\n print (\"\\nGet pairs of children and their parent:\", bst.get_child_parent(tree))\n print (\"Get pairs of parents and their children:\", bst.get_parent_children(tree))\n\n print (\"\\nSearch for 8:\", bst.search(8, bst.root).key)\n print (\"Search for 100:\", bst.search(100, bst.root))\n\n print (\"\\nPrint the tree in Breadth First order\")\n bst.print_tree_breadth_first()\n\n print (\"\\nFind the min, max and 2nd largest node in the tree\")\n print (bst.find_min_node(bst.root).key)\n print (bst.find_max_node(bst.root).key)\n print (bst.find_max_node(bst.root).parent.key)\n\n print (\"\\nDelete the following nodes:\")\n print (\"Delete 200\")\n bst.delete(200, bst.root)\n print (\"Delete 8\")\n bst.delete(8, bst.root)\n print (\"The tree now has the following elements:\")\n bst.print_tree()","repo_name":"ntrang086/python_snippets","sub_path":"tree/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33759474629","text":"import numpy as np\nfrom itertools import combinations\nfrom scipy.special import comb, perm\n\n\nclass mix_forward(object):\n '''\n About the parameters:\n # Nm is the number of modality\n # N is the dimension of stimulus input\n # M is the dimension of contexts input\n # P is the independent sample number of stimulus\n # K is the independent sample number of contexts\n # Nc is the neurons on cortical layer\n # delta_xi is the noise parameter for stimulus modality\n # delta_phi is the noise parameter for contextutal modality\n '''\n\n def __init__(self, Nm, N, M, P, K, Nc, delta_xi = None, delta_phi = None):\n self.num_modality = Nm\n self.dim_stimuli = int(N)\n self.dim_context = int(M)\n self.num_sti_sample = int(P)\n self.num_con_sample = int(K)\n self.dim_cortical = int(Nc)\n self.sti_noise = delta_xi\n self.con_noise = delta_phi\n\n def flip(self,a,noise_parameter):\n flip_vector = np.sign(np.random.rand(a.size)-noise_parameter/2.)\n return a*flip_vector\n\n # ------- initialize random data input-------#\n def generate_input(self):\n\n Nm = self.num_modality\n P = self.num_sti_sample\n K = self.num_con_sample\n N = self.dim_stimuli\n M = self.dim_context\n delta_xi = self.sti_noise\n delta_phi = self.con_noise\n\n # binary data, stimulus input\n if delta_xi == None:\n self.data_0 = np.sign(np.random.randn(P, N))\n else:\n 
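# With noise enabled, row 0 serves as a clean prototype and each of the\n            # remaining stimulus rows is a copy whose entries are sign-flipped\n            # independently with probability delta_xi/2 (flip() above draws a\n            # +/-1 mask from np.sign(rand - noise_parameter/2)).\n            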
self.data_0 = np.sign(np.random.randn(P, N))\n # add noise\n for i in range(1,P):\n self.data_0[i,:] = self.flip(self.data_0[0,:],delta_xi)\n\n\n if delta_phi == None:\n\n for i in range(1, Nm):\n # dynamically generate instance as input data\n self.__dict__[f'data_{i}'] = np.sign(np.random.randn(K, M))\n\n else:\n\n for i in range(1, Nm):\n\n self.__dict__[f'data_{i}'] = np.sign(np.random.randn(K, M))\n\n for j in range(1,K):\n self.__dict__[f'data_{i}'][j,:] = self.flip(self.__dict__[f'data_{i}'][0,:],delta_phi)\n\n\n\n\n\n # -----fix the sparsity in the cortical layer-----#\n def fix_sparsity(self, a, f=0.5):\n \n '''\n Input: matrix_h\n Return: matrix_m, T\n '''\n\n v = a.copy()\n\n threshold = np.sort(v.flatten())[int((1 - f) * v.size)]\n\n exite = v >= threshold\n inhibit = v < threshold\n\n v[exite] = 1\n v[inhibit] = 0\n\n return v,threshold\n\n # ---------generate random_connectin matrixs for each partition--------#\n def random_connection(self, m):\n\n # ------claim the parameters---------#\n Nm = self.num_modality\n N = self.dim_stimuli\n Nc = self.dim_cortical\n M = self.dim_context\n\n # generate a dim_list for all modalities, convenient for later calculation of partition dimension\n expr1 = lambda i: N if i == 0 else M\n dim_list = [expr1(i) for i in range(Nm)]\n\n # total partition number\n p = comb(Nm, m, exact=True)\n\n # number of partition with task-relevant stimulus\n # p_t = comb(Nm - 1, m - 1, exact=True)\n\n # total partition list\n p_list = list(combinations(range(Nm), m))\n\n # dimension of each partion on cortical layer\n dim_order_m = int(Nc / p)\n\n # --------generate random connection matrix for each partition--------#\n for i in range(1, p):\n # dimension of the partition on input layer\n dim_i = np.sum([dim_list[j] for j in p_list[i]])\n\n # dynamic variable naming\n self.__dict__[f'J_{i}'] = np.zeros((dim_order_m, dim_i))\n start_index = 0\n\n for j in p_list[i]:\n self.__dict__[f'J_{i}'][:,start_index:(start_index+dim_list[j])] = np.random.normal(0, 1 / np.sqrt(m * dim_list[j]), size=(dim_order_m, dim_list[j]))\n start_index = start_index + dim_list[j]\n\n\n\n # random connection matrix for first partition, Now Nc dont have to be interger times of p\n dim_0 = np.sum([dim_list[j] for j in p_list[0]])\n\n self.J_0 = np.zeros((Nc - (p - 1) * dim_order_m, dim_0))\n start_index = 0\n\n for j in p_list[0]:\n self.J_0[:,start_index:(start_index+dim_list[j])] = np.random.normal(0, 1 / np.sqrt(m * dim_list[j]), size=(Nc - (p - 1) * dim_order_m, dim_list[j]))\n start_index = start_index + dim_list[j]\n \n\n # -------initialize the input_data and random connection matrix------#\n def initialize(self, m):\n\n self.generate_input()\n self.random_connection(m)\n\n # recursively realize the traverse of all independent input data\n def dynloop_rcsn(self, sample_list, mix_layer_data, modality_index=0, sample_index=[]):\n\n # ------claim the parameters---------#\n Nm = self.num_modality\n p = self.p\n p_list = self.p_list\n\n # ----------recursion as dynamic loop realization------------------#\n # traverse the stimulus pattern\n for i in range(sample_list[modality_index]):\n\n # append the index of current sample in current modality\n sample_index.append(i)\n\n # deepest loop\n if modality_index == Nm - 1:\n # -----------feed forward the current sample, with the sample index------#\n\n # create a temp array record the vector on cortical layer from current sample\n cort_temp = np.array([])\n\n ##--------calculate the cortical vector for each partition------##\n # j is the 
index of current partition\n for j in range(p):\n\n # create a temp array record the vector on input layer from current partition\n input_temp = np.array([])\n\n ###-------concatenate to create the input vector for partition j-----#\n # k is the index of modality in current partition\n for k in p_list[j]:\n # sample data of the current modality, self.data_k[sample_index[k]]\n input_temp = np.concatenate((input_temp, self.__dict__[f'data_{k}'][sample_index[k]]))\n\n ###-----feed the input vector forward with the connection matrix self.J_j---#\n part_cort = np.matmul(self.__dict__[f'J_{j}'], input_temp)\n\n cort_temp = np.concatenate((cort_temp, part_cort))\n\n ##--------write the current vector into the whole effective data matrix----##\n mix_layer_data[self.count, :] = cort_temp\n self.count = self.count + 1\n\n # ---------if not the deepest loop, then do one more recursion-----#\n else:\n self.dynloop_rcsn(sample_list, mix_layer_data, modality_index + 1, sample_index)\n\n sample_index.pop()\n\n ## need to generate_input and random_before run the order_m mixing\n def order_m(self, m, f=0.5, initial_data = False, return_m = True, initial_J = True):\n \n \n '''\n if return_m == True: return h,m,T else: return h \n '''\n\n # ------------claim parameters-------------#\n Nm = self.num_modality\n P = self.num_sti_sample\n K = self.num_con_sample\n Nc = self.dim_cortical\n\n # ------initialize the input_data and random_connection matrix------#\n ## Now J_0 to J_p is the random connection matrix for each partition\n ## self.indata_0 to self.indata_Nm are the input data\n if initial_data: self.generate_input()\n if initial_J: self.random_connection(m)\n\n ##----generate a sample number list for all modalities, convenient for later mixing-----##\n expr2 = lambda i: P if i == 0 else K\n sample_list = [expr2(i) for i in range(Nm)]\n\n ##----generate partition number and partition list convenient for later mixing-----##\n self.p = comb(Nm, m, exact=True) # total partition number\n self.p_list = list(combinations(range(Nm), m)) # total partition list\n\n ##-----initialize the effective data matrix on cortical layer------##\n mix_layer_data = np.zeros((P * K ** (Nm - 1), Nc))\n self.count = 0\n\n # -----use recursion to dynamically traverse all possible data and mix feedforwar them-----#\n self.dynloop_rcsn(sample_list, mix_layer_data, modality_index=0, sample_index=[])\n\n\n if return_m == True:\n \n after_nonlinear,T = self.fix_sparsity(mix_layer_data,f=f)\n return mix_layer_data, after_nonlinear, T\n\n else:\n\n return mix_layer_data\n\n","repo_name":"minhuanli/mixed-selectivity-with-scale","sub_path":"Equal_partition/mix_forward.py","file_name":"mix_forward.py","file_ext":"py","file_size_in_byte":8721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41094806102","text":"import numpy as np\nimport tensorflow as tf\nimport tensorflowjs as tfjs\nfrom sklearn import preprocessing\nfrom sklearn import model_selection\n\nclass Attacker:\n # CSVデータ処理(NumPy必要)\n def __init__(self, loading_file, data_normalize=True):\n csv_data = np.loadtxt(loading_file, delimiter=',', dtype=str)\n data_shape = csv_data.shape[0] - 1\n data = np.zeros((data_shape, 34, 2), dtype='int')\n meta = np.zeros((data_shape, 5), dtype='int')\n label = np.zeros(data_shape, dtype='U4')\n for i in range(data_shape):\n for index in range(34):\n data[i][index] = [int(csv_data[i + 1][index * 2 + 6]), int(csv_data[i + 1][index * 2 + 7])]\n meta[i] = csv_data[i + 1][:5].astype(int)\n 
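# CSV layout per row: columns 0-4 are integer metadata, column 5 is the\n            # class label, and columns 6 onward hold 34 (x, y) keypoint pairs,\n            # hence the index*2 + 6 / index*2 + 7 offsets above.\n            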
label[i] = csv_data[i + 1][5]\n\n        re_data = np.zeros((data.shape[0], 8))\n        for num in range(data.shape[0]):\n            # Apply the M-FC v1 architecture\n            re_data[num][0] = encode_distance(encode_middle_shoulder(data[num]), data[num][9]) # (1) distance from left hand to shoulders\n            re_data[num][1] = encode_distance(encode_middle_shoulder(data[num]), data[num][10]) # (2) distance from right hand to shoulders\n            re_data[num][2] = encode_distance(encode_middle_mind(data[num]), data[num][9]) # (3) distance from left hand to lower torso\n            re_data[num][3] = encode_distance(encode_middle_mind(data[num]), data[num][10]) # (4) distance from right hand to lower torso\n            re_data[num][4] = encode_distance(encode_middle_shoulder(data[num]), data[num][15]) # (5) distance from left foot to shoulders\n            re_data[num][5] = encode_distance(encode_middle_shoulder(data[num]), data[num][16]) # (6) distance from right foot to shoulders\n            re_data[num][6] = encode_distance(encode_middle_mind(data[num]), data[num][15]) # (7) distance from left foot to lower torso\n            re_data[num][7] = encode_distance(encode_middle_mind(data[num]), data[num][16]) # (8) distance from right foot to lower torso\n        if data_normalize:\n            sc = preprocessing.StandardScaler()\n            re_data = sc.fit_transform(re_data) # normalize the training data\n\n        feature_names = ['左手と肩までの距離', '右手と肩までの距離', '左手と上半身下部までの距離', '右手と上半身下部までの距離', '左足と肩までの距離', '右足と肩までの距離', '左足と上半身下部までの距離', '右足と上半身下部までの距離']\n\n        self.position_data = data\n        self.data = re_data\n        self.feature_names = feature_names\n        self.label = label\n        self.label_name_category = ['突き', '回し蹴り', '裏回し蹴り', '正蹴り', 'なし']\n        self.label_name_position = ['上段', '中段']\n        self.label_name_arrow = ['左', '右']\n        self.label_name_status = ['有効', '無効']\n        self.meta = meta\n\n        print(\"学習データの読み込みに成功しました。\")\n        print(\"[読み込みデータ数:\" + str(self.data.shape[0]) + \"]\")\n        print(\"[特徴量数:\" + str(self.data.shape[1]) + \"]\")\n\n\n# Midpoint of the shoulders\ndef encode_middle_shoulder(part):\n    x_pos = (part[22][0] + part[23][0]) / 2\n    y_pos = (part[22][1] + part[23][1]) / 2\n    return [x_pos, y_pos]\n\n\n# Abdomen (lower torso) point\ndef encode_middle_mind(part):\n    x_pos = (part[22][0] + part[23][0] + (part[28][0] + part[29][0]) * 2) / 6\n    y_pos = (part[22][1] + part[23][1] + (part[28][1] + part[29][1]) * 2) / 6\n    return [x_pos, y_pos]\n\n\n# Distance between two points (requires NumPy)\ndef encode_distance(a, b):\n    dist_x = a[0] - b[0]\n    dist_y = a[1] - b[1]\n    distance = np.linalg.norm([dist_x, dist_y])\n    return distance\n\n\nattacker = Attacker('PASTE CSV FILE HERE')\n\n# One-hot encode the string labels to match the softmax output and the\n# categorical_crossentropy loss below (assumes more than two classes)\nlabel_binarizer = preprocessing.LabelBinarizer()\nonehot_labels = label_binarizer.fit_transform(attacker.label)\n\n# train_test_split returns the splits in the order (X_train, X_test, y_train, y_test)\nx_train, x_test, y_train, y_test = model_selection.train_test_split(attacker.data, onehot_labels, test_size=0.2)\n\nmodel = tf.keras.models.Sequential()\nmodel.add(tf.keras.layers.Input(8))\nmodel.add(tf.keras.layers.Dense(2048, activation='relu'))\nmodel.add(tf.keras.layers.Dense(2048, activation='relu'))\nmodel.add(tf.keras.layers.Dense(2048, activation='relu'))\nmodel.add(tf.keras.layers.Dense(1024, activation='relu'))\nmodel.add(tf.keras.layers.Dense(onehot_labels.shape[1], activation='softmax')) # one output unit per class\nmodel.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False), loss='categorical_crossentropy', metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=150, batch_size=32, verbose=1)\n\n# Save the model in TensorFlow.js format\ntfjs.converters.save_keras_model(model, \"./tfjs_model\")\n","repo_name":"imbellt/samurai","sub_path":"data/make_samurai_model.py","file_name":"make_samurai_model.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13276897336","text":"from aux import l,invL,shorter,impulso,vecreale\nfrom random import choice\n\n\nfrom lcapy import difference_equation,expr,nexpr\nfrom lcapy import latex as llatex\nfrom lcapy import simplify as lsimplify\nfrom lcapy.discretetime import 
z\n\nfrom sympy import simplify,Rational,eye, factor,Matrix,pprint\nfrom sympy import E\nfrom sympy.abc import s,k,t\n\niniz_string = \"\\nLa funzione è \\[ W(s) = \\\\frac{%s}{%s} \\]\\n\\n\"\n\ns2 = \"Per trovare $W(z)$ passo da $y_F(t)$:\\n\\[y_F(t) = \\mathcal{L}^{-1}\\left[ W(s)\\\\frac{1}{s}\\\\right]\"\ns3 = \"\\n = \\mathcal{L}^{-1}\\left[ \\\\frac{%s}{%s}\\\\frac{1}{s}\\\\right]\\n = %s \\]\"\n\ns4 = \"Ora calcolo, $y_{dF}(k)$, ovvero $y_{dF}(kT_c)$ (Prendendo $T_c =1$):\\n\"\ns5 = \"\\[ y_{dF}(k) =\\left.\\left[ %s \\\\right]\\\\right|_{t=kT_c}= %s \\]\\n\"\ns6 = \"Trasformo in z l'uscita ottenuta: \\[[ \\\\mathcal{Z}\\left[y_F(kT_c)\\\\right] = \\\\mathcal{Z}\\left[ %s \\\\right] = \\]\\n\\[ = %s \\]\\n\"\n#s6 = \"Trasformo in z l'uscita ottenuta: \\[[ \\\\mathcal{Z}\\left[y_F(kT_c)\\\\right] = \\\\mathcal{Z}\\left[ %s \\\\right] = \\]\\n\"\ns7 = \"Infine moltiplico per $\\\\frac{z-1}{z}$ per ottenere $W(z)$: \\n\\[ W(z) = %s \\\\frac{z-1}{z} =\\]\\n\\[= %s \\]\\n\"\n\ndef generaU(A,dim,vecs_r,vecs_i):\n\t#Ritorna la matrice degli autovettori sinistri e alpha, omega se ci sono\n\tif not len(vecs_i) or A.cols >= 4:\n\t\t#Caso in cui non ci sono vettori complessi, sympy mi trova direttamente la matrice di\n\t\t#trasformazione\n\t\treturn A.jordan_form()[0],[]\n\t#pprint(vecs_i)\n\tU = Matrix()\n\tfor v in vecs_r:\n\t\tU = add_columns(U,vecs_r[v][1])\n\tn_vecs_r = U.cols\n\ti = n_vecs_r\n\tuab = []\n\tif i != dim:\n\t\tuab = vecs_ab(vecs_i)\n\t\tif not len(uab):\n\t\t\treturn \"ERRORE NON GESTITO\",[]\n\t\tfor v in uab:\n\t\t\tU = U.col_insert(i,v)\n\t\t\ti+=1\n\treturn U,uab\n\ndef autocose(A : Matrix):\n\t#ritorna le autovalorivettori\n\tvecst = A.eigenvects()\n\t#pprint(vecst)\n\t#pprint(vecst)\n\tvals_r = []\n\tvals_i = []\n\tvecs_r = {}\n\tvecs_i = {}\n\tfor v in vecst:\n\t\tvec = v[2][0]\n\t\tif vecreale(vec):\n\t\t\tvals_r.append(v[0])\n\t\t\tvecs_r[v[0]] = [v[1],v[2]]\n\t\telse:\n\t\t\tvals_i.append(v[0])\n\t\t\tvecs_i[v[0]] = [v[1],v[2]]\n\n\tvals_i.reverse()#Per mettere prima l'autovalore con im(lambda)>0\n\tvecs_t = {v: [vecs_i[v][0],vecs_i[v][1]] for v in vals_i}\n\tvecs_i = vecs_t\n\treturn list(vecs_r.keys()),vals_i,vecs_r,vecs_i\n\n\ndef discretizza(ydFn_str):\n\te = expr(ydFn_str)\n\treturn 0 if e==0 else e.discrete_time()\n\ndef trasforma_in_Zeta(yFn):\n\tprint(yFn)\n\tprint(type(yFn))\n\tprint(type(yFn) is int)\n\treturn 0 if type(yFn) is int and yFn == 0 else yFn.ZT()\n\n\ndef discretizzazioneDaW(W):\n\t#Nw = W[0]\n\tperiodi = [1]#,Rational(1,10),Rational(1,2)]\n\tT = 1#choice(periodi)#Rational(1,10)\n\tDw = W[1]\n\tfor Nw in W[0]:\n\t\t\n\t\tout = iniz_string%(l(Nw),l(Dw))\n\t\tout+=s2\n\t\tyFt = shorter(invL(Nw/(Dw*s)))\n\t\t\n\t\t#yFt = t*E**(-t)+impulso(t)\n\t\tout+= s3%(l(Nw),l(Dw),l(yFt))\n\t\tout += s4\n\t\tydFk = yFt.replace(t,T*k)\n\t\tout += s5%(l(yFt),l(ydFk))\n\t\t#Trick per avere la classe espressione in tempo discreto\n\t\tydFn_str = str(ydFk).replace('k','n')\n\n\t\tyFn = discretizza(ydFn_str)\n\t\t\n\t\tZY = trasforma_in_Zeta(yFn)\n\t\t#print(ZY.expand().args)\n\t\tout += s6%(l(ydFk),l(shorter(ZY)))\n\t\t\n\t\t#stringa = \"=\"\n\t\t#for i in ZY.expand().args:\n\t\t#\tstringa+=\"\\[ %s \\]\"%l(i)\n\t\t#out +=stringa\n\t\t#llatex(ZY.expand()))\n\n\t\tWz = shorter(ZY*((z-1)/z))\n\t\tout += s7%(llatex(ZY),llatex(Wz))\n\n\treturn out\n\n\ndef discretizzazioneCompleta(sistema):\n\tA,B,C,D,u_t,x_0 = sistema.get_dati()\n\n\tvals_r,vals_i,vecs_r,vecs_i = autocose(A)\n\tU,uab = generaU(A,A.rows,vecs_r,vecs_i)\t#T-1\n\tV = U**-1\n\tT,T_1 = V,U\n\tD = 
T*A*T_1\n\n\tout = f\"\\[ {l(U*D.exp()*V)}\\]\"\n\n\n\t'''\n\tPhi_inv = (s*eye(A.cols)-A)\n\tP_d = factor(Phi_inv.det())\n\tPhi_num = simplify((Phi_inv**-1)*P_d)\n\tH_num = simplify(Phi_num*B)\n\tpprint(simplify(C*H_num))\n\tpprint(D*P_d)\n\tW_num = simplify(C*H_num)+(D*P_d)\n\tout += discretizzazioneDaW([W_num,P_d])\n\t'''\n\treturn out","repo_name":"michelelaig/LinearSystemAutomaticStudy","sub_path":"Discretizzazione.py","file_name":"Discretizzazione.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"162698233","text":"import random\nfrom game.word_list import random_words\nfrom game import constants\nfrom game import actor\nfrom game.actor import Actor\nfrom game.point import Point\n\nclass Words(Actor):\n \"\"\"A nutritious substance that snake's like. The responsibility of Food is to keep track of its appearance and position. A Food can move around randomly if asked to do so. \n \n Stereotype:\n Information Holder\n\n Attributes: \n _points (integer): The number of points the food is worth.\n \"\"\"\n def __init__(self):\n \"\"\"The class constructor. Invokes the superclass constructor, set's the \n text and moves the food to a random position within the boundary of the \n screen.\n \n Args:\n self (Actor): an instance of Actor.\n \"\"\"\n super().__init__()\n self._points = 0\n self._words = []\n self.words = random_words\n self.prepare()\n self.reset()\n\n def prepare(self):\n for _ in range(5):\n self.add_word()\n\n def get_word(self):\n \"\"\"Gets a list of words and returns one\n \n Args:\n self (Words): An instance of Words\"\"\"\n get = random.randint(0,len(self.words)-1)\n word = self.words[get]\n self.words.pop(get)\n return word\n\n def get_points(self):\n \"\"\"Gets the points this food is worth.\n \n Args:\n self (Food): an instance of Food.\n\n Returns:\n integer: The points this food is worth.\n \"\"\"\n return self._points\n\n def reset(self):\n \"\"\"Resets the food by moving it to a random position within the boundaries of the screen and reassigning the points to a random number.\n \n Args:\n self (Words): an instance of Words.\n \"\"\"\n self._points = random.randint(1, 5)\n x = random.randint(1, constants.MAX_X - 2)\n y = random.randint(1, constants.MAX_Y - 2)\n position = Point(x, y)\n self.set_position(position)\n\n def add_word(self):\n \"\"\"Adds a word to the screen\n \n Args:\n self (Words): an instance of Words.\n \"\"\"\n word = Actor()\n word.set_text(self.get_word())\n word._points = random.randint(1, 5)\n x = random.randint(1, constants.MAX_X - 2)\n y = random.randint(1, constants.MAX_Y - 2)\n position = Point(x, y)\n word.set_position(position)\n velocity = Point(2, 0)\n word.set_velocity(velocity)\n self._words.append(word)\n\n def move_word(self):\n \"\"\"Moves the words on the screen and adds another word when\n word at the beggining of the list is at y 2\n \n Args:\n self (Words): an instance of Words.\"\"\"\n for n in self._words:\n spot = n.get_position()\n spoty = spot.get_y()\n if spoty == 19:\n spoty = 0\n position = Point(spot.get_x(),spoty+1)\n n.set_position(position)\n first_word = self._words[0]\n first_word = first_word.get_position()\n if len(self.words) >= 1 and first_word.get_y() == 2:\n 
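# Spawn a replacement as soon as the oldest word scrolls down to y == 2,\n            # keeping a steady stream on screen while the word list still has\n            # entries to draw from (get_word() pops words from self.words).\n            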
self.add_word()\n\n","repo_name":"AlanWangsgard/cse210-tc07","sub_path":"speed/game/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39927378382","text":"import argparse\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\nfrom tensorflow.python.platform import tf_logging as logging\n\nfrom psenet import config\nfrom psenet.data import DATASETS, build_input_fn\nfrom psenet.losses import psenet_loss\nfrom psenet.metrics import keras_psenet_metrics\nfrom psenet.optimizers import build_optimizer\n\n\ndef build_eval_exporter():\n def serving_input_fn():\n features = {\n config.IMAGE: tf.compat.v1.placeholder(\n dtype=tf.float32, shape=[None, None, None, 3]\n )\n }\n receiver_tensors = {\n config.IMAGE: tf.compat.v1.placeholder(\n dtype=tf.float32, shape=[None, None, None, 3]\n )\n }\n return tf.estimator.export.ServingInputReceiver(\n features, receiver_tensors\n )\n\n return tf.estimator.LatestExporter(\n name=\"exporter\", serving_input_receiver_fn=serving_input_fn\n )\n\n\ndef build_callbacks(FLAGS):\n log_dir = os.path.join(FLAGS.job_dir, \"logs\")\n callbacks = [\n tf.keras.callbacks.TensorBoard(log_dir=log_dir, write_graph=False)\n ]\n\n return callbacks\n\n\ndef evaluate(FLAGS):\n FLAGS.mode = tf.estimator.ModeKeys.EVAL\n FLAGS.encoder_weights = \"imagenet\"\n\n data = build_input_fn(FLAGS)()\n model = tf.keras.experimental.load_from_saved_model(FLAGS.saved_model)\n model.compile(\n loss=psenet_loss,\n optimizer=build_optimizer(FLAGS),\n metrics=keras_psenet_metrics(),\n )\n model.evaluate(\n data, steps=FLAGS.num_steps, callbacks=build_callbacks(FLAGS)\n )\n\n\nif __name__ == \"__main__\":\n PARSER = argparse.ArgumentParser()\n PARSER.add_argument(\n \"--batch-size\",\n help=\"The batch size for training and evaluation\",\n default=config.BATCH_SIZE,\n type=int,\n )\n PARSER.add_argument(\n \"--backbone-name\",\n help=\"\"\"The name of the FPN backbone. Must be one of the following:\n - 'inceptionresnetv2',\n - 'inceptionv3',\n - 'resnext50',\n - 'resnext101',\n - 'mobilenet',\n - 'mobilenetv2',\n - 'efficientnetb0',\n - 'efficientnetb1',\n - 'efficientnetb2',\n - 'efficientnetb3',\n - 'efficientnetb4',\n - 'efficientnetb5'\n \"\"\",\n default=config.BACKBONE_NAME,\n type=str,\n )\n PARSER.add_argument(\n \"--dataset\",\n help=\"The type of dataset to load. 
Must be one of {}.\".format(\n list(DATASETS.keys())\n ),\n default=config.PROCESSED_DATA_LABEL,\n type=str,\n )\n PARSER.add_argument(\n \"--eval-data-dir\",\n help=\"The directory with `images/` and `labels/` for evaluation\",\n default=config.EVAL_DATA_DIR,\n type=str,\n )\n PARSER.add_argument(\n \"--job-dir\",\n help=\"The model directory\",\n default=config.MODEL_DIR,\n type=str,\n )\n PARSER.add_argument(\n \"--kernel-num\",\n help=\"The number of output kernels from FPN\",\n default=config.KERNEL_NUM,\n type=int,\n )\n PARSER.add_argument(\n \"--learning-rate\",\n help=\"The initial learning rate\",\n default=config.LEARNING_RATE,\n type=float,\n )\n PARSER.add_argument(\n \"--decay-rate\",\n help=\"The learning rate decay factor\",\n default=config.LEARNING_RATE_DECAY_FACTOR,\n type=float,\n )\n PARSER.add_argument(\n \"--decay-steps\",\n help=\"The number of steps before the learning rate decays\",\n default=config.LEARNING_RATE_DECAY_STEPS,\n type=int,\n )\n PARSER.add_argument(\n \"--resize-length\",\n help=\"The maximum side length of the resized input images\",\n default=config.RESIZE_LENGTH,\n type=int,\n )\n PARSER.add_argument(\n \"--num-readers\",\n help=\"The number of parallel readers\",\n default=config.NUM_READERS,\n type=int,\n )\n PARSER.add_argument(\n \"--num-gpus\",\n help=\"The number of GPUs to use\",\n default=config.GPU_PER_WORKER,\n type=int,\n )\n PARSER.add_argument(\n \"--prefetch\",\n help=\"The number of batches to prefetch\",\n default=config.PREFETCH,\n type=int,\n )\n PARSER.add_argument(\n \"--min-scale\",\n help=\"The minimum kernel scale for pre-processing\",\n default=config.LEARNING_RATE_DECAY_FACTOR,\n type=float,\n )\n PARSER.add_argument(\n \"--regularization-weight-decay\",\n help=\"The L2 regularization loss for Conv layers\",\n default=config.REGULARIZATION_WEIGHT_DECAY,\n type=float,\n )\n PARSER.add_argument(\n \"--num-steps\",\n help=\"The number of evaluation steps\",\n default=config.N_EVAL_STEPS,\n type=int,\n )\n PARSER.add_argument(\n \"--saved_model\",\n help=\"The saved model to load.\",\n default=config.SAVED_MODEL_DIR,\n type=str,\n )\n PARSER.add_argument(\n \"--augment-training-data\",\n help=\"Whether to augment training data.\",\n type=config.str2bool,\n nargs=\"?\",\n const=True,\n default=True,\n )\n\n FLAGS, _ = PARSER.parse_known_args()\n tf.compat.v1.logging.set_verbosity(\"DEBUG\")\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(\n [str(i) for i in range(FLAGS.num_gpus)]\n )\n os.environ[\"TF_FORCE_GPU_ALLOW_GROWTH\"] = \"true\"\n\n if FLAGS.num_gpus > 0:\n if tf.test.gpu_device_name():\n logging.info(\"Default GPU: {}\".format(tf.test.gpu_device_name()))\n logging.info(\n \"All Devices: {}\".format(device_lib.list_local_devices())\n )\n else:\n raise RuntimeError(\"Failed to find the default GPU.\")\n\n evaluate(FLAGS)\n","repo_name":"aptlin/psenet","sub_path":"psenet/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29493411693","text":"import os\nfrom cogtemplate.data.readers.base_reader import BaseReader\nfrom cogtemplate.data.datable import DataTable\nfrom cogtemplate.utils.vocab_utils import Vocabulary\nimport json\nfrom random import shuffle\n\n\n# from cogtemplate.utils.download_utils import Downloader\n\n\nclass PatentReader(BaseReader):\n def __init__(self, raw_data_path):\n super().__init__()\n self.raw_data_path = 
raw_data_path\n self.train_file = 'train.json'\n # self.train_file = 'augmented_train.json'\n # self.dev_file = 'dev.tsv'\n self.test_file = 'testA.json'\n self.train_path = os.path.join(raw_data_path, self.train_file)\n # self.dev_path = os.path.join(raw_data_path, self.dev_file)\n self.test_path = os.path.join(raw_data_path, self.test_file)\n self.label_vocab = Vocabulary()\n\n def _read_train_and_dev(self,path=None,split=0.9):\n print(\"Reading data...\")\n datable = DataTable()\n with open(path,'r',encoding='utf8') as file:\n lines = file.readlines()\n shuffle(lines)\n my_split = 0.9 if split is None else split\n divide = int(my_split * len(lines))\n train_lines = lines[:divide]\n dev_lines = lines[divide:]\n train_datable = DataTable()\n dev_datable = DataTable()\n\n if split is None:\n train_lines = lines\n for line in train_lines:\n dict_data = json.loads(line)\n for key in [\"id\",\"title\",\"assignee\",\"abstract\"]:\n train_datable(key,dict_data[key])\n train_datable(\"label\",dict_data[\"label_id\"])\n self.label_vocab.add(dict_data[\"label_id\"])\n\n for line in dev_lines:\n dict_data = json.loads(line)\n for key in [\"id\",\"title\",\"assignee\",\"abstract\"]:\n dev_datable(key,dict_data[key])\n dev_datable(\"label\",dict_data[\"label_id\"])\n self.label_vocab.add(dict_data[\"label_id\"])\n\n return train_datable,dev_datable\n\n # def _read_train(self, path=None):\n # return self._read(path)\n #\n # def _read_dev(self, path=None):\n # return self._read(path)\n #\n def _read_test(self, path=None):\n print(\"Reading data...\")\n datable = DataTable()\n with open(path,'r',encoding='utf8') as file:\n lines = file.readlines()\n\n for line in lines:\n dict_data = json.loads(line)\n for key in [\"id\",\"title\",\"assignee\",\"abstract\"]:\n datable(key,dict_data[key])\n # datable(\"label\",dict_data[\"label_id\"])\n # self.label_vocab.add(dict_data[\"label_id\"])\n return datable\n\n def read_all(self,split=0.9):\n train_data,dev_data = self._read_train_and_dev(self.train_path,split=split)\n test_data = self._read_test(self.test_path)\n return train_data,dev_data,test_data\n # return self._read_train(self.train_path), self._read_dev(self.dev_path), self._read_test(self.test_path)\n\n def read_vocab(self):\n self.label_vocab.create()\n return {\"label_vocab\": self.label_vocab}\n\n\nif __name__ == \"__main__\":\n reader = PatentReader(raw_data_path=\"/data/hongbang/CogAGENT/datapath/text_classification/patent/raw_data\")\n # train_data = reader._read_train()\n train_data, dev_data,test_data = reader.read_all()\n vocab = reader.read_vocab()\n print(\"end\")\n","repo_name":"HongbangYuan/PatentClassification","sub_path":"cogtemplate/data/readers/patent_reader.py","file_name":"patent_reader.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26934495465","text":"# assume a 1000 x 1000 board since the size is not given\nSIZE = 1000\nSTART = 500\n\n\ndef parse_input():\n data = []\n file = open(\"inputs/day9.txt\", \"r\")\n line = file.readline().strip()\n while line != '':\n splitted = line.split()\n data.append((splitted[0], int(splitted[1]))) # direction + steps\n line = file.readline().strip()\n return data\n\n\ndef should_move_tail(r1, c1, r2, c2):\n return abs(r1 - r2) > 1 or abs(c1 - c2) > 1\n\n\ndef part_one(data):\n # find number of unique tail positions\n visited = [[False for i in range(SIZE)] for j in range(SIZE)]\n # start from the centre\n dirs = {\"R\": (0, 1), \"D\": (1, 
0), \"L\": (0, -1), \"U\": (-1, 0)}\n head_row = head_col = START\n tail_row, tail_col = head_row, head_col\n visited[tail_row][tail_col] = True\n for line in data:\n direction, steps = line[0], line[1]\n movement = dirs[direction]\n while steps > 0:\n new_row, new_col = head_row + movement[0], head_col + movement[1]\n if should_move_tail(tail_row, tail_col, new_row, new_col):\n visited[head_row][head_col] = True # tag tail in old position\n tail_row, tail_col = head_row, head_col # move tail to prev head position\n head_row, head_col = new_row, new_col # move head\n steps -= 1\n return sum([sum(row) for row in visited])\n\n\n# THIS IS FOR PART 2\ndef move(r1, c1, r2, c2):\n # move straight if head and tail are on the same row or col\n if abs(r1 - r2) == 0 or abs(c1 - c2) == 0:\n return move_straight(r1, c1, r2, c2)\n return move_diagonally(r1, c1, r2, c2)\n\n\ndef move_straight(r1, c1, r2, c2):\n # r1, c1 --> from, r2, c2 --> to\n # try all four directions\n dirs = ((0, 1), (1, 0), (0, -1), (-1, 0))\n for direction in dirs:\n new_r, new_c = r1 + direction[0], c1 + direction[1]\n # check if it is in range\n if new_r < 0 or new_r >= SIZE or new_c < 0 or new_c >= SIZE:\n continue\n if not should_move_tail(new_r, new_c, r2, c2):\n return (new_r, new_c) # only one possible direction\n return None # not possible\n\n\ndef move_diagonally(r1, c1, r2, c2):\n # r1, c1 --> from, r2, c2 --> to\n # try all four directions\n dirs = ((1, 1), (1, -1), (-1, 1), (-1, -1))\n for direction in dirs:\n new_r, new_c = r1 + direction[0], c1 + direction[1]\n # check if it is in range\n if new_r < 0 or new_r >= SIZE or new_c < 0 or new_c >= SIZE:\n continue\n if not should_move_tail(new_r, new_c, r2, c2):\n return (new_r, new_c) # only one possible direction\n return None # not possible\n\n\ndef part_two(data):\n # extend tail to 9 units long\n # assume a 1000 x 1000 board since the size is not given\n visited = [[False for i in range(SIZE)] for j in range(SIZE)]\n # start from the centre\n dirs = {\"R\": (0, 1), \"D\": (1, 0), \"L\": (0, -1), \"U\": (-1, 0)}\n # create an array to store its own head and tail row/col\n # [row, col] for size 9, first index is no 1, last index is the tail (no 9)\n # head_row = head_col = 50\n head_row = head_col = START\n positions = [[head_row, head_col] for i in range(9)]\n visited[head_row][head_col] = True\n for line in data:\n direction, steps = line[0], line[1]\n movement = dirs[direction]\n while steps > 0:\n # check and update the first index of positions\n # move head row and head col\n new_row, new_col = head_row + movement[0], head_col + movement[1]\n index = 0\n if should_move_tail(positions[index][0], positions[index][1], new_row, new_col):\n # update the first index position\n positions[index][0], positions[index][1] = move(\n positions[index][0], positions[index][1], new_row, new_col)\n\n # do chain reaction down\n index += 1\n while index < 9 and should_move_tail(positions[index][0], positions[index][1], positions[index - 1][0], positions[index - 1][1]):\n # update new position\n positions[index][0], positions[index][1] = move(\n positions[index][0], positions[index][1], positions[index - 1][0], positions[index - 1][1])\n index += 1\n\n if index == 9:\n # the tail moved, so tag the tail's new position it moved to\n visited[positions[index - 1][0]\n ][positions[index - 1][1]] = True\n head_row, head_col = new_row, new_col # move head\n steps -= 1\n return sum([sum(row) for row in visited])\n\n\nif __name__ == \"__main__\":\n data = parse_input()\n 
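# Quick sanity check of the adjacency rule used throughout: touching\n    # (even diagonally) does not trigger a tail move, a gap of two does.\n    assert not should_move_tail(0, 0, 1, 1)\n    assert should_move_tail(0, 0, 0, 2)\n    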
print(part_one(data))\n    print(part_two(data))\n","repo_name":"m0nggh/AOC2022","sub_path":"solutions/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7774408316","text":"import pandas\nimport numpy\nimport sys\nimport re\n\nfilename = \"\"\nif len(sys.argv) == 2:\n    filename = sys.argv[1]\nelse:\n    print(\"usage: \", sys.argv[0], \" filename.xlsx\")\n    exit(1)\n\ndf = pandas.read_excel(filename)\ncn = df.columns\n\nprint(\"CN: \", cn)\n\nvalues = numpy.asarray(df[cn[0]].values)\nprint(numpy.mean(values), \" \", numpy.std(values)) \n","repo_name":"lstorchi/teaching","sub_path":"basictests/usingpandas.py","file_name":"usingpandas.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15719118873","text":"from odoo import fields, models\n\nclass ProductTemplate(models.Model):\n    _inherit = 'product.template'\n\n    is_part = fields.Boolean(\n        string='Is Part',\n        required=False)\n    is_installable = fields.Boolean(\n        string='Is Installable',\n        required=False)\n    component_ids = fields.One2many(\n        comodel_name='esd.product.component',\n        inverse_name='product_id',\n        string='Component_ids',\n        required=False)\n\n\nclass ESDProductComponent(models.Model):\n    _name = 'esd.product.component'\n    _description = 'Product Component'\n\n    product_id = fields.Many2one(\n        comodel_name='product.template',\n        string='Product',\n        required=False)\n    part_id = fields.Many2one(\n        comodel_name='product.template',\n        string='Part',\n        required=False, domain=\"[('is_part', '=', True)]\")\n\n","repo_name":"WildyEstephan/Services","sub_path":"maintenance_management_coway/models/product_template.py","file_name":"product_template.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42582462246","text":"# <----------NOW DB.py NOT USED!!!!------------>\n\nimport sqlite3\n# Create a connection and cursor for working with the database\n#conn = sqlite3.connect('database.db')\n#cur = conn.cursor()\n\n# Select the names of people with a given birthday\n\ndef name(dt):\n    '''Return the names of the people whose birthday matches the given date string.'''\n    conn = sqlite3.connect('database.db')\n    cur = conn.cursor()\n    end = []\n    # Use a parameterized query instead of string formatting to avoid SQL injection\n    result = [x for x in cur.execute('SELECT Name FROM Human WHERE date = ?', (dt,))]\n    for x in result:\n        end.append(x[0])\n\n    return ', '.join(end)\n\n\n\n\n\n# create table\n# cur.execute(\"\"\"CREATE TABLE Human (Name text, date text)\"\"\")\n\n# Write information of humans\n# Human = [('Первый', '2, 13'), ('Второй', '2, 13'), ('Третий', '2, 13')]\n\n# delete information\n# cur.execute(\"DELETE FROM Human WHERE Date = '2 13'\" )\n# con.commit()\n\n# Update data from database\n#cur.execute('UPDATE Human SET date = \"2, 15\" WHERE date = \"2, 13\" ')\n#conn.commit()","repo_name":"DemonMaike/whatsappbot","sub_path":"DB.py","file_name":"DB.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11397698372","text":"import os\nimport pandas as pd\nimport glob\nimport csv\n\n\ndef round_down(num, divisor):\n    return num - (num%divisor)\n    \n# path_to_json = os.path.join(os.getcwd(), 'logs', '')\npath_to_json = glob.glob(os.path.join(os.getcwd(),'*/logs/*'))\njson_files = [pos_json for pos_json in path_to_json if pos_json.endswith('.json')]\n\n\ndfs= 
[]\nfor j in json_files:\n df = pd.read_json(j)\n df['filename'] = j\n dfs.append(df)\n\nreport = pd.concat(dfs, axis=0, ignore_index=True)\n# format date correctly \nreport['ts'] =pd.to_datetime(report['ts'], format='%d-%m-%Y %H:%M:%S') \n\n\n# remove duplicates\n# report.drop_duplicates(inplace= True)\n# filter on the last trials for experiment run\nfiltered = report[report['ts']>'22-02-2021']\n\n# round revealed_pct \nfiltered['revealed_pct'] = round_down(filtered['revealed_pct'], 10)\n\n# get last combination of dataset with model\nfiltered = filtered.sort_values('ts').groupby(['model','dataset','revealed_pct']).tail(1)\n\nsuccessful_runs = filtered[filtered['success']==True] \nsuccessful_runs.to_json('./successful_runs.json')\n\nrerun = filtered[filtered['success']==False ] \nrerun.to_json('./rerun.json')\n\n# run checks\n# get all datasets with revealed_pct and runs output\ndatasets = pd.read_csv('Datasets_metadata.csv')\nrevealed_pct = [10,20,30,100] \nclassifiers = ['ST', 'CBoss', 'TSF', 'PForest', 'WEASEL', 'Dummy']\ndatasets_extended = pd.concat([datasets['dataset']] * len(revealed_pct) , keys = revealed_pct).reset_index(level = 1, drop = True).rename_axis('revealed_pct').reset_index()\ndatasets_extended = pd.concat([datasets_extended] * len(classifiers) , keys = classifiers).reset_index(level = 1, drop = True).rename_axis('model').reset_index()\n# output of all runs and null if it didnt run\nruns_output =datasets_extended.merge(filtered, on=['model','dataset','revealed_pct'], how='left')\ndidnt_run = runs_output[runs_output['score_function'].isnull()]\n\n\nresults = []\n# 1.check that each dataset has 4 runs against each model\nresults.append(successful_runs.groupby(['model','dataset']).size())\n# 2.check that the number of dataset that run for each classifier 5 clf (336), dummy (84), MSM& LS (4)\nresults.append(successful_runs.groupby('model').size())\n\n# 3.check totals 1680 - 5 classifiers 84 - Dummy 4 - MSM & LS\nresults.append(successful_runs.count())\n\nimport IPython\n# IPython.embed()\n# generate output list of valid files to be used for analysis\nheader = ['filename']\nfile_names = successful_runs['filename'].tolist()\nwith open('log_filenames.csv', 'w') as f:\n write = csv.writer(f)\n write.writerow(header)\n for item in file_names:\n write.writerow([item,])\n\n# print report result\nreport_result = pd.concat(results, axis=0, ignore_index=False)\nreport_result.to_json('./reportresult.json')\n\n# it should always be empty which means I didn't put failed dataset while it was successfully run after this failed run\nintersected_runs =successful_runs.merge(rerun, on=['model','dataset','revealed_pct'], how='inner') \nif not intersected_runs.empty:\n print(f\"check duplicated dataset runs{intersected_runs}\")\n\n","repo_name":"isma3ilsamir/Master-Thesis","sub_path":"prepare_report.py","file_name":"prepare_report.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16577546233","text":"from ui4.color import Color\n\n\nwhite = Color('white')\nblack = Color('black')\n\n# Curated colors from Tailwind\n\n# Blue Gray\nblue_gray_50 = Color(\"#F8FAFC\")\nblue_gray_100 = Color(\"#F1F5F9\")\nblue_gray_200 = Color(\"#E2E8F0\")\nblue_gray_300 = Color(\"#CBD5E1\")\nblue_gray_400 = Color(\"#94A3B8\")\nblue_gray_500 = Color(\"#64748B\")\nblue_gray_600 = Color(\"#475569\")\nblue_gray_700 = Color(\"#334155\")\nblue_gray_800 = Color(\"#1E293B\")\nblue_gray_900 = Color(\"#0F172A\")\n\n# Cool 
Gray\ncool_gray_50 = Color(\"#F9FAFB\")\ncool_gray_100 = Color(\"#F3F4F6\")\ncool_gray_200 = Color(\"#E5E7EB\")\ncool_gray_300 = Color(\"#D1D5DB\")\ncool_gray_400 = Color(\"#9CA3AF\")\ncool_gray_500 = Color(\"#6B7280\")\ncool_gray_600 = Color(\"#4B5563\")\ncool_gray_700 = Color(\"#374151\")\ncool_gray_800 = Color(\"#1F2937\")\ncool_gray_900 = Color(\"#111827\")\n\n# Gray\ngray_50 = Color(\"#FAFAFA\")\ngray_100 = Color(\"#F4F4F5\")\ngray_200 = Color(\"#E4E4E7\")\ngray_300 = Color(\"#D4D4D8\")\ngray_400 = Color(\"#A1A1AA\")\ngray_500 = Color(\"#71717A\")\ngray_600 = Color(\"#52525B\")\ngray_700 = Color(\"#3F3F46\")\ngray_800 = Color(\"#27272A\")\ngray_900 = Color(\"#18181B\")\n\n# True Gray\ntrue_gray_50 = Color(\"#FAFAFA\")\ntrue_gray_100 = Color(\"#F5F5F5\")\ntrue_gray_200 = Color(\"#E5E5E5\")\ntrue_gray_300 = Color(\"#D4D4D4\")\ntrue_gray_400 = Color(\"#A3A3A3\")\ntrue_gray_500 = Color(\"#737373\")\ntrue_gray_600 = Color(\"#525252\")\ntrue_gray_700 = Color(\"#404040\")\ntrue_gray_800 = Color(\"#262626\")\ntrue_gray_900 = Color(\"#171717\")\n\n# Warm Gray\nwarm_gray_50 = Color(\"#FAFAF9\")\nwarm_gray_100 = Color(\"#F5F5F4\")\nwarm_gray_200 = Color(\"#E7E5E4\")\nwarm_gray_300 = Color(\"#D6D3D1\")\nwarm_gray_400 = Color(\"#A8A29E\")\nwarm_gray_500 = Color(\"#78716C\")\nwarm_gray_600 = Color(\"#57534E\")\nwarm_gray_700 = Color(\"#44403C\")\nwarm_gray_800 = Color(\"#292524\")\nwarm_gray_900 = Color(\"#1C1917\")\n\n# Red\nred_50 = Color(\"#FEF2F2\")\nred_100 = Color(\"#FEE2E2\")\nred_200 = Color(\"#FECACA\")\nred_300 = Color(\"#FCA5A5\")\nred_400 = Color(\"#F87171\")\nred_500 = Color(\"#EF4444\")\nred_600 = Color(\"#DC2626\")\nred_700 = Color(\"#B91C1C\")\nred_800 = Color(\"#991B1B\")\nred_900 = Color(\"#7F1D1D\")\n\n# Orange\norange_50 = Color(\"#FFF7ED\")\norange_100 = Color(\"#FFEDD5\")\norange_200 = Color(\"#FED7AA\")\norange_300 = Color(\"#FDBA74\")\norange_400 = Color(\"#FB923C\")\norange_500 = Color(\"#F97316\")\norange_600 = Color(\"#EA580C\")\norange_700 = Color(\"#C2410C\")\norange_800 = Color(\"#9A3412\")\norange_900 = Color(\"#7C2D12\")\n\n# Amber\namber_50 = Color(\"#FFFBEB\")\namber_100 = Color(\"#FEF3C7\")\namber_200 = Color(\"#FDE68A\")\namber_300 = Color(\"#FCD34D\")\namber_400 = Color(\"#FBBF24\")\namber_500 = Color(\"#F59E0B\")\namber_600 = Color(\"#D97706\")\namber_700 = Color(\"#B45309\")\namber_800 = Color(\"#92400E\")\namber_900 = Color(\"#78350F\")\n\n# Yellow\nyellow_50 = Color(\"#FEFCE8\")\nyellow_100 = Color(\"#FEF9C3\")\nyellow_200 = Color(\"#FEF08A\")\nyellow_300 = Color(\"#FDE047\")\nyellow_400 = Color(\"#FACC15\")\nyellow_500 = Color(\"#EAB308\")\nyellow_600 = Color(\"#CA8A04\")\nyellow_700 = Color(\"#A16207\")\nyellow_800 = Color(\"#854D0E\")\nyellow_900 = Color(\"#713F12\")\n\n# Lime\nlime_50 = Color(\"#F7FEE7\")\nlime_100 = Color(\"#ECFCCB\")\nlime_200 = Color(\"#D9F99D\")\nlime_300 = Color(\"#BEF264\")\nlime_400 = Color(\"#A3E635\")\nlime_500 = Color(\"#84CC16\")\nlime_600 = Color(\"#65A30D\")\nlime_700 = Color(\"#4D7C0F\")\nlime_800 = Color(\"#3F6212\")\nlime_900 = Color(\"#365314\")\n\n# Green\ngreen_50 = Color(\"#F0FDF4\")\ngreen_100 = Color(\"#DCFCE7\")\ngreen_200 = Color(\"#BBF7D0\")\ngreen_300 = Color(\"#86EFAC\")\ngreen_400 = Color(\"#4ADE80\")\ngreen_500 = Color(\"#22C55E\")\ngreen_600 = Color(\"#16A34A\")\ngreen_700 = Color(\"#15803D\")\ngreen_800 = Color(\"#166534\")\ngreen_900 = Color(\"#14532D\")\n\n# Emerald\nemerald_50 = Color(\"#ECFDF5\")\nemerald_100 = Color(\"#D1FAE5\")\nemerald_200 = Color(\"#A7F3D0\")\nemerald_300 = 
Color(\"#6EE7B7\")\nemerald_400 = Color(\"#34D399\")\nemerald_500 = Color(\"#10B981\")\nemerald_600 = Color(\"#059669\")\nemerald_700 = Color(\"#047857\")\nemerald_800 = Color(\"#065F46\")\nemerald_900 = Color(\"#064E3B\")\n\n# Teal\nteal_50 = Color(\"#F0FDFA\")\nteal_100 = Color(\"#CCFBF1\")\nteal_200 = Color(\"#99F6E4\")\nteal_300 = Color(\"#5EEAD4\")\nteal_400 = Color(\"#2DD4BF\")\nteal_500 = Color(\"#14B8A6\")\nteal_600 = Color(\"#0D9488\")\nteal_700 = Color(\"#0F766E\")\nteal_800 = Color(\"#115E59\")\nteal_900 = Color(\"#134E4A\")\n\n# Cyan\ncyan_50 = Color(\"#ECFEFF\")\ncyan_100 = Color(\"#CFFAFE\")\ncyan_200 = Color(\"#A5F3FC\")\ncyan_300 = Color(\"#67E8F9\")\ncyan_400 = Color(\"#22D3EE\")\ncyan_500 = Color(\"#06B6D4\")\ncyan_600 = Color(\"#0891B2\")\ncyan_700 = Color(\"#0E7490\")\ncyan_800 = Color(\"#155E75\")\ncyan_900 = Color(\"#164E63\")\n\n# Light Blue\nlight_blue_50 = Color(\"#F0F9FF\")\nlight_blue_100 = Color(\"#E0F2FE\")\nlight_blue_200 = Color(\"#BAE6FD\")\nlight_blue_300 = Color(\"#7DD3FC\")\nlight_blue_400 = Color(\"#38BDF8\")\nlight_blue_500 = Color(\"#0EA5E9\")\nlight_blue_600 = Color(\"#0284C7\")\nlight_blue_700 = Color(\"#0369A1\")\nlight_blue_800 = Color(\"#075985\")\nlight_blue_900 = Color(\"#0C4A6E\")\n\n# Blue\nblue_50 = Color(\"#EFF6FF\")\nblue_100 = Color(\"#DBEAFE\")\nblue_200 = Color(\"#BFDBFE\")\nblue_300 = Color(\"#93C5FD\")\nblue_400 = Color(\"#60A5FA\")\nblue_500 = Color(\"#3B82F6\")\nblue_600 = Color(\"#2563EB\")\nblue_700 = Color(\"#1D4ED8\")\nblue_800 = Color(\"#1E40AF\")\nblue_900 = Color(\"#1E3A8A\")\n\n# Indigo\nindigo_50 = Color(\"#EEF2FF\")\nindigo_100 = Color(\"#E0E7FF\")\nindigo_200 = Color(\"#C7D2FE\")\nindigo_300 = Color(\"#A5B4FC\")\nindigo_400 = Color(\"#818CF8\")\nindigo_500 = Color(\"#6366F1\")\nindigo_600 = Color(\"#4F46E5\")\nindigo_700 = Color(\"#4338CA\")\nindigo_800 = Color(\"#3730A3\")\nindigo_900 = Color(\"#312E81\")\n\n# Violet\nviolet_50 = Color(\"#F5F3FF\")\nviolet_100 = Color(\"#EDE9FE\")\nviolet_200 = Color(\"#DDD6FE\")\nviolet_300 = Color(\"#C4B5FD\")\nviolet_400 = Color(\"#A78BFA\")\nviolet_500 = Color(\"#8B5CF6\")\nviolet_600 = Color(\"#7C3AED\")\nviolet_700 = Color(\"#6D28D9\")\nviolet_800 = Color(\"#5B21B6\")\nviolet_900 = Color(\"#4C1D95\")\n\n# Purple\npurple_50 = Color(\"#FAF5FF\")\npurple_100 = Color(\"#F3E8FF\")\npurple_200 = Color(\"#E9D5FF\")\npurple_300 = Color(\"#D8B4FE\")\npurple_400 = Color(\"#C084FC\")\npurple_500 = Color(\"#A855F7\")\npurple_600 = Color(\"#9333EA\")\npurple_700 = Color(\"#7E22CE\")\npurple_800 = Color(\"#6B21A8\")\npurple_900 = Color(\"#581C87\")\n\n# Fuchsia\nfuchsia_50 = Color(\"#FDF4FF\")\nfuchsia_100 = Color(\"#FAE8FF\")\nfuchsia_200 = Color(\"#F5D0FE\")\nfuchsia_300 = Color(\"#F0ABFC\")\nfuchsia_400 = Color(\"#E879F9\")\nfuchsia_500 = Color(\"#D946EF\")\nfuchsia_600 = Color(\"#C026D3\")\nfuchsia_700 = Color(\"#A21CAF\")\nfuchsia_800 = Color(\"#86198F\")\nfuchsia_900 = Color(\"#701A75\")\n\n# Pink\npink_50 = Color(\"#FDF2F8\")\npink_100 = Color(\"#FCE7F3\")\npink_200 = Color(\"#FBCFE8\")\npink_300 = Color(\"#F9A8D4\")\npink_400 = Color(\"#F472B6\")\npink_500 = Color(\"#EC4899\")\npink_600 = Color(\"#DB2777\")\npink_700 = Color(\"#BE185D\")\npink_800 = Color(\"#9D174D\")\npink_900 = Color(\"#831843\")\n\n# Rose\nrose_50 = Color(\"#FFF1F2\")\nrose_100 = Color(\"#FFE4E6\")\nrose_200 = Color(\"#FECDD3\")\nrose_300 = Color(\"#FDA4AF\")\nrose_400 = Color(\"#FB7185\")\nrose_500 = Color(\"#F43F5E\")\nrose_600 = Color(\"#E11D48\")\nrose_700 = Color(\"#BE123C\")\nrose_800 = 
Color(\"#9F1239\")\nrose_900 = Color(\"#881337\")\n\n\nclass Theme:\n \"\"\"\n Defines a \"graphically consistent\" set of colors and other \n visual attributes of the views.\n \"\"\"\n pass\n \n \nclass Green:\n \n primary = green_700\n variant = green_500\n accent = red_500\n tinted = green_50\n inactive = cool_gray_400\n alert = red_900\n background = white\n \n\nclass DefaultColor(Green):\n pass\n \n \nclass DefaultFont:\n font = (\n '-apple-system,'\n 'BlinkMacSystemFont,'\n '\"Segoe UI\",'\n 'Roboto,'\n 'Helvetica,'\n 'Arial,'\n 'sans-serif,'\n '\"Apple Color Emoji\",'\n '\"Segoe UI Emoji\",'\n '\"Segoe UI Symbol\"'\n )\n font_m = 14\n \n \nclass DefaultLook:\n corner_s = 2\n button_border_width = 1\n button_border_style = 'none'\n inner_shadow = 'inset 0 2px 4px 0 rgba(0, 0, 0, 0.06);'\n field_border_width = 1\n\n\nclass DefaultTheme(DefaultColor, DefaultFont, DefaultLook):\n pass\n\n\ndef theme(property_name):\n return lambda cls: getattr(cls.current_theme, property_name)\n\ntheme.background = theme('background')\ntheme.font = theme('font')\n\n\ndef contrast(property_name):\n return lambda cls: getattr(\n cls.current_theme, property_name\n ).contrast_color()\n \ncontrast.background = contrast('background')\n\n\nclass Style:\n \"\"\"\n Defines pseudo style properties for a view, expected to be mostly \n callable references to the active Theme.\n \"\"\"\n current_theme = DefaultTheme\n \n \nclass BaseStyle(Style):\n \n #text_alignment = 'center'\n font = theme.font\n font_size = theme('font_m')\n text_color = contrast('tinted')\n \n \nclass TextInputStyle(BaseStyle):\n \n text_alignment = 'left'\n background_color = theme.background\n border_color = theme('primary')\n border_width = theme('field_border_width')\n border_style = 'solid'\n\n \nclass ButtonStyle(BaseStyle):\n \n background_color = theme('primary')\n text_color = contrast('primary')\n corner_radius = theme('corner_s')\n border_width = theme('button_border_width')\n border_color = theme('primary')\n border_style = theme('button_border_style')\n \n\nclass VariantButtonStyle(ButtonStyle):\n \n background_color = theme('variant')\n text_color = contrast('variant')\n\n\nclass CardStyle(BaseStyle):\n background_color = theme('tinted')\n\n\nclass TableStyle(BaseStyle):\n border_color = theme('primary')\n border_width = theme('field_border_width')\n border_style = 'solid'","repo_name":"mikaelho/ui4","sub_path":"ui4/theme.py","file_name":"theme.py","file_ext":"py","file_size_in_byte":9369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"15163058149","text":"import gtk\n\nfrom sk1 import modes\n\nKEY_UP = 65362\nKEY_DOWN = 65364\nKEY_LEFT = 65361\nKEY_RIGHT = 65363\nKEY_KP_UP = 65431\nKEY_KP_DOWN = 65433\nKEY_KP_LEFT = 65430\nKEY_KP_RIGHT = 65432\nKEY_ESCAPE = 65307\n\nclass KeyboardProcessor:\n\n\tdef __init__(self, app, canvas):\n\t\tself.app = app\n\t\tself.insp = app.inspector\n\t\tself.proxy = app.proxy\n\t\tself.canvas = canvas\n\n\tdef check_keypress(self, keyval):\n\n#\t\tkeyname = gtk.gdk.keyval_name(keyval)\n#\t\tprint \"Key %s (%d) was pressed\" % (keyname, keyval)\n\n\t\tif self.canvas.mode == modes.SELECT_MODE:\n\t\t\treturn self.select_mode(keyval)\n\t\tif self.canvas.mode == modes.LINE_MODE:\n\t\t\treturn self.line_mode(keyval)\n\t\tif self.canvas.mode == modes.CURVE_MODE:\n\t\t\treturn self.curve_mode(keyval)\n\t\telif self.canvas.mode == modes.RECT_MODE:\n\t\t\treturn self.rect_mode(keyval)\n\t\telif self.canvas.mode == modes.ELLIPSE_MODE:\n\t\t\treturn 
self.ellipse_mode(keyval)\n\t\telif self.canvas.mode == modes.POLYGON_MODE:\n\t\t\treturn self.polygon_mode(keyval)\n\n\t\treturn False\n\n\t#=========MODES================\n\n\tdef select_mode(self, keyval):\n\t\tif self.check_moving(keyval): return True\n\t\treturn False\n\n\tdef line_mode(self, keyval):\n\t\tif self.check_moving(keyval): return True\n\t\tif self.check_escape(keyval): return True\n\t\treturn False\n\n\tdef curve_mode(self, keyval):\n\t\tif self.check_moving(keyval): return True\n\t\tif self.check_escape(keyval): return True\n\t\treturn False\n\n\tdef rect_mode(self, keyval):\n\t\tif self.check_moving(keyval): return True\n\t\tif self.check_escape(keyval): return True\n\t\treturn False\n\n\tdef ellipse_mode(self, keyval):\n\t\tif self.check_moving(keyval): return True\n\t\tif self.check_escape(keyval): return True\n\t\treturn False\n\n\tdef polygon_mode(self, keyval):\n\t\tif self.check_moving(keyval): return True\n\t\tif self.check_escape(keyval): return True\n\t\treturn False\n\n\t#=========ACTIONS================\n\n\tdef check_escape(self, keyval):\n\t\tif keyval == KEY_ESCAPE:\n\t\t\tself.canvas.set_mode(modes.SELECT_MODE)\n\t\t\treturn True\n\t\treturn False\n\n\tdef check_moving(self, keyval):\n\t\tif keyval in [KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT,\n\t\t\t\t\tKEY_KP_UP, KEY_KP_DOWN, KEY_KP_LEFT, KEY_KP_RIGHT]:\n\t\t\tif keyval in [KEY_RIGHT, KEY_KP_RIGHT]:\n\t\t\t\tself.proxy.move_right()\n\t\t\telif keyval in [KEY_LEFT, KEY_KP_LEFT]:\n\t\t\t\tself.proxy.move_left()\n\t\t\telif keyval in [KEY_UP, KEY_KP_UP]:\n\t\t\t\tself.proxy.move_up()\n\t\t\telif keyval in [KEY_DOWN, KEY_KP_DOWN]:\n\t\t\t\tself.proxy.move_down()\n\t\t\treturn True\n\t\treturn False\n\n","repo_name":"tisn05/sk1","sub_path":"src/sk1/view/kb_processor.py","file_name":"kb_processor.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22947054663","text":"\nfrom vsg.rules import token_prefix\n\nfrom vsg import token\n\nlTokens = []\nlTokens.append(token.incomplete_type_declaration.identifier)\nlTokens.append(token.full_type_declaration.identifier)\n\n\nclass rule_015(token_prefix):\n    '''\n    This rule checks for valid prefixes in user defined type identifiers.\n    The default new type prefix is *t\\_*.\n\n    |configuring_prefix_and_suffix_rules_link|\n\n    **Violation**\n\n    .. code-block:: vhdl\n\n        type my_type is range -5 to 5 ;\n\n    **Fix**\n\n    .. 
code-block:: vhdl\n\n type t_my_type is range -5 to 5 ;\n '''\n\n def __init__(self):\n token_prefix.__init__(self, 'type', '015', lTokens)\n self.prefixes = ['t_']\n self.solution = 'Type identifiers'\n","repo_name":"jeremiah-c-leary/vhdl-style-guide","sub_path":"vsg/rules/type_definition/rule_015.py","file_name":"rule_015.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"61"}{"seq_id":"7200477920","text":"import pygame\nimport random\nimport math\nimport time\n\n# importing mixer module which helps us to apply sounds to our game\nfrom pygame import mixer\n\n# initialising the pygame modules\npygame.init()\n\n# Screen width\nSCREEN_WIDTH = 800\n\n# Screen height\nSCREEN_HEIGHT = 600\n\n# creating the screen\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\n# loading the background image\nbackground1 = pygame.image.load('background1.jpg')\n\n# setting the caption of the window\npygame.display.set_caption(\"WELCOME TO SPACE INVADERS\")\n\n# setting the icon of the window\nicon = pygame.image.load('ufoicon2.png')\npygame.display.set_icon(icon)\n\n## PLAYER \n# loading the player image\nplayer1 = pygame.image.load('player2.png')\n\n## setting the dimensions where the image has to be loaded\n# position of the image in x direction\nplayerX = 370\n\n# position of the image in y direction\nplayerY = 480\n\n# change in x direction\nplayerX_change = 0\n\n# change in y direction\nplayerY_change = 0\n\n## ENEMY\n# loading the enemy image\nenemy1 = pygame.image.load('enemy1.png')\n\n## setting the dimensions where the image has to be loaded\n# position of the image in x direction\nenemyX = random.randint(0, 740)\n\n# position of the image in y direction\nenemyY = random.randint(50, 150)\n\n# change in x direction\nenemyX_change = 4\n\n# change in y direction\nenemyY_change = 40\n\n## BULLET\n# loading the bullet image\nbullet1 = pygame.image.load('bullet1.png')\n\n## defining the dimensions of bullets and corresponding variables\n# defining the x position\nbulletX = 0\n\n# defining the y position\nbulletY = 480\n\n# defining the change in position in x direction\nbulletX_change = 0\n\n# defining the change in position in y direction\nbulletY_change = 5\n\n# defining the state of bullet fired or not\nbulletState = \"ready\"\n\n# defining player score\nscore_value = 0\n\n# setting the style of the font for score\nfont = pygame.font.Font('freesansbold.ttf', 32)\n\n# setting the dimensions of the text\ntextX = 10\ntextY = 10\n\n# setting the style of the font for game over\nfontgameover = pygame.font.Font('freesansbold.ttf', 64)\n\n# setting the dimensions of the game over text\ngameoverX = 50\ngameoverY = 50\n\n# defining the function to display the score\ndef displayscore(x, y):\n score = font.render(\"Score :\" + str(score_value), True, (255, 255, 255))\n screen.blit(score, (x, y))\n\n# defining the function to display the game over text\ndef displayGameOver(x, y):\n gameOver = fontgameover.render(\"GAME OVER\", True, (255, 255, 255))\n screen.blit(gameOver, (x, y))\n\n# defining the function to display the image of the player on the screen\ndef player(x, y):\n screen.blit(player1, (x, y))\n\n# defining the function to display the image of the enemy on the screen\ndef enemy(x, y):\n screen.blit(enemy1, (x, y))\n\n# defining the function to display the image of the bullet\n# def bullet(x, y):\n# screen.blit(bullet1, (x, y))\n\n# defining a function to fire the bullet and play its sound\ndef bullet_fire(x, 
y):\n global bulletState\n bulletState = \"fire\"\n screen.blit(bullet1, (x + 16, y + 10))\n \n # loading the music for firing a bullet\n mixer.music.load('bulletsound.wav')\n\n # playing the music\n mixer.music.play()\n\n# defining a function to check collision\ndef isCollision(x1, y1, x2, y2):\n\n if math.sqrt(((x2 - x1)*(x2 - x1)) + ((y2 - y1)*(y2 - y1))) <= 20:\n\n # loading the music for when collision occurs\n mixer.music.load('explosionsound.wav')\n\n # playing the music when collision occurs\n mixer.music.play()\n\n # return True if collision occurred\n return True\n\n else:\n return False\n\n# creating a variable that would make the game loop work till it's true. as soon as it is set to false, the loop will end and game will be over\nrunning = True \n\n# game loop starts\nwhile running:\n\n # setting the window color black again\n screen.fill((0, 0, 0))\n\n # displaying the background image\n screen.blit(background1, (0, 0))\n\n # retrieving all the events one by one\n for event in pygame.event.get():\n\n # searching for an event quit, as soon as we get that we would quit the game by setting running as false\n if event.type == pygame.QUIT:\n\n # setting the running variable to false\n running = False \n\n ## if key stroke is pressed, check if it's right or left\n # this checks if key stroke is pressed down. keydown means the key has been pressed down.\n if event.type == pygame.KEYDOWN:\n # print(\"HERE\")\n # checking if key is left\n if event.key == pygame.K_LEFT:\n # print(\"LEFT ARROW IS PRESSED\")\n playerX_change = -4\n\n # checking if key is right\n if event.key == pygame.K_RIGHT:\n # print(\"RIGHT ARROW IS PRESSED\")\n playerX_change = 4\n \n # checking for space key to fire the bullet\n if event.key == pygame.K_SPACE:\n\n if bulletState == 'ready':\n bulletX = playerX\n # call the function fire bullet to fire the bullet\n bullet_fire(bulletX, bulletY)\n\n ## if the key stroke is released, it means keyup\n if event.type == pygame.KEYUP:\n\n # checking if left or right key is unpressed\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n # print(\"LEFT OR RIGHT ARROW KEY IS BEING PRESSED\")\n playerX_change = 0\n\n # this is changing the position of player in x direction\n playerX += playerX_change\n\n # setting up restriction on the boundary for player\n if playerX <= 0:\n playerX = 0\n\n if playerX >= 740:\n playerX = 740\n\n # enemy changing position in x direction\n enemyX += enemyX_change\n\n # setting up restriction on the boundary for enemy\n if enemyX <= 0:\n enemyX_change = 4\n # print(\"HER2\")\n enemyY += enemyY_change\n\n if enemyX >= 740:\n enemyX_change = -4\n # print(\"HER1\")\n enemyY += enemyY_change\n\n # bullet movement\n if bulletY <= 0:\n bulletY = 480\n bulletState = \"ready\"\n\n if bulletState == \"fire\":\n bullet_fire(bulletX, bulletY)\n bulletY -= bulletY_change\n\n # collision function, checking if a collision occurred between bullet and enemy; if it occurred, increase the score\n if isCollision(enemyX, enemyY, bulletX, bulletY):\n\n bulletY = 480\n bulletState = \"ready\"\n\n # increasing the score of player\n score_value = score_value + 1\n # print(score)\n\n # resetting the enemy after collision\n enemyX = random.randint(0, 740)\n enemyY = random.randint(50, 150)\n\n if (playerY - enemyY) < 40:\n print(\"GAME OVER\")\n displayGameOver(gameoverX, gameoverY)\n # time.sleep(40)\n exit()\n\n # this function draws the player image according to the dimensions set\n player(playerX, playerY)\n\n # this function draws 
the enemy image according to the dimensions\n enemy(enemyX, enemyY)\n\n # this function draws the score text according to the dimensions\n displayscore(textX, textY)\n\n # this function would draw the bullet image according to the dimensions\n # bullet(0, 0)\n # this updates the pygame display\n pygame.display.update()\n","repo_name":"mujproj/space-invader-game-in-python","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"25709566363","text":"from Lab5.Ant import Ant\nfrom Lab5.Position import Position\nfrom random import randint\nfrom texttable import Texttable\n\n\nclass Colony:\n def __init__(self, squareMatrixOrder, noOfAnts):\n self.__n = squareMatrixOrder\n self.__valueMatrix = [[[0, 0] for column in range(self.__n)] for row in range(self.__n)]\n\n self.__ants = [Ant(squareMatrixOrder) for _ in range(noOfAnts)]\n for ant in self.__ants:\n position = ant.lastPosition\n self.__valueMatrix[position.row][position.column] = [randint(1, self.__n),\n randint(1, self.__n)]\n\n self.__traceMatrix = [[dict() for column in range(self.__n)] for row in range(self.__n)]\n variations = [Position(-1, -1), Position(-1, 0), Position(-1, 1), Position(0, 1), Position(1, 1),\n Position(1, 0), Position(1, -1), Position(0, -1)]\n for row in range(self.__n):\n for column in range(self.__n):\n for variation in variations:\n newRow = row + variation.row\n newColumn = column + variation.column\n if 0 <= newRow < self.__n and 0 <= newColumn < self.__n:\n self.__traceMatrix[row][column][Position(newRow, newColumn)] = 1\n\n @property\n def ants(self):\n return self.__ants\n\n @ants.setter\n def ants(self, newAnts):\n self.__ants = newAnts\n\n @property\n def valueMatrix(self):\n return self.__valueMatrix\n\n @valueMatrix.setter\n def valueMatrix(self, newValueMatrix):\n self.__valueMatrix = newValueMatrix\n\n @property\n def traceMatrix(self):\n return self.__traceMatrix\n\n @traceMatrix.setter\n def traceMatrix(self, newTraceMatrix):\n self.__traceMatrix = newTraceMatrix\n\n def __str__(self):\n table = Texttable()\n for row in self.__valueMatrix:\n tableRow = []\n for value in row:\n tableRow.append(str(value[0]) + \", \" + str(value[1]))\n table.add_row(tableRow)\n return table.draw()\n\n","repo_name":"AdrianPascan/Bachelor","sub_path":"4th semester/AI (Artificial Intelligence)/Lab5_ACO/Colony.py","file_name":"Colony.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}{"seq_id":"32882927492","text":"from django.urls import path\nfrom . 
import views\n\napp_name='gateway'\n\nurlpatterns = [\n path('login/', views.loginPage, name=\"login\"),\n path('logout/', views.logoutUser, name=\"logout\"),\n path('register-persona/', views.registerPagePersona, name=\"registerPersona\"),\n path('register-empresa/', views.registerPageEmpresa, name=\"registerEmpresa\"),\n\n path('', views.home, name=\"home\"),\n path('dashboard/', views.mainDashboard, name=\"mainDashboard\"),\n]\n","repo_name":"HiperMaximus/DjangoImebu","sub_path":"gateway/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8681626823","text":"import typing\nimport dataclasses\n\ndef get_nums(file_name:str):\n with open(file_name) as f:\n str_lines = f.readlines()\n return [[int(c) for c in l.strip()] for l in str_lines]\n\n@dataclasses.dataclass(eq=True, frozen=True)\nclass Position:\n x: int\n y: int\n\nclass Point:\n def __init__(self, height:int, neighbor_heights:typing.List[int], pos:Position):\n self._height = height\n self._neighbor_heights = neighbor_heights\n self._pos = pos\n \n @property\n def position(self):\n return self._pos\n \n @property\n def height(self):\n return self._height\n \n @property\n def is_low_point(self):\n for n in self._neighbor_heights:\n if self._height >= n:\n return False\n return True\n \n @property\n def risk_level(self):\n return 1 + self._height\n \n @property\n def x(self):\n return self._pos.x\n \n @property\n def y(self):\n return self._pos.y\n \n def __str__(self):\n return 'x: {}; y: {}; height: {}'.format(self.x, self.y, self.height)\n\ndef get_points_from_nums(nums:typing.List[typing.List[int]]):\n\n points = []\n\n for x, row in enumerate(nums):\n for y, num in enumerate(row):\n\n neighbors = []\n if x != 0:\n neighbors.append(nums[x - 1][y])\n if x != len(nums) - 1:\n neighbors.append(nums[x + 1][y])\n \n if y != 0:\n neighbors.append(nums[x][y - 1])\n \n if y != len(row) - 1:\n neighbors.append(nums[x][y + 1])\n \n points.append(Point(num, neighbor_heights=neighbors, pos=Position(x, y)))\n \n return points\n\nclass PointMap:\n\n def __init__(self, points:typing.List[Point]):\n self._point_dict = {p.position:p for p in points}\n\n def get_all_low_points(self):\n return [p for p in self._point_dict.values() if p.is_low_point]\n\n def calculate_risk_sum(self):\n return sum([p.risk_level for p in self.get_all_low_points()])\n\n def calculate_basin_size_from_low_point(self, low_point:Point):\n visited = {}\n size = 0\n\n stack = [low_point]\n while len(stack) != 0:\n p = stack.pop()\n size += 1\n visited[p.position] = True\n \n while True:\n new_pos = Position(p.x - 1, p.y)\n if new_pos not in self._point_dict:\n break\n if new_pos in visited:\n break\n new_point = self._point_dict[new_pos]\n if new_point.height == 9:\n break\n if new_point.height <= p.height:\n break\n visited[new_pos] = True\n\n stack.append(new_point)\n break\n \n while True:\n new_pos = Position(p.x + 1, p.y)\n if new_pos not in self._point_dict:\n break\n if new_pos in visited:\n break\n new_point = self._point_dict[new_pos]\n if new_point.height == 9:\n break\n if new_point.height <= p.height:\n break\n \n visited[new_pos] = True\n stack.append(new_point)\n break\n\n while True:\n new_pos = Position(p.x, p.y - 1)\n if new_pos not in self._point_dict:\n break\n if new_pos in visited:\n break\n new_point = self._point_dict[new_pos]\n if new_point.height == 9:\n break\n if new_point.height <= p.height:\n break\n \n 
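# neighbor is in-bounds, unvisited, below the ridge height of 9, and strictly higher than p, so it belongs to this basin\n 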
visited[new_pos] = True\n stack.append(new_point)\n break\n\n while True:\n new_pos = Position(p.x, p.y + 1)\n if new_pos not in self._point_dict:\n break\n if new_pos in visited:\n break\n new_point = self._point_dict[new_pos]\n if new_point.height == 9:\n break\n if new_point.height <= p.height:\n break\n \n visited[new_pos] = True\n stack.append(new_point)\n break\n\n return size\n\n def calculate_all_basins(self):\n return [self.calculate_basin_size_from_low_point(p) for p in self.get_all_low_points()]\n\ndef part_1(file_name:str):\n nums = get_nums(file_name)\n points = get_points_from_nums(nums)\n point_map = PointMap(points)\n print(point_map.calculate_risk_sum())\n risk_sum = 0\n for p in points:\n p:Point\n if not p.is_low_point:\n continue\n risk_sum += p.risk_level\n print(risk_sum)\n\ndef part_2(file_name:str):\n nums = get_nums(file_name)\n points = get_points_from_nums(nums)\n point_map = PointMap(points)\n\n\n amt = 1\n \n basin_sizes = point_map.calculate_all_basins()\n sorted_basins = sorted(basin_sizes)\n considered_basins = sorted_basins[-3:]\n print(considered_basins)\n for b in considered_basins:\n amt *= b\n \n print(amt)\n\n\npart_1('input-day-9.txt')\npart_2('input-day-9.txt')","repo_name":"C-Canchola/advent-of-code-2021","sub_path":"sln-day-9.py","file_name":"sln-day-9.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"517180931","text":"\"\"\"\nA self-dividing number is a number that is divisible by every digit it contains.\n\nFor example, 128 is a self-dividing number because 128 % 1 == 0, 128 % 2 == 0, and 128 % 8 == 0.\n\nAlso, a self-dividing number is not allowed to contain the digit zero.\n\nGiven a lower and upper number bound, output a list of every possible self dividing number, including the bounds if possible.\n\nExample 1:\nInput: \nleft = 1, right = 22\nOutput: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]\nNote:\n\nThe boundaries of each input argument are 1 <= left <= right <= 10000.\n\"\"\"\n\nclass Solution(object):\n def selfDividingNumbers(self, left, right):\n \"\"\"\n :type left: int\n :type right: int\n :rtype: List[int]\n \"\"\"\n res = list()\n \n def isdiv(n):\n temp = n\n while temp:\n rem = temp % 10\n if rem == 0 or n % rem != 0:\n return False\n temp = temp // 10\n return True\n \n for num in range(left, right + 1):\n if isdiv(num):\n res.append(num)\n\n \n return res\n","repo_name":"Sathish-sbu/Lists-Arrays","sub_path":"selfDividingNumbers.py","file_name":"selfDividingNumbers.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"22801791972","text":"from zy.zj.views import *\r\nimport time\r\nwhile 1:\r\n print(\"******Welcome to the grade management system********\")\r\n print(\"[1] Enter student information\")\r\n print(\"[2] Display student information\")\r\n print(\"[3] Look up student information\")\r\n print(\"[4] Sort grades\")\r\n print(\"[0] Exit\")\r\n print(\"------------------------\")\r\n choice = int(input(\"Please enter your choice: \"))\r\n if choice == 1:\r\n shuru()\r\n print(\"Operation successful! Please wait...\")\r\n elif choice == 2:\r\n shuchu()\r\n elif choice == 3:\r\n chazhao()\r\n elif choice == 4:\r\n paixu()\r\n elif choice == 0:\r\n print(\"Exited!!\")\r\n quit()\r\n else:\r\n print(\"Invalid choice!\")\r\n time.sleep(2)\r\n# dictstu = [['李四', '2009002', '86', '78','164','82'],['张三', '2009001', '85', '89','174','87']]\r\n# students1 = \"张三 2009001 85 89\"\r\n# students2 = \"李四 2009002 86 
78\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"zhenguo96/test1","sub_path":"Python基础笔记/11/zy/zj/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19624299344","text":"import socket\nimport traceback\n\ndef main():\n server_socket = start_server()\n client_socket, client_address = wait_for_PC_connection(server_socket)\n if client_socket and client_address:\n handshake_messages(client_socket)\n send_user_message(client_socket)\n disconnect_from_PC(client_socket)\n\ndef start_server():\n print(\"Starting server...\")\n port = 5000\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n server_socket.bind((\"0.0.0.0\", port))\n server_socket.listen(5)\n return server_socket\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt\")\n except Exception as e:\n traceback.print_exc()\n print(\"Exception message: \" + str(e))\n return False\n\ndef wait_for_PC_connection(server_socket):\n print(\"Waiting for PC to connect...\")\n while True:\n try:\n client_socket, client_address = server_socket.accept()\n print(f\"Connected to {client_address}\")\n return client_socket, client_address\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt\")\n except Exception as e:\n print(f\"No connection made\")\n traceback.print_exc()\n return False\n\ndef handshake_messages(client_socket):\n print(\"Handshaking with PC...\")\n try:\n message = client_socket.recv(1024)\n client_socket.send(\"Hello, I'm the Raspberry Pi server.\".encode())\n print(message.decode())\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt\")\n except Exception as e:\n traceback.print_exc()\n print(\"Exception message: \" + str(e))\n\ndef send_user_message(client_socket):\n while True:\n message = input(\"Enter message to PC: \")\n try:\n client_socket.send(message.encode())\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt\")\n break\n except Exception as e:\n print(\"Failed to send message.\")\n traceback.print_exc()\n print(\"Exception message: \" + str(e))\n break\n\n print(f\"Sent message '{message}'.\")\n\ndef disconnect_from_PC(client_socket):\n print(\"Disconnecting from PC...\")\n client_socket.close()\n\nmain()","repo_name":"Enthusela/DesktopControlPanel","sub_path":"pc_audio_ctrl/pi_server_test.py","file_name":"pi_server_test.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31642915375","text":"import asyncio\nimport re\n\nfrom time import localtime, strftime\n\nfrom server import logger\nfrom server.constants import ArgType, Clients, Constants\nfrom server.exceptions import AreaError, ClientError, ServerError, PartyError, TsuserverException\nfrom server.fantacrypt import fanta_decrypt\nfrom server.evidence import EvidenceList\n\nclass AOProtocol(asyncio.Protocol):\n \"\"\"\n The main class that deals with the AO protocol.\n \"\"\"\n\n def __init__(self, server):\n super().__init__()\n self.server = server\n self.client = None\n self.buffer = ''\n self.ping_timeout = None\n logger.log_print = logger.log_print2 if self.server.in_test else logger.log_print\n\n # Determine whether /exec is active or not and warn server owner if so.\n if getattr(self.server.commands, \"ooc_cmd_exec\")(self.client, \"is_exec_active\") == 1:\n logger.log_print(\"\"\"\n\n WARNING\n\n THE /exec COMMAND IN commands.py IS ACTIVE.\n\n UNLESS YOU ABSOLUTELY MEANT IT AND KNOW 
WHAT YOU ARE DOING,\n PLEASE STOP YOUR SERVER RIGHT NOW AND DEACTIVATE IT BY GOING TO THE\n commands.py FILE AND FOLLOWING THE INSTRUCTIONS UNDER ooc_cmd_exec.\\n\n BAD THINGS CAN AND WILL HAPPEN OTHERWISE.\n\n \"\"\")\n\n def data_received(self, data):\n \"\"\" Handles any data received from the network.\n\n Receives data, parses it into commands and passes them\n to the command handler.\n\n :param data: bytes of data\n \"\"\"\n buf = data\n if buf is None:\n buf = b''\n # try to decode as utf-8, ignore any erroneous characters\n self.buffer += buf.decode('utf-8', 'ignore')\n self.buffer = self.buffer.translate({ord(c): None for c in '\\0'})\n\n if len(self.buffer) > 8192:\n msg = self.buffer if len(self.buffer) < 512 else self.buffer[:512] + '...'\n logger.log_server('Terminated {} (packet too long): sent {} ({} bytes)'\n .format(self.client.get_ipreal(), msg, len(self.buffer)))\n self.client.disconnect()\n return\n\n found_message = False\n for msg in self.get_messages():\n found_message = True\n if len(msg) < 2:\n # This immediately kills any client that does not even try to follow the proper\n # client protocol\n msg = self.buffer if len(self.buffer) < 512 else self.buffer[:512] + '...'\n logger.log_server('Terminated {} (packet too short): sent {} ({} bytes)'\n .format(self.client.get_ipreal(), msg, len(self.buffer)))\n self.client.disconnect()\n return\n # general netcode structure is not great\n if msg[0] in ('#', '3', '4'):\n if msg[0] == '#':\n msg = msg[1:]\n spl = msg.split('#', 1)\n msg = '#'.join([fanta_decrypt(spl[0])] + spl[1:])\n logger.log_debug('[INC][RAW]{}'.format(msg), self.client)\n try:\n # print(f'> {self.client.id}: {msg}')\n cmd, *args = msg.split('#')\n self.net_cmd_dispatcher[cmd](self, args)\n except Exception as ex:\n self.server.send_error_report(self.client, cmd, args, ex)\n if not found_message:\n # This immediately kills any client that does not even try to follow the proper\n # client protocol\n msg = self.buffer if len(self.buffer) < 512 else self.buffer[:512] + '...'\n logger.log_server('Terminated {} (packet syntax unrecognized): sent {} ({} bytes)'\n .format(self.client.get_ipreal(), msg, len(self.buffer)))\n self.client.disconnect()\n\n def connection_made(self, transport, my_protocol=None):\n \"\"\" Called upon a new client connecting\n\n :param transport: the transport object\n \"\"\"\n self.client = self.server.new_client(transport, my_protocol=my_protocol)\n self.ping_timeout = asyncio.get_event_loop().call_later(self.server.config['timeout'], self.client.disconnect)\n self.client.send_command('decryptor', 34) # just fantacrypt things\n\n def connection_lost(self, exc, client=None):\n \"\"\" User disconnected\n\n :param exc: reason\n \"\"\"\n self.server.remove_client(self.client)\n self.ping_timeout.cancel()\n\n def get_messages(self):\n \"\"\" Parses out full messages from the buffer.\n\n :return: yields messages\n \"\"\"\n while '#%' in self.buffer:\n spl = self.buffer.split('#%', 1)\n self.buffer = spl[1]\n yield spl[0]\n # exception because bad netcode\n askchar2 = '#615810BC07D12A5A#'\n if self.buffer == askchar2:\n self.buffer = ''\n yield askchar2\n\n def validate_net_cmd(self, args, *types, needs_auth=True):\n \"\"\" Makes sure the net command's arguments match expectations.\n\n :param args: actual arguments to the net command\n :param types: what kind of data types are expected\n :param needs_auth: whether you need to have chosen a character\n :return: returns True if message was validated\n \"\"\"\n if needs_auth and 
self.client.char_id == -1:\n return False\n if len(args) != len(types):\n return False\n for i, arg in enumerate(args):\n if len(arg) == 0 and types[i] != ArgType.STR_OR_EMPTY:\n return False\n if types[i] == ArgType.INT:\n try:\n args[i] = int(arg)\n except ValueError:\n return False\n return True\n\n def process_arguments(self, identifier, args, needs_auth=True, fallback_protocols=None):\n if fallback_protocols is None:\n fallback_protocols = list()\n\n packet_type = '{}_INBOUND'.format(identifier.upper())\n for protocol in [self.client.packet_handler]+fallback_protocols:\n expected_pairs = protocol[packet_type].value\n expected_argument_names = [x[0] for x in expected_pairs]\n expected_types = [x[1] for x in expected_pairs]\n if not self.validate_net_cmd(args, *expected_types, needs_auth=needs_auth):\n continue\n\n return dict(zip(expected_argument_names, args))\n return None\n\n def net_cmd_hi(self, args):\n \"\"\" Handshake.\n\n HI##%\n\n :param args: a list containing all the arguments\n \"\"\"\n if not self.validate_net_cmd(args, ArgType.STR, needs_auth=False):\n return\n\n # Record new HDID and IPID if needed\n self.client.hdid = args[0]\n if self.client.hdid not in self.client.server.hdid_list:\n self.client.server.hdid_list[self.client.hdid] = []\n if self.client.ipid not in self.client.server.hdid_list[self.client.hdid]:\n self.client.server.hdid_list[self.client.hdid].append(self.client.ipid)\n self.client.server.dump_hdids()\n\n # Check if the client is banned\n for ipid in self.client.server.hdid_list[self.client.hdid]:\n if self.server.ban_manager.is_banned(ipid):\n self.client.send_ooc_others('Banned client with HDID {} and IPID {} attempted to '\n 'join the server but was refused entrance.'\n .format(self.client.hdid, self.client.ipid),\n is_officer=True)\n self.client.send_command('BD')\n self.client.disconnect()\n return\n\n if self.client.hdid != 'ms2-prober' or self.server.config['show_ms2-prober']:\n logger.log_server('Connected. 
HDID: {}.'.format(self.client.hdid), self.client)\n self.client.send_command('ID', self.client.id, self.server.software,\n self.server.get_version_string())\n self.client.send_command('PN', self.server.get_player_count(),\n self.server.config['playerlimit'])\n self.client.can_join += 1 # One of two conditions to allow joining\n\n def net_cmd_id(self, args):\n \"\"\" Client version and PV\n\n ID####%\n\n \"\"\"\n\n self.client.can_join += 1 # One of two conditions to allow joining\n\n def check_client_version():\n if len(args) < 2:\n self.client.version = ('DRO', '1.0.0')\n return False\n\n self.client.version = (args[0], args[1])\n\n software = args[0]\n version_list = args[1].split('.')\n\n # Identify version number\n if len(version_list) >= 3:\n # Such versions include DRO and AO\n release = int(version_list[0])\n major = int(version_list[1])\n minor = int(version_list[2])\n\n if args[0] not in ['DRO', 'AO2']:\n return False\n else:\n # Only such version recognized now is CC\n # CC has args[1] == 'CC - Update (\\d+\\.)*\\d+'\n if args[1].startswith('CC'):\n release = 'CC'\n major = float(args[1].split(' ')[-1])\n minor = 0\n else:\n return False\n\n if software == 'DRO':\n self.client.packet_handler = Clients.ClientDRO1d0d0\n else: # AO2 protocol\n if release == 2:\n if major >= 8 and major >= 4:\n self.client.packet_handler = Clients.ClientAO2d8d4\n elif major >= 8: # KFO\n self.client.packet_handler = Clients.ClientKFO2d8\n elif major == 7: # AO 2.7\n self.client.packet_handler = Clients.ClientAO2d7\n elif major == 6: # AO 2.6\n self.client.packet_handler = Clients.ClientAO2d6\n elif major == 4 and minor == 8: # Older DRO\n self.client.packet_handler = Clients.ClientDROLegacy\n else:\n return False # Unrecognized\n elif release == 'CC':\n if major >= 24:\n self.client.packet_handler = Clients.ClientCC24\n elif major >= 22:\n self.client.packet_handler = Clients.ClientCC22\n else:\n return False # Unrecognized\n # The only way to make it here is if we have not returned False\n # If that is the case, we have successfully found a version\n return True\n\n if not check_client_version():\n # Warn player they are using an unknown client.\n # Assume a DRO client instruction set.\n self.client.packet_handler = Clients.ClientDRO1d0d0\n self.client.bad_version = True\n\n self.client.send_command('FL', 'yellowtext', 'customobjections', 'flipping',\n 'fastloading', 'noencryption', 'deskmod', 'evidence',\n 'cccc_ic_support', 'looping_sfx', 'additive', 'effects')\n\n def net_cmd_ch(self, _):\n \"\"\" Periodically checks the connection.\n\n CHECK#%\n\n \"\"\"\n self.client.send_command('CHECK')\n self.ping_timeout.cancel()\n self.ping_timeout = asyncio.get_event_loop().call_later(self.server.config['timeout'], self.client.disconnect)\n\n def net_cmd_askchaa(self, _):\n \"\"\" Ask for the counts of characters/evidence/music\n\n askchaa#%\n\n \"\"\"\n # Check if client is ready to actually join, and did not do weird packet shenanigans before\n if self.client.can_join != 2:\n return\n # Check if client asked for this before but did not finish processing it\n if not self.client.can_askchaa:\n return\n\n self.client.can_askchaa = False # Enforce the joining process happening atomically\n\n # Make sure there is enough room for the client\n char_cnt = len(self.server.char_list)\n evi_cnt = 0\n music_cnt = sum([len(x) for x in self.server.music_pages_ao1])\n self.client.send_command('SI', char_cnt, evi_cnt, music_cnt)\n\n def net_cmd_askchar2(self, _):\n \"\"\" Asks for the character list.\n\n askchar2#%\n\n 
\"\"\"\n self.client.send_command('CI', *self.server.char_pages_ao1[0])\n\n def net_cmd_an(self, args):\n \"\"\" Asks for specific pages of the character list.\n\n AN##%\n\n \"\"\"\n if not self.validate_net_cmd(args, ArgType.INT, needs_auth=False):\n return\n if len(self.server.char_pages_ao1) > args[0] >= 0:\n self.client.send_command('CI', *self.server.char_pages_ao1[args[0]])\n else:\n self.client.send_command('EM', *self.server.music_pages_ao1[0])\n\n def net_cmd_ae(self, _):\n \"\"\" Asks for specific pages of the evidence list.\n\n AE##%\n\n \"\"\"\n pass # todo evidence maybe later\n\n def net_cmd_am(self, args):\n \"\"\" Asks for specific pages of the music list.\n\n AM##%\n\n \"\"\"\n if not self.validate_net_cmd(args, ArgType.INT, needs_auth=False):\n return\n if len(self.server.music_pages_ao1) > args[0] >= 0:\n self.client.send_command('EM', *self.server.music_pages_ao1[args[0]])\n else:\n self.client.send_done()\n self.client.send_area_list()\n self.client.send_motd()\n self.client.can_askchaa = True # Allow rejoining if left to lobby but did not dc.\n\n def net_cmd_rc(self, _):\n \"\"\" Asks for the whole character list(AO2)\n\n AC#%\n\n \"\"\"\n\n self.client.send_command('SC', *self.server.char_list)\n\n def net_cmd_rm(self, _):\n \"\"\" Asks for the whole music list(AO2)\n\n AM#%\n\n \"\"\"\n # Force the server to rebuild the music list, so that clients who just join get the correct\n # music list (as well as every time they request an updated music list directly).\n\n full_music_list = self.server.build_music_list_ao2(include_areas=True,\n include_music=True)\n self.client.send_command('SM', *full_music_list)\n\n def net_cmd_rd(self, _):\n \"\"\" Asks for server metadata(charscheck, motd etc.) and a DONE#% signal(also best packet)\n\n RD#%\n\n \"\"\"\n\n self.client.send_done()\n if self.server.config['announce_areas']:\n if self.server.config['rp_mode_enabled']:\n self.client.send_limited_area_list()\n else:\n self.client.send_area_list()\n self.client.send_motd()\n self.client.reload_music_list() # Reload the default area's music list\n # so that it only includes areas reachable from that default area.\n self.client.can_askchaa = True # Allow rejoining if left to lobby but did not dc.\n\n def net_cmd_cc(self, args):\n \"\"\" Character selection.\n\n CC####%\n\n \"\"\"\n if not self.validate_net_cmd(args, ArgType.INT, ArgType.INT, ArgType.STR,\n needs_auth=False):\n return\n cid = args[1]\n try:\n self.client.change_character(cid)\n except ClientError:\n return\n self.client.last_active = Constants.get_time()\n\n def net_cmd_ms(self, args):\n \"\"\" IC message.\n\n Refer to the implementation for details.\n\n \"\"\"\n\n if self.client.is_muted: # Checks to see if the client has been muted by a mod\n self.client.send_ooc(\"You have been muted by a moderator.\")\n return\n if self.client.area.ic_lock and not self.client.is_staff():\n self.client.send_ooc(\"IC chat in this area has been locked by a moderator.\")\n return\n if not self.client.area.can_send_message():\n return\n pargs = self.process_arguments('ms', args)\n if not pargs:\n return\n\n # First, check if the player just sent the same message with the same character and did\n # not receive any other messages in the meantime.\n # This helps prevent record these messages and retransmit it to clients who may want to\n # filter these out\n if (pargs['text'] == self.client.last_ic_raw_message and self.client.last_ic_received_mine\n and self.client.get_char_name() == self.client.last_ic_char):\n return\n\n if not 
self.client.area.iniswap_allowed:\n if self.client.area.is_iniswap(self.client, pargs['pre'], pargs['anim'],\n pargs['folder']):\n self.client.send_ooc(\"Iniswap is blocked in this area.\")\n return\n if pargs['folder'] in self.client.area.restricted_chars and not self.client.is_staff():\n self.client.send_ooc('Your character is restricted in this area.')\n return\n if pargs['msg_type'] not in ('chat', '0', '1'):\n return\n if pargs['anim_type'] not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10):\n return\n if pargs['cid'] != self.client.char_id:\n return\n if pargs['sfx_delay'] < 0:\n return\n if pargs['button'] not in (0, 1, 2, 3, 4, 5, 6, 7): # Shouts\n return\n if pargs['button'] > 0 and not self.client.area.bullet and not self.client.is_staff():\n self.client.send_ooc('Bullets are disabled in this area.')\n return\n if pargs['evidence'] < 0:\n return\n if pargs['ding'] not in (0, 1, 2, 3, 4, 5, 6, 7): # Effects\n return\n if pargs['color'] not in (0, 1, 2, 3, 4, 5, 6):\n return\n if pargs['color'] == 5 and not self.client.is_mod and not self.client.is_cm:\n pargs['color'] = 0\n if pargs['color'] == 6:\n # Remove all unicode to prevent now yellow text abuse\n pargs['text'] = re.sub(r'[^\\x00-\\x7F]+', ' ', pargs['text'])\n if len(pargs['text'].strip(' ')) == 1:\n pargs['color'] = 0\n else:\n if pargs['text'].strip(' ') in ('', '', '', ''):\n pargs['color'] = 0\n if self.client.pos:\n pargs['pos'] = self.client.pos\n else:\n if pargs['pos'] not in ('def', 'pro', 'hld', 'hlp', 'jud', 'wit'):\n return\n self.client.pos = pargs['pos']\n\n # At this point, the message is guaranteed to be sent\n # First, update last raw message sent *before* any transformations. That is so that the\n # server can accurately ignore client sending the same message over and over again\n self.client.last_ic_raw_message = pargs['text']\n self.client.last_ic_char = self.client.get_char_name()\n\n # Truncate and alter message if message effect is in place\n raw_msg = pargs['text'][:256]\n msg = raw_msg\n if self.client.gimp: #If you are gimped, gimp message.\n msg = Constants.gimp_message()\n if self.client.disemvowel: #If you are disemvoweled, replace string.\n msg = Constants.disemvowel_message(msg)\n if self.client.disemconsonant: #If you are disemconsonanted, replace string.\n msg = Constants.disemconsonant_message(msg)\n if self.client.remove_h: #If h is removed, replace string.\n msg = Constants.remove_h_message(msg)\n\n gag_replaced = False\n if self.client.is_gagged:\n allowed_starters = ('(', '*', '[')\n if msg != ' ' and not msg.startswith(allowed_starters):\n gag_replaced = True\n msg = Constants.gagged_message()\n if msg != raw_msg:\n self.client.send_ooc_others('(X) {} [{}] tried to say `{}` but is currently gagged.'\n .format(self.client.displayname, self.client.id,\n raw_msg),\n is_zstaff_flex=True, in_area=True)\n\n # Censor passwords if login command accidentally typed in IC\n for password in self.server.all_passwords:\n for login in ['login ', 'logincm ', 'loginrp ', 'logingm ']:\n if login + password in msg:\n msg = msg.replace(password, '[CENSORED]')\n\n if pargs['evidence']:\n evidence_position = self.client.evi_list[pargs['evidence']] - 1\n if self.client.area.evi_list.evidences[evidence_position].pos != 'all':\n self.client.area.evi_list.evidences[evidence_position].pos = 'all'\n self.client.area.broadcast_evidence_list()\n\n # If client has GlobalIC enabled, set area range target to intended range and remove\n # GlobalIC prefix if needed.\n if self.client.multi_ic is None or not 
msg.startswith(self.client.multi_ic_pre):\n area_range = range(self.client.area.id, self.client.area.id + 1)\n else:\n # As msg.startswith('') is True, this also accounts for having no required prefix.\n start, end = self.client.multi_ic[0].id, self.client.multi_ic[1].id + 1\n start_area = self.server.area_manager.get_area_by_id(start)\n end_area = self.server.area_manager.get_area_by_id(end-1)\n area_range = range(start, end)\n\n truncated_msg = msg.replace(self.client.multi_ic_pre, '', 1)\n if start != end-1:\n self.client.send_ooc('Sent global IC message \"{}\" to areas {} through {}.'\n .format(truncated_msg, start_area.name, end_area.name))\n else:\n self.client.send_ooc('Sent global IC message \"{}\" to area {}.'\n .format(truncated_msg, start_area.name))\n\n pargs['msg'] = msg\n pargs['evidence'] = self.client.evi_list[pargs['evidence']]\n pargs['showname'] = '' # Dummy value, actual showname is computed later\n\n # Compute pairs\n # Based on tsuserver3.3 code\n # Only do this if character is paired, which would only happen for AO 2.6+ clients\n\n # Handle AO 2.8 logic\n # AO 2.8 sends their charid_pair in slightly longer format (\\d+\\^\\d+)\n # The first bit corresponds to the proper charid_pair, the latter one to whether\n # the character should appear in front or behind the pair. We still want to extract\n # charid_pair so pre-AO 2.8 still see the pair; but make it so that AO 2.6 can send pair\n # messages. Thus, we 'invent' the missing arguments based on available info.\n if 'charid_pair_pair_order' in pargs:\n # AO 2.8 sender\n pargs['charid_pair'] = int(pargs['charid_pair_pair_order'].split('^')[0])\n elif 'charid_pair' in pargs:\n # AO 2.6 sender\n pargs['charid_pair_pair_order'] = f'{pargs[\"charid_pair\"]}^0'\n else:\n # E.g. DRO\n pargs['charid_pair'] = -1\n pargs['charid_pair_pair_order'] = -1\n\n self.client.charid_pair = pargs['charid_pair'] if 'charid_pair' in pargs else -1\n self.client.offset_pair = pargs['offset_pair'] if 'offset_pair' in pargs else 0\n self.client.flip = pargs['flip']\n self.client.char_folder = pargs['folder']\n\n if pargs['anim_type'] not in (5, 6):\n self.client.last_sprite = pargs['anim']\n\n pargs['other_offset'] = 0\n pargs['other_emote'] = 0\n pargs['other_flip'] = 0\n pargs['other_folder'] = ''\n if 'charid_pair' not in pargs or pargs['charid_pair'] < -1:\n pargs['charid_pair'] = -1\n pargs['charid_pair_pair_order'] = -1\n\n if pargs['charid_pair'] > -1:\n for target in self.client.area.clients:\n if target == self.client:\n continue\n # Check pair has accepted pair\n if target.char_id != self.client.charid_pair:\n continue\n if target.charid_pair != self.client.char_id:\n continue\n # Check pair is in same position\n if target.pos != self.client.pos:\n continue\n\n pargs['other_offset'] = target.offset_pair\n pargs['other_emote'] = target.last_sprite\n pargs['other_flip'] = target.flip\n pargs['other_folder'] = target.char_folder\n break\n else:\n # There are no clients who want to pair with this client\n pargs['charid_pair'] = -1\n pargs['offset_pair'] = 0\n pargs['charid_pair_pair_order'] = -1\n\n for area_id in area_range:\n target_area = self.server.area_manager.get_area_by_id(area_id)\n for c in target_area.clients:\n c.send_ic(params=pargs, sender=self.client, gag_replaced=gag_replaced)\n\n target_area.set_next_msg_delay(len(msg))\n\n # Deal with shoutlog\n if pargs['button'] > 0:\n info = 'used shout {} with the message: {}'.format(pargs['button'], msg)\n target_area.add_to_shoutlog(self.client, info)\n\n 
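# also set the message delay for the sender's own area, scaled by message length\n 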
self.client.area.set_next_msg_delay(len(msg))\n logger.log_server('[IC][{}][{}]{}'\n .format(self.client.area.id, self.client.get_char_name(), msg),\n self.client)\n\n # Sending IC messages reveals sneaked players\n if not self.client.is_staff() and not self.client.is_visible:\n self.client.change_visibility(True)\n self.client.send_ooc_others('(X) {} [{}] revealed themselves by talking ({}).'\n .format(self.client.displayname, self.client.id,\n self.client.area.id),\n is_zstaff=True)\n\n self.server.tasker.create_task(self.client,\n ['as_afk_kick', self.client.area.afk_delay,\n self.client.area.afk_sendto])\n if self.client.area.is_recording:\n self.client.area.recorded_messages.append(args)\n\n self.client.last_ic_message = msg\n self.client.last_active = Constants.get_time()\n\n def net_cmd_ct(self, args):\n \"\"\" OOC Message\n\n CT###%\n\n \"\"\"\n if self.client.is_ooc_muted: # Checks to see if the client has been muted by a mod\n self.client.send_ooc(\"You have been muted by a moderator.\")\n return\n if not self.validate_net_cmd(args, ArgType.STR, ArgType.STR, needs_auth=False):\n return\n if args[0] == '' or not self.client.is_valid_name(args[0]):\n self.client.send_ooc('You must insert a name with at least one letter.')\n return\n if args[0].startswith(' '):\n self.client.send_ooc('You must insert a name that starts with a letter.')\n return\n if Constants.contains_illegal_characters(args[0]):\n self.client.send_ooc('Your name contains an illegal character.')\n return\n if self.server.config['hostname'] in args[0] or 'G' in args[0]:\n self.client.send_ooc('That name is reserved.')\n return\n\n # After this the name is validated\n if self.client.name != args[0] and self.client.fake_name != args[0]:\n if self.client.is_valid_name(args[0]):\n self.client.name = args[0]\n self.client.fake_name = args[0]\n else:\n self.client.fake_name = args[0]\n self.client.name = ''\n if args[1].startswith('/'):\n spl = args[1][1:].split(' ', 1)\n cmd = spl[0]\n arg = ''\n if len(spl) == 2:\n arg = spl[1][:1024]\n try:\n called_function = 'ooc_cmd_{}'.format(cmd)\n function = None # Double assignment to check if it matched to a function later\n function = getattr(self.server.commands, called_function)\n except AttributeError:\n try:\n function = getattr(self.server.commands_alt, called_function)\n except AttributeError:\n logger.log_print('Attribute error with ' + called_function)\n self.client.send_ooc('Invalid command.')\n\n if function:\n try:\n function(self.client, arg)\n except TsuserverException as ex:\n self.client.send_ooc(ex)\n else:\n # Censor passwords if accidentally said without a slash in OOC\n for password in self.server.all_passwords:\n for login in ['login ', 'logincm ', 'loginrp ', 'logingm ']:\n if login + password in args[1]:\n args[1] = args[1].replace(password, '[CENSORED]')\n if self.client.disemvowel: #If you are disemvoweled, replace string.\n args[1] = Constants.disemvowel_message(args[1])\n if self.client.disemconsonant: #If you are disemconsonanted, replace string.\n args[1] = Constants.disemconsonant_message(args[1])\n if self.client.remove_h: #If h is removed, replace string.\n args[1] = Constants.remove_h_message(args[1])\n\n self.client.area.send_command('CT', self.client.name, args[1])\n self.client.last_ooc_message = args[1]\n logger.log_server('[OOC][{}][{}][{}]{}'\n .format(self.client.area.id, self.client.get_char_name(),\n self.client.name, args[1]), self.client)\n self.client.last_active = Constants.get_time()\n\n def net_cmd_mc(self, args):\n \"\"\" Play 
music.\n\n MC###%\n\n \"\"\"\n # First attempt to switch area,\n # because music lists typically include area names for quick access\n try:\n delimiter = args[0].find('-')\n area = self.server.area_manager.get_area_by_name(args[0][delimiter+1:])\n self.client.change_area(area, from_party=True if self.client.party else False)\n\n # Otherwise, attempt to play music.\n except (AreaError, ValueError):\n if self.client.is_muted: # Checks to see if the client has been muted by a mod\n self.client.send_ooc(\"You have been muted by a moderator.\")\n return\n if not self.client.is_dj:\n self.client.send_ooc('You were blockdj\\'d by a moderator.')\n return\n # We have to use fallback protocols for AO2d6 like clients, because if for whatever\n # reason if they don't set an in-client showname, they send less arguments. In\n # particular, they behave like DRO.\n pargs = self.process_arguments('MC', args, fallback_protocols=[Clients.ClientDROLegacy])\n if not pargs:\n return\n\n if 'cid' not in pargs or int(pargs['cid']) != self.client.char_id:\n return\n if self.client.change_music_cd():\n self.client.send_ooc('You changed song too many times recently. Please try again '\n 'after {} seconds.'.format(int(self.client.change_music_cd())))\n return\n\n try:\n self.client.area.play_track(pargs['name'], self.client, raise_if_not_found=True,\n reveal_sneaked=True, pargs=pargs)\n except ServerError.MusicNotFoundError:\n self.client.send_ooc('Unrecognized area or music `{}`.'.format(args[0]))\n except ServerError:\n return\n except (ClientError, PartyError) as ex:\n self.client.send_ooc(ex)\n\n self.client.last_active = Constants.get_time()\n\n def net_cmd_rt(self, args):\n \"\"\" Plays the Testimony/CE animation.\n\n RT##%\n\n \"\"\"\n if self.client.is_muted: # Checks to see if the client has been muted by a mod\n self.client.send_ooc('You have been muted by a moderator.')\n return\n if not self.validate_net_cmd(args, ArgType.STR):\n return\n if args[0] not in ('testimony1', 'testimony2', 'testimony3', 'testimony4'):\n return\n self.client.area.send_command('RT', args[0])\n self.client.area.add_to_judgelog(self.client, 'used judge button {}.'.format(args[0]))\n logger.log_server('[{}]{} used judge button {}.'\n .format(self.client.area.id, self.client.get_char_name(), args[0]),\n self.client)\n self.client.last_active = Constants.get_time()\n\n def net_cmd_hp(self, args):\n \"\"\" Sets the penalty bar.\n\n HP###%\n\n \"\"\"\n if self.client.is_muted: # Checks to see if the client has been muted by a mod\n self.client.send_ooc(\"You have been muted by a moderator\")\n return\n if not self.validate_net_cmd(args, ArgType.INT, ArgType.INT):\n return\n try:\n self.client.area.change_hp(args[0], args[1])\n info = 'changed penalty bar {} to {}.'.format(args[0], args[1])\n self.client.area.add_to_judgelog(self.client, info)\n logger.log_server('[{}]{} changed HP ({}) to {}'\n .format(self.client.area.id, self.client.get_char_name(),\n args[0], args[1]), self.client)\n except AreaError:\n return\n self.client.last_active = Constants.get_time()\n\n def net_cmd_pe(self, args):\n \"\"\" Adds a piece of evidence.\n\n PE####%\n\n \"\"\"\n if len(args) < 3:\n return\n# evi = Evidence(args[0], args[1], args[2], self.client.pos)\n self.client.area.evi_list.add_evidence(self.client, args[0], args[1], args[2], 'all')\n self.client.area.broadcast_evidence_list()\n self.client.last_active = Constants.get_time()\n\n def net_cmd_de(self, args):\n \"\"\" Deletes a piece of evidence.\n\n DE##%\n\n \"\"\"\n\n 
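# map the client-side evidence index onto the area's evidence list before deleting\n 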
self.client.area.evi_list.del_evidence(self.client, self.client.evi_list[int(args[0])])\n self.client.area.broadcast_evidence_list()\n self.client.last_active = Constants.get_time()\n\n def net_cmd_ee(self, args):\n \"\"\" Edits a piece of evidence.\n\n EE#####%\n\n \"\"\"\n\n if len(args) < 4:\n return\n\n evi = (args[1], args[2], args[3], 'all')\n\n self.client.area.evi_list.edit_evidence(self.client, self.client.evi_list[int(args[0])], evi)\n self.client.area.broadcast_evidence_list()\n self.client.last_active = Constants.get_time()\n\n def net_cmd_zz(self, _):\n \"\"\" Sent on mod call.\n\n \"\"\"\n if self.client.is_muted: # Checks to see if the client has been muted by a mod\n self.client.send_ooc('You have been muted by a moderator.')\n return\n\n if not self.client.can_call_mod():\n self.client.send_ooc('You must wait 30 seconds between mod calls.')\n return\n\n current_time = strftime(\"%H:%M\", localtime())\n message = ('[{}] {} ({}) called for a moderator in {} ({}).'\n .format(current_time, self.client.get_char_name(), self.client.get_ip(),\n self.client.area.name, self.client.area.id))\n\n self.server.send_all_cmd_pred('ZZ', message, pred=lambda c: c.is_mod or c.is_cm)\n self.client.set_mod_call_delay()\n logger.log_server('[{}][{}]{} called a moderator.'\n .format(self.client.get_ip(), self.client.area.id,\n self.client.get_char_name()))\n\n def net_cmd_re(self, args):\n # Unsupported\n raise KeyError('Client using {} {} sent an unsupported RE packet.'\n .format(self.client.version[0], self.client.version[1]))\n\n def net_cmd_pw(self, args):\n # Ignore packet\n # For now, TsuserverDR will not implement a character password system\n # However, so that it stops raising errors for clients, an empty method is implemented\n # Well, not empty, there are these comments which makes it not empty\n # but not code is run.\n pass\n\n def net_cmd_opKICK(self, args):\n self.net_cmd_ct(['opkick', '/kick {}'.format(args[0])])\n\n def net_cmd_opBAN(self, args):\n self.net_cmd_ct(['opban', '/ban {}'.format(args[0])])\n\n net_cmd_dispatcher = {\n 'HI': net_cmd_hi, # handshake\n 'ID': net_cmd_id, # client version\n 'CH': net_cmd_ch, # keepalive\n 'askchaa': net_cmd_askchaa, # ask for list lengths\n 'askchar2': net_cmd_askchar2, # ask for list of characters\n 'AN': net_cmd_an, # character list\n 'AE': net_cmd_ae, # evidence list\n 'AM': net_cmd_am, # music list\n 'RC': net_cmd_rc, # AO2 character list\n 'RM': net_cmd_rm, # AO2 music list\n 'RD': net_cmd_rd, # AO2 done request, charscheck etc.\n 'CC': net_cmd_cc, # select character\n 'MS': net_cmd_ms, # IC message\n 'CT': net_cmd_ct, # OOC message\n 'MC': net_cmd_mc, # play song\n 'RT': net_cmd_rt, # WT/CE buttons\n 'HP': net_cmd_hp, # penalties\n 'PE': net_cmd_pe, # add evidence\n 'DE': net_cmd_de, # delete evidence\n 'EE': net_cmd_ee, # edit evidence\n 'ZZ': net_cmd_zz, # call mod button\n 'RE': net_cmd_re, # ??? 
(Unsupported)\n 'PW': net_cmd_pw, # character password (only on CC/KFO clients)\n 'opKICK': net_cmd_opKICK, # /kick with guard on\n 'opBAN': net_cmd_opBAN, # /ban with guard on\n }\n","repo_name":"INumnumI/PODER_DIVINO","sub_path":"server/aoprotocol.py","file_name":"aoprotocol.py","file_ext":"py","file_size_in_byte":38455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"13254847409","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport cv2\n\nfrom keras.utils import np_utils\nfrom tensorflow.keras.models import load_model as lm\nfrom video.img_utils import resize_img_list\nfrom tensorflow.keras.applications.vgg19 import preprocess_input\nfrom tensorflow.keras.applications.vgg19 import VGG19\nfrom tensorflow.keras.layers import Dense, InputLayer, Dropout\nfrom tensorflow.keras.models import Sequential\nfrom sklearn.model_selection import train_test_split\n\n\ndef create_model(training_mapping):\n x, y = load_data(training_mapping)\n x = resize_img_list(x, 224, 224)\n\n # Preprocessing input data -> improves performance\n x = preprocess_input(x)\n\n # Randomly split the images into training and validation sets\n x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=0.3, random_state=42)\n\n # Build the model -> uses a pretrained VGG19 as feature extractor\n base_model = VGG19(weights='imagenet', include_top=False, input_shape=(224, 224, 3))\n\n x_train = base_model.predict(x_train)\n x_valid = base_model.predict(x_valid)\n\n x_train = x_train.reshape(560, 7 * 7 * 512) # 560 samples -> 70%\n x_valid = x_valid.reshape(240, 7 * 7 * 512) # 240 samples -> 30%\n\n max_val = x_train.max()\n x_train = x_train / max_val\n x_valid = x_valid / max_val\n model = train_model(x_train, x_valid, y_train, y_valid)\n return model, base_model\n\n\ndef train_model(x_train, x_valid, y_train, y_valid):\n model = Sequential()\n model.add(InputLayer((7 * 7 * 512,))) # input layer\n model.add(Dense(units=500, activation='relu')) # hidden layer\n model.add(Dropout(0.5)) # adding dropout\n model.add(Dense(units=300, activation='relu')) # hidden layer\n model.add(Dropout(0.5)) # adding dropout\n model.add(Dense(units=100, activation='relu')) # hidden layer\n model.add(Dropout(0.5)) # adding dropout\n model.add(Dense(4, activation='softmax')) # output layer\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.fit(x_train, y_train, epochs=100, validation_data=(x_valid, y_valid))\n return model\n\n\ndef load_data(mapping_csv, is_training=True):\n data_location = \"test\"\n if is_training:\n data_location = \"training\"\n # Load data\n data = pd.read_csv(mapping_csv)\n # Image array -> each img is a matrix of (R,G,B) pixels\n x = []\n for img_name in data.Image_ID:\n img = plt.imread('data/frames/slides_frames/'+data_location+'/' + img_name)\n x.append(img)\n x = np.array(x)\n y = np_utils.to_categorical(data.Class)\n return x, y\n\n\ndef save_model(model, url):\n model.save(url)\n\n\ndef load_model(url):\n return lm(url)\n\n\ndef compare_screens(slides_csv):\n data = pd.read_csv(slides_csv)\n changes_logs = []\n \n screen1 = None\n screen2 = None\n img_name1 = None\n img_name2 = None\n for i, c in data.iterrows():\n \n img = c['Image_ID'] \n c = c['Class']\n if c == 1 or c == 2:\n if i != 0:\n img_name2 = img\n screen2 = cv2.imread('data/frames/slides_frames/test/frame'+str(img_name2)+'.jpg')\n diff_img = cv2.subtract(screen1, screen2)\n w, h, c = diff_img.shape\n total_pixel_value_count = w * h * c * 255\n 
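# identical frames subtract to an all-zero image, so this ratio approaches 100 when the slide is unchanged\n 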
percentage_match = (total_pixel_value_count - np.sum(diff_img)) / total_pixel_value_count * 100\n\n if percentage_match < 98.5:\n changes_logs.append([img_name2, percentage_match])\n print(\"Changed ======> \", percentage_match, img_name1, img_name2)\n screen1 = screen2\n img_name1 = img_name2\n else:\n img_name1 = img\n screen1 = cv2.imread('data/frames/slides_frames/test/frame'+str(img_name1)+'.jpg')\n return changes_logs\n\n\n\ndef are_screens_similar2(screen1, screen2):\n comparations = intersection(screen1, screen2) # Intersection of pixels between the one being processed and the one in dictionary\n return comparations > 1000\n\n\ndef are_screens_similar(lst1, lst2): \n \"\"\"\n Counts pixels whose colors in lst1 are close to the corresponding pixels in lst2.\n \"\"\"\n count = 0\n for i in range(0, len(lst1)):\n for j in range(0, len(lst1[0])):\n if are_near_pixel_colors(lst1[i][j], lst2[i][j]):\n count += 1\n if count > 400000:\n return True\n return False\n\n\ndef are_near_pixel_colors(color_a, color_b):\n return are_near_single_colors(color_a[0], color_b[0]) and are_near_single_colors(color_a[1], color_b[1]) and are_near_single_colors(color_a[2], color_b[2])\n\n\ndef are_near_single_colors(color_a, color_b):\n return color_b - 2 <= color_a <= color_b + 2","repo_name":"antonioalfa22/EFE-Call-ML","sub_path":"model/slides_model.py","file_name":"slides_model.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"3584501908","text":"from django.contrib import admin\nfrom django.db import models\n\nfrom .constants import CLIENT, STAFF, USER_TYPES\nfrom .models import User, Award, Profile\n# Register your models here.\n\n@admin.register(User)\nclass UserAdmin(admin.ModelAdmin):\n ordering = ['-date_joined']\n list_display = ('id', 'email', 'user_type', 'is_active', 'is_staff', 'is_superuser',)\n search_fields = ('email',)\n\n@admin.register(Award)\nclass AwardAdmin(admin.ModelAdmin):\n list_display = ('id', 'name', 'type', 'year',)\n\n@admin.register(Profile)\nclass ProfileAdmin(admin.ModelAdmin):\n list_display = ('id', 'user', 'first_name_in_kanji', 'first_name_in_hiragana', 'first_name_in_english',)","repo_name":"rajkumarchaudhary26/Test-Auth-Using-Djoser","sub_path":"auth/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"23588205461","text":"\nimport os\nimport sys\nimport glob\nimport subprocess\nimport random\nimport fileinput\nfrom collections import defaultdict\n\n\nnext_line = 0\nlines = [line.strip() for line in fileinput.input()]\ndef get_line():\n global next_line\n i = next_line\n next_line += 1\n return lines[i]\n\n\ndef calc():\n parts = get_line().split()\n N = int(parts[0])\n K = int(parts[1])\n\n U = float(get_line())\n parts = get_line().split()\n ps = [float(i) for i in parts]\n ps = sorted(ps)\n\n ans = 0\n for i in range(N):\n a = (U + sum(ps[0:i+1])) / (i+1)\n if ps[i] > a:\n continue\n \n t = 1\n for j in range(N):\n if j <= i:\n t *= a\n else:\n t *= ps[j]\n if t > ans:\n ans = t\n\n return ans\n\nT = int(get_line())\nfor i in range(1, T + 1):\n print('Case #%d: %s' % (i, 
calc()))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_211/176.py","file_name":"176.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29820582949","text":"from DataBuilder.ChatParsing import ChatHistory\n\n\ndef nicknames_for_person(chat_history: ChatHistory, participant: str) -> list:\n \"\"\"\n Fetches all of the nicknames for a given person name from the chat history\n\n :param chat_history: The chat history for the Messenger Chat\n :param participant: The participant string\n :return: A list of the nicknames\n \"\"\"\n if participant in chat_history.ChatParticipants: # Makes sure the participant exists\n return chat_history.ChatParticipants[participant].Nicknames\n\n\n\ndef nicknames_by_participant(chat_history: ChatHistory) -> dict:\n \"\"\"\n Fetches all of the nicknames for all of the participants in the chat and returns it as a dict\n :param chat_history: the chat history\n \"\"\"\n res = {}\n\n for p in chat_history.ChatParticipants:\n nicks = chat_history.ChatParticipants[p].Nicknames\n for n in nicks:\n if p not in res:\n res[p] = []\n res[p].append(nicks[n].Nickname)\n return res","repo_name":"ben-dow/MessengerAnalysis","sub_path":"Facebook Messenger Analysis/DataAnalysis/NicknameAnalysis/NicknameLists.py","file_name":"NicknameLists.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73998899714","text":"#!/usr/bin/python3\ndef weight_average(my_list=[]):\n if not my_list:\n return 0\n top = 0\n btm = 0\n for tup in my_list:\n top += tup[0] * tup[1]\n btm += tup[1]\n return (top / btm)\n\n","repo_name":"Dev-sande/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/100-weight_average.py","file_name":"100-weight_average.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18603261556","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 24 18:22:22 2023\n\n@author: AmayaGS\n\"\"\"\n\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GATv2Conv\nfrom torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp\nfrom torch_geometric.nn import SAGPooling\n\n\nclass GAT_SAGPool(torch.nn.Module):\n\n \"\"\"Graph Attention Network for full slide graph\"\"\"\n\n def __init__(self, dim_in, heads=2, pooling_ratio=0.7):\n\n super().__init__()\n\n self.pooling_ratio = pooling_ratio\n self.heads = heads\n\n self.gat1 = GATv2Conv(dim_in, 512, heads=self.heads, concat=False)\n self.gat2 = GATv2Conv(512, 512, heads=self.heads, concat=False)\n self.gat3 = GATv2Conv(512, 512, heads=self.heads, concat=False)\n self.gat4 = GATv2Conv(512, 512, heads=self.heads, concat=False)\n\n self.topk1 = SAGPooling(512, pooling_ratio)\n self.topk2 = SAGPooling(512, pooling_ratio)\n self.topk3 = SAGPooling(512, pooling_ratio)\n self.topk4 = SAGPooling(512, pooling_ratio)\n\n self.lin1 = torch.nn.Linear(512 * 2, 512)\n self.lin2 = torch.nn.Linear(512, 512 // 2)\n self.lin3 = torch.nn.Linear(512 // 2, 2)\n\n\n def forward(self, data):\n\n x, edge_index, batch = data.x, data.edge_index, data.batch\n\n x = self.gat1(x, edge_index)\n x = F.relu(x)\n #x = F.dropout(x, p=0.1, training=self.training)\n x, edge_index, _, batch, _, _= self.topk1(x, edge_index, None, batch)\n x1 = torch.cat([gmp(x, batch), gap(x, 
batch)], dim=1)\n\n x = self.gat2(x, edge_index)\n x = F.relu(x)\n #x = F.dropout(x, p=0.1, training=self.training)\n x, edge_index, _, batch, _, _= self.topk2(x, edge_index, None, batch)\n x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)\n\n x = self.gat3(x, edge_index)\n x = F.relu(x)\n #x = F.dropout(x, p=0.1, training=self.training)\n x, edge_index, _, batch, _, _= self.topk3(x, edge_index, None, batch)\n x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)\n\n x = self.gat4(x, edge_index)\n x = F.relu(x)\n #x = F.dropout(x, p=0.1, training=self.training)\n x, edge_index, _, batch, _, _= self.topk4(x, edge_index, None, batch)\n x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)\n\n x = x1 + x2 + x3 + x4\n\n x = self.lin1(x)\n x = F.relu(x)\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.lin2(x)\n x = F.relu(x)\n x_logits = self.lin3(x)\n x_out = F.softmax(x_logits, dim=1)\n\n return x_logits, x_out","repo_name":"AmayaGS/MUSTANG","sub_path":"Graph_model.py","file_name":"Graph_model.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"23561942991","text":"def tidy(n):\n num = str(n)\n for i in range(1, len(num)):\n if num[i] < num[i-1]:\n return False\n return True\n\ndef min_1(k):\n if k == '1':\n return ''\n elif k[-1] > '0':\n return k[:-1]+(chr(ord(k[-1])-1))\n else:\n return min_1(k[:-1])+'9'\n\ndef solution(k):\n ret = ''\n while(k):\n if tidy(k):\n ret = k + ret\n break\n k = min_1(k[:-1])\n ret = '9' + ret\n return ret\n\nK = int(input())\nfor i in range(K):\n k = input()\n print('Case #{0}: {1}'.format(i+1, solution(k)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/456.py","file_name":"456.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21347317045","text":"#!/bin/python3\n\n\"\"\"\nQuestion\nhttps://www.hackerrank.com/challenges/max-array-sum/problem?h_l=interview&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=dynamic-programming&isFullScreen=true\n\"\"\"\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the maxSubsetSum function below.\n\ndef maxSubsetSum(arr):\n if len(arr) == 1:\n return arr[0]\n data = [None]*len(arr)\n data[0] = arr[0]\n data[1] = max(arr[0],arr[1])\n for i in range(2,len(arr)):\n data[i] = max(data[i-1],data[i-2],data[i-2]+arr[i],arr[i])\n return data[-1]\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n res = maxSubsetSum(arr)\n\n fptr.write(str(res) + '\\n')\n\n fptr.close()\n","repo_name":"dranzerashi/HackerRankSols","sub_path":"dp/subsetsum.py","file_name":"subsetsum.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11523625409","text":"from .config import Config\n\nclass ArcfaceConfig(Config):\n\n def __init__(self):\n self.config = {\n 'method': 'arcface',\n 'metric': 'arc_margin',\n 'easy_margin': False,\n 'loss': 'focal_loss',\n\n 'lr': 1e-1,\n 'lr_step': 10,\n 'lr_decay': 0.95,\n 'weight_decay': 5e-4\n 
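# Note (added, assumed usage): a training loop is expected to decay 'lr' by\n        # 'lr_decay' every 'lr_step' epochs and pass 'weight_decay' to the optimizer;\n        # the values mirror common ArcFace recipes.\n        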
}\n","repo_name":"j-alex-hanson/rethinking-race-face-datasets","sub_path":"configs/arcface.py","file_name":"arcface.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"38837756153","text":"## @ingroup Methods-Power-Fuel_Cell-Sizing\n# initialize_larminie_from_power.py\n#\n# Created : Apr 2015, M. Vegh \n# Modified: Sep 2015, M. Vegh\n# Feb 2016, E. Botero\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\nimport scipy as sp\nimport numpy as np\nfrom SUAVE.Core import Units\nfrom SUAVE.Methods.Power.Fuel_Cell.Discharge.find_power_larminie import find_power_larminie\n\n# ----------------------------------------------------------------------\n# Initialize Larminie from Power\n# ----------------------------------------------------------------------\n\n## @ingroup Methods-Power-Fuel_Cell-Sizing\ndef initialize_larminie_from_power(fuel_cell,power): \n '''\n Initializes extra paramters for the fuel cell when using the larminie method\n Determines the number of stacks\n \n Inputs:\n power [W]\n fuel_cell\n \n Outputs:\n \n fuel_cell.\n power_per_cell [W]\n number_of_cells\n max_power [W]\n volume [m**3]\n specific_power [W/kg]\n mass_properties.\n mass [kg]\n \n \n '''\n \n \n \n fc = fuel_cell\n lb = .1*Units.mA/(Units.cm**2.) #lower bound on fuel cell current density\n ub = 1200.0*Units.mA/(Units.cm**2.)\n sign = -1. #used to minimize -power\n current_density = sp.optimize.fminbound(find_power_larminie, lb, ub, args=(fc, sign))\n power_per_cell = find_power_larminie(current_density,fc)\n \n fc.number_of_cells = np.ceil(power/power_per_cell)\n fc.max_power = fc.number_of_cells*power_per_cell\n fc.volume = fc.number_of_cells*fc.interface_area*fc.wall_thickness\n fc.mass_properties.mass = fc.volume*fc.cell_density*fc.porosity_coefficient #fuel cell mass in kg\n fc.mass_density = fc.mass_properties.mass/ fc.volume \n fc.specific_power = fc.max_power/fc.mass_properties.mass #fuel cell specific power in W/kg ","repo_name":"suavecode/SUAVE","sub_path":"trunk/SUAVE/Methods/Power/Fuel_Cell/Sizing/initialize_larminie_from_power.py","file_name":"initialize_larminie_from_power.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"5416082625","text":"# server\n# All information will be received here and calculations will take place\n# Then the data is sent back to the client\n\n\nfrom threading import Thread\nimport subprocess\nimport socket\nimport os\n\n\nIP:str = socket.gethostbyname(socket.gethostname())\nPORT:int = 12345\n\n\nSOCKET_SPEED:int = 4096\nCOUNT_CONNECT:int = 0\n# if you use PYPY, then True\nUSING_PYPY:bool = True\n\n\n\n\nclass NewConnect(Thread):\n def __init__(self, client_socket, count_connect:int):\n Thread.__init__(self)\n self.client_socket = client_socket\n self.count_connect:int = count_connect\n\n # make a record with the received data\n def write_task(self, number:int, data:bytes) -> None:\n with open(f\"new{number}.txt\", 'wb') as file:\n file.write(data)\n \n\n # performs the task and returns data in byte format\n def doind_task(self, file_name:str, isPypy:bool=False, otherInter:str=None) -> bytes:\n try:\n if isPypy:\n output = subprocess.check_output(['pypy', file_name]).decode()\n elif otherInter is not None:\n output = 
subprocess.check_output([otherInter.lower(), file_name]).decode()\n else:\n output = subprocess.check_output(['python', file_name]).decode()\n print(\"OUT:\", output.encode()[:-2], \"\\n\")\n return output.encode()[:-2]\n except:\n return \"An error has occurred: check the correctness of the data types and compliance with the rules of writing code.\".encode()\n\n\n def run(self) -> None:\n while True:\n # we accept data from the client\n data = self.client_socket.recv(SOCKET_SPEED)\n\n file_data = data\n\n if file_data:\n break\n \n self.write_task(self.count_connect, file_data)\n res = self.doind_task(f\"new{self.count_connect}.txt\", isPypy=USING_PYPY)\n self.client_socket.sendall(res)\n os.remove(os.path.abspath(f\"new{self.count_connect}.txt\"))\n self.client_socket.close()\n\n\n\nclass Server():\n def __init__(self, IP:str, PORT:int):\n self.IP = IP\n self.PORT = PORT\n\n\n # calling for the starting server\n def run_server(self) -> None:\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind((self.IP, self.PORT))\n server_socket.listen()\n\n\n print(\"<\" + \"--\"*10 + \">\")\n print(f\"SERVER IS RUN...\\nIP: {self.IP}\\nPORT: {self.PORT}\\n\")\n \n while True:\n # we accept connections\n client, adress = server_socket.accept()\n \n global COUNT_CONNECT\n\n COUNT_CONNECT += 1\n\n print(*adress)\n \n # starting a new thread\n new_connect = NewConnect(client_socket=client, count_connect=COUNT_CONNECT)\n new_connect.start()\n \n \nif __name__==\"__main__\":\n server = Server(IP, PORT)\n server.run_server()\n \n","repo_name":"voronov-nikita/individual-project-2023-2024","sub_path":"scr/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23652587795","text":"import json\nimport subprocess\nimport sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport pyperclip\nfrom pynput import keyboard, mouse\n\nmouse_controller = mouse.Controller()\n\nwith open(\"assets/emojis.json\") as f:\n emojis = json.load(f)\n\ncategories = {\n \"Gestures\": \"1\",\n \"Emojis\": \"2\",\n \"Objects\": \"3\",\n \"Food\": \"4\",\n \"Places\": \"5\",\n \"Love\": \"6\",\n \"Flags\": \"7\",\n \"Other\": \"8\",\n \"New\": \"9\"\n}\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self):\n subprocess.Popen([\"notify-send\", \"Mint Emojis now running in the background.\\n\\nCTRL+Shift+E to open the menu.\"])\n super().__init__(None, QtCore.Qt.WindowFlags(QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.FramelessWindowHint | QtCore.Qt.Tool))\n self.setWindowState(QtCore.Qt.WindowMinimized)\n self.show()\n self.setWindowState(QtCore.Qt.WindowNoState)\n self.setGeometry(0, 0, 500, 100)\n self.setWindowTitle(\"Mint Emojis\")\n self.setStyleSheet(\"background-color: darkred;\")\n self.setWindowIcon(QtGui.QIcon(\"assets/icon.png\"))\n\n self.central_widget = QtWidgets.QWidget()\n self.setCentralWidget(self.central_widget)\n self.layout = QtWidgets.QVBoxLayout(self.central_widget)\n\n self.notebook = QtWidgets.QTabWidget()\n self.layout.addWidget(self.notebook)\n\n for category, key in categories.items():\n frame = QtWidgets.QWidget()\n self.notebook.addTab(frame, category)\n\n layout = QtWidgets.QVBoxLayout(frame)\n scroll_area = QtWidgets.QScrollArea()\n layout.addWidget(scroll_area)\n\n scrollable_frame = QtWidgets.QWidget()\n scroll_area.setWidget(scrollable_frame)\n scroll_area.setWidgetResizable(True)\n\n inner_layout = 
QtWidgets.QHBoxLayout(scrollable_frame)\n\n            selected_emojis = emojis[category]\n            for emoji in selected_emojis:\n                button = QtWidgets.QPushButton(emoji)\n                button.setFont(QtGui.QFont(\"Noto Color Emoji\", 24))\n                button.setFixedWidth(50)\n                button.setFixedHeight(50)\n                button.clicked.connect(lambda _, e=emoji: self.copy_to_clipboard(e))\n                inner_layout.addWidget(button)\n\n            scrollbar = scroll_area.horizontalScrollBar()\n            scrollbar.setMinimumHeight(scrollbar.sizeHint().height())\n            scrollbar.setSingleStep(20)\n\n            scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n\n            frame.setFixedWidth(500)\n            scroll_area.setFixedHeight(80)\n\n        shortcut = keyboard.GlobalHotKeys({'<ctrl>+<shift>+e': self.toggle_visibility})\n        shortcut.start()\n\n    def copy_to_clipboard(self, emoji):\n        pyperclip.copy(emoji)\n        subprocess.Popen([\"notify-send\", \"Emoji Copied to Clipboard\"])\n\n    def toggle_visibility(self):\n        if self.isVisible():\n            self.hide()\n        else:\n            x, y = mouse_controller.position\n            self.move(x, y)\n            self.show()\n\n    def mousePressEvent(self, event):\n        if event.button() == QtCore.Qt.LeftButton:\n            self.drag_position = event.globalPos() - self.frameGeometry().topLeft()\n            event.accept()\n\n    def mouseMoveEvent(self, event):\n        if event.buttons() == QtCore.Qt.LeftButton:\n            self.move(event.globalPos() - self.drag_position)\n            event.accept()\n\n    def mouseReleaseEvent(self, event):\n        if event.button() == QtCore.Qt.LeftButton:\n            self.drag_position = None\n\nif __name__ == \"__main__\":\n    app = QtWidgets.QApplication(sys.argv)\n    main_window = MainWindow()\n    main_window.show()\n    sys.exit(app.exec_())","repo_name":"Svxy/mint-emojis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"27000949283","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn.decomposition import PCA\r\n\r\n\r\ndef plot_losses(losses):\r\n    plt.plot(losses)\r\n    plt.legend(['Training', 'Validation'])\r\n    plt.xlabel('Epoch')\r\n    plt.ylabel('Loss')\r\n    plt.xticks(np.arange(0, len(losses), 1))\r\n    plt.show()\r\n\r\n\r\ndef plot_embeddings(embeddings, names, word_list=None, max_num_words=20):\r\n    if word_list is not None:\r\n        word_list = set(word_list)\r\n        inds = [i for i in range(len(names)) if names[i] in word_list]\r\n        embeddings = embeddings[inds]\r\n        names = np.array(names)[inds]\r\n    else:\r\n        embeddings = embeddings[:max_num_words]\r\n        names = names[:max_num_words]\r\n    embeddings = PCA(2).fit_transform(embeddings.cpu())\r\n    plt.scatter(embeddings[:, 0], embeddings[:, 1])\r\n    for i in range(len(embeddings)):\r\n        plt.annotate(names[i], embeddings[i])\r\n    plt.show()\r\n\r\n\r\ndef print_accuracies(accuracies):\r\n    print(f'training accuracy: {accuracies[0]}')\r\n    print(f'validation accuracy: {accuracies[1]}')\r\n    print(f'testing accuracy: {accuracies[2]}')","repo_name":"alraune-esk/IMDB-classification","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27836283054","text":"\nfrom ._db import DBClient\nfrom .models import FFLProtocolsData, FFLNotificationsData\n\n\nclass FortyFourthLawDB(DBClient):\n    \"\"\"Class for working with DB for 44th law\n    \"\"\"\n\n    def insert_protocol_data(self, file_id: int, data: dict, session=None):\n        sess = session if session is not None else self._session()\n        file_data = 
FFLProtocolsData(archive_file_id=file_id, data=data)\n sess.add(file_data)\n\n if session is None:\n sess.commit()\n sess.close()\n\n def insert_notification_data(self, file_id: int, data: dict, session=None):\n sess = session if session is not None else self._session()\n file_data = FFLNotificationsData(archive_file_id=file_id, data=data)\n sess.add(file_data)\n\n if session is None:\n sess.commit()\n sess.close()\n\n def delete_file_data(self, file_id: int):\n \"\"\"Delete all rows related with file_id from all forty_fourth_law.* tables\n\n Args:\n file_id (int): ID of XML file.\n \"\"\"\n\n sess = self._session()\n for table in (FFLProtocolsData, FFLNotificationsData):\n sess.query(table).filter(table.archive_file_id == file_id).delete()\n sess.commit()\n sess.close()\n","repo_name":"ruzhnikov/gov-purchases-crawler","sub_path":"gov/db/_ffl.py","file_name":"_ffl.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36576811223","text":"import bpy\n\n\nclass SOURCEOPS_OT_weighted_normal(bpy.types.Operator):\n bl_idname = 'sourceops.weighted_normal'\n bl_label = 'Add Weighted Normal Modifier'\n bl_description = 'Add a weighted normal modifier to the selected objects and remove existing weighted normal modifiers'\n bl_options = {'REGISTER', 'UNDO'}\n\n mode: bpy.props.EnumProperty(\n name='Weighting Mode',\n description='Weighted vertex normal mode to use',\n items=[\n ('FACE_AREA', 'Face Area', 'Generate face area weighted normals'),\n ('CORNER_ANGLE', 'Corner Angle', 'Generate corner angle weighted normals'),\n ('FACE_AREA_WITH_ANGLE', 'Face Area And Angle', 'Generated normals weighted by both face area and angle'),\n ],\n default='FACE_AREA',\n )\n\n weight: bpy.props.IntProperty(\n name='Weight',\n description='Corrective factor applied to faces’ weights, 50 is neutral, lower values increase weight of weak faces, higher values increase weight of strong faces',\n min=1,\n max=100,\n default=50,\n )\n\n thresh: bpy.props.FloatProperty(\n name='Threshold',\n description='Threshold value for different weights to be considered equal',\n min=0,\n max=10,\n default=0.01,\n )\n\n keep_sharp: bpy.props.BoolProperty(\n name='Keep Sharp',\n description='Keep sharp edges as computed for default split normals, instead of setting a single weighted normal for each vertex',\n default=True,\n )\n\n use_face_influence: bpy.props.BoolProperty(\n name='Face Influence',\n description='Use influence of face for weighting',\n default=False,\n )\n\n @classmethod\n def poll(cls, context):\n return context.selected_objects\n\n def execute(self, context):\n bpy.ops.object.shade_smooth()\n\n for obj in context.selected_objects:\n if obj.type == 'MESH':\n obj.data.use_auto_smooth = True\n\n for mod in obj.modifiers[:]:\n if mod.type == 'WEIGHTED_NORMAL':\n obj.modifiers.remove(mod)\n\n mod = obj.modifiers.new('WeightedNormal', 'WEIGHTED_NORMAL')\n mod.mode = self.mode\n mod.weight = self.weight\n mod.thresh = self.thresh\n mod.keep_sharp = self.keep_sharp\n mod.use_face_influence = self.use_face_influence\n\n return {'FINISHED'}\n","repo_name":"bonjorno7/SourceOps","sub_path":"addon/ops/weighted_normal.py","file_name":"weighted_normal.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"61"} +{"seq_id":"33318737087","text":"import json\nimport os\nfrom typing import Dict, TYPE_CHECKING, Union\n\nimport click\nfrom click import 
style, echo\n\nfrom riptide.config.document.command import Command\nfrom riptide.config.document.service import Service\nfrom riptide.config.files import get_project_meta_folder\nfrom riptide.plugin.abstract import AbstractPlugin\nfrom riptide.util import SystemFlag\nfrom riptide_cli.helpers import cli_section, async_command, warn, RiptideCliError\n\nif TYPE_CHECKING:\n from riptide.config.document.config import Config\n from riptide.config.document.project import Project\n from riptide.engine.abstract import AbstractEngine\n\nCMD_XDEBUG = 'xdebug'\n\nENABLED_FLAG_NAME = 'enabled'\nXDEBUG3_FLAG_NAME = 'xdebug3'\nMODE_FLAG_NAME = 'mode'\nREQUEST_TRIGGER_FLAG_NAME = 'request_trigger'\nPARAMETERS_FLAG_NAME = 'parameters'\n\nXDEBUG_PLUGIN_STATE_FILE = '.xdebug.json'\nVERSION_ENV = 'RIPTIDE_XDEBUG_VERSION'\nVERSION_LABEL = 'php_xdebug_version'\nVERSION_VALID = ('2', '3')\nWARNING_URL = 'https://github.com/theCapypara/riptide-plugin-php-xdebug#xdebug-version'\n\n\nclass PhpXdebugPlugin(AbstractPlugin):\n def __init__(self):\n self.engine: AbstractEngine = None\n self._cached_xdebug_version = None\n\n def after_load_engine(self, engine: 'AbstractEngine'):\n self.engine = engine\n\n def after_load_cli(self, main_cli_object):\n from riptide_cli.command import interrupt_handler\n\n @cli_section(\"PHP\")\n @main_cli_object.command(CMD_XDEBUG)\n @click.pass_context\n @click.argument('state', required=False)\n @click.option('--request/--no-request', '-r/-R', default=None,\n help='Toggles whether or not Xdebug should automatically activate or only using a trigger '\n '(--request sets `xdebug.start_with_request` to \"yes\", --no-request sets it to \"trigger\"). '\n 'Xdebug 3 only.')\n @click.option('--mode', '-m',\n help='Sets the `xdebug.mode` setting (Xdebug 3 only; otherwise ignored)')\n @click.option('--config', '-c',\n help=\"Can set additional configuration values (example: 'log=/tmp/xdebug.log,log_level=10')\")\n @async_command(interrupt_handler=interrupt_handler)\n async def xdebug(ctx, request, mode=None, config=None, state=None):\n \"\"\"\n Control Xdebug for this project.\n\n If STATE is not set:\n Output whether Xdebug is currently enabled for this project.\n\n If STATE is set and either 'on' or 'off':\n Enable / Disable Xdebug for the current project.\n\n Please note that these flags only control flags that need to be used in php.ini configuration files. 
The\n default PHP service from the Riptide repository is correctly configured to use this.\n\n \"\"\"\n from riptide_cli.loader import cmd_constraint_project_loaded, load_riptide_core\n from riptide_cli.lifecycle import start_project, stop_project\n load_riptide_core(ctx)\n cmd_constraint_project_loaded(ctx)\n version = self.get_xdebug_version(ctx.system_config) # Mainly do this, to show the warning if needed.\n\n if mode is not None:\n self._update_flag(ctx.system_config[\"project\"], mode, MODE_FLAG_NAME)\n\n if request is not None:\n self._update_flag(ctx.system_config[\"project\"], request, REQUEST_TRIGGER_FLAG_NAME)\n\n if config is not None:\n config_dict = {}\n for entry in config.split(','):\n if entry != '':\n try:\n key, value = entry.split('=', 1)\n except ValueError:\n raise RiptideCliError(\"Invalid value for --config.\", ctx)\n config_dict[key] = value\n self._update_flag(ctx.system_config[\"project\"], config_dict, PARAMETERS_FLAG_NAME)\n\n if state is not None:\n new_flag = True\n if state == 'off':\n new_flag = False\n self._update_flag(ctx.system_config[\"project\"], new_flag)\n # If there are services with the role 'php', run a quick restart on them.\n php_services = [\n s['$name']\n for s\n in ctx.system_config[\"project\"][\"app\"].get_services_by_role('php')\n if self.engine.service_status(ctx.system_config[\"project\"], s['$name'])\n ]\n if len(php_services) > 0 and self.engine:\n # Reload the configuration before restarting, to make sure that the\n # Opcache directory settings and maybe other settings affected by the flag are also properly set\n ctx.loaded = False\n load_riptide_core(ctx)\n await stop_project(ctx, php_services, show_status=False)\n await start_project(ctx, php_services, show_status=False, quick=True)\n\n state = self.get_state(ctx.system_config['project'])\n state_str = style('Disabled', fg='red')\n project_name = ctx.system_config[\"project\"][\"name\"]\n config_str = \"\"\n for key, value in state[PARAMETERS_FLAG_NAME].items():\n config_str += f\"{key}={value}\"\n if state[ENABLED_FLAG_NAME]:\n state_str = style('Enabled', fg='green')\n if state[REQUEST_TRIGGER_FLAG_NAME]:\n trigger_str = 'yes'\n trigger_str_start_with_request = 'trigger'\n else:\n trigger_str = 'no'\n trigger_str_start_with_request = 'yes'\n echo(f\"Xdebug status for {project_name}: {state_str}\")\n echo(f'Detected Xdebug version: {version}')\n echo(f'Mode: {state[MODE_FLAG_NAME]}')\n echo(f'Extra configuration: {config_str.rstrip(\",\")}')\n echo(f'Request trigger: {trigger_str} (xdebug.start_with_request={trigger_str_start_with_request})')\n\n def after_reload_config(self, config: 'Config'):\n # Nothing to do. 
We work solely with flags.\n pass\n\n def get_flag_value(self, config: 'Config', flag_name: str) -> any:\n if not config.internal_contains(\"project\"):\n return False\n if flag_name == XDEBUG3_FLAG_NAME:\n return self.get_xdebug_version(config) == '3'\n else:\n state = self.get_state(config.internal_get(\"project\"))\n if flag_name in state:\n return state[flag_name]\n return False\n\n def get_state(self, project: 'Project') -> Dict:\n if not os.path.exists(self._get_configuration_path(project)):\n s = {ENABLED_FLAG_NAME: False}\n else:\n with open(self._get_configuration_path(project), 'r') as fp:\n s = json.load(fp)\n # For backwards compatibility we check these extra:\n if MODE_FLAG_NAME not in s:\n s[MODE_FLAG_NAME] = 'debug'\n if REQUEST_TRIGGER_FLAG_NAME not in s:\n s[REQUEST_TRIGGER_FLAG_NAME] = False\n if PARAMETERS_FLAG_NAME not in s:\n s[PARAMETERS_FLAG_NAME] = {}\n return s\n\n def _update_flag(self, project: 'Project', new_flag, flag_name=ENABLED_FLAG_NAME):\n state = self.get_state(project)\n state[flag_name] = new_flag\n with open(self._get_configuration_path(project), 'w') as fp:\n return json.dump(state, fp)\n\n def _get_configuration_path(self, project: 'Project'):\n return os.path.join(get_project_meta_folder(project.folder()), XDEBUG_PLUGIN_STATE_FILE)\n\n def get_xdebug_version(self, config: 'Config'):\n if not self.engine:\n raise ValueError(\"Tried to get the Xdebug version before the engine backend was initialized.\")\n if self._cached_xdebug_version is None:\n version = self._detect_xdebug_version(config)\n if not version:\n if SystemFlag.IS_CLI:\n warn(f\"Could not reliably detect the XDebug version. Please see: {WARNING_URL}\")\n self._cached_xdebug_version = version if version is not None else 2\n return self._cached_xdebug_version\n\n def _detect_xdebug_version(self, config: 'Config'):\n # 1. In env:\n if VERSION_ENV in os.environ and os.environ[VERSION_ENV] in VERSION_VALID:\n return os.environ[VERSION_ENV]\n proj = config.internal_get('project')\n # 2. In label of image of service with role 'php':\n svc = proj['app'].get_service_by_role('php')\n if svc:\n labels = self.engine.get_service_or_command_image_labels(svc)\n if labels is not None and VERSION_LABEL in labels and labels[VERSION_LABEL] in VERSION_VALID:\n return labels[VERSION_LABEL]\n # 3. 
In service/cmd env:\n for obj in list(proj['app']['services'].values()) + list(proj['app']['commands'].values()):\n obj: Union[Service, Command]\n env = obj.collect_environment()\n if VERSION_ENV in env and env[VERSION_ENV] in VERSION_VALID:\n return env[VERSION_ENV]\n return None\n","repo_name":"theCapypara/riptide-plugin-php-xdebug","sub_path":"riptide_plugin_php_xdebug/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":9248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2804670845","text":"import numpy as np\nimport random\n\nprint(\"PUZZLE 1 -----------------------------------------------------\")\nnums = []\nwith open(\"Puzzle10_Input.txt\") as input_file:\n for l in input_file:\n nums.append(int(l))\n\nnums = np.sort(nums)\n# print(nums)\n\njolt_diffs = []\nfor i,n in enumerate(nums):\n if (i+1) < len(nums):\n jolt_diff = nums[i+1]-nums[i]\n jolt_diffs.append(jolt_diff)\n# print(jolt_diffs)\n\nprint(\"The 3 jolt diffs times the 1 jolt diffs are: \", (jolt_diffs.count(3)+1)*(jolt_diffs.count(1)+1)) #1984 is too low\n\nprint(\"PUZZLE 2 -----------------------------------------------------\")\ndef swapPositions(list, pos1, pos2): \n list[pos1], list[pos2] = list[pos2], list[pos1] \n return list\n\n# start = 0\n# end = nums[-1]+3\n# orders = []\n# order = []\n\n# counter = 0\n# while counter < 1e10:\n# counter+=1\n# random.shuffle(nums)\n# counter2 = 0\n# while len(order) < len(nums):\n# counter2+=1\n# for num in nums:\n# if num-start <=3:\n# order.append(num)\n# start = num\n# if counter2 > 1e10:\n# break\n# if len(order) == len(nums):\n# if order[-1] + 3 == end:\n# orders.append(order)\n\n# print(\"Combos: \", len(orders))\norders=0\nfor i, jolt_diff in enumerate(jolt_diffs):\n if jolt_diff == 1:\n orders+=1\n # if jolt_diff ==1 and jolt_diffs[i] ==1:\n # orders+=1\n\nprint(orders**2)\n \n \n\n\n\n\n#Ans = 19026","repo_name":"lauramurgatroyd/Advent-of-Code","sub_path":"2020/dec10.py","file_name":"dec10.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72773516033","text":"import numpy as np\n \ndef get_qp_from_stepsize( stepsize, qp_density ):\n \"\"\"\n Calculates the Quantization Parameter (QP) given a stepsize and a density.\n \n The calculation is based on a logarithmic scale, which is common for quantization processes in video compression.\n \n Args:\n - stepsize (float): The stepsize to be used in the quantization process.\n - qp_density (int): The density of the quantization parameter.\n \n Returns:\n - qp (float): The calculated Quantization Parameter.\n \"\"\"\n \n k = 1 << qp_density # = 2 ** qp_density\n\n # Base QP is calculated by taking the floor of log base 2 of stepsize, multiplied by k\n baseQP = np.floor( np.log2( stepsize ) ) * k\n \n # QP is calculated using the formula given\n qp = baseQP + ( (stepsize * k) / 2**(baseQP/k) - k )\n\n # Returns the calculated QP\n return qp\n\ndef get_stepsize_from_qp( qp, qp_density ):\n \"\"\"\n Calculates the stepsize given a Quantization Parameter (QP) and a density.\n \n This function appears to reverse the operation performed in the 'get_qp_from_stepsize' function.\n \n Args:\n - qp (int/float): The Quantization Parameter to be used.\n - qp_density (int): The density of the quantization parameter.\n \n Returns:\n - delta (float): The calculated stepsize.\n \"\"\"\n \n k = 1 << qp_density # = 2 ** qp_density\n mul = k + (qp & (k-1)) # & : 
bitwise and operator\n shift = qp >> qp_density # = floor(qp / 2 ** qp_density)\n delta = mul * (2.0 ** (shift - qp_density))\n return delta\n\ndef compute_qp_offset_to_dq_equivalent( qp_density ):\n \"\"\"\n Calculates the QP offset for a given QP density.\n \n This offset seems to be used in some sort of adjustment or correction during the quantization process.\n \n Args:\n - qp_density (int): The density of the quantization parameter.\n \n Returns:\n - qp_off (int): The calculated QP offset.\n \"\"\"\n qp_off = (1 << qp_density) # Shift operator to calculate the value of 2 raised to the power of qp_density\n\n return qp_off # Returns the calculated QP offset\n","repo_name":"jihyounchoi/vanilla-nerf-model-compression-using-lsa-enhanced-nncodec","sub_path":"nnc_core/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"33449172368","text":"class Solution:\n def firstMissingPositive(self, nums: List[int]) -> int:\n val = max(nums)\n checker = set(nums)\n \n for i in range(1,val):\n if i not in checker:\n return i\n \n if val < 1:\n return 1\n \n else : return val + 1","repo_name":"yonasengdu/Compitative-programming","sub_path":"0041-first-missing-positive/0041-first-missing-positive.py","file_name":"0041-first-missing-positive.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23188665015","text":"#EntregaTarea serializer\nfrom rest_framework import serializers\nfrom api.models import EntregaTarea\nfrom api.serializers import EstudianteSerializer\n\nclass EntregaTareaSerializer(serializers.ModelSerializer):\n estudiante = EstudianteSerializer()\n class Meta:\n model = EntregaTarea\n fields = (\n 'id',\n 'archivo',\n 'texto',\n 'notaTarea',\n 'estudiante'\n )\n depth = 1\n\nclass EntregaTareaRegistroSerializer(serializers.ModelSerializer):\n class Meta:\n model = EntregaTarea\n fields = (\n 'archivo',\n 'texto',\n )\n\n","repo_name":"raulIxc85/Proyecto-Aula-Virtual","sub_path":"api/serializers/entregaTarea.py","file_name":"entregaTarea.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44437252152","text":"import scipy.io as sio\n# import cv2\nimport numpy as np\nimport numpy.matlib\nfrom FactorGraph import *\nfrom ConstructGM import *\nfrom BaBSolver import *\nfrom sys import argv\n\ndef RunPBP(Fname, IsSparse):\n data = sio.loadmat(Fname);\n\n Edges1 = data['Edge1'];\n Edges2 = data['Edge2'];\n KP = data['KP']\n KQ = data['KQ']\n\n #print(KP)\n \n G = ConstructG(Edges1, Edges2, KP, KQ, IsSparse);\n G.SetVerbose(False)\n # G.Solve(5)\n # GStore = G.StoreDual();\n res = BaBSolver(G, 600, 5, 0.005, False);\n res1 = []\n res1.append(res.Decode)\n res1.append(res.Value)\n res1.append(res.Time)\n del G\n return res1\nif __name__ == '__main__':\n res = RunPBP(True)\n print(res)\n","repo_name":"zzhang1987/HyperGraphMatchingBP","sub_path":"MatlabWrapper.py","file_name":"MatlabWrapper.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"12512649635","text":"from punto import Punto\nfrom triangulo import Triangulo\n\npuntoA = Punto(3, 9)\n# puntoB = Punto(1, 9, 0)\npuntoB = Punto(3, 9)\n\n# Crear dos triángulos con cuatro puntos\npuntoA = Punto(0.0, 0.0)\npuntoB = 
Punto(6, 0.2)\npuntoC = Punto(3.5, 6.5)\npuntoD = Punto(8.1, 3.6)\n\ntrianguloA = Triangulo(puntoA, puntoB, puntoC)\ntrianguloB = Triangulo(puntoB, puntoD, puntoC)\n\n# Imprimir las coordenadas y área de cada triángulo\nfor i in [trianguloA, trianguloB]:\n i.imprimir_ptos()\n print(\"Area del triángulo: \", i.area())\n\nprint(\"Modificar las coordenadas de puntoB -> se modifican los triángulos porque referencian el mismo objeto\")\nprint(puntoB.modif_coord(5, -0.1))\n\nfor i in [trianguloA, trianguloB]:\n i.imprimir_ptos()\n print(\"Area del triángulo: \", i.area())\n\nprint(\"Modificar las coordenadas de puntoC pero en el triángulo trianguloA\")\nprint(trianguloB.modificar_coord_pto(2, 2.5, 8.3))\n\nfor i in [trianguloA, trianguloB]:\n i.imprimir_ptos()\n print(\"Area del triángulo: \", i.area())\n","repo_name":"dfbarrero/pythonCourse","sub_path":"7-oop/scripts_oop/test_clase_triangulo.py","file_name":"test_clase_triangulo.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23640036381","text":"import sys\n\nif_name, of_name = sys.argv[1:3]\n\ndef surp_trip (a):\n a1,a2,a3 = a\n if abs(a1 - a2) == 2 or abs(a2 - a3) == 2 or abs(a3 - a1) == 2:\n return True\n return False\n\ndef valid (a):\n a1,a2,a3 = a\n if abs(a1 - a2) <= 2 and abs(a2 - a3) <= 2 and abs(a3 - a1) <= 2:\n return True\n return False\n\nt_s = {}\nt_ns = {}\nn_s = {}\nn_ns = {}\nfor x1 in range(0,11):\n for x2 in range(0,11):\n for x3 in range(0,11):\n s = x1+x2+x3\n t = (x1,x2,x3)\n if not valid(t): continue\n if surp_trip (t): \n if t_s.has_key(s):\n t_s[s] += [t]\n else:\n t_s[s] = [t]\n else:\n if t_ns.has_key(s):\n t_ns[s] += [t]\n else:\n t_ns[s] = [t]\n\nifile = open(if_name,'r')\nofile = open(of_name,'w')\n\ndef greater_p (tl,np):\n if tl == []: return False\n if max([max(t) for t in tl]) >= np:\n return True\n return False\n\n\n\nfor i,ln in enumerate(ifile):\n if i == 0: continue\n ln_s = ln.strip().split()\n ng,ns,p = int(ln_s[0]),int(ln_s[1]),int(ln_s[2])\n tarr = map(int,ln_s[3:])\n sel_ns,sel_s = [],[]\n mval_ns, mval_s =[],[]\n for ti in tarr:\n if greater_p(t_ns[ti],p): \n mval_ns += [1]\n else: mval_ns += [0]\n if t_s.has_key(ti) and greater_p(t_s[ti],p): mval_s += [1]\n else: mval_s += [0]\n ofile.write(\"Case #%d: \" % i)\n cnt2 = mval_s.count(1)\n res = min(ns,cnt2)\n# change res elements to 1\n temp = 0\n for i,val in enumerate(mval_s):\n if val == 1 and mval_ns[i] == 0: \n temp += 1 \n for i,val in enumerate(mval_s):\n if val == 1 and mval_ns[i] == 1 and temp < res:\n temp += 1\n mval_ns[i] = 0\n if temp == res: break\n res = min(ng,res+sum(mval_ns))\n ofile.write(\"%d\\n\" % (res))\nifile.close()\nofile.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/269.py","file_name":"269.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1587109657","text":"from ocp_resources.resource import TIMEOUT, Resource\n\n\nclass NodeMaintenance(Resource):\n \"\"\"\n Node Maintenance object, inherited from Resource.\n \"\"\"\n\n api_group = Resource.ApiGroup.NODEMAINTENANCE_KUBEVIRT_IO\n\n def __init__(\n self,\n name=None,\n client=None,\n node=None,\n reason=\"TEST Reason\",\n teardown=True,\n timeout=TIMEOUT,\n yaml_file=None,\n ):\n super().__init__(\n name=name,\n client=client,\n teardown=teardown,\n timeout=timeout,\n yaml_file=yaml_file,\n )\n self.node = node\n 
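# 'node' is the Node resource this maintenance object targets; its name and\n        # the free-text 'reason' are written into the CR spec by to_dict() below.\n        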
self.reason = reason\n\n def to_dict(self):\n res = super().to_dict()\n if self.yaml_file:\n return res\n\n assert self.node, \"node is mandatory for create\"\n res[\"spec\"] = {\"nodeName\": self.node.name, \"reason\": self.reason}\n return res\n","repo_name":"erkanerol/openshift-python-wrapper","sub_path":"ocp_resources/node_maintenance.py","file_name":"node_maintenance.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"19342104962","text":"import socket, random, time, sys\n\nheaders = [\n \"User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36\",\n \"Accept-language: en-US,en\"\n]\n\nsockets = []\n\ndef setupSocket(ip):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(4)\n sock.connect((ip, 80))\n sock.send(\"GET /?{} HTTP/1.1\\r\\n\".format(random.randint(0, 1337)).encode(\"utf-8\"))\n\n for header in headers:\n sock.send(\"{}\\r\\n\".format(header).encode(\"utf-8\"))\n\n return sock\n\nif __name__ == \"__main__\":\n\n ip = sys.argv[1]\n count = int(input(\"================== {} seems like hackable :D ================== \\nEnter the magnitude of earthquake u wanna send to {}! \".format(ip,ip)))\n print(\"\\nAttacking {} !!!\".format(ip))\n\n for _ in range(count):\n try:\n print(\"Socket ready in {} {}\".format(sys.argv[1],_))\n sock = setupSocket(ip)\n except socket.error:\n break\n\n sockets.append(sock)\n\n while True:\n print(\"Connected to {} sockets. Sending headers...\".format(len(sockets)))\n\n for sock in list(sockets):\n try:\n sock.send(\"X-a: {}\\r\\n\".format(random.randint(1, 4600)).encode(\"utf-8\"))\n except socket.error:\n sockets.remove(sock)\n\n for _ in range(count - len(sockets)):\n print(\"Re-opening closed sockets...\")\n try:\n sock = setupSocket(ip)\n if sock:\n sockets.append(sock)\n except socket.error:\n break\n\n time.sleep(15)\n","repo_name":"YosefToty/DosAttack","sub_path":"DosAttack.py","file_name":"DosAttack.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4115472882","text":"from pylon.core.tools import web, log # pylint: disable=E0611,E0401\nfrom tools import auth # pylint: disable=E0401\n\nfrom ..constants import RUNNER_MAPPING\n\n\nclass Slot: # pylint: disable=E1101,R0903\n @web.slot('ui_performance_content')\n @auth.decorators.check_slot({\n \"permissions\": [\"performance.ui_performance\"]\n })\n def content(self, context, slot, payload):\n project_id = context.rpc_manager.call.project_get_id()\n public_regions = context.rpc_manager.call.get_rabbit_queues(\"carrier\", True)\n project_regions = context.rpc_manager.call.get_rabbit_queues(\n f\"project_{project_id}_vhost\")\n cloud_regions = context.rpc_manager.timeout(3).integrations_get_cloud_integrations(\n project_id)\n with context.app.app_context():\n return self.descriptor.render_template(\n 'core/content.html',\n runners=list(RUNNER_MAPPING.keys()),\n locations={\n 'public_regions': public_regions,\n 'project_regions': project_regions,\n 'cloud_regions': cloud_regions\n }\n )\n\n @web.slot('ui_performance_scripts')\n def scripts(self, context, slot, payload):\n with context.app.app_context():\n return self.descriptor.render_template(\n 'core/scripts.html',\n )\n\n @web.slot('ui_performance_styles')\n def styles(self, context, slot, payload):\n with context.app.app_context():\n return 
self.descriptor.render_template(\n 'core/styles.html',\n )\n","repo_name":"carrier-io/ui_performance","sub_path":"slots/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71446322755","text":"\nimport os\nfrom HDF5DatasetGenerator import HDF5DatasetGenerator\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau\nfrom loss import *\n\nfrom Models.UNet import UNet\nfrom Models.UNet_cascaded import UNet_cascaded\nfrom Models.UNet2Plus import UNet2Plus\nfrom Models.U2_Net import U2_Net, U2_Net_light\nfrom Models.Inner_Cascaded_UNet import Inner_Cascaded_UNet\nfrom Models.Inner_Cascaded_U2_Net import Inner_Cascaded_U2_Net\n\n\ndataset = 'LiTS' # change dataset to 'BraTS' when training using BraTS 2013 dataset\nsave_path = 'save/' # path for saving models and logs\n\ntrain_path = '/home/user/datasets/LiTS/train.h5'\nval_path = '/home/user/datasets/LiTS/val.h5'\nTOTAL_TRAIN = 13144 # total train images in LiTS dataset\nTOTAL_VAL = 3308 # total validation images in LiTS dataset\nBATCH_SIZE = 2\ninput_shape = (512, 512, 1)\npatience = 4\nepochs = 20\nif dataset == 'BraTS':\n train_path = '/home/user/datasets/LiTS/train.h5'\n val_path = '/home/user/datasets/LiTS/val.h5'\n TOTAL_TRAIN = 3361 # total train images in BraTS 2013 dataset\n TOTAL_VAL = 1110 # total validation images in BraTS 2013 dataset\n BATCH_SIZE = 8\n input_shape = (256, 256, 4)\n patience = 10\n epochs = 50\n\n\ndef train():\n train_reader = HDF5DatasetGenerator(db_path=train_path, batch_size=BATCH_SIZE)\n train_iter = train_reader.generator()\n\n val_reader = HDF5DatasetGenerator(db_path=val_path, batch_size=BATCH_SIZE)\n val_iter = val_reader.generator()\n\n model = UNet_cascaded(input_shape=input_shape)\n model.compile(optimizer=Adam(lr=1e-4), loss=focal_tversky_loss, metrics=[dice_coef])\n\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n os.mkdir(save_path + '/model')\n os.mkdir(save_path + '/model/logs')\n\n model_checkpoint = ModelCheckpoint(save_path + '/model/weights.{epoch:02d}-{val_loss:.2f}.hdf5',\n monitor='val_loss', verbose=1, save_best_only=True)\n tensorboard = TensorBoard(log_dir=save_path + '/model/logs')\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=patience, mode='auto')\n callbacks = [model_checkpoint, tensorboard, reduce_lr]\n\n model.fit_generator(train_iter,\n steps_per_epoch=TOTAL_TRAIN//BATCH_SIZE,\n epochs=epochs,\n validation_data=val_iter,\n validation_steps=TOTAL_VAL//BATCH_SIZE,\n verbose=1,\n callbacks=callbacks)\n\n train_reader.close()\n val_reader.close()\n\n print('Finished training ......')\n\n\nif __name__ == '__main__':\n train()","repo_name":"FreedomXL/Inner-Cascaded-U-2-Net","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23502598901","text":"import sys\n\ndef answer_problem(file_name, f_solve):\n with open(file_name, 'r') as f:\n n = int(f.readline())\n output_str = '\\n'.join(\n \"Case #{0}: {1}\".format(i+1, f_solve(input_str))\n for i, input_str in enumerate(f.read().splitlines()))\n return output_str\n\n\n\ndef solve_a(input_str):\n letters_seen = set()\n all_letters = set(str(x) for x in range(10))\n incr = int(input_str)\n if incr == 0:\n return \"INSOMNIA\"\n x = 0\n while letters_seen != all_letters:\n x += incr\n 
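# e.g. incr = 125: x walks 125, 250, 375, ... and each step contributes its digits\n        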
letters_seen |= set(str(x))\n    return str(x)\n\ndef solve_b(input_str):\n    num_flips = 0\n    while input_str != '+' * len(input_str):\n        if input_str.find('+') == -1:\n            return num_flips + 1\n        elif input_str.find('+') < input_str.find('-'):\n            input_str = flip_n_pancakes(input_str, \n                input_str.find('-'))\n        else:\n            input_str = flip_n_pancakes(input_str, \n                input_str.find('+'))\n        num_flips += 1\n    return num_flips\n\ndef flip_n_pancakes(pancake_pile, n):\n    return ''.join({'+':'-', '-': '+'}[x] for x in \n        pancake_pile[:n][::-1]) + pancake_pile[n:]\n\nif __name__ == \"__main__\":\n    #output_str = answer_problem(sys.argv[1], solve_a)\n    output_str = answer_problem(sys.argv[1], solve_b)\n    with open(sys.argv[1].replace('.in', '.out'), 'w') as f:\n        f.write(output_str)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_178/850.py","file_name":"850.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23555562311","text":"with open(\"input.txt\",\"r\") as f:\r\n    with open(\"output.txt\",\"wt\") as output:\r\n        cases = int(f.readline())\r\n        for c in range(cases):\r\n            target = int(f.readline())\r\n            target = [int(n) for n in str(target)]\r\n            length = len(target)\r\n            case = c + 1\r\n            while True:\r\n                flag = 1\r\n                for l in range(length - 1):\r\n                    if target[l] > target[l + 1]:\r\n                        flag = 0\r\n                        target[l] -= 1\r\n                        for inner in range(l+1,length):\r\n                            target[inner] = 9\r\n                if flag:\r\n                    break\r\n            answer = str(int(\"\".join([str(n) for n in target])))\r\n            print(\"case #{0}: {1}\".format(str(case),answer),file = output)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/2424.py","file_name":"2424.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21196182081","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 25 08:32:05 2022\r\n\r\n@author: Ariathna\r\n\"\"\"\r\n\r\nimport pandas as pd\r\ndata= pd.read_csv('transaction.csv', sep= ';')\r\n\r\n#Summary of the data\r\n\r\ndata.info()\r\n\r\n#Calculations\r\n\r\nCostPerItem= data['CostPerItem']\r\nNumberOfItemsPurchased= data['NumberOfItemsPurchased']\r\nCostPerTransaction = CostPerItem * NumberOfItemsPurchased\r\n\r\n#adding new column\r\n\r\ndata['CostPerTransaction']= CostPerTransaction\r\n\r\n#sales per transaction\r\n\r\ndata['SalesPerTransaction']= data['SellingPricePerItem'] * data['NumberOfItemsPurchased']\r\n\r\n#Profit Calculation = Sale - Cost\r\n\r\ndata['ProfitPerTransaction'] = data['SalesPerTransaction'] - data['CostPerTransaction']\r\n\r\n#Markup = (Sale - Cost)/ Cost\r\n\r\ndata['Markup']=data['ProfitPerTransaction'] / data['CostPerTransaction']\r\n\r\n#Rounding\r\n\r\nRoundMarkup = round(data['Markup'],2)\r\n\r\ndata['Markup'] = round(data['Markup'],2)\r\n\r\n#Combining Data Fields\r\n#First we need our variables to be strings\r\n\r\nday = data['Day'].astype(str)\r\nyear = data['Year'].astype(str)\r\n\r\nDate = day+'-'+data['Month']+'-'+year\r\ndata['Date']=Date\r\n\r\ndata.head(10)\r\ndata.iloc[:,2]\r\n\r\n#Split the column ClientKeyWords\r\n\r\nsplit_col = data['ClientKeywords'].str.split(',', expand=True)\r\n\r\n#Naming the columns\r\n\r\ndata['ClientAge'] = split_col[0]\r\ndata['ClientType'] = split_col[1]\r\ndata['ClientContract'] = split_col[2]\r\n\r\n#Using replace to delete brackets\r\n\r\ndata['ClientAge'] = 
data['ClientAge'].str.replace('[','')\r\ndata['ClientContract'] = data['ClientContract'].str.replace(']','')\r\n\r\n#Changing description to lower case\r\n\r\ndata['ItemDescription']=data['ItemDescription'].str.lower()\r\n\r\n#Merging Files\r\n\r\nseasons= pd.read_csv('value_inc_seasons.csv', sep= ';')\r\n\r\ndata= pd.merge(data, seasons, on='Month')\r\n\r\n#Dropping Columns\r\n\r\ndata= data.drop(['ClientKeywords','Day','Month','Year'], axis=1)\r\n#data= data.drop('Day', axis=1)\r\n#data= data.drop('Month', axis=1)\r\n#data= data.drop('Year', axis=1)\r\n\r\n#EXPORTING DATA TO CSV\r\n\r\ndata.to_csv('ValueINC_Clean.csv', index=False)","repo_name":"ariacervantes/Portfolio","sub_path":"valueinc_sales.py","file_name":"valueinc_sales.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28084631864","text":"import speedtest\n\nfrom session_config import SessionConfig\nfrom telethon import events\nfrom typing import Tuple\n\n\nclass SpeedTestModule(SessionConfig):\n    \"\"\"Module for speed testing, command: speedtest\"\"\"\n    async def speedtest_handler(self, msg):\n        try:\n            await msg.edit(\n                \"✡️ Speed testing...\",\n                parse_mode=\"html\"\n            )\n\n            results = self.speedtester()\n\n            await msg.edit(f\"\"\"\nResults:\\n\\n\n🔽 Download:\n{round(results[0] / 1024 / 1024)}\\n\n🔼 Upload:\n{round(results[1] / 1024 / 1024)}\\n\nPing:\n{round(results[2], 3)}\n    \"\"\",\n                parse_mode=\"html\"\n            )\n        except Exception as error:\n            await msg.edit(\n                f\"⚠️ Error: {error}\",\n                parse_mode=\"html\"\n            )\n\n    def speedtester(self) -> Tuple[float, float, float]:\n        s = speedtest.Speedtest()\n\n        s.get_servers()\n        s.get_best_server()\n        s.download()\n        s.upload()\n        res = s.results.dict()\n        return res[\"download\"], res[\"upload\"], res[\"ping\"]\n    \n    def start(self):\n        self.client.add_event_handler(\n            self.speedtest_handler,\n            events.NewMessage(pattern=\".speedtest\")\n        )\n","repo_name":"Winchester-Dean/user-bot","sub_path":"modules/speedtest.py","file_name":"speedtest.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33011748901","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nx = [random.gauss(3,1) for _ in range(4000)]\ny = [random.gauss(4,2) for _ in range(4000)]\n#print(x,y)\nxx = []\nyy = []\nsig = []\nfor i in range(1,6):\n    xx.append(i)\n    r = random.uniform(2,3)\n    rr = random.uniform(0,0.05)\n    yy.append(random.gauss(np.sqrt(i),np.sqrt(i*0.05)))\n    sig.append(np.sqrt(i*0.05))\nprint('xx=',xx)\nprint(\"yy=\", yy)\nprint(\"sig=\",sig)\nx_array = np.array(x)\ny_array =np.array(y)\nz_array = (x_array/y_array)\n#print(x_array,y_array,z_array)\nplt.style.use('seaborn-deep')\nbins = np.linspace(-10, 10, 250)\nhist1 = plt.hist(z_array,bins, alpha = 1.0,color = 'blue',label ='ratio of two gaussian')\nhist2 = plt.hist(x_array,bins, alpha = 0.5,color = 'red',label ='gaussian1')\nhist3 = plt.hist(y_array,bins, alpha = 0.5,color = 'green',label ='gaussian2')\nplt.legend(loc='upper right')\n#plt.show()\n'''pyplot.hist(x, bins, alpha=0.5, label='x')\npyplot.hist(y, bins, alpha=0.5, label='y')\npyplot.show()'''\n","repo_name":"neumann-py/procodepy","sub_path":"gauss_histo.py","file_name":"gauss_histo.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35235905462","text":"import unittest\nfrom 
pathlib import Path\nfrom gaze_verification.data_objects.sample import Samples, Sample\nfrom gaze_verification.data_processors.filters.savitzky_golay_filter_1d import SavitzkyGolayFilter1D\n\n\nclass TestSavitzkyGolayFilter1D(unittest.TestCase):\n\n def __init__(self, method_name=\"runTest\"):\n super().__init__(method_name)\n self._current_dir = Path().resolve()\n self._init_data_path = self._current_dir / \"test_data_samples\" / \"filtering_samples.pickle\"\n\n def test_hyperparameters(self):\n hp_initial_dict = dict(window_size=7,\n order=3,\n rate=1,\n derivative=0,\n keep_erroneous_samples=False\n )\n sg_filter = SavitzkyGolayFilter1D(**hp_initial_dict, verbose=True)\n hp_dict = sg_filter.get_hyperparameters(as_dict=True)\n self.assertDictEqual(hp_initial_dict, hp_dict)\n\n def test_filtration(self):\n init_sample = Samples.load_pickle(str(self._init_data_path))\n # assuming here is single Sample in Samples\n sample_data = init_sample[0].data\n n_elements, n_dims = sample_data.shape\n\n for window_size, order in zip([5, 7, 9, 11, 13, 15, 17, 19, 21],\n [2, 3, 3, 5, 5, 7, 7, 9, 9]):\n sg_filter = SavitzkyGolayFilter1D(window_size, order,\n rate=1, derivative=0,\n keep_erroneous_samples=False, verbose=True)\n filtered_sample = sg_filter.filter_dataset(init_sample)\n self.assertTupleEqual(filtered_sample[0].data.shape, (n_elements, n_dims))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"IrinaArmstrong/GazeVerification","sub_path":"tests/data_processors/test_savitzky_golay_filter_1d.py","file_name":"test_savitzky_golay_filter_1d.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23432328031","text":"infile=open('D-large.in','r')\r\nlines=infile.readlines()\r\ninfile.close()\r\noutfile=open('ans4.txt','w')\r\ntest=int(lines[0])\r\nfor i in range(test):\r\n text = \"Case #\" + str(i+1) + \": \"\r\n cases = lines[1+(i*3):4+(i*4)]\r\n naomi = sorted(cases[1].strip().split(\" \"))\r\n ken = sorted(cases[2].strip().split(\" \"))\r\n #war\r\n war = 0\r\n for j in range(len(naomi)):\r\n if naomi[-(j+1)] > ken[-1]:\r\n war += 1\r\n else:\r\n ken.pop()\r\n #deceit\r\n naomi = sorted(cases[1].strip().split(\" \"))\r\n ken = sorted(cases[2].strip().split(\" \"))\r\n deceit = 0\r\n foo=0\r\n for j in range(len(naomi)):\r\n if naomi[j] 2e-27)\n ):\n return 2\n elif (self.diff(dm_mass, stau_mass) < 0.2):\n return 3\n elif (bino*bino < 0.9):\n return 4\n else:\n return 5","repo_name":"Neutron-Calibration-in-DUNE/Blip","sub_path":"blip/dataset/mssm.py","file_name":"mssm.py","file_ext":"py","file_size_in_byte":8387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5856844380","text":"import json\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nfrom catapult_base import cloud_storage\nfrom telemetry.page import page\nfrom telemetry.testing import system_stub\nfrom telemetry.wpr import archive_info\n\n\nclass MockPage(page.Page):\n def __init__(self, url, name=None):\n super(MockPage, self).__init__(url, None, name=name)\n\n\npage1 = MockPage('http://www.foo.com/', 'Foo')\npage2 = MockPage('http://www.bar.com/', 'Bar')\npage3 = MockPage('http://www.baz.com/')\nrecording1 = 'data_001.wpr'\nrecording2 = 'data_002.wpr'\narchive_info_contents = (\"\"\"\n{\n\"archives\": {\n \"%s\": [\"%s\", \"%s\"],\n \"%s\": [\"%s\"]\n}\n}\n\"\"\" % (recording1, page1.display_name, page2.display_name, recording2,\n 
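# recording1 is shared by the two named pages; recording2 backs the unnamed page\n 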
page3.display_name))\n\n\nclass WprArchiveInfoTest(unittest.TestCase):\n def setUp(self):\n self.tmp_dir = tempfile.mkdtemp()\n # Write the metadata.\n self.story_set_archive_info_file = os.path.join(\n self.tmp_dir, 'info.json')\n with open(self.story_set_archive_info_file, 'w') as f:\n f.write(archive_info_contents)\n\n # Write the existing .wpr files.\n for i in [1, 2]:\n with open(os.path.join(self.tmp_dir, ('data_00%d.wpr' % i)), 'w') as f:\n f.write(archive_info_contents)\n\n # Create the PageSetArchiveInfo object to be tested.\n self.archive_info = archive_info.WprArchiveInfo.FromFile(\n self.story_set_archive_info_file, cloud_storage.PUBLIC_BUCKET)\n # Use cloud_storage system stub.\n self.overrides = system_stub.Override(archive_info, ['cloud_storage'])\n\n def tearDown(self):\n shutil.rmtree(self.tmp_dir)\n self.overrides.Restore()\n\n def assertCorrectHashFile(self, file_path):\n old_ch = cloud_storage.CalculateHash\n cloud_storage.CalculateHash = self.overrides.cloud_storage.CalculateHash\n try:\n self.assertTrue(os.path.exists(file_path + '.sha1'))\n with open(file_path + '.sha1', 'rb') as f:\n self.assertEquals(cloud_storage.CalculateHash(file_path), f.read())\n finally:\n cloud_storage.CalculateHash = old_ch\n\n def testDownloadArchivesIfNeeded(self):\n cloud_storage_stub = self.overrides.cloud_storage\n # Second hash doesn't match, need to fetch it.\n cloud_storage_stub.SetRemotePathsForTesting(\n {cloud_storage.PUBLIC_BUCKET: {recording1: \"dummyhash\",\n recording2: \"dummyhash22\"}})\n cloud_storage_stub.SetCalculatedHashesForTesting(\n {os.path.join(self.tmp_dir, recording1): \"dummyhash\",\n os.path.join(self.tmp_dir, recording2): \"dummyhash2\",})\n self.archive_info.DownloadArchivesIfNeeded()\n self.assertEquals(len(cloud_storage_stub.downloaded_files), 1)\n self.assertEquals(cloud_storage_stub.downloaded_files[0], recording2)\n\n def testReadingArchiveInfo(self):\n self.assertIsNotNone(self.archive_info.WprFilePathForStory(page1))\n self.assertEquals(recording1, os.path.basename(\n self.archive_info.WprFilePathForStory(page1)))\n\n self.assertIsNotNone(self.archive_info.WprFilePathForStory(page2))\n self.assertEquals(recording1, os.path.basename(\n self.archive_info.WprFilePathForStory(page2)))\n\n self.assertIsNotNone(self.archive_info.WprFilePathForStory(page3))\n self.assertEquals(recording2, os.path.basename(\n self.archive_info.WprFilePathForStory(page3)))\n\n def testArchiveInfoFileGetsUpdated(self):\n \"\"\"Ensures that the archive info file is updated correctly.\"\"\"\n\n expected_archive_file_contents = {\n u'description': (u'Describes the Web Page Replay archives for a'\n u' story set. Don\\'t edit by hand! 
Use record_wpr for'\n u' updating.'),\n u'archives': {\n u'data_003.wpr': [u'Bar', u'http://www.baz.com/'],\n u'data_001.wpr': [u'Foo']\n }\n }\n\n new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')\n expected_archive_file_path = os.path.join(self.tmp_dir, 'data_003.wpr')\n hash_dictionary = {expected_archive_file_path:'filehash'}\n cloud_storage_stub = self.overrides.cloud_storage\n cloud_storage_stub.SetCalculatedHashesForTesting(hash_dictionary)\n with open(new_temp_recording, 'w') as f:\n f.write('wpr data')\n self.archive_info.AddNewTemporaryRecording(new_temp_recording)\n self.archive_info.AddRecordedStories([page2, page3])\n\n with open(self.story_set_archive_info_file, 'r') as f:\n archive_file_contents = json.load(f)\n self.assertEquals(expected_archive_file_contents, archive_file_contents)\n\n def testModifications(self):\n recording1_path = os.path.join(self.tmp_dir, recording1)\n recording2_path = os.path.join(self.tmp_dir, recording2)\n\n new_recording1 = os.path.join(self.tmp_dir, 'data_003.wpr')\n new_recording2 = os.path.join(self.tmp_dir, 'data_004.wpr')\n hash_dictionary = {new_recording1:'file_hash1',\n new_recording2:'file_hash2'}\n cloud_storage_stub = self.overrides.cloud_storage\n cloud_storage_stub.SetCalculatedHashesForTesting(hash_dictionary)\n\n new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')\n with open(new_temp_recording, 'w') as f:\n f.write('wpr data')\n\n self.archive_info.AddNewTemporaryRecording(new_temp_recording)\n\n self.assertEquals(new_temp_recording,\n self.archive_info.WprFilePathForStory(page1))\n self.assertEquals(new_temp_recording,\n self.archive_info.WprFilePathForStory(page2))\n self.assertEquals(new_temp_recording,\n self.archive_info.WprFilePathForStory(page3))\n\n self.archive_info.AddRecordedStories([page2])\n\n self.assertTrue(os.path.exists(new_recording1))\n self.assertFalse(os.path.exists(new_temp_recording))\n\n self.assertTrue(os.path.exists(recording1_path))\n self.assertTrue(os.path.exists(recording2_path))\n self.assertCorrectHashFile(new_recording1)\n\n with open(new_temp_recording, 'w') as f:\n f.write('wpr data')\n\n self.archive_info.AddNewTemporaryRecording(new_temp_recording)\n self.archive_info.AddRecordedStories([page3])\n\n self.assertTrue(os.path.exists(new_recording2))\n self.assertCorrectHashFile(new_recording2)\n self.assertFalse(os.path.exists(new_temp_recording))\n\n self.assertTrue(os.path.exists(recording1_path))\n # recording2 is no longer needed, so it was deleted.\n self.assertFalse(os.path.exists(recording2_path))\n\n def testCreatingNewArchiveInfo(self):\n # Write only the page set without the corresponding metadata file.\n story_set_contents = (\"\"\"\n {\n archive_data_file\": \"new_archive_info.json\",\n \"pages\": [\n {\n \"url\": \"%s\",\n }\n ]\n }\"\"\" % page1.url)\n\n story_set_file = os.path.join(self.tmp_dir, 'new_story_set.json')\n with open(story_set_file, 'w') as f:\n f.write(story_set_contents)\n\n self.story_set_archive_info_file = os.path.join(self.tmp_dir,\n 'new_archive_info.json')\n\n expected_archive_file_path = os.path.join(self.tmp_dir,\n 'new_archive_info_000.wpr')\n hash_dictionary = {expected_archive_file_path:'filehash'}\n self.overrides.cloud_storage.SetCalculatedHashesForTesting(hash_dictionary)\n\n # Create the WprArchiveInfo object to be tested.\n self.archive_info = archive_info.WprArchiveInfo.FromFile(\n self.story_set_archive_info_file, cloud_storage.PUBLIC_BUCKET)\n\n # Add a recording for all the pages.\n new_temp_recording = 
os.path.join(self.tmp_dir, 'recording.wpr')\n with open(new_temp_recording, 'w') as f:\n f.write('wpr data')\n\n self.archive_info.AddNewTemporaryRecording(new_temp_recording)\n\n self.assertEquals(new_temp_recording,\n self.archive_info.WprFilePathForStory(page1))\n\n self.archive_info.AddRecordedStories([page1])\n\n # Expected name for the recording (decided by WprArchiveInfo).\n new_recording = os.path.join(self.tmp_dir, 'new_archive_info_000.wpr')\n\n self.assertTrue(os.path.exists(new_recording))\n self.assertFalse(os.path.exists(new_temp_recording))\n self.assertCorrectHashFile(new_recording)\n\n # Check that the archive info was written correctly.\n self.assertTrue(os.path.exists(self.story_set_archive_info_file))\n read_archive_info = archive_info.WprArchiveInfo.FromFile(\n self.story_set_archive_info_file, cloud_storage.PUBLIC_BUCKET)\n self.assertEquals(new_recording,\n read_archive_info.WprFilePathForStory(page1))\n","repo_name":"googlearchive/big-rig","sub_path":"app/src/thirdparty/telemetry/wpr/archive_info_unittest.py","file_name":"archive_info_unittest.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","stars":857,"dataset":"github-code","pt":"61"} +{"seq_id":"39513774739","text":"import json\nfrom typing import Union\n\nfrom th2_data_services.interfaces.adapter import IAdapter\n\nfrom google.protobuf.json_format import MessageToDict\nfrom th2_grpc_data_provider.data_provider_pb2 import EventResponse, MessageGroupResponse\n\n\nclass GRPCObjectToDictAdapter(IAdapter):\n \"\"\"GRPC Adapter decodes a GRPC object into a Dict object.\"\"\"\n\n def handle(self, record: Union[MessageGroupResponse, EventResponse]) -> dict:\n \"\"\"Decodes MessageGroupResponse or EventResponse as GRPC object into a Dict object.\n\n Args:\n record: MessageGroupResponse/EventResponse.\n\n Returns:\n Dict object.\n \"\"\"\n new_record = MessageToDict(record, including_default_value_fields=True)\n\n if isinstance(record, EventResponse):\n new_record[\"startTimestamp\"] = {\n \"epochSecond\": record.start_timestamp.seconds,\n \"nano\": record.start_timestamp.nanos,\n }\n new_record[\"endTimestamp\"] = {\n \"epochSecond\": record.end_timestamp.seconds,\n \"nano\": record.end_timestamp.nanos,\n }\n if \"batchId\" not in new_record:\n new_record[\"batchId\"] = None\n if \"parentEventId\" not in new_record:\n new_record[\"parentEventId\"] = None\n elif isinstance(record, MessageGroupResponse):\n new_record[\"timestamp\"] = {\"epochSecond\": record.timestamp.seconds, \"nano\": record.timestamp.nanos}\n\n try:\n new_record[\"body\"] = json.loads(record.body)\n except (KeyError, AttributeError, json.JSONDecodeError):\n return new_record\n except Exception as e:\n raise Exception(f\"{e}; Current record: {record}\")\n return new_record\n","repo_name":"th2-net/th2-data-services","sub_path":"th2_data_services/provider/v6/adapters/basic_adapters.py","file_name":"basic_adapters.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"40023758454","text":"from django import forms\nfrom agents.models import Agent\nfrom .models import Lead, Category\n\n\nclass LeadModelForm(forms.ModelForm):\n agent = forms.ModelChoiceField(queryset=Agent.objects.none(), required=False)\n \n class Meta:\n model = Lead\n fields = (\n \"first_name\",\n \"last_name\",\n \"age\",\n \"agent\"\n )\n \n def __init__(self, *args, **kwargs):\n request = kwargs.pop(\"request\")\n agents = 
Agent.objects.filter(\n            organisation=request.user.userprofile\n        )\n        super().__init__(*args, **kwargs)\n        self.fields[\"agent\"].queryset = agents\n\n\nclass AssignAgentForm(forms.Form):\n    agent = forms.ModelChoiceField(queryset=Agent.objects.none())\n    \n    def __init__(self, *args, **kwargs):\n        # agent choices need to be dynamic\n        request = kwargs.pop(\"request\")\n        agents = Agent.objects.filter(\n            organisation=request.user.userprofile\n        )\n        super().__init__(*args, **kwargs)\n        self.fields[\"agent\"].queryset = agents\n\n\nclass CategoryUpdateForm(forms.ModelForm):\n    category = forms.ModelChoiceField(queryset=Lead.objects.none())\n    \n    class Meta:\n        model = Lead\n        fields = (\n            \"category\",\n        )\n    \n    def __init__(self, *args, **kwargs):\n        request = kwargs.pop(\"request\")\n        user = request.user\n        if user.is_organisor:\n            org = user.userprofile\n        else:\n            org = user.agent.organisation\n        categorys = Category.objects.filter(\n            organisation=org\n        )\n        super().__init__(*args, **kwargs)\n        self.fields[\"category\"].queryset = categorys\n","repo_name":"vaisakh23/DJCRM","sub_path":"src/leads/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"39031420015","text":"class Solution:\n    def countPrimeSetBits(self, L, R):\n        def is_prime(n):\n            if n<=1:\n                return False\n            if n <=3:\n                return True\n            if n%2==0 or n%3==0:\n                return False\n            i=5\n            while i*i<=n:\n                if n%i==0 or n%(i+2)==0:\n                    return False\n                i = i+6\n            return True\n\n        def count_set_bits(n):#Brian Kernighan's algorithm\n            count = 0\n            while n:\n                n = n & (n-1)\n                count += 1\n            return count\n        answer = 0\n        for i in range(L,R+1):\n            if (is_prime(count_set_bits(i))):\n                answer += 1\n        return answer\ns = Solution()\nprint(s.countPrimeSetBits(10,15))","repo_name":"jaspal13/Python_competitive_programming","sub_path":"LeetCode/762.py","file_name":"762.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"3953066819","text":"from asyncpg import Pool\n\n\nasync def update_user_stats(snowflake: int, pool: Pool, data: dict) -> None:\n    \"\"\"Function for updating user stats.\"\"\"\n    query = \"\"\"\n    INSERT INTO users (snowflake, id, name, avatar, country, pp, rank, country_rank, player_role, history, ranked_acc, total_score, ranked_score, total_played, ranked_played)\n    VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)\n    ON CONFLICT (snowflake)\n    DO UPDATE SET\n        id = $2,\n        name = $3,\n        avatar = $4,\n        country = $5,\n        pp = $6,\n        rank = $7,\n        country_rank = $8,\n        player_role = $9,\n        history = $10,\n        ranked_acc = $11,\n        total_score = $12,\n        ranked_score = $13,\n        total_played = $14,\n        ranked_played = $15\n    \"\"\"\n    info = data[\"playerInfo\"]\n    score = data[\"scoreStats\"]\n    values = (\n        snowflake,\n        info[\"playerId\"],\n        info[\"playerName\"],\n        info[\"avatar\"],\n        info[\"country\"],\n        info[\"pp\"],\n        info[\"rank\"],\n        info[\"countryRank\"],\n        info[\"role\"],\n        [int(c) for c in info[\"history\"].split(\",\")],\n        score[\"averageRankedAccuracy\"],\n        score[\"totalScore\"],\n        score[\"totalRankedScore\"],\n        score[\"totalPlayCount\"],\n        score[\"rankedPlayCount\"],\n    )\n    async with pool.acquire() as conn:\n        async with conn.transaction():\n            await conn.execute(query, 
*values)\n","repo_name":"meizuflux/saberbot","sub_path":"extensions/utils/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"14804077715","text":"import random\r\n\"\"\"\r\nSeveral lists are given, each with a different number of elements.\r\nFor each list, find the maximum and divide every element of that list by it.\r\n\"\"\"\r\nlist1, list2, list3 = [], [], []\r\nfor i in range(3):\r\n    list1.append(round(random.random() * 100))\r\nfor i in range(5):\r\n    list2.append(round(random.random() * 100))\r\nfor i in range(6):\r\n    list3.append(round(random.random() * 100))\r\nmax1, max2, max3 = max(list1), max(list2), max(list3)\r\ncounter1, counter2, counter3 = 0, 0, 0\r\nresult1, result2, result3 = [], [], []\r\nwhile counter1 < len(list1):\r\n    result1.append(list1[counter1] / max1)\r\n    counter1 += 1\r\nwhile counter2 < len(list2):\r\n    result2.append(list2[counter2] / max2)\r\n    counter2 += 1\r\nwhile counter3 < len(list3):\r\n    result3.append(list3[counter3] / max3)\r\n    counter3 += 1\r\nprint(f'Original list 1: {list1}\\nOriginal list 2: {list2}\\nOriginal list 3: {list3}')\r\nprint(f'Maximum element of list 1: {max1}\\nMaximum element of list 2: {max2}\\nMaximum element of list 3: {max3}')\r\nprint(f'Resulting list 1: {result1}\\nResulting list 2: {result2}\\nResulting list 3: {result3}\\n')\r\n","repo_name":"sudo-slatin01/Python-sudo","sub_path":"Python6.py","file_name":"Python6.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"40497843987","text":"'''\nYou are given a 0-indexed integer array nums. In one step, remove all elements nums[i] where nums[i - 1] > nums[i] for all 0 < i < nums.length.\n\nReturn the number of steps performed until nums becomes a non-decreasing array.\n\n \n\nExample 1:\n\nInput: nums = [5,3,4,4,7,3,6,11,8,5,11]\nOutput: 3\nExplanation: The following are the steps performed:\n- Step 1: [5,3,4,4,7,3,6,11,8,5,11] becomes [5,4,4,7,6,11,11]\n- Step 2: [5,4,4,7,6,11,11] becomes [5,4,7,11,11]\n- Step 3: [5,4,7,11,11] becomes [5,7,11,11]\n[5,7,11,11] is a non-decreasing array. Therefore, we return 3.\n\nExample 2:\n\nInput: nums = [4,5,7,7,13]\nOutput: 0\nExplanation: nums is already a non-decreasing array. 
Therefore, we return 0.\n\n \n\nConstraints:\n\n    1 <= nums.length <= 10^5\n    1 <= nums[i] <= 10^9\n'''\n\nclass Solution:\n    def totalSteps(self, nums: List[int]) -> int:\n        stack = [] # store a tuple -> (num, step)\n        n = len(nums) \n\n        for idx in range(n-1,-1,-1) :\n\n            curr = nums[idx]\n            steps = 0 \n\n            while stack :\n                n, s = stack[-1]\n                if n >= curr :\n                    break \n\n                steps = max(steps+1,s) \n                stack.pop()\n\n            stack.append((curr,steps))\n\n        res = 0 # for storing the result \n\n        for i in range(len(stack)) :\n            res = max(res, stack[i][1])\n        \n        return res\n\n\n        # dec = False \n        # steps = 0\n        # while True and not dec:\n        #     # do something?\n        #     newNum = []\n        #     for i in range(len(nums)-1) : \n        #         if nums[i] > nums[i+1] \n        #             newNum.append(nums[i])\n\n        #     for i in range(len(newNum)-1) :\n        #         if newNum[i] > newNum[i+1] :\n        #             dec = False \n        #             continue\n        \n        #     steps += 1\n        #     return steps","repo_name":"lonebots/python-programming","sub_path":"leet-code/stack/2289-steps-to-make-array-non-decreasing.py","file_name":"2289-steps-to-make-array-non-decreasing.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"41482284392","text":"import numpy as np\nfrom nptorch.tensor import Tensor\nfrom nptorch.random import uniform\nfrom .. import functional as F\nfrom nptorch.functional import zeros, stack\nfrom .module import Module, Parameter\n\n\nclass RNNBase(Module):\n    def __init__(self, input_size, hidden_size, num_layers=1, bias=True, activation='tanh',\n                 batch_first=False, dropout=0.):\n        \"\"\"\n        RNN; the input data shape defaults to (L, B, D)\n        @param input_size: number of input features, i.e. 'D'\n        @param hidden_size: number of hidden-layer units\n        @param num_layers: number of RNN layers, defaults to 1\n        @param bias: whether to use a bias term\n        @param activation: activation function; 'tanh' and 'relu' are supported\n        @param batch_first: set this to True if the input shape is (B, L, D)\n        @param dropout: dropout rate for the hidden-layer units, defaults to 0\n        \"\"\"\n        super(RNNBase, self).__init__()\n        self.input_size = input_size\n        self.hidden_size = hidden_size\n        self.num_layers = num_layers\n        self.bias = bias\n        if activation == 'tanh':\n            self.activation_fcn = F.tanh\n        elif activation == 'relu':\n            self.activation_fcn = F.relu\n        else:\n            raise ValueError(f'unsupported activation function {activation} for RNN')\n        self.batch_first = batch_first\n        assert 0. <= dropout <= 1., f'dropout probability has to be between 0 and 1, but got {dropout}'\n        if dropout != 0. and self.num_layers == 1:\n            raise UserWarning('dropout rate may be useless when num_layers is 1')\n        self.dropout = dropout\n        self._init_params()\n\n    def extra_repr(self):\n        return f'{self.input_size}, {self.hidden_size}, num_layers={self.num_layers}, bias={self.bias}, ' \\\n               f'\\nactivation={self.activation_fcn.__name__}, batch_first={self.batch_first}, dropout={self.dropout}'\n\n    def _init_params(self):\n        k = 1. 
/ np.sqrt(self.hidden_size)\n gate_size = {'RNN': 1, 'LSTM': 4, 'GRU': 3}.get(self.__class__.__name__) * self.hidden_size\n for i in range(self.num_layers):\n ih_input_size = self.input_size if i == 0 else self.hidden_size\n self.register_parameter(f'weight_ih_l{i}',\n Parameter(uniform(low=-k, high=k, size=(gate_size, ih_input_size))))\n self.register_parameter(f'weight_hh_l{i}',\n Parameter(uniform(low=-k, high=k, size=(gate_size, self.hidden_size))))\n if self.bias:\n self.register_parameter(f'bias_ih_l{i}', Parameter(zeros(gate_size)))\n self.register_parameter(f'bias_hh_l{i}', Parameter(zeros(gate_size)))\n\n @staticmethod\n def _check_dim(x: Tensor):\n assert x.ndim == 3, 'x must be 3 dimensional'\n\n def forward(self, *args):\n raise NotImplementedError\n\n\nclass RNN(RNNBase):\n \"\"\"\n Simple RNN\n \"\"\"\n def __init__(self, input_size, hidden_size, num_layers=1, bias=True, activation='tanh',\n batch_first=False, dropout=0.):\n super(RNN, self).__init__(input_size, hidden_size, num_layers, bias, activation, batch_first, dropout)\n\n def forward(self, x: Tensor, hidden: Tensor = None) -> (Tensor, Tensor):\n \"\"\"\n @param x: (L, B, D)\n @param hidden: (num_layers, B, hidden_size) initial hidden value, default None\n \"\"\"\n self._check_dim(x)\n if self.batch_first:\n x = x.swapaxes(0, 1) # (B, L, D) => (L, B, D)\n hiddens = [zeros(x.shape[1], self.hidden_size)] * self.num_layers if hidden is None else list(hidden)\n output = []\n for t, xt in enumerate(x):\n hidden = xt\n for i in range(self.num_layers):\n weight_ih = getattr(self, f'weight_ih_l{i}')\n weight_hh = getattr(self, f'weight_hh_l{i}')\n hidden @= weight_ih.T\n hidden += hiddens[i] @ weight_hh.T\n if self.bias:\n hidden += getattr(self, f'bias_ih_l{i}')\n hidden += getattr(self, f'bias_hh_l{i}')\n hidden = self.activation_fcn(hidden)\n hiddens[i] = hidden\n if self.dropout > 0. 
and i < self.num_layers - 1:\n hidden = F.dropout(hidden, self.dropout, self.training)\n output.append(hidden)\n output = stack(output)\n hidden = stack(hiddens)\n if self.batch_first:\n output = output.swapaxes(0, 1)\n return output, hidden\n\n\nclass LSTM(RNNBase):\n \"\"\"\n Long Short-Term Memory\n \"\"\"\n def __init__(self, input_size, hidden_size, num_layers=1, bias=True, activation='tanh',\n batch_first=False, dropout=0.):\n super(LSTM, self).__init__(input_size, hidden_size, num_layers, bias, activation, batch_first, dropout)\n\n def forward(self, x: Tensor, initial=(None, None)) -> (Tensor, (Tensor, Tensor)):\n \"\"\"\n @param x: (L, B, D)\n @param initial: Tuple (hidden, cache) initial hidden and cache value, default (None, None)\n if given, shape of each one is like: (num_layers, B, hidden_size)\n \"\"\"\n self._check_dim(x)\n if self.batch_first:\n x = x.swapaxes(0, 1) # (B, L, D) => (L, B, D)\n hidden, cache = initial\n hiddens = [zeros(x.shape[1], self.hidden_size)] * self.num_layers if hidden is None else list(hidden)\n caches = [zeros(x.shape[1], self.hidden_size)] * self.num_layers if cache is None else list(cache)\n output = []\n for t, xt in enumerate(x):\n hidden = xt\n for i in range(self.num_layers):\n weight_ih = getattr(self, f'weight_ih_l{i}')\n weight_hh = getattr(self, f'weight_hh_l{i}')\n hidden @= weight_ih.T\n hidden += hiddens[i] @ weight_hh.T\n if self.bias:\n hidden += getattr(self, f'bias_ih_l{i}')\n hidden += getattr(self, f'bias_hh_l{i}')\n it, ft, gt, ot = hidden.split(4, 1)\n it = it.sigmoid()\n ft = ft.sigmoid()\n gt = self.activation_fcn(gt)\n ot = ot.sigmoid()\n ct = ft * caches[i] + it * gt\n hidden = ot * self.activation_fcn(ct)\n caches[i] = ct\n hiddens[i] = hidden\n if self.dropout > 0. and i < self.num_layers - 1:\n hidden = F.dropout(hidden, self.dropout, self.training)\n output.append(hidden)\n output = stack(output)\n hidden = stack(hiddens)\n cache = stack(caches)\n if self.batch_first:\n output = output.swapaxes(0, 1)\n return output, (hidden, cache)\n\n\nclass GRU(RNNBase):\n def __init__(self, input_size, hidden_size, num_layers=1, bias=True, activation='tanh',\n batch_first=False, dropout=0.):\n super(GRU, self).__init__(input_size, hidden_size, num_layers, bias, activation, batch_first, dropout)\n\n def forward(self, x: Tensor, hidden: Tensor = None) -> (Tensor, Tensor):\n \"\"\"\n @param x: (L, B, D)\n @param hidden: (num_layers, B, hidden_size) initial hidden value, default None\n \"\"\"\n self._check_dim(x)\n if self.batch_first:\n x = x.swapaxes(0, 1) # (B, L, D) => (L, B, D)\n hiddens = [zeros(x.shape[1], self.hidden_size)] * self.num_layers if hidden is None else list(hidden)\n output = []\n for t, xt in enumerate(x):\n hidden = xt\n for i in range(self.num_layers):\n weight_ih = getattr(self, f'weight_ih_l{i}')\n weight_hh = getattr(self, f'weight_hh_l{i}')\n hidden @= weight_ih.T\n hiddens_i = hiddens[i]\n hiddens_i @= weight_hh.T\n if self.bias:\n hidden += getattr(self, f'bias_ih_l{i}')\n hiddens_i += getattr(self, f'bias_hh_l{i}')\n rt, zt, nt = hidden.split(3, 1)\n rh, zh, nh = hiddens_i.split(3, 1)\n rt = (rt + rh).sigmoid()\n zt = (zt + zh).sigmoid()\n nt = self.activation_fcn(nt + rt * nh)\n hidden = (1. - zt) * nt + zt * hiddens[i]\n hiddens[i] = hidden\n if self.dropout > 0. 
and i < self.num_layers - 1:\n hidden = F.dropout(hidden, self.dropout, self.training)\n output.append(hidden)\n output = stack(output)\n hidden = stack(hiddens)\n if self.batch_first:\n output = output.swapaxes(0, 1)\n return output, hidden\n","repo_name":"windshadow233/autograd-with-numpy","sub_path":"nptorch/nn/modules/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":8640,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"23432131661","text":"T = int(input())\nformat_string = \"Case #{0}: {1} {2}\"\n\nfor i in range(T):\n N = int(input())\n naomi = [float(x) for x in input().strip().split()]\n ken = [float(x) for x in input().strip().split()]\n\n naomi.sort()\n ken.sort()\n\n ken_war = list(ken)\n naomi_war = list(naomi)\n war = 0\n\n while len(ken_war) > 0:\n if naomi_war[-1] > ken_war[-1]:\n naomi_war = naomi_war[:-1]\n ken_war = ken_war[1:]\n war += 1\n else:\n naomi_war = naomi_war[:-1]\n ken_war = ken_war[:-1]\n\n ken_deceitful = list(ken)\n naomi_deceitful = list(naomi)\n deceit = 0\n\n while len(ken_deceitful) > 0:\n if ken_deceitful[0] > naomi_deceitful[0]:\n naomi_deceitful = naomi_deceitful[1:]\n ken_deceitful = ken_deceitful[:-1]\n else:\n deceit += 1\n naomi_deceitful = naomi_deceitful[1:]\n ken_deceitful = ken_deceitful[1:]\n\n print(format_string.format(i+1, deceit, war))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_138/1142.py","file_name":"1142.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12341503635","text":"\"\"\"Utility functions, classes, and references for the HC DPD app.\"\"\"\nfrom . import models\n\n\n# Standard references to use as keys for HC DPD files/tables\nACTIVE_INGREDIENT = 'active_ingredient'\nBIOSIMILAR = 'biosimilar'\nCOMPANY = 'company'\nDRUG_PRODUCT = 'drug_product'\nFORM = 'form'\nINACTIVE_PRODUCT = 'inactive_product'\nPACKAGING = 'packaging'\nPHARMACEUTICAL_STANDARD = 'pharmaceutical_standard'\nROUTE = 'route'\nSCHEDULE = 'schedule'\nSTATUS = 'status'\nTHERAPUETIC_CLASS = 'therapeutic_class'\nVETERINARY_SPECIES = 'veterinary_species'\n\n\n# Dictionary mapping serializer field name to Django model\ndef standard_to_original_model():\n \"\"\"Mapping between standard naming and original models.\n\n Has to be called as function as models may not be\n initialized when function is imported into other\n modules.\n \"\"\"\n return {\n ACTIVE_INGREDIENT: models.OriginalActiveIngredient,\n BIOSIMILAR: models.OriginalBiosimilar,\n COMPANY: models.OriginalCompany,\n DRUG_PRODUCT: models.OriginalDrugProduct,\n FORM: models.OriginalForm,\n INACTIVE_PRODUCT: models.OriginalInactiveProduct,\n PACKAGING: models.OriginalPackaging,\n PHARMACEUTICAL_STANDARD: models.OriginalPharmaceuticalStandard,\n ROUTE: models.OriginalRoute,\n SCHEDULE: models.OriginalSchedule,\n STATUS: models.OriginalStatus,\n THERAPUETIC_CLASS: models.OriginalTherapeuticClass,\n VETERINARY_SPECIES: models.OriginalVeterinarySpecies,\n }\n","repo_name":"studybuffalo/studybuffalo","sub_path":"study_buffalo/hc_dpd/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"12389482608","text":"import numpy as np\nimport math\ndef solution(progresses, speeds):\n answer = []\n \n pro_list=list(map(lambda x: (100- x), progresses))\n speeds=np.array(speeds)\n 
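# (added editorial note) moc_list computed below is ceil((100 - progress) / speed) per task,\n    # i.e. the whole days each feature still needs; e.g. progress 93, speed 1 -> 7 days.\n    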
pro_list=np.array(pro_list)\n moc_list=pro_list/speeds\n moc_list=list(map(lambda x: math.ceil(x), moc_list))\n print(moc_list)\n \n maxDep = 0\n cnt = 1\n for i in moc_list:\n if maxDep=\"}\r\n\r\n indented = ' ' * 2\r\n\r\n def __init__(self, nb_digits=6, sort_variable_names=False):\r\n # comment line is // as in OPL\r\n # do NOT forget about user names\r\n # no encoding is printed\r\n TextModelPrinter.__init__(self, indent=2, comment_start='//',\r\n nb_digits_for_floats=nb_digits,\r\n hide_user_names=False,\r\n encoding=None,\r\n sort_variable_names=sort_variable_names)\r\n\r\n def get_format(self):\r\n from docplex.mp.format import OPL_format\r\n\r\n return OPL_format\r\n\r\n def fix_name(self, mobj, prefix, local_index_map, hide_names):\r\n raw_name = mobj.name\r\n if raw_name and not hide_names and not mobj.is_generated():\r\n # there is a user name\r\n return self._translate_chars(raw_name)\r\n elif not isinstance(mobj, Var):\r\n # constraitn with no name -> no name\r\n return None\r\n elif mobj.is_generated():\r\n #mobj_origin = mobj.origin\r\n # if hasattr(mobj_origin, 'as_var') and mobj is mobj_origin.as_var:\r\n # return str(mobj_origin)\r\n # else:\r\n return self._make_prefix_name(mobj, prefix, local_index_map, offset=1)\r\n else:\r\n # anonymous or anonymized variable. -> x\r\n return self._make_prefix_name(mobj, prefix, local_index_map, offset=1)\r\n\r\n def _print_model_name(self, out, mdl):\r\n printed_name = mdl.name or \"AnonymousModel\"\r\n out.write(\"// model name is: {0:s}\\n\".format(printed_name))\r\n\r\n def _vartype_name(self, vartype):\r\n # INTERNA: returns a printable string for a vartype\r\n return self.vartype_map.get(vartype.cplex_typecode, \"unknown\")\r\n\r\n def _print_var_containers(self, out, mdl):\r\n gensym_count = 1\r\n printed_header = False\r\n for ctn in mdl.iter_var_containers():\r\n if not printed_header:\r\n self._print_line_comment(out, \"var contrainer section\")\r\n printed_header = True\r\n vartype_name = self._vartype_name(ctn.vartype)\r\n varctn_name = ctn.name\r\n if not varctn_name:\r\n varctn_name = 'x%d' % gensym_count\r\n gensym_count += 1\r\n out.write(\"dvar {0} {1}{2};\\n\".format(vartype_name, varctn_name, ctn.dimension_string))\r\n\r\n if printed_header:\r\n self._newline(out)\r\n\r\n def _print_single_vars(self, out, mdl):\r\n printed_header = False\r\n for v in mdl.iter_variables():\r\n var_ctn = v.container\r\n if var_ctn is not None:\r\n continue\r\n\r\n if not printed_header:\r\n self._print_line_comment(out, \"single vars section\")\r\n printed_header = True\r\n vartype_name = self._vartype_name(v.vartype)\r\n var_printname = self._var_name_map.get(v._index, \"???\")\r\n v_origin = v.origin\r\n s_generated = ' # -- generated by: {0!s}'.format(v_origin) if v_origin is not None else ''\r\n out.write(\"dvar {0} {1};{2}\\n\".format(vartype_name, var_printname, s_generated))\r\n\r\n if printed_header:\r\n self._newline(out)\r\n\r\n def _print_objective(self, wrapper, model):\r\n wrapper.write(model.objective_sense.verb)\r\n wrapper.flush(print_newline=True)\r\n objexpr = model.objective_expr\r\n objlin = objexpr.get_linear_part()\r\n printed = self._print_lexpr(wrapper, self._num_printer, self._var_name_map, objlin,\r\n allow_empty=True,\r\n print_constant=False)\r\n if objexpr.is_quad_expr() and objexpr.has_quadratic_term():\r\n self._print_qexpr_obj(wrapper, self._num_printer, self._var_name_map,\r\n quad_expr=objexpr,\r\n force_initial_plus=printed,\r\n use_double=False)\r\n printed = True\r\n obj_offset = 
objexpr.get_constant()\r\n if obj_offset:\r\n if printed and obj_offset > 0:\r\n wrapper.write(u'+')\r\n wrapper.write(self._num_to_string(obj_offset))\r\n wrapper.write(';', separator=False)\r\n wrapper.flush()\r\n\r\n def _pprint_expr(self, wrapper, expr):\r\n q = 0\r\n if expr.is_quad_expr() and expr.has_quadratic_term():\r\n q = self._print_qexpr_iter(wrapper, self._num_printer, self._var_name_map, expr.iter_sorted_quads(),\r\n use_double=False)\r\n self._print_expr_iter(wrapper, self._num_printer, self._var_name_map, expr.iter_terms(),\r\n constant=expr.get_constant(), # yes, print the constant\r\n allow_empty=q > 0,\r\n force_first_plus=q > 0 # force a '+' if quadratic section is non-empty\r\n )\r\n\r\n def _print_binary_constraint(self, wrapper, ct):\r\n left_expr = ct.left_expr\r\n right_expr = ct.right_expr\r\n self._pprint_expr(wrapper, left_expr)\r\n\r\n wrapper.write(self.ct_symbol_map[ct.sense])\r\n\r\n self._pprint_expr(wrapper, right_expr)\r\n\r\n def _print_range_constraint(self, wrapper, rng):\r\n expr = rng.expr\r\n lb = rng.lb\r\n ub = rng.ub\r\n wrapper.write(self._num_to_string(lb))\r\n wrapper.write(\"<=\")\r\n self._print_lexpr(wrapper, self._num_printer, self._var_name_map, expr, print_constant=True)\r\n wrapper.write(\"<=\")\r\n wrapper.write(self._num_to_string(ub))\r\n\r\n def _print_logical_constraint(self, wrapper, logi_ct, logical_symbol):\r\n active = logi_ct.active_value\r\n linear_ct = logi_ct.linear_constraint\r\n indicator_varname = self._var_print_name(logi_ct.binary_var)\r\n wrapper.write(indicator_varname)\r\n if 0 == active:\r\n wrapper.write(\"== 0\")\r\n wrapper.write(logical_symbol)\r\n wrapper.write('(')\r\n self._print_binary_constraint(wrapper, linear_ct)\r\n wrapper.write(');', separator=False)\r\n\r\n def _print_linear_constraints(self, wrapper, model):\r\n wrapper.begin_line()\r\n for ct in model.iter_binary_constraints():\r\n\r\n wrapper.begin_line()\r\n ctname = self.linearct_print_name(ct)\r\n if ctname:\r\n wrapper.set_indent(' ') # two spaces\r\n wrapper.write(\" %s:\" % ctname)\r\n wrapper.flush()\r\n else:\r\n wrapper.begin_line(indented=True)\r\n\r\n self._print_binary_constraint(wrapper, ct)\r\n wrapper.write(';', separator=False)\r\n wrapper.set_indent(' ')\r\n wrapper.flush(print_newline=True, restart_from_empty_line=False)\r\n\r\n def _print_ranges(self, wrapper, model):\r\n wrapper.begin_line()\r\n for ct in model.iter_range_constraints():\r\n\r\n wrapper.begin_line()\r\n ctname = self.linearct_print_name(ct)\r\n if ctname:\r\n wrapper.set_indent(2 * ' ')\r\n wrapper.write(' %s:' % ctname)\r\n wrapper.flush()\r\n\r\n else:\r\n wrapper.begin_line(indented=True)\r\n\r\n self._print_range_constraint(wrapper, ct)\r\n wrapper.write(';', separator=False)\r\n wrapper.flush()\r\n wrapper.set_indent(' ')\r\n\r\n def _print_logicals(self, wrapper, model):\r\n for ct in model.iter_logical_constraints():\r\n ctname = self.logicalct_print_name(ct)\r\n if ctname:\r\n wrapper.set_indent(' ')\r\n wrapper.write(\" %s:\" % ctname)\r\n wrapper.flush()\r\n else:\r\n wrapper.begin_line(indented=True)\r\n symb = '<=>' if ct.is_equivalence() else '<='\r\n self._print_logical_constraint(wrapper, ct, logical_symbol=symb)\r\n wrapper.set_indent(' ')\r\n wrapper.flush(restart_from_empty_line=True)\r\n\r\n def _print_quadratic_cts(self, wrapper, model):\r\n for qct in model.iter_quadratic_constraints():\r\n\r\n ctname = self.qc_print_name(qct)\r\n if ctname:\r\n wrapper.set_indent(' ')\r\n wrapper.write(\" %s:\" % ctname)\r\n wrapper.flush()\r\n else:\r\n 
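# (added note) unnamed range constraint: emit it on an indented line with no label\r\n                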
wrapper.begin_line(indented=True)\r\n\r\n self._print_binary_constraint(wrapper, qct)\r\n wrapper.write(';', separator=False)\r\n wrapper.set_indent(' ')\r\n wrapper.flush(restart_from_empty_line=True)\r\n\r\n def _print_kpis(self, out, wrapper, model):\r\n printed_section_header = False\r\n for kpi in model.iter_kpis():\r\n if kpi.is_decision_expression():\r\n if not printed_section_header:\r\n self._newline(out)\r\n self._print_line_comment(out, \" KPI section\")\r\n printed_section_header = True\r\n\r\n kpi_expr = kpi.to_expr()\r\n kpi_typename = 'int' if kpi_expr.is_discrete() else 'float'\r\n wrapper.write('dexpr {0} {1}'.format(kpi_typename, self._translate_chars(kpi.name)))\r\n wrapper.write('=')\r\n if isinstance(kpi_expr, LinearOperand):\r\n self._print_lexpr(wrapper, self._num_printer, self._var_name_map, kpi_expr, print_constant=True)\r\n elif isinstance(kpi_expr, Var):\r\n wrapper.write(kpi_expr.name)\r\n wrapper.write(';', separator=False)\r\n wrapper.flush(restart_from_empty_line=True)\r\n # ---\r\n if printed_section_header:\r\n wrapper.newline()\r\n\r\n def _print_sos(self, wrapper, model):\r\n varname_map = self._var_name_map\r\n for sos_varset in model.iter_sos():\r\n sos_varname_joined = ', '.join(varname_map[v._index] for v in sos_varset.iter_variables())\r\n wrapper.write('sos{0}: {1}'.format(sos_varset.sos_type.value, sos_varname_joined))\r\n wrapper.flush()\r\n\r\n def print_model_to_stream(self, out, model):\r\n wrapper = _ExportWrapper(oss=out, indent_str=' ', line_width=78)\r\n self.prepare(model)\r\n # header\r\n self._print_signature(out)\r\n self._print_encoding(out)\r\n self._print_model_name(out, model)\r\n\r\n # var containers\r\n self._print_var_containers(out, model)\r\n self._print_single_vars(out, model)\r\n # KPI section\r\n self._print_kpis(out, wrapper, model)\r\n\r\n self._print_objective(wrapper, model)\r\n wrapper.write(\"\\nsubject to {\")\r\n wrapper.flush()\r\n self._print_linear_constraints(wrapper, model)\r\n self._print_ranges(wrapper, model)\r\n self._print_logicals(wrapper, model)\r\n self._print_quadratic_cts(wrapper, model)\r\n self._print_sos(wrapper, model)\r\n out.write(\"}\\n\")\r\n","repo_name":"OscarJHernandez/qc_portfolio_optimization","sub_path":"venv/lib/python3.8/site-packages/docplex/mp/ppretty.py","file_name":"ppretty.py","file_ext":"py","file_size_in_byte":11752,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"70265702916","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 26 09:07:57 2022\n\n@author: aaron\n\"\"\"\n\n\nfrom sklearn.model_selection import train_test_split\n\nfrom articles_nlp_module import ExploratoryDataAnalysis, ModelDevelopment, ModelEvaluation\nimport pandas as pd\nimport re\n\n#%% Exploratory Data Analysis\n\n#%% Step 1 ) Data Loading\n\ndf = pd.read_csv(\"https://raw.githubusercontent.com/susanli2016/PyCon-Canada-2019-NLP-Tutorial/master/bbc-text.csv\")\n\n#%% Step 2) Data Inspection/Visualization\n\ndf.info()\ndf.describe().T\ndf.head()\n\ndf.duplicated().sum()\ndf.isna().sum()\n\nprint(df['text'][4])\nprint(df['text'][10])\n\n#%% Step 3) Data Cleaning\n\ntext = df['text']\ncategory = df['category']\n\nfor index, words in enumerate(df['text']):\n text[index] = re.sub('<.*?>','',words) \n text[index] = re.sub('[^a-zA-Z]',' ',words).lower().split() \n\ntext_backup = text.copy()\ncategory_backup = category.copy()\n\n#%% Step 4) Features Selection - text is the feature, category is the target\n\n#%% Step 5) Data Preprocessing\n\neda = 
ExploratoryDataAnalysis() \n\npadded_text = eda.nlp_tokenizer_padsequences(df, text)\n\n# y target\n\ncategory = eda.one_hot_encoder(category)\n\nX_train, X_test, y_train, y_test = train_test_split(padded_text,category,\n test_size=0.3,\n random_state=123)\n\n#%% Model Development\n\nmd = ModelDevelopment()\n\nmodel = md.dl_nlp_model(df, text, X_train, y_train)\n\n#%% Model Compilation\n\nmd.dl_model_compilation(model, 'cat')\n\n#%% Model Training\n\nhist = md.dl_model_training(X_train, X_test, y_train, y_test, model, epochs=5)\n\n# open anaconda prompt\n# type cd \"folder path to your logs file\"\n# type tensorboard --logdir logs\n\n# in Colab, type %load_ext tensorboard\n# type %tensorboard --logdir (logs folder name that you uploaded)\n\n#%% Model Evaluation/Analysis\n\nme = ModelEvaluation() \n\n# Plotting the model trained\n\nme.dl_plot_hist(hist)\n\n# Printing the classification report\n\nme.classification_report(X_test, y_test, model, 'dl')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"AlfredM91/Articles-Classification","sub_path":"articles_nlp.py","file_name":"articles_nlp.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12111826059","text":"from pathlib import Path\nimport ijson\nfrom scipy.io import wavfile\nimport pandas as pd\nimport numpy as np\nfrom parameter import parameters\n\nlen_files = 0\nk = 0\ndf_meta = pd.DataFrame()\ndf_meta_list = []\n\n# Path to Timit Audio Files (EDIT) Origin\np = Path('C:\\\\Users\\\\silas\\\\Desktop\\\\Neuer Ordner\\\\timit_16kHz_wav\\\\train')\n\n# Path to new normalized audio data Destination\ndb_df = parameters('database')\naudio_file_name_str = db_df.iloc[0]['audiofile']\n\nwith open('Databases\\\\database.json', 'r') as f:\n objects = ijson.items(f, 'data.item')\n list_names = list(objects)\n list_wavPath = list(p.glob('**/*.wav'))\n for i in range(len(list_wavPath)):\n wav_str = str(list_wavPath[i])\n rate, audio_data = wavfile.read(wav_str)\n\n audio_data_proc_mean = audio_data - audio_data.mean()\n audio_data_proc = audio_data_proc_mean/audio_data_proc_mean.std()\n\n print(i)\n\n df = pd.DataFrame({'name': list_names[i][1], 'gender': list_names[i][2], 'sample': list_names[i][4], 'audiosignal{0}'.format(i): audio_data_proc})\n df_meta_list.append([list_names[i][1], list_names[i][4], np.mean(audio_data_proc), np.var(audio_data_proc), audio_data_proc.max(), audio_data_proc.size, list_names[i][2]])\n\n df_meta = pd.DataFrame(df_meta_list, columns=['name', 'sample', 'mean', 'var', 'max', 'sample_length', 'gender'])\n\n df_json = df.to_json(orient='split')\n with open(audio_file_name_str.format(i), 'w') as fs:\n fs.write(df_json)\n\n\nwith open(\"Databases\\\\meta.json\", 'w') as fs:\n meta_json = df_meta.to_json(orient='split')\n fs.write(meta_json)\n\n","repo_name":"SilasRech/Privacy-Aware-Siamese-Neural-Network","sub_path":"Code/GetWavToPyhton.py","file_name":"GetWavToPyhton.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10595373523","text":"import json\nimport time\nimport urllib.request\nimport re\nimport requests\nfrom bs4 import BeautifulSoup as bs\nfrom contextlib import closing\n\nfrom kafka_utils.producer import push_message\n\nkey = '8F8BBCEDF2B6E75EDC1F65A9DADB9A0E'\n\n\n# step 1: Get userID\ndef get_user_id(user_profile, user_ids):\n url = user_profile\n\n with urllib.request.urlopen(url) as 
page:\n        for line in page:\n            if b\"steamid\" in line:\n                try:\n                    user_id = re.search(rb\"\\\"steamid\\\":\\\"(\\d+)\\\"\", line).group(1).decode('utf-8')\n                    print(user_id + ' ' + user_profile)\n                    if user_id is not None:\n                        user_ids.append(user_id)\n                    break\n                except Exception as e:\n                    print(e)\n                    continue\n\n\ndef get_users(member_list_no, user_ids):\n    url = 'https://steamcommunity.com/games/steam/members?p=' + str(member_list_no)\n    header = {\n        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) '\n                      'Chrome/57.0.2987.133 Safari/537.36',\n        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n        'Accept-Encoding': 'gzip, deflate, sdch',\n        'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2'}\n    resp = requests.get(url, header)\n\n    soup = bs(resp.text, 'html.parser')\n    # print(soup.prettify())\n\n    # search profile of users who are online/in-game\n    all_users = soup.find_all(\"div\",\n                              onclick=re.compile(\"top\\.location\\.href='https:\\/\\/steamcommunity\\.com\\/id\\/(\\w+)'\"))\n\n    # get user names\n    for user in all_users:\n        user_profile = user.div.div.div.a['href']\n        # print user_profile\n        get_user_id(user_profile, user_ids)\n        user_name = re.search('https:\\/\\/steamcommunity\\.com\\/id\\/(\\w+)', user_profile).group(1)\n\n\n# step2: write id in file\ndef dump_user_id(user_ids, user_out_file, user_id_content):\n    # with open(user_out_file, 'w') as f:\n    for idx in range(0, len(user_ids)):\n        user_id_idx = {'user_idx': idx, 'user_id': user_ids[idx]}\n        # json.dump(user_id_idx, f)\n        push_message('user_idx', json.dumps(user_id_idx))\n        # user_id_content.append(user_id_idx)\n        # f.write('\\n')\n\n\n# step3: Get all games info\n# get game id\ndef get_app_id_list():\n    url = 'https://steamcommunity.com/linkfilter/https://api.steampowered.com/ISteamApps/GetAppList/v2/'\n    header = {\n        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) '\n                      'Chrome/57.0.2987.133 Safari/537.36',\n        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n        'Accept-Encoding': 'gzip, deflate, sdch',\n        'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2'}\n    resp = requests.get(url, header)\n    # [{\"appid\":1941401,\"name\":\"\"}, ...]\n    app_id_objs = resp.json()['applist']['apps']\n    app_id_list = []\n\n    for app in app_id_objs:\n        app_id_list.append(app['appid'])\n\n    return app_id_list\n\n\ndef get_game_detail(app_id_list, num, game_detail_out_file, game_detail_content):\n    url = 'https://store.steampowered.com/api/appdetails?appids='\n    header = {\n        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) '\n                      'Chrome/57.0.2987.133 Safari/537.36',\n        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n        'Accept-Encoding': 'gzip, deflate, sdch',\n        'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2'}\n    with open(game_detail_out_file, 'w') as f:\n        for i in app_id_list:\n            j = 0\n            while True:\n                try:\n                    url_temp = url + str(i)\n                    time.sleep(.100) # sleep 100ms\n                    resp = requests.get(url_temp, header)\n\n                    obj = resp.json()\n                except requests.exceptions.HTTPError as e:\n                    print(\"HTTP error\", e)\n                except requests.exceptions.ConnectionError as e:\n                    print(\"Connection error\", e)\n                except requests.exceptions.Timeout as e:\n                    print(\"Request timed out\", e)\n                except requests.exceptions.RequestException as e:\n                    print(\"Other request error\", e)\n                except json.JSONDecodeError as e:\n                    print(\"JSON decode error\", e)\n                except 
Exception as e:\n                    print(\"Other error\", e)\n                if obj is not None:\n                    for key in obj:\n\n                        if obj[key][\"success\"] is True:\n                            json.dump(obj[key][\"data\"], f)\n                            push_message('game_detail', json.dumps(obj[key][\"data\"]))\n                            f.write('\\n')\n                            break\n                    else:\n                        # print(idx)\n                        current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n                        print(current_time)\n                        print(\"Id {} not found, attempt {}\".format(i, j))\n                        j += 1\n\n\ndef process_json_obj(resp, user_out_file, user_id):\n    if 'user_summary' in user_out_file:\n        # corner case: list index out of range\n        try:\n            obj = resp.json()['response']['players'][0]\n        except Exception as e:\n            obj = {'steamid': user_id}\n            print(e)\n    elif 'user_owned_games' in user_out_file:\n        try:\n            obj = resp.json()['response']\n            obj = {'steamid': user_id, 'game_count': obj['game_count'], 'games': obj['games']}\n        except Exception as e:\n            print(e)\n            obj = {'steamid': user_id, 'game_count': -1, 'games': []}\n    elif 'user_friend_list' in user_out_file:\n        try:\n            obj = resp.json()['friendslist']\n            obj = {'steamid': user_id, 'friends': obj['friends']}\n        except Exception as e:\n            print(e)\n            obj = {'steamid': user_id, 'friends': []}\n    elif 'user_recently_played_games' in user_out_file:\n        try:\n            obj = resp.json()['response']\n            obj = {'steamid': user_id, 'total_count': obj['total_count'], 'games': obj['games']}\n        except Exception as e:\n            # corner case: total_count is zero\n            print(e)\n            if 'total_count' in obj:\n                obj = {'steamid': user_id, 'total_count': obj['total_count'], 'games': []}\n            else:\n                obj = {'steamid': user_id, 'total_count': -1, 'games': []}\n\n    return obj\n\n\ndef dump_user_info(topic, url, user_ids, user_out_file):\n    user_info_content = []\n    # with open(user_out_file, 'w') as f:\n    for user_id in user_ids:\n        url_temp = url + str(user_id)\n        print(url_temp)\n        resp = requests.get(url_temp)\n\n        # resp = requests.head(url_temp)\n        obj = process_json_obj(resp, user_out_file, user_id)\n        user_info_content.append(obj)\n        # json.dump(obj, f)\n        push_message(topic, json.dumps(obj))\n        # f.write('\\n')\n    return user_info_content\n","repo_name":"Henry59210/bigdata","sub_path":"web_crawler/steam_data.py","file_name":"steam_data.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"16766459550","text":"def dfs(v):\n    print(v)\n    visited = [False] * len(graph) # initialize the visited flags\n    visited[v] = True # mark the start vertex as visited\n    stack = [v]\n\n    while stack: # repeat until the stack is empty\n        v = stack.pop() # current vertex (last in, first out)\n        for next_v in graph[v]: # for every adjacent vertex\n            if not visited[next_v]: # if it has not been visited yet\n                visited[next_v] = True # mark it as visited\n                stack.append(next_v) # push it onto the stack\n\n\ngraph = [\n\t\t[1, 2],\n\t\t[0, 3, 4],\n\t\t[0, 4, 5],\n\t\t[1],\n\t\t[1, 2, 6],\n\t\t[2],\n\t\t[4]\n]\n\ndfs(0) # start from vertex 0","repo_name":"yooooonzzzzzang/Algorithm","sub_path":"04_Stack1/oneline/stack1_dfs_반복문_교수님.py","file_name":"stack1_dfs_반복문_교수님.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"73357493633","text":"import assign_subject\n\n# Given subjects\n# 1. Data mining\n# 2. NLP\n# 3. AI\n# 4. Spatial Data Analysis\n# 5. Image processing\n# 6. Big-Data\n# 7. Graph Mining\n# 8. Machine Learning\n# 9. E-commerce\n# 10. Wireless Mobile communication\n# 11. 
Cloud computing\n\n# Creating an array for subjects for mapping\nsubjects = ['DM', 'NLP', 'AI', 'SDA', 'IP',\n 'BD', 'GM', 'ML', 'EC', 'WMC', 'CC']\nprefrences = []\n\ntry:\n asubj = assign_subject.AssignSubject()\n input_file = open(\"../inputPS4.txt\")\n studentPreference = {}\n for record in input_file.readlines():\n student = record.split(\" / \")\n studentPref = [0] * 11\n for pref in student[1:]:\n studentPref[subjects.index(pref.strip())] = 1\n prefrences.append(studentPref)\n allocations = asubj.count_allocations(prefrences)\nexcept FileNotFoundError as fe:\n print(fe)\nexcept IOError as ioe:\n print(ioe)\nfinally:\n input_file.close()\n\ntry:\n output_file = open(\"../outputPS4.txt\", \"w+\")\n output_file.write(\n 'The total number of allocations possible is: ' + str(allocations))\n output_file.close()\nexcept FileNotFoundError as fe:\n print(fe)\nexcept IOError as ioe:\n print(ioe)\nfinally:\n output_file.close()\n","repo_name":"brajbhushan-tripathi/ASSIGNMENT2_PUN_B2_G9","sub_path":"AS2_PS4_AP_G9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15364713998","text":"import numpy as np\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\n#from routes import request_api\n#from flask_swagger_ui import get_swaggerui_blueprint\n\napp = Flask(__name__)\nmodel = pickle.load(open('model.pkl', 'rb'))\n\n@app.route('/')\ndef home():\n return render_template('index.html')\nSWAGGER_URL = '/swagger'\nAPI_URL = '/static/swagger.json'\n#swaggerui_blueprint = get_swaggerui_blueprint(\n #SWAGGER_URL,\n #API_URL,\n #config={\n #\"app_name\": \"Crime prediction\"\n #}\n#)\n#app.register_blueprint(swaggerui_blueprint, url_prefix=SWAGGER_URL)\n#app.register_blueprint(request_api.get_blueprint())\n\n@app.route('/predict',methods=['POST'])\ndef predict():\n '''\n For rendering results on HTML GUI\n '''\n int_features = [int(x) for x in request.form.values()]\n final_features = [np.array(int_features)]\n prediction = model.predict(final_features)\n\n output = round(prediction[0], 2)\n\n return render_template('index.html', prediction_text='La cantidad de denuncias para el próximo mes deberia ser: {}'.format(output))\n\n@app.route('/predict_api',methods=['POST'])\ndef predict_api():\n '''\n For direct API calls trought request\n '''\n data = request.get_json(force=True)\n prediction = model.predict([np.array(list(data.values()))])\n\n output = prediction[0]\n return jsonify(output)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"JhonathanCP/MLApi","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72984243714","text":"import xml.etree.cElementTree as ET\r\nfrom xml.dom import minidom\r\nimport graphviz\r\nfrom tkinter import *\r\nimport tkinter as t\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox\r\nfrom allReport import allReport\r\nfrom singleReport import singleReport\r\nfrom timeReport import timeReport\r\nfrom PIL import Image, ImageTk\r\nimport re\r\n\r\ndef menu():\r\n print(\"****************************************************\")\r\n print(\"* Menú Principal *\")\r\n print(\"****************************************************\")\r\n print(\"* 1) Cargar archivo de maquina *\")\r\n print(\"* 2) Cargar archivo de simulación *\")\r\n 
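# (added translation gloss) menu: 1 load machine file, 2 load simulation file, 3 show production lines,\r\n    # 4 show products, 5 simulate one product, 6 simulate all products, 7/8 XML output for one/all products,\r\n    # 9/10 report for one/all products, 11 report for a product at time t, 12 exit.\r\n    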
print(\"* 3) Mostrar Lineas de producción *\")\r\n print(\"* 4) Mostrar productos *\")\r\n print(\"* 5) Simulación de un producto *\")\r\n print(\"* 6) Simulación de todos los productos *\")\r\n print(\"* 7) Archivo de salida de un producto XML *\")\r\n print(\"* 8) Archivo de salida de todos los productos XML *\")\r\n print(\"* 9) Reporte de un producto *\")\r\n print(\"* 10) Reporte de todos los productos *\")\r\n print(\"* 11) Reporte de un producto en un tiempo t *\")\r\n print(\"* 12) Salir *\")\r\n print(\"****************************************************\")\r\n\r\nclass node:\r\n def __init__(self, value):\r\n self.value = value\r\n self.next = None\r\n \r\n def __str__(self):\r\n return str(self.value)\r\n\r\nclass circularNode:\r\n def __init__(self, value = None, next = None):\r\n self.value = value\r\n self.next = next\r\n\r\nclass list:\r\n def __init__(self):\r\n self.first = None\r\n self.size = 0\r\n \r\n def agregar(self, valueToAdd):\r\n MyNode = node(valueToAdd)\r\n if self.size == 0:\r\n self.first = MyNode\r\n else:\r\n Current = self.first\r\n while Current.next != None:\r\n Current = Current.next\r\n Current.next = MyNode\r\n self.size += 1\r\n return MyNode\r\n \r\n def remove(self, valuetoRemove):\r\n if self.size == 0:\r\n return False\r\n else:\r\n Current = self.first\r\n while Current.next.value != valuetoRemove:\r\n if Current.next == None:\r\n return False\r\n else:\r\n Current = Current.next\r\n \r\n deletedNode = Current.next\r\n Current.next = deletedNode.next\r\n self.size -= 1\r\n return deletedNode\r\n \r\n def __len__(self):\r\n return self.size\r\n \r\n def __str__(self):\r\n String = \"[\"\r\n Current = self.first\r\n while Current != None:\r\n String += str(Current)\r\n if Current.next != None:\r\n String += str(\",\")\r\n Current = Current.next\r\n String += \"]\"\r\n \r\n return String\r\n\r\nclass circularList:\r\n def __init__(self):\r\n self.first = None\r\n self.size = 0\r\n\r\n def agregar(self, value):\r\n if self.first is None:\r\n self.first = circularNode(value = value)\r\n self.first.next = self.first \r\n else:\r\n actual = circularNode(value = value, next = self.first.next)\r\n self.first.next = actual\r\n self.size += 1\r\n\r\n def recorrer(self):\r\n if self.first is None:\r\n return\r\n actual = self.first\r\n print(actual.value.num)\r\n while actual.next != self.first:\r\n actual = actual.next\r\n print(actual.value.num)\r\n \r\n def __len__(self):\r\n return self.size\r\n \r\nclass machine:\r\n def __init__(self, lineNum, productionList, productList):\r\n self.lineNum = lineNum\r\n self.productionList = productionList\r\n self.productList = productList\r\n\r\nclass productionLine:\r\n def __init__(self, num, componentNum, time, actComp):\r\n self.num = num\r\n self.componentNum = componentNum\r\n self.time = time\r\n self.actComp = actComp\r\n\r\n def setactComp(self, value):\r\n self.actComp = value\r\n\r\n def getactComp(self):\r\n return self.actComp\r\n\r\nclass product:\r\n def __init__(self, name, steps):\r\n self.name = name\r\n self.steps = steps\r\n\r\nclass simulation:\r\n def __init__(self, name, products):\r\n self.name = name\r\n self.products = products\r\n\r\nclass exitProducts:\r\n def __init__(self, name, totalTime, elaboration):\r\n self.name = name\r\n self.totalTime = totalTime\r\n self.elaboration = elaboration\r\n\r\nclass elaboration:\r\n def __init__(self, secNum, stepList):\r\n self.secNum = secNum\r\n self.stepList = stepList\r\n\r\nclass assemblyLine:\r\n def __init__(self, lineNum, action):\r\n 
self.lineNum = lineNum\r\n self.action = action\r\n\r\nclass stepPassed:\r\n def __init__(self, line, comp, boolean):\r\n self.line = line\r\n self.comp = comp\r\n self.boolean = boolean\r\n\r\n def setBoolean(self, value):\r\n self.boolean = value\r\n\r\n def getBoolean(self):\r\n return self.boolean\r\n\r\nclass listObjectG:\r\n def __init__(self, product, objects):\r\n self.product = product\r\n self.objects = objects\r\n\r\nclass objectG:\r\n def __init__(self, second, steps):\r\n self.second = second\r\n self.steps = steps\r\n\r\nalphaMachine = machine(0, \"\", \"\")\r\nalphaSimulation = simulation(\"\", \"\")\r\nsaveSingleSimulation = simulation(\"\", \"\")\r\nsaveCompleteSimulation = simulation(\"\", \"\")\r\nreportGraph = list()\r\n#valores = Guardar los productos cargados en un combobox\r\nvalores = []\r\n#segundos = Guardar los segundos correspondientes de un producto especifico en un combobox para reportes\r\nsegundos = []\r\n\r\ndef lowerTree(tree):\r\n t = ET.tostring(tree)\r\n t = t.lower()\r\n return minidom.parseString(t)\r\n\r\ndef readMachine(ruta):\r\n try:\r\n #Parse\r\n mytree = ET.parse(ruta)\r\n root = mytree.getroot()\r\n lowerRoot = lowerTree(root)\r\n\r\n lineNum = lowerRoot.getElementsByTagName(\"cantidadlineasproduccion\")[0].firstChild.data\r\n \r\n productionLines = circularList()\r\n production = lowerRoot.getElementsByTagName(\"listadolineasproduccion\")[0]\r\n prodLineLen = len(production.getElementsByTagName(\"lineaproduccion\")) \r\n for num in range(prodLineLen):\r\n prodLine = production.getElementsByTagName(\"lineaproduccion\")[num]\r\n number = prodLine.getElementsByTagName(\"numero\")[0].firstChild.data\r\n components = prodLine.getElementsByTagName(\"cantidadcomponentes\")[0].firstChild.data\r\n time = prodLine.getElementsByTagName(\"tiempoensamblaje\")[0].firstChild.data\r\n productionLines.agregar(productionLine(number, components, time, 0))\r\n #print(\"Linea: \" + str(number) + \", No.Componentes: \" + str(components) + \", tiempo: \" + str(time))\r\n\r\n products = list() \r\n productList = lowerRoot.getElementsByTagName(\"listadoproductos\")[0]\r\n productListLen = len(productList.getElementsByTagName(\"producto\")) \r\n for num2 in range(productListLen):\r\n product1 = productList.getElementsByTagName(\"producto\")[num2]\r\n name = product1.getElementsByTagName(\"nombre\")[0].firstChild.data\r\n process = product1.getElementsByTagName(\"elaboracion\")[0].firstChild.data\r\n cleaned = stepsDetector(process)\r\n products.agregar(product(name, cleaned))\r\n #print(\"Producto: \" + str(name) + \", Proceso de ensamblaje: \" + str(process))\r\n \r\n global alphaMachine\r\n alphaMachine = machine(lineNum, productionLines, products)\r\n\r\n print(\"\\n\")\r\n print(\"Carga realizada correctamente\")\r\n print(\"\\n\") \r\n except:\r\n print(\"\\n\")\r\n print(\"Ha ocurrido un error, intentalo nuevamente\")\r\n print(\"\\n\")\r\n\r\ndef readSimulation(ruta):\r\n try:\r\n #Parse\r\n mytree = ET.parse(ruta)\r\n root = mytree.getroot()\r\n lowerRoot = lowerTree(root)\r\n\r\n name = lowerRoot.getElementsByTagName(\"nombre\")[0].firstChild.data\r\n \r\n productsList = list()\r\n products = lowerRoot.getElementsByTagName(\"listadoproductos\")[0]\r\n productsLen = len(products.getElementsByTagName(\"producto\")) \r\n for num in range(productsLen):\r\n productName = products.getElementsByTagName(\"producto\")[num].firstChild.data\r\n productsList.agregar(productName)\r\n global alphaSimulation\r\n alphaSimulation = simulation(name, productsList)\r\n \r\n 
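# (added note) keep the parsed simulation (name plus product names) in the module-level global\r\n        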
print(\"\\n\")\r\n print(\"Carga realizada correctamente\")\r\n print(\"\\n\") \r\n except:\r\n print(\"\\n\")\r\n print(\"Ha ocurrido un error, intentalo nuevamente\")\r\n print(\"\\n\")\r\n\r\ndef stepsDetector(process):\r\n #process = \"L1pC1p L2pC2p L1pC3p\"\r\n regex = \"[Ll][0-9]+p[Cc][0-9]+p\"\r\n reSteps = re.findall(regex, process)\r\n steps = list()\r\n for step in reSteps:\r\n steps.agregar(step)\r\n return steps\r\n\r\ndef completeSimulation():\r\n global alphaSimulation\r\n saveProducts = list()\r\n global reportGraph\r\n reportGraph = list()\r\n current = alphaSimulation.products.first\r\n alphacurrent = alphaSimulation.products.first\r\n print(\"Nombre de la simulación: \" + str(alphaSimulation.name))\r\n print(\"Productos cargados para simulación: \")\r\n while current != None:\r\n print(\"\\t- \" + str(current.value))\r\n current = current.next\r\n #print(\"\\n\")\r\n while alphacurrent != None:\r\n simName = alphacurrent.value\r\n global alphaMachine\r\n current2 = alphaMachine.productList.first\r\n while current2 != None:\r\n tempName = str(current2.value.name)\r\n if tempName.lower() == simName.lower():\r\n Encontrado = True\r\n elaborationProd = list()\r\n print(\"\\n\")\r\n print(\"Simulación del producto: \" + str(tempName.lower()))\r\n print(\"Procedimiento de ensamblaje: \" + str(current2.value.steps))\r\n print(\"\\n\")\r\n contLine = 0\r\n contComp = 0\r\n \r\n current3 = current2.value.steps.first\r\n matches = list()\r\n while current3 != None:\r\n #current3 = \"L2pC3p\" \r\n stringStep = str(current3.value)\r\n regex = \"[0-9]+\"\r\n m = re.findall(regex, stringStep)\r\n matches.agregar(stepPassed(m[0], m[1], False))\r\n current3 = current3.next\r\n\r\n lineCurrent = alphaMachine.productionList.first\r\n lineSeconds = alphaMachine.productionList.first\r\n lineSeconds2 = alphaMachine.productionList.first\r\n secLoop = int(alphaMachine.lineNum)\r\n resetCont = 0\r\n reset = alphaMachine.productionList.first\r\n while resetCont != secLoop:\r\n resetComp = reset.value.setactComp(0)\r\n resetCont += 1\r\n reset = reset.next\r\n \r\n secLoop = 1\r\n while lineSeconds.next != lineSeconds2:\r\n secLoop += 1\r\n lineSeconds = lineSeconds.next\r\n\r\n contBool = 1\r\n clock = 1\r\n contSec = int(alphaMachine.lineNum)\r\n Assembly = False\r\n linePass = 0\r\n alphaCont = 0\r\n whileBreak = 0\r\n addCont = 0\r\n reportG = list()\r\n while lineCurrent != None:\r\n #if whileBreak == 80:\r\n # break\r\n #whileBreak += 1\r\n action = \"\"\r\n if secLoop == contSec:\r\n print(\"------ Segundo \" + str(contBool) + \" ------\")\r\n stepList = list()\r\n secLoop = 0\r\n alphaCont = 0\r\n contBool += 1\r\n secLoop += 1\r\n addCont += 1\r\n #if contBool == 6:\r\n # break\r\n lineNum = lineCurrent.value.num\r\n actComp = lineCurrent.value.getactComp()\r\n timeCont = lineCurrent.value.time\r\n timeAssembling = int(timeCont)\r\n if actComp == 0:\r\n actComp += 1\r\n lineCurrent.value.setactComp(actComp)\r\n print(\"Brazo número \" + str(lineNum) + \" en componente \" + str(lineCurrent.value.getactComp()))\r\n action = \"Mover brazo - Componente \" + str(lineCurrent.value.getactComp())\r\n stepList.agregar(assemblyLine(lineNum, action))\r\n lineCurrent = lineCurrent.next\r\n if addCont == contSec:\r\n elaborationProd.agregar(elaboration(contBool - 1, stepList))\r\n #print(\"contBool - 1 = \" + str(contBool - 1))\r\n gCurrent2 = matches.first\r\n temp = list()\r\n while gCurrent2 != None:\r\n gstepBoolean = gCurrent2.value.boolean\r\n gstepLine = gCurrent2.value.line\r\n gstepComp = 
gCurrent2.value.comp\r\n #print(\"Linea=\" + str(gstepLine) + \", Comp=\" + str(gstepComp) + \", Booleano=\" + str(gstepBoolean))\r\n temp.agregar(stepPassed(gstepLine, gstepComp, gstepBoolean))\r\n gCurrent2 = gCurrent2.next\r\n reportG.agregar(objectG(contBool - 1, temp))\r\n addCont = 0\r\n continue\r\n\r\n #print(\"Brazo número \" + str(lineNum) + \" en componente \" + str(lineCurrent.value.getactComp()))\r\n currentStep = matches.first # [1, 1], [2, 2], [1, 3]\r\n booleanTrue = matches.first # [1, 1], [2, 2], [1, 3]\r\n contActions = 0\r\n contOrder = 0\r\n while currentStep != None:\r\n stepBoolean = currentStep.value.getBoolean()\r\n if stepBoolean == False:\r\n contOrder += 1\r\n stepLine = currentStep.value.line\r\n if int(stepLine) == int(lineNum):\r\n if stepBoolean == False:\r\n stepComp = currentStep.value.comp\r\n if int(stepComp) > int(actComp):\r\n actComp += 1\r\n lineCurrent.value.setactComp(actComp)\r\n print(\"Brazo número \" + str(lineNum) + \" en componente \" + str(lineCurrent.value.getactComp()))\r\n action = \"Mover brazo - Componente \" + str(lineCurrent.value.getactComp())\r\n stepList.agregar(assemblyLine(lineNum, action))\r\n contActions += 1\r\n break\r\n elif int(stepComp) < int(actComp):\r\n actComp -= 1\r\n lineCurrent.value.setactComp(actComp)\r\n print(\"Brazo número \" + str(lineNum) + \" en componente \" + str(lineCurrent.value.getactComp()))\r\n action = \"Mover brazo - Componente \" + str(lineCurrent.value.getactComp())\r\n stepList.agregar(assemblyLine(lineNum, action))\r\n contActions += 1\r\n break\r\n else:\r\n if contOrder > 1:\r\n contActions = 0\r\n break\r\n else:\r\n if Assembly == False:\r\n if alphaCont == 0:\r\n if timeAssembling != 1:\r\n linePass = lineNum\r\n Assembly = True\r\n print(\"Brazo número \" + str(lineNum) + \" en componente \" + str(lineCurrent.value.getactComp()) + \" ensamblandolo\")\r\n action = \"Ensamblar - Componente \" + str(lineCurrent.value.getactComp())\r\n stepList.agregar(assemblyLine(lineNum, action))\r\n contActions += 1\r\n clock += 1\r\n break\r\n else:\r\n clock = 1\r\n currentStep.value.setBoolean(True)\r\n Assembly = False\r\n alphaCont += 1\r\n print(\"Brazo número \" + str(lineNum) + \" en componente \" + str(lineCurrent.value.getactComp()) + \" ensamblandolo\")\r\n action = \"Ensamblar - Componente \" + str(lineCurrent.value.getactComp())\r\n stepList.agregar(assemblyLine(lineNum, action))\r\n contActions += 1\r\n break\r\n else:\r\n contActions = 0\r\n break\r\n\r\n elif linePass == lineNum:\r\n if clock == timeAssembling:\r\n clock = 1\r\n currentStep.value.setBoolean(True)\r\n Assembly = False\r\n alphaCont += 1\r\n print(\"Brazo número \" + str(lineNum) + \" en componente \" + str(lineCurrent.value.getactComp()) + \" ensamblandolo\")\r\n action = \"Ensamblar - Componente \" + str(lineCurrent.value.getactComp())\r\n stepList.agregar(assemblyLine(lineNum, action))\r\n contActions += 1\r\n break\r\n else:\r\n clock += 1\r\n Assembly = True\r\n alphaCont += 1\r\n print(\"Brazo número \" + str(lineNum) + \" en componente \" + str(lineCurrent.value.getactComp()) + \" ensamblandolo\")\r\n action = \"Ensamblar - Componente \" + str(lineCurrent.value.getactComp())\r\n stepList.agregar(assemblyLine(lineNum, action))\r\n contActions += 1\r\n break\r\n else:\r\n contActions = 0\r\n break\r\n else:\r\n currentStep = currentStep.next\r\n else:\r\n currentStep = currentStep.next\r\n \r\n if contActions == 0:\r\n print(\"Brazo número \" + str(lineNum) + \" no hace nada\")\r\n action = \"No hace nada\"\r\n 
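#registrar la acción nula del brazo en la lista de pasos
 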
stepList.agregar(assemblyLine(lineNum, action))\r\n\r\n contUltimate = 0\r\n while booleanTrue != None:\r\n stepBoolean = booleanTrue.value.getBoolean()\r\n #print(\"stepLine2: \" + str(stepLine2) + \", stepBoolean: \" + str(stepBoolean) + \", contUltimate: \" + str(contUltimate))\r\n if stepBoolean == False:\r\n contUltimate += 1\r\n booleanTrue = booleanTrue.next\r\n\r\n if addCont == contSec:\r\n elaborationProd.agregar(elaboration(contBool - 1, stepList))\r\n #print(\"contBool - 1 = \" + str(contBool - 1))\r\n gCurrent = matches.first\r\n temp = list()\r\n while gCurrent != None:\r\n gstepBoolean = gCurrent.value.boolean\r\n gstepLine = gCurrent.value.line\r\n gstepComp = gCurrent.value.comp\r\n #print(\"Linea=\" + str(gstepLine) + \", Comp=\" + str(gstepComp) + \", Booleano=\" + str(gstepBoolean))\r\n temp.agregar(stepPassed(gstepLine, gstepComp, gstepBoolean))\r\n gCurrent = gCurrent.next\r\n reportG.agregar(objectG(contBool - 1, temp))\r\n addCont = 0\r\n \r\n if contUltimate == 0:\r\n if secLoop == contSec:\r\n reportGraph.agregar(listObjectG(tempName, reportG))\r\n print(\"\\n\")\r\n print(\"Simulación terminada\")\r\n print(\"\\n\")\r\n break\r\n lineCurrent = lineCurrent.next\r\n\r\n break\r\n else:\r\n current2 = current2.next\r\n\r\n if Encontrado == False:\r\n print(\"\\n\")\r\n print(\"Producto no encontrado\")\r\n print(\"\\n\")\r\n else:\r\n saveProducts.agregar(exitProducts(tempName,contBool - 1,elaborationProd))\r\n global saveCompleteSimulation\r\n saveCompleteSimulation = simulation(alphaSimulation.name, saveProducts)\r\n\r\n alphacurrent = alphacurrent.next\r\n\r\ndef graphicBrowser():\r\n win = Tk()\r\n win.geometry(\"1x1\")\r\n filename = filedialog.askopenfilename(\r\n initialdir=\"/\", title=\"Select a File\", filetypes=((\"Xml files\", \"*.xml*\"), (\"All files\", \"*.*\")))\r\n win.destroy()\r\n return filename\r\n\r\ndef singleXml():\r\n nameinput = str(xmlInOne.get())\r\n simName = saveCompleteSimulation.name\r\n alphaProd = saveCompleteSimulation.products.first\r\n while alphaProd != None:\r\n tempName = str(alphaProd.value.name)\r\n Encontrado = False\r\n if nameinput.lower() == tempName.lower():\r\n Encontrado = True\r\n try:\r\n salida = ET.Element(\"SalidaSimulacion\")\r\n nameSimulation = ET.SubElement(salida, \"Nombre\").text = str(simName)\r\n productList = ET.SubElement(salida, \"ListadoProductos\")\r\n while alphaProd != None:\r\n product = ET.SubElement(productList, \"Producto\")\r\n nameProd = ET.SubElement(product, \"Nombre\").text = str(alphaProd.value.name)\r\n timeProd = ET.SubElement(product, \"TiempoTotal\").text = str(alphaProd.value.totalTime)\r\n elabProd = ET.SubElement(product, \"ElaboracionOptima\")\r\n elab = alphaProd.value.elaboration.first\r\n while elab != None:\r\n currentSec = ET.SubElement(elabProd, \"Tiempo\", NoSegundo = str(elab.value.secNum))\r\n currentList = elab.value.stepList.first\r\n while currentList != None:\r\n action = ET.SubElement(elabProd, \"LineaEnsamblaje\", NoLinea = str(currentList.value.lineNum)).text = str(currentList.value.action)\r\n currentList = currentList.next\r\n elab = elab.next\r\n break\r\n\r\n archivo = ET.ElementTree(salida)\r\n archivo.write(\"reportes/simulaciones/xml/\" + str(tempName) + \"salida.xml\")\r\n print(\"\\n\")\r\n print(\"Archivo creado con exito\")\r\n print(\"\\n\")\r\n messagebox.showinfo(\"Información\",\"Archivo creado con exito\")\r\n break\r\n except:\r\n print(\"\\n\")\r\n print(\"Ha ocurrido un error al crear el archivo, intentelo de nuevo\")\r\n print(\"\\n\")\r\n 
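#salir del bucle de productos tras el error de escritura
 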
break\r\n else:\r\n alphaProd = alphaProd.next\r\n\r\n if Encontrado == False:\r\n print(\"\\n\")\r\n print(\"Producto no encontrado\")\r\n print(\"\\n\")\r\n\r\ndef allXml():\r\n try:\r\n simName = saveCompleteSimulation.name\r\n simProd = saveCompleteSimulation.products.first\r\n salida = ET.Element(\"SalidaSimulacion\")\r\n nameSimulation = ET.SubElement(salida, \"Nombre\").text = str(simName)\r\n productList = ET.SubElement(salida, \"ListadoProductos\")\r\n while simProd != None:\r\n product = ET.SubElement(productList, \"Producto\")\r\n nameProd = ET.SubElement(product, \"Nombre\").text = str(simProd.value.name)\r\n timeProd = ET.SubElement(product, \"TiempoTotal\").text = str(simProd.value.totalTime)\r\n elabProd = ET.SubElement(product, \"ElaboracionOptima\")\r\n elab = simProd.value.elaboration.first\r\n while elab != None:\r\n currentSec = ET.SubElement(elabProd, \"Tiempo\", NoSegundo = str(elab.value.secNum))\r\n currentList = elab.value.stepList.first\r\n while currentList != None:\r\n action = ET.SubElement(elabProd, \"LineaEnsamblaje\", NoLinea = str(currentList.value.lineNum)).text = str(currentList.value.action)\r\n currentList = currentList.next\r\n elab = elab.next\r\n simProd = simProd.next\r\n\r\n archivo = ET.ElementTree(salida)\r\n archivo.write(\"reportes/simulaciones/xml/\" + str(simName) + \"salida.xml\")\r\n print(\"\\n\")\r\n print(\"Archivo creado con exito\")\r\n print(\"\\n\")\r\n messagebox.showinfo(\"Información\",\"Archivo creado con exito\")\r\n except:\r\n print(\"\\n\")\r\n print(\"Ha ocurrido un error al crear el archivo, intentelo de nuevo\")\r\n print(\"\\n\")\r\n\r\ndef browserMachine():\r\n path = graphicBrowser()\r\n readMachine(path)\r\n messagebox.showinfo(\"Información\",\"Carga de maquina realizada correctamente\")\r\n\r\ndef browserSimulation():\r\n path = graphicBrowser()\r\n readSimulation(path)\r\n completeSimulation()\r\n global alphaSimulation\r\n currentAppend = alphaSimulation.products.first\r\n global valores\r\n valores = []\r\n while currentAppend != None:\r\n valores.append(str(currentAppend.value))\r\n currentAppend = currentAppend.next\r\n ttk.Combobox(tab2, textvariable=data, width=15, values=valores).place(x=20, y=75)\r\n ttk.Combobox(tab3, textvariable=htmlInOne, values=valores).place(x=55, y=190)\r\n ttk.Combobox(tab3, textvariable=xmlInOne, values=valores).place(x=325, y=190)\r\n ttk.Combobox(tab3, textvariable=htmlInTwo, width=25, values=valores).place(x=45, y=320)\r\n ttk.Combobox(tab3, textvariable=graphInOne,width=25, values=valores).place(x=586, y=190)\r\n messagebox.showinfo(\"Información\",\"Carga de simulación realizada correctamente\")\r\n\r\ndef simulationTable():\r\n #try:\r\n nameinput = str(data.get())\r\n global saveCompleteSimulation\r\n simName = saveCompleteSimulation.name\r\n alphaProd = saveCompleteSimulation.products.first\r\n dataProd = saveCompleteSimulation.products.first\r\n Encontrado = False\r\n\r\n global alphaMachine\r\n cantLines = int(alphaMachine.lineNum)\r\n columnas = []\r\n for col in range(cantLines + 1):\r\n columnas.append(col + 1)\r\n\r\n while alphaProd != None:\r\n tempName = str(alphaProd.value.name)\r\n if nameinput.lower() == tempName.lower():\r\n simProd = alphaProd\r\n timeProd = alphaProd\r\n global wrapper1\r\n wrapper1.destroy()\r\n wrapper1 = LabelFrame(tab2)\r\n wrapper1.pack(fill=\"both\",expand=\"yes\", padx=0, pady=175)\r\n trv = ttk.Treeview(wrapper1, columns=columnas, show=\"headings\", height=\"6\")\r\n style = ttk.Style(trv)\r\n style.configure(\"Treeview\", 
rowheight=30)\r\n trv.pack(side=LEFT)\r\n trv.place(x=0, y=0)\r\n trv.heading(1, text=\"Segundo\")\r\n trv.column(1, anchor=\"center\", width=50, minwidth=60)\r\n\r\n Encontrado = True\r\n contHead = 2\r\n while simProd != None:\r\n elab = simProd.value.elaboration.first\r\n while elab != None:\r\n currentList = elab.value.stepList.first\r\n while currentList != None:\r\n tempText = \"\"\r\n NoLinea = str(currentList.value.lineNum)\r\n tempText = \"Linea \" + NoLinea\r\n trv.heading(contHead, text=tempText)\r\n trv.column(contHead, anchor=\"center\", width=150, minwidth=200)\r\n contHead += 1\r\n currentList = currentList.next\r\n break\r\n break\r\n contData = 0\r\n while alphaProd != None:\r\n elab2 = alphaProd.value.elaboration.first\r\n cont = 1\r\n while elab2 != None:\r\n row = []\r\n currentSec = str(elab2.value.secNum)\r\n row.append(currentSec)\r\n currentList2 = elab2.value.stepList.first\r\n while currentList2 != None:\r\n action = str(currentList2.value.action)\r\n row.append(action)\r\n currentList2 = currentList2.next\r\n trv.insert(parent='', index=contData, iid=contData, values=row)\r\n #time.sleep(1)\r\n cont += 1\r\n contData += 1\r\n elab2 = elab2.next\r\n break\r\n\r\n #Vertical ScrollBar\r\n yScroll = ttk.Scrollbar(wrapper1, orient=\"vertical\", command=trv.yview)\r\n yScroll.pack(side=RIGHT, fill=\"y\")\r\n #Horizontal ScrollBar\r\n xScroll = ttk.Scrollbar(wrapper1, orient=\"horizontal\", command=trv.xview)\r\n xScroll.pack(side=BOTTOM, fill=\"x\")\r\n trv.configure(yscrollcommand=yScroll.set, xscrollcommand=xScroll.set)\r\n\r\n liveSec = str(timeProd.value.totalTime)\r\n limit = int(liveSec)\r\n for sec in range(limit):\r\n #time.sleep(1)\r\n global liveSeconds\r\n #liveSeconds.destroy()\r\n liveSeconds = Label(tab2, text=str(sec + 1), width=4, fg=\"#fcba03\", bg =\"#010030\", font = \"Helvetica 32 bold italic\").place(x=610, y=445)\r\n\r\n print(\"\\n\")\r\n print(\"Tabla creada con exito\")\r\n print(\"\\n\")\r\n break\r\n else:\r\n alphaProd = alphaProd.next\r\n #except:\r\n # print(\"\\n\")\r\n # print(\"Ha ocurrido un error al crear la tabla, intentelo de nuevo\")\r\n # print(\"\\n\")\r\n\r\ndef buttonAllReport():\r\n allReport(saveCompleteSimulation)\r\n\r\ndef singleHtmlReport():\r\n singleProduct = str(htmlInOne.get())\r\n print(singleProduct)\r\n singleReport(singleProduct, saveCompleteSimulation)\r\n\r\ndef timeHtmlReport():\r\n singleProduct = str(htmlInTwo.get())\r\n second = int(htmlInTwoSec.get())\r\n print(singleProduct + \", \" + str(second))\r\n timeReport(singleProduct, second, saveCompleteSimulation)\r\n\r\ndef updateData():\r\n nameinput = str(data.get())\r\n mach = alphaMachine.productList.first\r\n stepsText = \"\"\r\n while mach != None:\r\n tempName = mach.value.name\r\n if nameinput.lower() == tempName.lower():\r\n stepsText = str(mach.value.steps)\r\n break\r\n else:\r\n mach = mach.next\r\n t.Label(tab2, text=stepsText, width=25, fg=\"#fcba03\", bg =\"#010030\", font = \"Helvetica 14 bold italic\").place(x=50, y=450)\r\n\r\ndef updateSeconds1():\r\n nameinput = str(htmlInTwo.get())\r\n alphaProd = saveCompleteSimulation.products.first\r\n simProd = saveCompleteSimulation.products.first\r\n timeProd = 0\r\n while alphaProd != None:\r\n tempName = str(alphaProd.value.name)\r\n if nameinput.lower() == tempName.lower():\r\n timeProd = int(alphaProd.value.totalTime)\r\n break\r\n else:\r\n alphaProd = alphaProd.next\r\n\r\n global segundos\r\n segundos = []\r\n for sec in range(timeProd):\r\n segundos.append(sec + 1)\r\n \r\n ttk.Combobox(tab3, 
width=2, textvariable=htmlInTwoSec, values=segundos).place(x=67, y=353)\r\n\r\ndef updateSeconds2():\r\n nameinput = str(graphInOne.get())\r\n alphaProd = saveCompleteSimulation.products.first\r\n simProd = saveCompleteSimulation.products.first\r\n timeProd = 0\r\n while alphaProd != None:\r\n tempName = str(alphaProd.value.name)\r\n if nameinput.lower() == tempName.lower():\r\n timeProd = int(alphaProd.value.totalTime)\r\n break\r\n else:\r\n alphaProd = alphaProd.next\r\n\r\n global segundos\r\n segundos = []\r\n for sec in range(timeProd):\r\n segundos.append(sec + 1)\r\n \r\n ttk.Combobox(tab3, width=2, textvariable=graphInOneSec, values=segundos).place(x=610, y=223)\r\n\r\ndef graphvizReport(name, breakSecond):\r\n global reportGraph\r\n alphaCurrent = reportGraph.first\r\n while alphaCurrent != None:\r\n alphaName = alphaCurrent.value.product\r\n if alphaName.lower() == name.lower():\r\n current = alphaCurrent.value.objects.first\r\n while current != None:\r\n secondG = current.value.second\r\n if int(breakSecond) == int(secondG):\r\n g = graphviz.Digraph('G', filename= 'reportes/cola/' + str(name) + str(breakSecond) + '.gv')\r\n g.attr(label=r'\\n' + str(name.upper()) + '\\nSegundo = ' + str(breakSecond) + '\\nVerde = Ensamblado\\nRojo = No ensamblado\\n')\r\n currentStep = current.value.steps.first\r\n tempLine = 0\r\n tempComp = 0\r\n cont = 0\r\n cont2 = 0\r\n while currentStep != None:\r\n gstepBoolean = currentStep.value.boolean\r\n gstepLine = currentStep.value.line\r\n gstepComp = currentStep.value.comp\r\n print(\"Linea=\" + str(gstepLine) + \", Comp=\" + str(gstepComp) + \", Booleano=\" + str(gstepBoolean))\r\n if currentStep.next != None:\r\n \r\n if bool(gstepBoolean) == True:\r\n #0,0 - 1,1 - 0,2 - 1,1 - 0,2 - 1,1 - 0,2 - 1,1 - 0,2\r\n if cont == 1:\r\n g.attr('node', shape='box', style='filled', color='green')\r\n g.edge('L' + str(tempLine) + 'C' + str(tempComp), 'L' + str(gstepLine) + 'C' + str(gstepComp), constraint='false')\r\n tempLine = currentStep.value.line\r\n tempComp = currentStep.value.comp\r\n cont2 += 1\r\n cont = 0\r\n elif cont2 == 2:\r\n g.attr('node', shape='box', style='filled', color='green')\r\n g.edge('L' + str(tempLine) + 'C' + str(tempComp), 'L' + str(gstepLine) + 'C' + str(gstepComp), constraint='false')\r\n tempLine = currentStep.value.line\r\n tempComp = currentStep.value.comp\r\n cont2 = 1\r\n cont += 1\r\n else:\r\n g.attr('node', shape='box', style='filled', color='green')\r\n g.node('L' + str(gstepLine) + 'C' + str(gstepComp), constraint='false')\r\n tempLine = currentStep.value.line\r\n tempComp = currentStep.value.comp\r\n cont += 1\r\n cont2 += 1\r\n else:\r\n if cont == 1:\r\n g.attr('node', shape='box', style='filled', color='red')\r\n g.edge('L' + str(tempLine) + 'C' + str(tempComp), 'L' + str(gstepLine) + 'C' + str(gstepComp), constraint='false')\r\n tempLine = currentStep.value.line\r\n tempComp = currentStep.value.comp\r\n cont = 0\r\n cont2 += 1\r\n elif cont2 == 2:\r\n g.attr('node', shape='box', style='filled', color='red')\r\n g.edge('L' + str(tempLine) + 'C' + str(tempComp), 'L' + str(gstepLine) + 'C' + str(gstepComp), constraint='false')\r\n tempLine = currentStep.value.line\r\n tempComp = currentStep.value.comp\r\n cont2 = 1\r\n cont += 1\r\n else:\r\n g.attr('node', shape='box', style='filled', color='red')\r\n g.node('L' + str(gstepLine) + 'C' + str(gstepComp), constraint='false')\r\n tempLine = currentStep.value.line\r\n tempComp = currentStep.value.comp\r\n cont += 1\r\n cont2 += 1\r\n else:\r\n if bool(gstepBoolean) == 
True:\r\n g.attr('node', shape='box', style='filled', color='green')\r\n g.node('L' + str(gstepLine) + 'C' + str(gstepComp), constraint='false')\r\n cont += 1\r\n else:\r\n g.attr('node', shape='box', style='filled', color='red')\r\n g.node('L' + str(gstepLine) + 'C' + str(gstepComp), constraint='false')\r\n cont += 1\r\n \r\n g.edge('L' + str(tempLine) + 'C' + str(tempComp), 'L' + str(gstepLine) + 'C' + str(gstepComp), constraint='false')\r\n currentStep = currentStep.next\r\n\r\n #g.attr(label=r'\\n\\nVerde = Ensamblado\\nRojo = No ensamblado')\r\n g.view()\r\n break\r\n else:\r\n current = current.next\r\n break\r\n else:\r\n alphaCurrent = alphaCurrent.next\r\n\r\ndef graphButton():\r\n name = str(graphInOne.get())\r\n second = int(graphInOneSec.get())\r\n print(\"name: \" + str(name) + \", second: \" + str(second))\r\n graphvizReport(name, second)\r\n\r\nv = t.Tk()\r\nv.geometry(\"800x600\")\r\nv.resizable(False, False)\r\nv.title(\"Digital Intelligence, S.A.\")\r\n\r\ntabControl = ttk.Notebook(v)\r\n\r\ntab1 = ttk.Frame(tabControl)\r\ntab2 = ttk.Frame(tabControl)\r\ntab3 = ttk.Frame(tabControl)\r\ntab4 = ttk.Frame(tabControl)\r\n\r\ntabControl.add(tab1, text ='Cargar')\r\ntabControl.add(tab2, text ='Procesos')\r\ntabControl.add(tab3, text ='Reportes')\r\ntabControl.add(tab4, text ='Ayuda')\r\ntabControl.pack(expand = 1, fill =\"both\")\r\n\r\n#TAB1\r\nt.Label(tab1, text=\"\", width=200, height=36, bg = \"#162742\").place(x=0, y=0)\r\nt.Label(tab1, text=\"DIGITAL INTELLIGENCE, S.A.\", fg=\"#fcba03\", width=30, height=10, bg = \"#162742\", font = \"Helvetica 24 bold italic\").place(x=105, y=30)\r\nt.Button(tab1, text=\"Cargar Maquina\", width=25, font = \"Arial 14\", command=browserMachine).place(x=90, y=280)\r\nt.Button(tab1, text=\"Cargar Simulación\", width=25, font = \"Arial 14\", command=browserSimulation).place(x=430, y=280)\r\nt.Label(tab1, text = \"\", width=130, height=20, bg = \"dark gray\").place(x=0, y=550)\r\nt.Label(tab1, text = \" 2021 - Proyecto 2 de Introducción a la programación 2\", fg=\"black\", bg = \"dark gray\").place(x=0, y=550)\r\n\r\n#TAB2\r\nt.Label(tab2, text=\"\", width=200, height=36, bg = \"#162742\").place(x=0, y=0)\r\nt.Label(tab2, text=\"Procesos\", width=10, fg=\"#fcba03\", bg = \"#162742\", font = \"Helvetica 24 bold italic\").place(x=600, y=30)\r\ndata = t.StringVar(tab2)\r\nt.Button(tab2, width=1, text=\"⟳\", font = \"Helvetica 10 bold\", bg='#fcba03', command=updateData).place(x=134, y=72)\r\nt.Label(tab2, text=\"Productos\", width=20, fg=\"#fcba03\", bg = \"#162742\", font = \"Helvetica 12\").place(x=-35, y=45)\r\nt.Label(tab2, width=130, height=27, bg = \"#010030\").place(x=0, y=110)\r\nt.Button(tab2, text=\"Iniciar simulación\", width=15, font = \"Arial 10\", command=simulationTable).place(x=20, y=135)\r\nt.Label(tab2, text=\"Segundos:\", width=10, fg=\"#FFFFFF\", bg = \"#010030\", font = \"Helvetica 12\").place(x=600, y=420)\r\nliveSeconds = Label(tab2, text = \"0\", width=4, fg=\"#fcba03\", bg =\"#010030\", font = \"Helvetica 32 bold italic\").place(x=610, y=445)\r\nwrapper1 = LabelFrame(tab2)\r\nwrapper1.pack(fill=\"both\",expand=\"yes\", padx=0, pady=175)\r\nclockImage = Image.open(\"images/clock2.png\")\r\nphoto = ImageTk.PhotoImage(clockImage)\r\nclockLabel = Label(tab2, width=50, image=photo, bg = \"#010030\").place(x=560, y=442)\r\nt.Label(tab2, text=\"Componentes necesarios:\", width=20, fg=\"#FFFFFF\", bg =\"#010030\", font = \"Helvetica 12\").place(x=100, y=420)\r\nt.Label(tab2, text = \"\", width=130, height=20, bg = \"dark 
gray\").place(x=0, y=550)\r\nt.Label(tab2, text = \" 2021 - Proyecto 2 de Introducción a la programación 2\", fg=\"black\", bg = \"dark gray\").place(x=0, y=550)\r\n\r\n#TAB3\r\nt.Label(tab3, width=200, height=36, bg = \"#162742\").place(x=0, y=0)\r\nt.Label(tab3, width=30, height=26, bg = \"#010030\").place(x=20, y=130)\r\nt.Label(tab3, width=30, height=26, bg = \"#010030\").place(x=290, y=130)\r\nt.Label(tab3, width=30, height=26, bg = \"#010030\").place(x=560, y=130)\r\nt.Label(tab3, text=\"Reportes\", width=10, fg=\"#fcba03\", bg = \"#162742\", font = \"Helvetica 24 bold italic\").place(x=600, y=30)\r\n#HTML\r\nt.Label(tab3, text=\"HTML\", width=20, fg=\"#fcba03\", bg = \"#162742\", font = \"Helvetica 12\").place(x=35, y=100)\r\nt.Label(tab3, text=\"Producto específico\", width=20, fg=\"#FFFFFF\", bg = \"#010030\", font = \"Helvetica 12\").place(x=35, y=160)\r\nhtmlInOne = t.StringVar(tab3)\r\nhtmlBOne = t.Button(tab3, text=\"Generar\", width=10, font = \"Arial 10\", command=singleHtmlReport).place(x=80, y=220)\r\nt.Label(tab3, text=\"Producto en un tiempo t\", width=20, fg=\"#FFFFFF\", bg = \"#010030\", font = \"Helvetica 12\").place(x=35, y=290)\r\nt.Label(tab3, text=\"t\", width=5, fg=\"#fcba03\", bg = \"#010030\", font = \"Helvetica 12 bold italic\").place(x=27, y=351)\r\nhtmlInTwo = t.StringVar(tab3)\r\nhtmlBTwo = t.Button(tab3, text=\"Generar\", width=10, font = \"Arial 10\", command=timeHtmlReport).place(x=133, y=350)\r\nhtmlInTwoSec = t.StringVar(tab3)\r\nt.Button(tab3, width=1, text=\"⟳\", font = \"Helvetica 10 bold\", bg='#fcba03', command=updateSeconds1).place(x=102, y=350)\r\nt.Label(tab3, text=\"Todos los productos\", width=20, fg=\"#FFFFFF\", bg = \"#010030\", font = \"Helvetica 12\").place(x=35, y=420)\r\nt.Button(tab3, text=\"Generar\", width=10, font = \"Arial 10\", command=buttonAllReport).place(x=80, y=450)\r\n#XML\r\nt.Label(tab3, text=\"XML\", width=20, fg=\"#fcba03\", bg = \"#162742\", font = \"Helvetica 12\").place(x=305, y=100)\r\nt.Label(tab3, text=\"Producto específico\", width=20, fg=\"#FFFFFF\", bg = \"#010030\", font = \"Helvetica 12\").place(x=305, y=160)\r\nxmlInOne = t.StringVar(tab3)\r\nxmlBOne = t.Button(tab3, text=\"Generar\", width=10, font = \"Arial 10\", command=singleXml).place(x=350, y=220)\r\nt.Label(tab3, text=\"Todos los productos\", width=20, fg=\"#FFFFFF\", bg = \"#010030\", font = \"Helvetica 12\").place(x=305, y=290)\r\nt.Button(tab3, text=\"Generar\", width=10, font = \"Arial 10\", command=allXml).place(x=350, y=320)\r\n#Graphviz\r\nt.Label(tab3, text=\"Graphviz\", width=20, fg=\"#fcba03\", bg = \"#162742\", font = \"Helvetica 12\").place(x=576, y=100)\r\nt.Label(tab3, text=\"Producto en un tiempo t\", width=20, fg=\"#FFFFFF\", bg = \"#010030\", font = \"Helvetica 12\").place(x=576, y=160)\r\nt.Label(tab3, text=\"t\", width=5, fg=\"#fcba03\", bg = \"#010030\", font = \"Helvetica 12 bold italic\").place(x=570, y=220)\r\ngraphInOne = t.StringVar(tab3)\r\ngraphInOneSec = t.StringVar(tab3)\r\nt.Button(tab3, width=1, text=\"⟳\", font = \"Helvetica 10 bold\", bg='#fcba03', command=updateSeconds2).place(x=645, y=220)\r\nt.Button(tab3, text=\"Generar\", width=10, font = \"Arial 10\", command=graphButton).place(x=675, y=220)\r\nt.Label(tab3, text = \"\", width=130, height=20, bg = \"dark gray\").place(x=0, y=550)\r\nt.Label(tab3, text = \" 2021 - Proyecto 2 de Introducción a la programación 2\", fg=\"black\", bg = \"dark gray\").place(x=0, y=550)\r\n\r\n#TAB4\r\nt.Label(tab4, width=200, height=36, bg = \"#162742\").place(x=0, y=0)\r\nt.Label(tab4, 
width=107, height=26, bg = \"#010030\").place(x=20, y=130)\r\nt.Label(tab4, text=\"Ayuda\", width=10, fg=\"#fcba03\", bg = \"#162742\", font = \"Helvetica 24 bold italic\").place(x=620, y=30)\r\nt.Label(tab4, text=\"Acerca de la aplicación\", width=20, fg=\"#fcba03\", bg = \"#010030\", font = \"Helvetica 12 italic\").place(x=35, y=160)\r\n\r\ninfo = \"\"\"Esta aplicación simula los procesos de una máquina capaz de \r\nensamblar las partes de cualquier producto en el mercado en un tiempo óptimo.\r\nLa máquina creada por Digital Intelligence, S.A. puede construir cualquier \r\nproducto ensamblando automáticamente los componentes (partes) que lo conforman.\r\nPara esto, la máquina desarrollada consta de una o varias líneas de ensamblaje y\r\nun brazo robótico para cada una de éstas, además, cada línea de ensamblaje posee\r\nun mecanismo que le permite acceder a uno o varios componentes distintos para ir\r\ncompletando el proceso y hacer simulaciones optimizando el tiempo y recursos,\r\npudiendo ver estos procesos en tiempo real como también poder generar reportes\r\nsobre estos procesos.\"\"\"\r\n\r\ndatos = \"\"\"Marvin Alexis Estrada Florian\r\n3007201810101\r\n201800476\"\"\"\r\n\r\nt.Label(tab4, text=info, width=66, fg=\"#FFFFFF\", bg = \"#010030\", font = \"Helvetica 12\", anchor=\"e\", justify=LEFT).place(x=35, y=200)\r\nt.Label(tab4, text=\"Datos del estudiante\", width=20, fg=\"#fcba03\", bg = \"#010030\", font = \"Helvetica 12 italic\").place(x=25, y=400)\r\nt.Label(tab4, text=datos, width=23, fg=\"#FFFFFF\", bg = \"#010030\", font = \"Helvetica 12\", anchor=\"e\", justify=LEFT).place(x=35, y=440)\r\nt.Label(tab4, text = \"\", width=130, height=20, bg = \"dark gray\").place(x=0, y=550)\r\nt.Label(tab4, text = \" 2021 - Proyecto 2 de Introducción a la programación 2\", fg=\"black\", bg = \"dark gray\").place(x=0, y=550)\r\n\r\nv.mainloop()\r\n\r\n\"\"\"\r\nEjecucion = True\r\nwhile Ejecucion:\r\n menu()\r\n\r\n opcion = input(\"Elige una opción: \")\r\n\r\n if opcion == \"1\":\r\n #name = graphicBrowser()\r\n name = \"C:/Users/alexi/Downloads/maquina.xml\"\r\n readMachine(name)\r\n\r\n elif opcion == \"2\":\r\n name2 = graphicBrowser()\r\n #name2 = \"C:/Users/alexi/Downloads/simulacion.xml\"\r\n readSimulation(name2)\r\n \r\n elif opcion == \"3\":\r\n printProdLines()\r\n \r\n elif opcion == \"4\":\r\n printProducts()\r\n \r\n elif opcion == \"5\":\r\n singleSimulation()\r\n \r\n elif opcion == \"6\":\r\n completeSimulation()\r\n \r\n elif opcion == \"7\":\r\n createDoc(1)\r\n \r\n elif opcion == \"8\":\r\n createDoc(2)\r\n \r\n elif opcion == \"9\":\r\n lineNum = int(alphaMachine.lineNum)\r\n singleReport(lineNum, saveCompleteSimulation)\r\n\r\n elif opcion == \"10\":\r\n lineNum = int(alphaMachine.lineNum)\r\n allReport(lineNum, saveCompleteSimulation)\r\n \r\n elif opcion == \"11\":\r\n lineNum = int(alphaMachine.lineNum)\r\n timeReport(lineNum, saveCompleteSimulation)\r\n\r\n elif opcion == \"12\":\r\n print(\"Has salido del programa\")\r\n Ejecucion = False\r\n\r\n else:\r\n print(\"Intenta de nuevo\")\r\n\"\"\"","repo_name":"alexmaest/IPC2_Proyecto2_201800476","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":49034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1605011543","text":"from rouge_score import rouge_scorer\nimport numpy as np\n\n\n\ndef compute_metrics(eval_preds):\n preds, labels = eval_preds\n if isinstance(preds, tuple):\n preds = preds[0]\n decoded_preds = 
tokenizer.batch_decode(preds, skip_special_tokens=True)\n if data_args.ignore_pad_token_for_loss:\n # Replace -100 in the labels as we can't decode them.\n labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n # Some simple post-processing\n decoded_preds = [pred.strip() for pred in decoded_preds]\n decoded_labels = [label.strip() for label in decoded_labels]\n if metric_name == \"sacrebleu\":\n decoded_labels = [[label] for label in decoded_labels]\n\n result = metric.compute(predictions=decoded_preds, references=decoded_labels)\n\n # Extract a few results from ROUGE\n if metric_name == \"rouge\":\n result = {key: value.mid.fmeasure * 100 for key, value in result.items()}\n else:\n result = {\"bleu\": result[\"score\"]}\n\n prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]\n result[\"gen_len\"] = np.mean(prediction_lens)\n\n return result","repo_name":"zhangxs131/summary_finetune","sub_path":"generate_summary/utils/metric_rouge.py","file_name":"metric_rouge.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"14962849587","text":"# 2. (!!!Доп!!!) Напишите программу для проверки истинности утверждения ¬(X ⋁ Y ⋁ Z) = ¬X ⋀ ¬Y ⋀ ¬Z для всех значений предикат.\n# NOT (X OR Y OR Z) == (NOT X AND NOT Y AND NOT Z)\n\nx1 = int\ny1 = int\nz1 = int\nintBool = [0,1] #[i for i in range(2)]\nfor x1 in intBool:\n for y1 in intBool:\n for z1 in intBool:\n print(f\"x = {x1}, y = {y1}, z = {z1}\", end=\" -=>-> \")\n print(f\"if not ({x1} or {y1} or {z1}) == (not {x1} and not {y1} and not {z1})\", end=\" -=>-> \")\n print(f\"if {not (x1 or y1 or z1)} == ({not x1} and {not y1} and {not z1})\", end=\" -=>-> \")\n # print(f\"if {not (x1 or y1 or z1)} == ({not x1 and not y1 and not z1})\", end=\" -=>-> \")\n print(not (x1 or y1 or z1) == (not x1 and not y1 and not z1))","repo_name":"Zeloodin/SeminarPython","sub_path":"Seminar1PythonPracticZadacha/Zadacha2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24821029651","text":"from django.contrib import messages\nfrom django.db.models import Sum, F\n\nfrom core.services import get_query_params, annotate_with_discount_prices\nfrom customer.forms import EmailSubForm\nfrom customer.models import Order, LikedProduct, OrderItem\nfrom posts.models import PostCategory\nfrom products.filters import ProductFilter\nfrom products.models import Category, Product\n\n\ndef retrieve_cart_items(request):\n \"\"\"\n Retrieves number of items in the cart and their total price.\n \"\"\"\n customer = request.user.id\n session_id = request.session.session_key\n if request.user.is_authenticated:\n order, created = Order.objects.get_or_create(customer_id=customer, completed=False)\n order.session_id = session_id\n order.save()\n else:\n order, created = Order.objects.get_or_create(session_id=session_id, completed=False)\n\n order_items_query = OrderItem.objects.filter(order=order)\n order_total_query = annotate_with_discount_prices(order_items_query)\n order_total_query = order_total_query.annotate(\n total_price=F('discount_price') * F('quantity'),\n ).aggregate(overall_price=Sum('total_price'))\n\n order_total = order_total_query['overall_price']\n items_count_query = order_items_query.aggregate(count=Sum('quantity'))\n 
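# total number of units across all items in the open order
 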
items_count = items_count_query['count']\n context = {\n 'cart_items': items_count or 0,\n 'order_total': order_total or 0,\n }\n\n return context\n\n\ndef retrieve_liked_products(request):\n \"\"\"\n Retrieves number of liked products.\n \"\"\"\n customer_id = request.user.id\n session_id = request.session.session_key\n\n query_kwargs = {'customer_id': customer_id} if request.user.is_authenticated else {'session_id': session_id}\n liked_products_count = LikedProduct.objects.filter(**query_kwargs).count()\n\n return {'liked_products': liked_products_count}\n\n\ndef retrieve_filter_form(request):\n \"\"\"\n Retrieves form to search for products.\n \"\"\"\n query_params = get_query_params(request)\n filtered_products = ProductFilter(request.GET, queryset=Product.objects.filter(**query_params))\n\n return {'filtered_products': filtered_products}\n\n\ndef retrieve_product_categories(request):\n \"\"\"\n Retrieves all product categories.\n \"\"\"\n categories = Category.objects.all()\n\n return {'categories': categories}\n\n# def retrieve_post_categories(request):\n# \"\"\"\n# Retrieves all post categories.\n# \"\"\"\n# categories = PostCategory.objects.all()\n#\n# return {'categories': categories}\n\n\ndef retrieve_email_sub_form(request):\n \"\"\"\n Retrieves email subscription form.\n \"\"\"\n email_sub_form = EmailSubForm(request.POST)\n if request.method == 'POST' and 'email' in request.POST:\n if email_sub_form.is_valid():\n email_sub_form.save()\n messages.success(request, \"Thank you for subscribing to our email newsletter!\")\n else:\n messages.success(request, \"Something went wrong, please try again!\")\n print(email_sub_form.errors.as_data)\n\n context = {\n 'email_form': email_sub_form,\n }\n return context\n","repo_name":"icmrnbw/ecommerce_django","sub_path":"core/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23536030221","text":"__author__ = 'Giruvegan'\n\nletter_dict = {0:'A', 1:'B', 2:'C', 3:'D', 4:'E', 5:'F',\n 6:'G', 7:'H', 8:'I', 9:'J', 10:'K', 11:'L',\n 12:'M', 13:'N', 14:'O', 15:'P', 16:'Q', 17:'R',\n 18:'S', 19:'T', 20:'U', 21:'V', 22:'W', 23:'X',\n 24:'Y', 25:'Z'}\n\ndef plans(case_input):\n input = case_input.split(' ')\n ans = []\n senates = {}\n for i in range(len(input)):\n senates[letter_dict[i]] = int(input[i])\n\n while senates:\n sorted_items = sorted(senates.items(), key=lambda x: -x[1])\n if len(sorted_items) == 2:\n keys = [sorted_items[0][0], sorted_items[1][0]]\n ans.append(''.join(keys))\n for key in keys:\n senates[key] -= 1\n if senates[key] == 0:\n senates.pop(key)\n else:\n key = sorted_items[0][0]\n ans.append(key)\n senates[key] -= 1\n if senates[key] == 0:\n senates.pop(key)\n\n return ' '.join(ans)\n\nif __name__ == '__main__':\n\n filepath = 'A-large.in.txt'\n fout = open(filepath.split('.')[0] + '.out.txt', 'w')\n all_input = open(filepath, 'r').readlines()\n case_num = int(all_input[0])\n for i in range(2, len(all_input), 2):\n case_input = all_input[i].replace('\\n', '')\n fout.write('case #' + str(i/2) + ': ' + plans(case_input) + '\\n')","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_187/828.py","file_name":"828.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32882287207","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask, 
render_template, request\nimport yelp_api\nimport os\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n\tlocation = request.values.get('location')\n\tterm = request.values.get('term')\n\tlang = 'en'\n\tif location:\n\t\t\trestaurant = yelp_api.get_restaurant(term, location, lang)\n\telse:\n\t\t\trestaurant = None\n\treturn render_template('index.html', restaurant=restaurant)\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host=\"0.0.0.0\", port=port)","repo_name":"tvenis/flask_app","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32783539996","text":"import string\nimport re\n\n#Part 1\npriority_dict = dict()\nfor letter, priority in zip([*string.ascii_letters], range(1,53)):\n priority_dict[letter] = priority\n\nwith open(r'./input') as f:\n sacks = f.read().splitlines()\n\nsacks_1 = [[sack[:len(sack)//2] + ' ' + sack[len(sack)//2:]] for sack in sacks]\n\ncommon = []\nfor sack in sacks_1:\n result = re.match(r'.*(\\w).*\\s.*\\1.*', str(sack))\n common.append(result.group(1))\n\npriority_sum = 0\nfor letter in common:\n priority_sum += priority_dict[letter]\n\nprint(f'The answer to Part 1 is {priority_sum}')\n\n#Part 2\ncounter = 0\nentry = 0\nsacks_2 = ['']\nfor sack in sacks:\n if counter < 2:\n sacks_2[entry] += sack + ' '\n counter += 1\n else:\n sacks_2[entry] += sack\n sacks_2.append('')\n counter = 0\n entry += 1\nsacks_2.pop()\n\ncommon = []\nfor sack in sacks_2:\n result = re.match(r'.*(\\w).*\\s.*\\1.*\\s.*\\1.*', str(sack))\n common.append(result.group(1))\n\npriority_sum = 0\nfor letter in common:\n priority_sum += priority_dict[letter]\n\nprint(f'The answer to Part 2 is {priority_sum}')","repo_name":"mcolella326/Advent-of-Code-2022","sub_path":"3/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70984966915","text":"import pandas as pd\nimport sys\nimport os\nsys.path.append('./PathoNet/')\nfrom numpy.random import seed\nimport random\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\" #to use CPU\nimport tensorflow as tf\n\nfrom update_json import updateJsonFile\nimport evaluation\n\n# Set seed to make results reproducible\nSEED = 1\nseed(SEED)\ntf.random.set_seed(SEED)\nrandom.seed(SEED)\n\n\n# Iterate across compressed networks and compute RMSE, cutoff accuracy for ki67-score and TIL-score\n# Change the paths listed in \"list_comprNets\" to evaluate other compressed networks (leave the first item\n# as it is)\nlist_comprNets=[\"./original_nets/PathoNet.hdf5\",\n\"./pathonet_compressed_models/experiments/CWS_k256/0-0-0-256-0.0001-1e-05-5-75-147.1456_save_weights.h5\",\n\"./pathonet_compressed_models/experiments/PWS_k256/0-0-0-256-0.0001-0.0001-5-75-159.29054_save_weights.h5\",\n\"./pathonet_compressed_models/experiments/ECSQ_k256/0-0-0-256-0.0001-1e-05-5-75-152.7148_save_weights.h5\", \n\"./pathonet_compressed_models/experiments/UQ_k256/0-0-0-249-0.0001-0.0001-5-75-146.19026_save_weights.h5\"]\n\n\n# Paths\nconfig_path = \"./PathoNet/configs/eval.json\"\nres_name = \"results_RMSE_cutoffAccuracy_comprNets\"\n\n\n# Create dataframe to store results\ncolumn_names = [\"exp\", \"rmse_ki67\", \"rmse_TIL\", \"acc_ki67_pt\", \"acc_TIL_pt\"]\n\nres = 
pd.DataFrame(columns = column_names)\n\nfor i,p in enumerate(list_comprNets):\n print(\"Evaluated network path: \", p)\n updateJsonFile(p, config_path, \"temp\")\n rmse_ki67, rmse_TIL, acc_ki67_pt, acc_TIL_pt = evaluation.eval_pts(['-i','./SHIDC-B-Ki-67/Test', '-c', './temp.json'])\n print() #Print empty line\n os.remove(\"temp.json\")\n if i==0:\n res = res.append({\"exp\":\"Original\", \"rmse_ki67\":rmse_ki67, \"rmse_TIL\":rmse_TIL, \"acc_ki67_pt\":acc_ki67_pt, \n \"acc_TIL_pt\":acc_TIL_pt}, ignore_index=True)\n else:\n res = res.append({\"exp\":p.split(\"/\")[3], \"rmse_ki67\":rmse_ki67, \"rmse_TIL\":rmse_TIL, \"acc_ki67_pt\":acc_ki67_pt, \n \"acc_TIL_pt\":acc_TIL_pt}, ignore_index=True)\n\n# Save results\nres.to_pickle(res_name+\".pkl\")\nres.to_csv(res_name+\".csv\")\n","repo_name":"GliozzoJ/pathonet_compression","sub_path":"run_rmse_acc.py","file_name":"run_rmse_acc.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25176540115","text":"'''\nDocumentation:\nhttp://docs.microsoft.com/en-us/windows/desktop/Multimedia\n\n.. versionadded:: 1.4.0\n'''\n\nfrom os.path import join\n\nfrom ctypes import windll\nfrom ctypes import (\n sizeof, c_void_p, c_ulonglong, c_ulong,\n c_wchar_p, byref, Structure, create_string_buffer\n)\nfrom ctypes.wintypes import DWORD, UINT\n\nfrom plyer.facades import Audio\nfrom plyer.platforms.win.storagepath import WinStoragePath\n\n# DWORD_PTR i.e. ULONG_PTR, 32/64bit\nULONG_PTR = c_ulonglong if sizeof(c_void_p) == 8 else c_ulong\n\n# device specific symbols\nMCI_OPEN = 0x803\nMCI_OPEN_TYPE = 0x2000\nMCI_OPEN_ELEMENT = 512\nMCI_RECORD = 0x80F\nMCI_STOP = 0x808\nMCI_SAVE = 0x813\nMCI_PLAY = 0x806\nMCI_CLOSE = 0x804\n\n# recorder specific symbols\nMCI_FROM = 4\nMCI_TO = 8\nMCI_WAIT = 2\nMCI_SAVE_FILE = 256\n\n\nclass MCI_OPEN_PARMS(Structure):\n '''\n Struct for MCI_OPEN message parameters.\n\n .. versionadded:: 1.4.0\n '''\n\n _fields_ = [\n ('mciOpenParms', ULONG_PTR),\n ('wDeviceID', UINT),\n ('lpstrDeviceType', c_wchar_p),\n ('lpstrElementName', c_wchar_p),\n ('lpstrAlias', c_wchar_p)\n ]\n\n\nclass MCI_RECORD_PARMS(Structure):\n '''\n Struct for MCI_RECORD message parameters.\n\n http://docs.microsoft.com/en-us/windows/desktop/Multimedia/mci-record-parms\n\n .. versionadded:: 1.4.0\n '''\n\n _fields_ = [\n ('dwCallback', ULONG_PTR),\n ('dwFrom', DWORD),\n ('dwTo', DWORD)\n ]\n\n\nclass MCI_SAVE_PARMS(Structure):\n '''\n Struct for MCI_SAVE message parameters.\n\n http://docs.microsoft.com/en-us/windows/desktop/Multimedia/mci-save-parms\n\n .. versionadded:: 1.4.0\n '''\n\n _fields_ = [\n ('dwCallback', ULONG_PTR),\n ('lpfilename', c_wchar_p)\n ]\n\n\nclass MCI_PLAY_PARMS(Structure):\n '''\n Struct for MCI_PLAY message parameters.\n\n http://docs.microsoft.com/en-us/windows/desktop/Multimedia/mci-play-parms\n\n .. versionadded:: 1.4.0\n '''\n\n _fields_ = [\n ('dwCallback', ULONG_PTR),\n ('dwFrom', DWORD),\n ('dwTo', DWORD)\n ]\n\n\ndef send_command(device, msg, flags, params):\n '''\n Generic mciSendCommandW() wrapper with error handler.\n All parameters are required as for mciSendCommandW().\n In case of no `params` passed, use `None`, that value\n won't be dereferenced.\n\n .. 
versionadded:: 1.4.0\n '''\n\n multimedia = windll.winmm\n send_command_w = multimedia.mciSendCommandW\n get_error = multimedia.mciGetErrorStringW\n\n # error text buffer\n # by API specification 128 is max, however the API sometimes\n # kind of does not respect the documented bounds and returns\n # more characters than buffer length...?!\n error_len = 128\n\n # big enough to prevent API accidentally segfaulting\n error_text = create_string_buffer(error_len * 2)\n\n # open a recording device with a new file\n error_code = send_command_w(\n device, # device ID\n msg,\n flags,\n\n # reference to parameters structure or original value\n # in case of params=False/0/None/...\n byref(params) if params else params\n )\n\n # handle error messages if any\n if error_code:\n # device did not open, raise an exception\n get_error(error_code, byref(error_text), error_len)\n error_text = error_text.raw.replace(b'\\x00', b'').decode('utf-8')\n\n # either it can close already open device or it will fail because\n # the device is in non-closable state, but the end result is the same\n # and it makes no sense to parse MCI_CLOSE's error in this case\n send_command_w(device, MCI_CLOSE, 0, None)\n raise Exception(error_code, error_text)\n\n # return params struct because some commands write into it\n # to pass some values out of the local function scope\n return params\n\n\nclass WinRecorder:\n '''\n Generic wrapper for MCI_RECORD handling the filenames and device closing\n in the same approach like it is used for other platforms.\n\n .. versionadded:: 1.4.0\n '''\n\n def __init__(self, device, filename):\n self._device = device\n self._filename = filename\n\n @property\n def device(self):\n '''\n Public property returning device ID.\n\n .. versionadded:: 1.4.0\n '''\n return self._device\n\n @property\n def filename(self):\n '''\n Public property returning filename for current recording.\n\n .. versionadded:: 1.4.0\n '''\n return self._filename\n\n def record(self):\n '''\n Start recording a WAV sound.\n\n .. versionadded:: 1.4.0\n '''\n send_command(\n device=self.device,\n msg=MCI_RECORD,\n flags=0,\n params=None\n )\n\n def stop(self):\n '''\n Stop recording and save the data to a file path\n self.filename. Wait until the file is written.\n Close the device afterwards.\n\n .. versionadded:: 1.4.0\n '''\n\n # stop the recording first\n send_command(\n device=self.device,\n msg=MCI_STOP,\n flags=MCI_WAIT,\n params=None\n )\n\n # choose filename for the WAV file\n save_params = MCI_SAVE_PARMS()\n save_params.lpfilename = self.filename\n\n # save the sound data to a file and wait\n # until it ends writing to the file\n send_command(\n device=self.device,\n msg=MCI_SAVE,\n flags=MCI_SAVE_FILE | MCI_WAIT,\n params=save_params\n )\n\n # close the recording device\n send_command(\n device=self.device,\n msg=MCI_CLOSE,\n flags=0,\n params=None\n )\n\n\nclass WinPlayer:\n '''\n Generic wrapper for MCI_PLAY handling the device closing.\n\n .. versionadded:: 1.4.0\n '''\n\n def __init__(self, device):\n self._device = device\n\n @property\n def device(self):\n '''\n Public property returning device ID.\n\n .. versionadded:: 1.4.0\n '''\n return self._device\n\n def play(self):\n '''\n Start playing a WAV sound.\n\n .. versionadded:: 1.4.0\n '''\n play_params = MCI_PLAY_PARMS()\n play_params.dwFrom = 0\n\n send_command(\n device=self.device,\n msg=MCI_PLAY,\n flags=MCI_FROM,\n params=play_params\n )\n\n def stop(self):\n '''\n Stop playing a WAV sound and close the device.\n\n .. 
versionadded:: 1.4.0\n '''\n send_command(\n device=self.device,\n msg=MCI_STOP,\n flags=MCI_WAIT,\n params=None\n )\n\n # close the playing device\n send_command(\n device=self.device,\n msg=MCI_CLOSE,\n flags=0,\n params=None\n )\n\n\nclass WinAudio(Audio):\n '''\n Windows implementation of audio recording and audio playing.\n\n .. versionadded:: 1.4.0\n '''\n\n def __init__(self, file_path=None):\n # default path unless specified otherwise\n default_path = join(\n WinStoragePath().get_music_dir(),\n 'audio.wav'\n )\n super().__init__(file_path or default_path)\n\n self._recorder = None\n self._player = None\n self._current_file = None\n\n def _start(self):\n '''\n Start recording a WAV sound in the background asynchronously.\n\n .. versionadded:: 1.4.0\n '''\n\n # clean everything before recording in case\n # there is a different device open\n self._stop()\n\n # create structure and set device parameters\n open_params = MCI_OPEN_PARMS()\n open_params.lpstrDeviceType = 'waveaudio'\n open_params.lpstrElementName = ''\n\n # open a new device for recording\n open_params = send_command(\n device=0, # device ID before opening\n msg=MCI_OPEN,\n\n # empty filename in lpstrElementName\n # device type in lpstrDeviceType\n flags=MCI_OPEN_ELEMENT | MCI_OPEN_TYPE,\n params=open_params\n )\n\n # get recorder with device id and path for saving\n self._recorder = WinRecorder(\n device=open_params.wDeviceID,\n filename=self._file_path\n )\n self._recorder.record()\n\n # Setting the currently recorded file as current file\n # for using it as a parameter in audio player\n self._current_file = self._recorder.filename\n\n def _stop(self):\n '''\n Stop recording or playing of a WAV sound.\n\n .. versionadded:: 1.4.0\n '''\n\n if self._recorder:\n self._recorder.stop()\n self._recorder = None\n\n if self._player:\n self._player.stop()\n self._player = None\n\n def _play(self):\n '''\n Play a WAV sound from a file. Prioritize latest recorded file before\n default file path from WinAudio.\n\n .. 
versionadded:: 1.4.0\n '''\n\n # create structure and set device parameters\n open_params = MCI_OPEN_PARMS()\n open_params.lpstrDeviceType = 'waveaudio'\n open_params.lpstrElementName = self._current_file or self._file_path\n\n # open a new device for playing\n open_params = send_command(\n device=0, # device ID before opening\n msg=MCI_OPEN,\n\n # existing filename in lpstrElementName\n # device type in lpstrDeviceType\n flags=MCI_OPEN_ELEMENT | MCI_OPEN_TYPE,\n params=open_params\n )\n\n # get player with the opened device id\n self._player = WinPlayer(device=open_params.wDeviceID)\n self._player.play()\n\n\ndef instance():\n '''\n Instance for facade proxy.\n '''\n return WinAudio()\n","repo_name":"kivy/plyer","sub_path":"plyer/platforms/win/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":9787,"program_lang":"python","lang":"en","doc_type":"code","stars":1476,"dataset":"github-code","pt":"61"}
{"seq_id":"18558045345","text":"from app import app, db\nfrom flask import render_template, redirect, url_for, request, flash, session, g, jsonify, json\nfrom forms import Form\nfrom models import Country, State, City\n\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n form = Form()\n form.country.choices = [(country.id, country.name) for country in Country.query.all()]\n \n if request.method == 'POST':\n city = City.query.filter_by(id=form.city.data).first()\n country = Country.query.filter_by(id=form.country.data).first()\n state = State.query.filter_by(id=form.state.data).first()\n return 'Country : {}, State: {}, City: {}'.format(country.name, state.name, city.name)\n return render_template('index.html', form=form)\n \n@app.route('/state/<get_state>')\ndef statebycountry(get_state):\n state = State.query.filter_by(country_id=get_state).all()\n stateArray = []\n for city in state:\n stateObj = {}\n stateObj['id'] = city.id\n stateObj['name'] = city.name\n stateArray.append(stateObj)\n return jsonify({'statecountry' : stateArray})\n \n@app.route('/city/<get_city>')\ndef city(get_city):\n state_data = City.query.filter_by(stateid=get_city).all()\n cityArray = []\n for city in state_data:\n cityObj = {}\n cityObj['id'] = city.id\n cityObj['name'] = city.name\n cityArray.append(cityObj)\n return jsonify({'citylist' : cityArray}) \n\n\n\n","repo_name":"GakoMK/Flask-Dynamic-Select","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"4219594770","text":"import cv2 as cv\r\n\r\ncapture=cv.VideoCapture(0)\r\n\r\nwhile True:\r\n isTrue, frame=capture.read() #read frame by frame, isTrue returns whether the frame was successfully read or not\r\n cv.imshow('Video',frame)\r\n\r\n if cv.waitKey(20) & 0xFF==ord('d'):\r\n break\r\n\r\ncapture.release() #releasing capture variable\r\ncv.destroyAllWindows()\r\n\r\n","repo_name":"amitk29/OpenCV_tutorial_basics","sub_path":"video_read.py","file_name":"video_read.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"15293030817","text":"# -*- coding: utf-8 -*-\n\"\"\"mytimer params.\"\"\"\n\nMY_TIMER_VERSION = \"1.0\"\nINPUT_ERROR_MESSAGE = \"[Error] Wrong input\"\nSOUND_ERROR_MESSAGE = \"[Error] Unable to play sound\"\nPROGRAMS_LIST_TEMPLATE = \"{0}. 
`{1}` - {2}\"\nTIME_PRINT_TEMPLATE = \"{0} : {1} : {2}\"\nFACES_LIST_EXAMPLE_MESSAGE = \"12 : 34 : 56\"\nINPUT_EXAMPLE = \"Example: mytimer --hour=1 --minute=1 --second=1\"\nTIME_ELEMENTS = [\"minute\", \"second\", \"hour\"]\nMESSAGE_TEMPLATE = \"Message: {0}\"\n\nTONES_MAP = {\n 1: '1.wav',\n 2: '2.wav',\n 3: '3.wav',\n 4: '4.wav',\n 5: '5.wav',\n 6: '6.wav',\n 7: '7.wav',\n 8: '8.wav',\n 9: '9.wav',\n 10: '10.wav',\n 11: '11.wav'}\n\nFACES_MAP = {\n 1: 'bulbhead',\n 2: 'soft',\n 3: '4max',\n 4: '5x7',\n 5: 'charact4',\n 6: 'o8',\n 7: 'alphabet',\n 8: 'shadow',\n 9: 'speed',\n 10: 'rounded',\n 11: 'chartri',\n 12: 'standard',\n 13: 'contessa',\n 14: 'avatar',\n 15: 'mini',\n 16: 'twopoint',\n 17: '3x5',\n 18: 'threepoint',\n 19: 'ascii_new_roman',\n 20: 'serifcap',\n 21: 'lockergnome',\n 22: 'dotmatrix',\n 23: '3-d',\n 24: 'sweet',\n 25: 'epic',\n}\n\nDEFAULT_PARAMS = {\n \"hour\": 0,\n \"minute\": 0,\n \"second\": 0,\n \"alarm\": 0,\n \"alarm_repeat\": 1,\n \"face\": 1,\n \"tone\": 1,\n \"message\": \"\",\n \"v_shift\": 0,\n \"h_shift\": 0,\n}\n\nPROGRAMS_DEFAULTS = {\n \"alarm\": 1\n}\n\nPROGRAMS_MAP = {\n \"poached-egg\": {\n \"hour\": 0,\n \"minute\": 1,\n \"second\": 30,\n \"message\": \"Poached egg (1.5 mins)\",\n },\n \"boiled-egg\": {\n \"hour\": 0,\n \"minute\": 3,\n \"second\": 0,\n \"message\": \"Boiled egg (3 mins)\",\n },\n \"soft-boiled-egg\": {\n \"hour\": 0,\n \"minute\": 5,\n \"second\": 0,\n \"message\": \"Soft-boiled egg (5 mins)\",\n },\n \"hard-boiled-egg\": {\n \"hour\": 0,\n \"minute\": 10,\n \"second\": 0,\n \"message\": \"Hard-boiled egg (10 mins)\",\n },\n \"pasta\": {\n \"hour\": 0,\n \"minute\": 8,\n \"second\": 0,\n \"message\": \"Pasta (8 mins)\",\n },\n \"quick-rice\": {\n \"hour\": 0,\n \"minute\": 10,\n \"second\": 0,\n \"message\": \"Quick cooking rice (10 mins)\",\n },\n \"japanese-green-tea\": {\n \"hour\": 0,\n \"minute\": 2,\n \"second\": 0,\n \"message\": \"Japanese green tea (2 mins)\",\n },\n \"tea-bag\": {\n \"hour\": 0,\n \"minute\": 2,\n \"second\": 0,\n \"message\": \"Tea bag (2 mins)\",\n },\n \"chinese-green-tea\": {\n \"hour\": 0,\n \"minute\": 5,\n \"second\": 0,\n \"message\": \"Chinese green tea (5 mins)\",\n },\n \"black-tea\": {\n \"hour\": 0,\n \"minute\": 5,\n \"second\": 0,\n \"message\": \"Black tea (5 mins)\",\n },\n \"oolong-tea\": {\n \"hour\": 0,\n \"minute\": 5,\n \"second\": 0,\n \"message\": \"Oolong tea (5 mins)\",\n },\n \"fruit-tea\": {\n \"hour\": 0,\n \"minute\": 8,\n \"second\": 0,\n \"message\": \"Fruit tea (8 mins)\",\n },\n \"white-tea\": {\n \"hour\": 0,\n \"minute\": 10,\n \"second\": 0,\n \"message\": \"White tea (10 mins)\",\n },\n \"work\": {\n \"hour\": 0,\n \"minute\": 25,\n \"second\": 0,\n \"message\": \"Time to work (25 mins)\",\n },\n \"short-break\": {\n \"hour\": 0,\n \"minute\": 10,\n \"second\": 0,\n \"message\": \"Short break (10 mins)\",\n },\n \"long-break\": {\n \"hour\": 0,\n \"minute\": 30,\n \"second\": 0,\n \"message\": \"Long break (30 mins)\",\n },\n \"noodle\": {\n \"hour\": 0,\n \"minute\": 3,\n \"second\": 0,\n \"message\": \"Instant noodle (3 mins)\",\n }\n}\n","repo_name":"sepandhaghighi/mytimer","sub_path":"mytimer/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"61"} +{"seq_id":"9825081335","text":"#Given a sequence of n integer numbers, extract the sub-sequence of maximum\r\n#length which is in ascending order. 
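(e.g. for [1, 2, 3, 4, 1, 5, 1, 6, 7] the longest ascending run is [1, 2, 3, 4])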
\r\n\r\n\r\nlist1 = [1, 2, 3, 4, 1, 5, 1, 6, 7]\r\n\r\ndef sublist(list1):\r\n \"\"\"extracts all contiguous ascending runs of the list\"\"\"\r\n results = []\r\n currentlist = []\r\n #if there is no list return nothing (base case)\r\n if(len(list1) == 0):\r\n return results\r\n for i in range(len(list1)):\r\n #if the current ascending run already has elements\r\n if(len(currentlist) > 0):\r\n #when the next item is smaller than the previous it moves the run into\r\n #results and clears the currentlist variable to be used again\r\n if(list1[i] < tail(currentlist)):\r\n results.append(currentlist)\r\n currentlist = []\r\n currentlist.append(list1[i])\r\n results.append(currentlist)\r\n return results\r\n\r\ndef tail(seq):\r\n \"\"\"takes the end of the list\"\"\"\r\n return seq[len(seq) - 1]\r\n\r\nresults = sublist(list1)\r\n#key is the length of each array and reverse puts the largest at the front\r\n#and smallest at the back\r\nresults.sort(key = len, reverse = True)\r\nprint('Input: ' + str(list1))\r\nprint('Output: ' + str(results))\r\nprint('largest list: ' + str(results[0]))\r\n","repo_name":"Rick24/210CT-CW","sub_path":"completed/Week 5 Q1 (10).py","file_name":"Week 5 Q1 (10).py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"31788240066","text":"import dearpygui.dearpygui as gui\nimport socket\nimport pymem\n\nhostname = socket.gethostname()\n\nprint('-------------------')\n\npm = pymem.Pymem('RobloxPlayerBeta.exe') # <--- your process here\nprint(f'\\nprocess base: {pm.process_base}')\nprint(f'process handle: {pm.process_handle}')\nprint(f'process id: {pm.process_id}')\nprint(f'your pc name : {hostname}')\n\nprint('\\n-------------------')\n\ngui.create_context()\ngui.create_viewport(title='[+]', width=300, height=360)\ngui.setup_dearpygui()\ngui.set_viewport_always_top(True)\n\nwith gui.window(label='[+]', width=1000, height=360, no_title_bar=True, no_resize=True, no_move=True):\n with gui.tab_bar(label='tabs'):\n with gui.tab(label='[+]'):\n gui.add_button(label=f'process id: {pm.process_id}', tag='blehh')\n gui.add_button(label=f'process handle: {pm.process_handle}', tag='blehh1')\n gui.add_button(label=f'process base: {pm.process_base}', tag='blehh2')\n gui.add_button(label=f'your pc name : {hostname}', tag='blehh3')\n\ngui.show_viewport()\ngui.start_dearpygui()\ngui.destroy_context()\n\ninput(\"\\npress enter to close...\")\n[input(i) for i in range(-2, 0, -3)]\n","repo_name":"dewwxfr/process","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"74292880515","text":"from __future__ import annotations\n\nfrom typing import Any, Optional\n\nfrom paddle.io import Dataset\n\nURL_PREFIX: str\nCIFAR10_URL: Any\nCIFAR10_MD5: str\nCIFAR100_URL: Any\nCIFAR100_MD5: str\nMODE_FLAG_MAP: Any\n\nclass Cifar10(Dataset):\n mode: Any = ...\n backend: Any = ...\n data_file: Any = ...\n transform: Any = ...\n dtype: Any = ...\n def __init__(\n self,\n data_file: Any | None = ...,\n mode: str = ...,\n transform: Any | None = ...,\n download: bool = ...,\n backend: Any | None = ...,\n ) -> None: ...\n def __getitem__(self, idx: Any): ...\n def __len__(self): ...\n\nclass Cifar100(Cifar10):\n def __init__(\n self,\n data_file: Any | None = ...,\n mode: str = ...,\n transform: Any | None = ...,\n download: bool = ...,\n backend: Any | None = ...,\n ) -> 
None: ...\n","repo_name":"cattidea/paddlepaddle-stubs","sub_path":"paddle-stubs/vision/datasets/cifar.pyi","file_name":"cifar.pyi","file_ext":"pyi","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"35903646724","text":"import json\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom pprint import pprint\n\nPORT = 8000\n\n\nclass RequestHandler(BaseHTTPRequestHandler):\n def do_POST(self):\n # Read the JSON payload of the request\n payload_len = int(self.headers.get(\"Content-Length\"))\n payload = self.rfile.read(payload_len)\n payload = json.loads(payload.decode(\"utf-8\"))\n\n # Output the payload to the console\n print(\"\\n\")\n pprint(payload)\n\n # Send a response with a 200 status code\n self.send_response(200)\n self.end_headers()\n\n\nif __name__ == \"__main__\":\n print(f\"start listening on port {PORT}\")\n httpd = HTTPServer((\"localhost\", PORT), RequestHandler)\n httpd.serve_forever()\n","repo_name":"martinfaucheux/Test-ADO-Repo-perf","sub_path":"run_simple_server.py","file_name":"run_simple_server.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73506720834","text":"def divisor(numero): # CORRETO (APENAS NUMEROS PRIMOS)\n numeros = []\n produto = 1\n for i in range(2, numero // 2 + 1):\n if numero % i == 0 and len(divisor(i)) == 0:\n numeros.append(i)\n produto *= i\n return numeros\n\n\ndef quadrado_perfeito(numero): # CORRETO\n raiz = int(numero ** 0.5)\n if raiz ** 2 != numero:\n return False\n else:\n return True\n\n\ndef multiplicar(divisores, numero):\n multiplica = 1\n contador = 0\n for i in range(len(divisores)):\n while numero % divisores[i] == 0: #QUANTAS VEZES O DIVISOR PRIMO SE REPETE\n contador += 1\n numero = numero / divisores[i]\n if contador % 2 != 0:\n multiplica *= divisores[i]\n contador = 0\n return multiplica\n\n\ntestes = int(input())\n\nfor i in range(testes):\n numero = int(input())\n boleana = quadrado_perfeito(numero)\n if boleana: # CASO SEJA QUADRADO PERFEITO\n resultado = numero\n else: # SE NÃO FOR QUADRADO PERFEITO\n divisores = divisor(numero) # ACHAMOS OS DIVISORES\n if len(divisores) == 0: # SE O NUMERO FOR PRIMO\n resultado = numero ** 2\n else:\n multiplica = multiplicar(divisores, numero)\n resultado = numero * multiplica\n\n print('Caso #%i: %i' %(i+1,resultado))\n\n#EXCEDEU TEMPO LIMITE\n","repo_name":"Jumaruba/URI","sub_path":"PYTHON/1776(TL).py","file_name":"1776(TL).py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"39559192700","text":"#instances ad Higher order functions\nimport time\nfrom turtle import Turtle, Screen\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.bgcolor(\"black\")\nscreen.tracer(0)\nscreen.update()\n\nplayer = Player()\ncar_manager = CarManager()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkey(player.go_up, \"Up\")\n\n\ngame_is_on = True\nwhile game_is_on:\n time.sleep(0.1)\n screen.update()\n\n car_manager.create_car()\n car_manager.move_car()\n\n # When there is a collision between a car and the turtle\n for car in car_manager.all_cars:\n if car.distance(player) < 20:\n game_is_on = False\n scoreboard.game_over()\n\n # When the tutle reaches the finish line\n if 
player.finish_line():\n player.starting_position()\n car_manager.increase_speed()\n scoreboard.increase_level()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nscreen.exitonclick()\n","repo_name":"johnalao/Highway-Traffic","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1099652139","text":"\"\"\"\nQueue to stack converter.\n\"\"\"\n\nfrom arrayqueue import ArrayQueue # or from linkedqueue import LinkedQueue\nfrom arraystack import ArrayStack # or from linkedstack import LinkedStack\n\n\ndef queue_to_stack(queue):\n\n elements = []\n while not queue.isEmpty():\n elements.append(queue.pop())\n for el in elements:\n queue.add(el)\n # for i in range(-1, -(len(elements) + 1), -1):\n # stack.push(elements[i])\n stack = ArrayStack(reversed(elements))\n return stack\n\n\nif __name__ == \"__main__\":\n queue = ArrayQueue()\n for i in range(10):\n queue.add(i)\n stack = queue_to_stack(queue)\n print(queue)\n print(stack)\n print(stack.pop())\n print(queue.pop())\n stack.add(11)\n queue.add(11)\n print(queue)\n print(stack)\n","repo_name":"viktorpovazhuk/12_2_queue","sub_path":"queue_to_stack.py","file_name":"queue_to_stack.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74061622275","text":"'''\nTakes folder of wav files as input and outputs clean midi melody to folder\n\nCommand line args:\n1. Audio folder path\n2. (Optional) Midi folder path\n\nRequires spleeter by deezer to be installed!!!\nhttps://github.com/deezer/spleeter\n\nRequires audio2midi.py to be in same directory!!!\nedited https://github.com/tiagoft/audio_to_midi/blob/master/audio2midi.py\n'''\n\nimport os\nimport sys\nimport time\nimport audio2midi\nimport librosa\nimport pypianoroll\n\narguments = len(sys.argv)\nif arguments == 1:\n print(\"Command line arg missing: audio folder path, then midi folder path\")\n sys.exit()\nelif arguments > 1:\n audioFolder = sys.argv[1]\n if arguments == 3:\n midiPath = sys.argv[2]\n else:\n try:\n os.mkdir(os.getcwd()+'/MIDI')\n midiPath = 'MIDI'\n except OSError as error:\n print(error)\n print(\"Command line arg missing: midi path, creating MIDI directory if one doesn't exist\")\n\n# testAudioFolder = \"TestAudio\"\naudioPathList = []\nvocalPathFolder = \"stems\"\n\n\nfor fileName in os.listdir(audioFolder):\n audioPathList.append(f\"{audioFolder}/{fileName}\")\n spleeterCommand = f\"spleeter separate -o {vocalPathFolder} {' '.join(audioPathList)}\"\n\nos.system(spleeterCommand)\n\nif not os.path.exists(vocalPathFolder):\n print(\"spleeter failed\")\n\nfor fileName in os.listdir(vocalPathFolder):\n midiFile = f\"{midiPath}/{fileName}.mid\"\n vocalFile = f\"{vocalPathFolder}/{fileName}/vocals.wav\"\n audio2midi.run(vocalFile, midiFile)\n","repo_name":"lucaspbastos/Audio-Based-Melody-Generation","sub_path":"src/frankenSong.py","file_name":"frankenSong.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5961673679","text":"# making 06_maori_numbers_quiz into a quiz. 
Tested without a shuffle to see if it can ask and check questions\n\n\nquestion_number = 0\n# List of numbers in Maori\nmaori_numbers = [[\"tahi\", '1'], [\"rua\", '2'], [\"toru\", '3'], [\"wha\", '4'], [\"rima\", '5'], [\"ono\", '6'],\n                 [\"whitu\", '7'], [\"waru\", '8'], [\"iwa\", '9'], [\"tekau\", '10']]\n\n# While loop that repeats for all numbers 1-10\nwhile question_number < 10:\n\n    # Asking the user the question.\n    user_answer = input(f\"What is the Maori word {maori_numbers[question_number][0]} as a number: \")\n\n    # Checking the user's answer.\n    if user_answer == maori_numbers[question_number][1]:\n        print(\"You are correct\")\n    else:\n        print(\"You are incorrect\")\n    question_number += 1\n","repo_name":"chonge24/Assessment","sub_path":"06_maori_numbers_quiz_v2.py","file_name":"06_maori_numbers_quiz_v2.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71229589635","text":"__author__ = \"Leandro C. Hermida\"\n__email__ = \"hermidalc@pitt.edu\"\n__license__ = \"BSD 3-Clause\"\n\nimport pandas as pd\n\ncount_files = snakemake.input.get(\"counts\")\nassert count_files is not None, \"input: count_files is a required parameter\"\nsample_names = snakemake.params.get(\"samples\")\nassert sample_names is not None, \"params: samples is a required parameter\"\n\nstrand = snakemake.params.get(\"strand\")\nif strand is not None:\n    strands = [strand] if isinstance(strand, str) else strand\nelse:\n    strand_files = snakemake.input.get(\"strand\")\n    assert strand_files is not None, \"input/params: strand is a required parameter\"\n    strands = []\n    for strand_file in strand_files:\n        with open(strand_file, \"r\") as fh:\n            strands.append(fh.readline().strip())\n\ncount_df = pd.DataFrame()\nfor count_file, sample_name, strand in zip(count_files, sample_names, strands):\n    strand_idx = 2 if strand in (\"forward\", \"yes\") else 3 if strand == \"reverse\" else 1\n    counts = pd.read_csv(\n        count_file, sep=\"\\t\", header=None, index_col=0, usecols=[0, strand_idx]\n    )\n    counts.columns = [sample_name]\n    count_df = pd.concat([count_df, counts], axis=1, verify_integrity=True)\n    assert count_df.shape[0] == counts.shape[0], \"Count files do not have same rows\"\n\ncount_df = count_df.loc[~count_df.index.str.startswith(\"N_\")]\ncount_df.index.name = \"ID_REF\"\n\nout_file = snakemake.output[0]\n\nif count_df.columns.duplicated().any():\n    print(f\"Collapsing {out_file} technical replicates\", flush=True)\n    count_df = count_df.groupby(count_df.columns, axis=1).sum()\n\ncount_df.sort_index(inplace=True)\ncount_df.to_csv(out_file, sep=\"\\t\")\n","repo_name":"hermidalc/snakemake-wrappers","sub_path":"bio/star/count_matrix/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16099105520","text":"import pymysql\r\nimport datetime\r\nimport time\r\nimport os\r\nimport pandas as pd\r\nfrom pandas import ExcelWriter\r\nimport openpyxl\r\nfrom openpyxl import Workbook\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.styles import Border, Side, PatternFill, Font, GradientFill, Alignment\r\n\r\nimport connection\r\nfrom connection import db_ip_address\r\nfrom connection import db_user\r\nfrom connection import db_password\r\nfrom connection import db_name\r\nfrom connection import student_data\r\n\r\ndb = pymysql.connect(host=db_ip_address, user=db_user, passwd=db_password, 
db=db_name)\r\ncursor = db.cursor()\r\n\r\nsheet_name = 'Sheet1'\r\n\r\ndef jeinError():\r\n print(\"Please enter either 1 or 0.\\n\")\r\n\r\n\r\n\r\ndef wrongEntry():\r\n print(\"Wrong entry. Please try again.\\n\")\r\n\r\n\r\n\r\ndef update_Master_List(fname, lname, email, country, department, status):\r\n global student_data\r\n global sheet_name\r\n wb = load_workbook(student_data)\r\n ws = wb[sheet_name]\r\n ws.append([fname, lname, email, country, department, status])\r\n wb.save(student_data)\r\n wb.close()\r\n\r\n\r\n\r\n\r\ndef updatedEntry(result):\r\n\r\n for column in result:\r\n fname = column[0]\r\n lname = column[1]\r\n email = column[2]\r\n country = column[3]\r\n department = column[4]\r\n status = column[5]\r\n print(fname, lname, email, country, department, status)\r\n i = 0\r\n while i == 0:\r\n jein = input(\"Confirm entry (1) Yes\\t(0) No\")\r\n if jein == '1':\r\n db.commit()\r\n i = 1\r\n return 1\r\n elif jein == '0':\r\n db.rollback()\r\n i = 1\r\n else:\r\n jeinError()\r\n\r\n\r\ndef newFname(email, ws):\r\n\r\n new_fname = input(\"New first name:\")\r\n sql = 'UPDATE student_data SET First_Name = \"%s\" WHERE Email = \"%s\"' % (new_fname, email)\r\n cursor.execute(sql)\r\n sql1 = \"SELECT * FROM student_data WHERE Email = '%s'\" % (email)\r\n cursor.execute(sql1)\r\n result = cursor.fetchall()\r\n if updatedEntry(result) == 1:\r\n for rowNum in range(ws.max_row, 1, -1):\r\n if ws.cell(row=rowNum, column=3).value == email:\r\n ws.cell(row=rowNum, column=1).value = new_fname\r\n\r\n\r\ndef newLname(email, ws):\r\n\r\n new_lname = input(\"New last name:\")\r\n sql = 'UPDATE student_data SET Last_Name = \"%s\" WHERE Email = \"%s\"' % (new_lname, email)\r\n cursor.execute(sql)\r\n sql1 = \"SELECT * FROM student_data WHERE Email = '%s'\" % (email)\r\n cursor.execute(sql1)\r\n result = cursor.fetchall()\r\n if updatedEntry(result) == 1:\r\n for rowNum in range(ws.max_row, 1, -1):\r\n if ws.cell(row=rowNum, column=3).value == email:\r\n ws.cell(row=rowNum, column=2).value = new_lname\r\n\r\n\r\n\r\ndef newCountry(email, ws):\r\n\r\n new_country = input(\"New country:\")\r\n sql = \"UPDATE student_data SET Country = '%s' WHERE Email = '%s'\" % (new_country, email)\r\n cursor.execute(sql)\r\n sql1 = \"SELECT * FROM student_data WHERE Email = '%s'\" % (email)\r\n cursor.execute(sql1)\r\n result = cursor.fetchall()\r\n if updatedEntry(result) == 1:\r\n for rowNum in range(ws.max_row, 1, -1):\r\n if ws.cell(row=rowNum, column=3).value == email:\r\n ws.cell(row=rowNum, column=4).value = new_country\r\n\r\n\r\ndef newDepartment(email, ws):\r\n\r\n new_department = input(\"New department:\")\r\n sql = \"UPDATE student_data SET Department = '%s' WHERE Email = '%s'\" % (new_department, email)\r\n cursor.execute(sql)\r\n sql1 = \"SELECT * FROM student_data WHERE Email = '%s'\" % (email)\r\n cursor.execute(sql1)\r\n result = cursor.fetchall()\r\n if updatedEntry(result) == 1:\r\n for rowNum in range(ws.max_row, 1, -1):\r\n if ws.cell(row=rowNum, column=3).value == email:\r\n ws.cell(row=rowNum, column=5).value = new_department\r\n \r\n\r\n\r\ndef newStatus(email, ws):\r\n i = 0\r\n while i == 0:\r\n new_status = input(\"New E/D:\")\r\n if new_status == 'E' or new_status == 'e':\r\n i = 1\r\n elif new_status == 'D' or new_status == 'd':\r\n i = 1\r\n else:\r\n print(\"Please enter either E or D.\\n\")\r\n\r\n sql = \"UPDATE student_data SET ED = '%s' WHERE Email = '%s'\" % (new_status, email)\r\n cursor.execute(sql)\r\n sql1 = \"SELECT * FROM student_data WHERE Email = '%s'\" % 
(email)\r\n cursor.execute(sql1)\r\n result = cursor.fetchall()\r\n if updatedEntry(result) == 1:\r\n for rowNum in range(ws.max_row, 1, -1):\r\n if ws.cell(row=rowNum, column=3).value == email:\r\n ws.cell(row=rowNum, column=6).value = new_status\r\n \r\n\r\n\r\ndef newEmail(email, ws):\r\n\r\n new_email = input(\"New email address:\")\r\n sql = \"UPDATE student_data SET Email = '%s' WHERE Email = '%s'\" % (new_email, email)\r\n cursor.execute(sql)\r\n sql1 = \"SELECT * FROM student_data WHERE Email = '%s'\" % (new_email)\r\n cursor.execute(sql1)\r\n result = cursor.fetchall()\r\n if updatedEntry(result) == 1:\r\n for rowNum in range(ws.max_row, 1, -1):\r\n if ws.cell(row=rowNum, column=3).value == email:\r\n ws.cell(row=rowNum, column=3).value = new_email\r\n\r\n\r\ndef delete_Erasmi(ws):\r\n\r\n sql = \"DELETE FROM student_data WHERE ED = '%s'\" % ('E')\r\n cursor.execute(sql)\r\n\r\n i = 0\r\n while i == 0:\r\n jein = input(\"Delete all Erasmi. Are you sure? (1) Yes\\t(0) No\")\r\n if jein == '1':\r\n j = 0\r\n while j == 0:\r\n jein2 = input(\"ARE YOU FUCKING SURE??? (1) Yes\\t(0) No\")\r\n if jein2 == '1':\r\n for rowNum in range(ws.max_row, 1, -1):\r\n if ws.cell(row=rowNum, column=6).value == 'E':\r\n ws.delete_rows(rowNum)\r\n db.commit()\r\n i = 1\r\n j = 1\r\n elif jein2 == '0':\r\n db.rollback()\r\n i = 1\r\n j = 1\r\n else:\r\n jeinError()\r\n elif jein == '0':\r\n db.rollback()\r\n i = 1\r\n else:\r\n jeinError()\r\n\r\n\r\ndef delete_Degree(ws):\r\n\r\n sql = \"DELETE FROM student_data WHERE ED = '%s'\" % ('D')\r\n cursor.execute(sql)\r\n\r\n i = 0\r\n while i == 0:\r\n jein = input(\"Delete all degree students. Are you sure? (1) Yes\\t(0) No\")\r\n if jein == '1':\r\n j = 0\r\n while j == 0:\r\n jein2 = input(\"ARE YOU FUCKING SURE??? (1) Yes\\t(0) No\")\r\n if jein2 == '1':\r\n for rowNum in range(ws.max_row, 1, -1):\r\n if ws.cell(row=rowNum, column=6).value == 'D':\r\n ws.delete_rows(rowNum)\r\n db.commit()\r\n i = 1\r\n j = 1\r\n elif jein2 == '0':\r\n db.rollback()\r\n i = 1\r\n j = 1\r\n else:\r\n jeinError()\r\n elif jein == '0':\r\n db.rollback()\r\n i = 1\r\n else:\r\n jeinError()\r\n\r\n\r\ndef delete_Everyone(ws):\r\n\r\n sql = \"DELETE FROM student_data\"\r\n cursor.execute(sql)\r\n\r\n i = 0\r\n while i == 0:\r\n jein = input(\"Delete everyone. Are you sure? (1) Yes\\t(0) No\")\r\n if jein == '1':\r\n j = 0\r\n while j == 0:\r\n jein2 = input(\"ARE YOU FUCKING SURE??? (1) Yes\\t(0) No\")\r\n if jein2 == '1':\r\n db.commit()\r\n ws.delete_rows(2, ws.max_row)\r\n i = 1\r\n j = 1\r\n elif jein2 == '0':\r\n db.rollback()\r\n i = 1\r\n j = 1\r\n else:\r\n jeinError()\r\n elif jein == '0':\r\n db.rollback()\r\n i = 1\r\n else:\r\n jeinError()\r\n\r\n\r\ndef delete_Individual(email, ws):\r\n\r\n sql = \"SELECT * FROM student_data WHERE Email = '%s'\" % (email)\r\n cursor.execute(sql)\r\n result = cursor.fetchall()\r\n for column in result:\r\n fname = column[0]\r\n lname = column[1]\r\n\r\n try:\r\n print(\"\\nData of\", fname, lname, \"will be deleted.\")\r\n\r\n i = 0\r\n while i == 0:\r\n jein = input(\"Confirm? 
(1) Yes\\t(0) No\")\r\n if jein == '1':\r\n i = 1\r\n sql1 = \"DELETE FROM student_data WHERE Email = '%s'\" % (email)\r\n cursor.execute(sql1)\r\n db.commit()\r\n for rowNum in range(ws.max_row, 1, -1):\r\n if ws.cell(row=rowNum, column=3).value == email:\r\n ws.delete_rows(rowNum)\r\n elif jein == '0':\r\n db.rollback()\r\n i = 1\r\n else:\r\n jeinError()\r\n except:\r\n print(\"Person does not exist in the database to begin with.\\n\")\r\n\r\n\r\n\r\ndef delete_Mode():\r\n\r\n print(\"\\n-------------Delete mode-------------\\n\")\r\n \r\n wb = load_workbook(student_data)\r\n ws = wb[sheet_name]\r\n i = 0\r\n while i == 0:\r\n choice = input(\"What would you like to delete from the table?\\n(1) All exchange students\\t\\t(3) All degree Students\\n(2) Individual\\t\\t\\t\\t\\t(4) Everyone\")\r\n if choice == '1':\r\n delete_Erasmi(ws)\r\n i = 1\r\n elif choice == '2':\r\n email = input(\"\\nEmail address of student to be deleted:\")\r\n delete_Individual(email, ws)\r\n i = 1\r\n elif choice == '3':\r\n delete_Degree(ws)\r\n i = 1\r\n elif choice == '4':\r\n delete_Everyone(ws)\r\n i = 1\r\n else:\r\n wrongEntry()\r\n \r\n wb.save(student_data)\r\n wb.close()\r\n\r\n\r\n\r\ndef add_Individual():\r\n print(\"\\n-------------Adding new student-------------\\n\")\r\n\r\n j = 0\r\n while j == 0:\r\n fname = input(\"First name:\")\r\n lname = input(\"Last name:\")\r\n email = input(\"Email address:\")\r\n country = input(\"Country:\")\r\n department = input(\"Department at TUM:\")\r\n i = 0\r\n while i == 0:\r\n status = input(\"E or D Student?\")\r\n if status == 'D' or status == 'd':\r\n i = 1\r\n elif status == 'E' or status == 'e':\r\n i = 1\r\n else:\r\n print(\"Please enter either E or D.\\n\")\r\n\r\n sql = 'INSERT INTO student_data VALUE(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")' % (fname, lname, email, country, department, status)\r\n cursor.execute(sql)\r\n result = cursor.fetchall()\r\n for column in result:\r\n fname = column[0]\r\n lname = column[1]\r\n email = column[2]\r\n country = column[3]\r\n department = column[4]\r\n status = column[5]\r\n print(fname, lname, email, country, department, status)\r\n i = 0\r\n while i == 0:\r\n jein = input(\"\\nConfirm entry (1) Yes\\t(0) No\")\r\n if jein == '1':\r\n update_Master_List(fname, lname, email, country, department, status)\r\n db.commit()\r\n i = 1\r\n elif jein == '0':\r\n db.rollback()\r\n i = 1\r\n else:\r\n jeinError()\r\n\r\n i = 0\r\n while i == 0:\r\n another = input(\"Add another person? 
(1) Yes\\t(0) No\")\r\n if another == '1':\r\n i = 1\r\n elif another == '0':\r\n i = 1\r\n j = 1\r\n else:\r\n jeinError()\r\n\r\n\r\n\r\ndef import_Data():\r\n global student_data\r\n global sheet_name\r\n\r\n wb = load_workbook(student_data)\r\n ws = wb[sheet_name]\r\n total_entry = ws.max_row - 1 # -1 as the first row in the excel file is the table headings\r\n\r\n sql = \"DELETE FROM student_data\"\r\n cursor.execute(sql)\r\n db.commit()\r\n\r\n i = 0\r\n for i in range(total_entry):\r\n fname = ws['A'+str(i+2)].value\r\n lname = ws['B'+str(i+2)].value\r\n email = ws['C'+str(i+2)].value\r\n country = ws['D'+str(i+2)].value\r\n department = ws['E'+str(i+2)].value\r\n status = ws['F'+str(i+2)].value\r\n try:\r\n sql = 'INSERT INTO student_data VALUE(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")' % (fname, lname, email, country, department, status)\r\n cursor.execute(sql)\r\n db.commit()\r\n except:\r\n continue\r\n\r\n\r\ndef edit_Particulars():\r\n print(\"\\n-------------Update particulars-------------\\n\")\r\n email_student = input(\"Email address of student:\")\r\n\r\n sql = \"SELECT * FROM student_data WHERE Email = '%s'\" % (email_student)\r\n cursor.execute(sql)\r\n result = cursor.fetchall()\r\n\r\n try:\r\n for column in result:\r\n fname = column[0]\r\n lname = column[1]\r\n\r\n print(\"\\nData of\", fname, lname, \"will be edited.\")\r\n wb = load_workbook(student_data)\r\n ws = wb[sheet_name]\r\n\r\n j = 0\r\n while j == 0:\r\n edit = input(\"What would you like to edit?\\n(1) First name\\t\\t\\t(4) Country\\n(2) Last name\\t\\t\\t(5) Department\\n(3) Email\\t\\t\\t\\t(6) E/D\")\r\n if edit == '1':\r\n newFname(email_student, ws)\r\n elif edit == '2':\r\n newLname(email_student, ws)\r\n elif edit == '3':\r\n newEmail(email_student, ws)\r\n elif edit == '4':\r\n newCountry(email_student, ws)\r\n elif edit == '5':\r\n newDepartment(email_student, ws)\r\n elif edit == '6':\r\n newStatus(email_student, ws)\r\n else:\r\n wrongEntry()\r\n\r\n i = 0\r\n while i == 0:\r\n jein = input(\"Edit other particulars of the same person? (1) Yes\\t(0) No\")\r\n if jein == '1':\r\n i = 1\r\n elif jein == '0':\r\n i = 1\r\n j = 1\r\n else:\r\n jeinError()\r\n \r\n wb.save(student_data)\r\n wb.close()\r\n \r\n except:\r\n print(\"Person does not exist in the databank.\\n\")\r\n\r\n\r\n\r\n\r\n# main body\r\nprint(\"\\n-------------Welcome to the student management program-------------\")\r\n\r\ni = 0\r\nwhile i == 0:\r\n choice = input(\"\\nChoose an option below:\\n(1) Import from Excel\\t\\t(3) Add an individual\\n(2) Edit particulars\\t\\t(4) Delete Mode\")\r\n if choice == '1':\r\n import_Data()\r\n i = 1\r\n elif choice == '2':\r\n edit_Particulars()\r\n i = 1\r\n elif choice == '3':\r\n add_Individual()\r\n i = 1\r\n elif choice == '4':\r\n delete_Mode()\r\n i = 1\r\n else:\r\n wrongEntry()\r\n\r\ndb.close()\r\n","repo_name":"edwin-oei/tumi","sub_path":"studentManagement.py","file_name":"studentManagement.py","file_ext":"py","file_size_in_byte":14834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5434412157","text":"'''\nThis file contains functions that help with the dependancy determination\nof C files. 
It scans headers and function declarations to determine which\ndependancies are needed by each package.\n\nDue to the amazingness of gcc, what would have been an extremely hard problem\nwas compressed down to a single subprocess.call due to the -E flag.\n'''\n\nimport re\nfrom subprocess import check_output\n\ndef owns(path):\n '''\n This function takes a path and runs pacman -Qo on it and parses out the\n package name returned or None if the path is unowned.\n '''\n try:\n ret = check_output(['pacman','-Qo',path])\n return ret.split(b' ')[4]\n except:\n return None\n\n\ndef header2package(header):\n '''\n This function returns the package that owns the system header file.\n '''\n return owns('/usr/include/' + header)\n\ndef packagesNeeded(path):\n '''\n Scans a file for #includes and determines which package(s) \n are needed to build the file.\n '''\n # determine system includes\n includes = []\n with open(path,'r') as f:\n for line in f.readlines():\n match = re.match('\\s*#include\\s+<(\\w+[.]h)>', line, flags = re.IGNORECASE)\n if match:\n includes.append(match.group(1))\n # generate the package owner\n owners = []\n for i in includes:\n owner = header2package(i)\n if not owner in owners:\n owners.append(owner)\n return owners\n","repo_name":"hi117/distroTools","sub_path":"src/utils/langProc/c/depParse.py","file_name":"depParse.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29850939662","text":"try:\n from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative\nexcept ImportError:\n from checks import AgentCheck\n\nfrom datetime import datetime, timezone\n\nfrom six.moves.urllib.parse import quote\n\nfrom .constants import Constants, Tables\nfrom .stats import StatsDo\n\nfrom requests.exceptions import HTTPError # Import exceptions\nfrom json import JSONDecodeError\n\nclass ServicenowCheck(AgentCheck):\n def __init__(self, *args, **kwargs):\n super(ServicenowCheck, self).__init__(*args, **kwargs)\n self.log.debug(\"INFO: ServicenowCheck.__init__() --> initializing\")\n self.instance_name = self.instance.get(\"instance_name\")\n self.collect_statsdo = is_affirmative(\n self.instance.get(\"collect_statsdo\", True)\n )\n self.statsdo_auth = is_affirmative(\n self.instance.get(\"statsdo_auth\", False)\n )\n self.collect_itsm_metrics = is_affirmative(\n self.instance.get(\"collect_itsm_metrics\", False)\n )\n self.opt_fields = self.instance.get('opt_fields', [])\n self.base_url = 'https://' + \\\n str(self.instance_name) + '.service-now.com'\n\n if self.collect_statsdo:\n if self.statsdo_auth:\n self.stats_url = self.base_url + Constants.AUTH_STATSDO_PATH \n else:\n self.stats_url = self.base_url + Constants.STATSDO_PATH\n self.stats_title = (\n self.instance.get(\n \"stats_title\") or Constants.DEFAULT_STATS_TITLE\n )\n self.statsdo_url = self.base_url + Constants.STATSDO_PATH\n\n if self.collect_itsm_metrics:\n self.assignment_group_names = self.instance.get(\n \"assignment_groups\", False)\n self.table_api_url = self.base_url + Constants.TABLE_API_BASE_PATH\n self.agg_api_url = self.base_url + Constants.AGG_API_BASE_PATH\n self.encoded_query = \"\"\n self.open_incident_states = self.instance.get(\"incident_open_states\", [1, 2, 3])\n self.closed_incident_states = self.instance.get(\"incident_closed_states\", [6, 7, 8])\n self.time_format = self.instance.get(\"time_format\", Constants.SNC_TIME_FORMAT)\n self.check_initializations.append(self.validate_config)\n\n 
def check(self, instance):\n base_tags = Constants.REQUIRED_TAGS\n base_tags = base_tags + [\"instance_name:\" + self.instance_name]\n self.gauge(\"datadog.marketplace.rapdev.servicenow\", 1.0, tags=base_tags)\n\n if self.collect_statsdo:\n self.check_statsdo()\n\n if self.collect_itsm_metrics:\n connected = self.check_itsm_connection()\n if connected:\n try:\n self.check_itsm(instance)\n self.service_check(\n \"{}.itsm_check_online\".format(Constants.METRIC_PREFIX),\n self.OK,\n tags=base_tags\n )\n self.get_inc_this_year()\n except Exception as e:\n self.log.error(\"unknown exception running itsm check\\n\"\n + repr(e)\n )\n self.service_check(\n \"{}.itsm_check_online\".format(Constants.METRIC_PREFIX),\n self.CRITICAL,\n tags=base_tags\n )\n\n def validate_config(self):\n \"\"\"\n check for a valid config, e.g. at least one check,\n basic auth creds if statsdo_auth or itsm check enabled\n \"\"\"\n if not self.instance_name:\n self.log.warning(\n 'ServicenowCheck.validate_config() --> no instance name in config')\n raise ConfigurationError(Constants.ERROR_NO_INSTANCE)\n\n if not self.collect_itsm_metrics and not self.collect_statsdo:\n self.log.warning(\n 'ServicenowCheck.validate_config() --> At least one service check must be provided')\n raise ConfigurationError(Constants.ERROR_AT_LEAST_ONE_CHECK)\n\n if not self.collect_itsm_metrics:\n return\n \n \n\n\n\n def check_itsm_connection(self):\n \"\"\"\n attempt to get one incident from the table api\n to see if the itsm check can run\n \"\"\"\n\n base_tags = Constants.REQUIRED_TAGS\n base_tags = base_tags + [\"instance_name:\" + self.instance_name]\n\n connected = False\n single_incident_url = self.table_api_url + \"/incident?sysparm_limit=1\"\n try:\n response = self.http.get(single_incident_url,extra_headers=Constants.TABLE_API_HEADERS)\n if response.status_code != 200:\n raise Exception(response.status_code)\n except Exception as e:\n self.log.warning(\n \"ServicenowCheck.check_itsm_connection() --> Non 200 request returned from incident api - itsm check will not run + \\n \" + repr(e))\n self.service_check(\n \"{}.table_api_connection\".format(\n Constants.METRIC_PREFIX\n ),\n self.CRITICAL,\n tags=base_tags\n )\n else:\n self.service_check(\n \"{}.table_api_connection\".format(\n Constants.METRIC_PREFIX\n ),\n self.OK,\n tags=base_tags\n )\n connected = True\n finally:\n self.gauge_http_response(response)\n\n return connected\n\n def gauge_http_response(self, response):\n \"\"\"\n gauge http responses from Requests.Response object\n \"\"\"\n base_tags = Constants.REQUIRED_TAGS\n base_tags = base_tags + [\"instance_name:\" + self.instance_name]\n endpoint = response.url\n endpoint = endpoint[endpoint.find(\".com/\") + 4:endpoint.find(\"?\")]\n\n metric_tags = base_tags + [\"endpoint:\" + endpoint,\n \"status_code:\" + str(response.status_code)\n ]\n time_elapsed = response.elapsed.total_seconds()\n self.gauge(\n \"{}.http_response\".format(\n Constants.METRIC_PREFIX,\n response.status_code or 0\n ),\n 1,\n tags=metric_tags\n )\n self.gauge(\n \"{}.http_response_time\".format(\n Constants.METRIC_PREFIX,\n ),\n time_elapsed,\n tags=metric_tags\n )\n\n def check_itsm(self, instance):\n base_tags = Constants.REQUIRED_TAGS\n base_tags = base_tags + [\"instance_name:\" + self.instance_name]\n encoded_query = self.set_encoded_query()\n inc_url = self.table_api_url + \"/incident\"\n if encoded_query:\n inc_url = inc_url + \"?sysparm_query=\" + encoded_query + \"&sysparm_display_value=all\"\n\n \"\"\"\n get all the incidents we will work with 
this check\n essential to the check so kick back to\n check() if it fails\n \"\"\"\n try:\n resp = self.http.get(inc_url,extra_headers=Constants.TABLE_API_HEADERS)\n\n resp.raise_for_status()\n\n except HTTPError as e:\n self.log.error(\n \"failed to get incidents - itsm check cannot continue\" + repr(e))\n self.service_check(\n \"{}.table_api_connection\".format(Constants.METRIC_PREFIX),\n self.CRITICAL,\n tags=base_tags\n )\n raise Exception(e)\n finally:\n self.gauge_http_response(resp)\n\n data = resp.json()\n incidents = data[\"result\"]\n self.log.debug(\n \"Servicenowcheck.check_itsm() --> \"\n + str(len(incidents))\n + \" incidents returned from encoded query\"\n )\n\n incident_store = {}\n\n # hold glide records\n ag_cache = {0: None}\n user_cache = self.build_gr_cache(\n Tables.SYS_USER,\n \"sys_id\",\n Constants.FIELDS_USER\n )\n\n user_cache[0] = None\n\n location_cache = self.build_gr_cache(\n Tables.CMN_LOCATION,\n \"sys_id\",\n Constants.FIELDS_LOCATION\n )\n\n location_cache[0] = None\n\n task_sla_cache = self.build_gr_cache(\n Tables.TASK_SLA,\n \"task\",\n Constants.FIELDS_TASK_SLA\n )\n task_sla_cache[0] = None\n\n sla_name_cache = self.build_gr_cache(\n Tables.CONTRACT_SLA,\n \"sys_id\",\n Constants.FIELDS_CONTRACT_SLA\n )\n sla_name_cache = {0: None}\n\n for incident in incidents:\n try:\n if incident[\"assignment_group\"]:\n assignment_group_id = incident[\"assignment_group\"][\"value\"]\n else:\n assignment_group_id = 0\n\n if incident[\"caller_id\"][\"value\"]:\n caller_id = incident[\"caller_id\"][\"value\"]\n else:\n caller_id = 0\n\n if incident[\"location\"][\"value\"]:\n location_id = incident[\"location\"][\"value\"]\n else:\n location_id = 0\n\n if incident[\"assigned_to\"][\"value\"]:\n assigned_to_id = incident[\"assigned_to\"][\"value\"]\n else:\n assigned_to_id = 0\n\n if self.opt_fields:\n opt_tags = []\n for field in self.opt_fields:\n field_name = field\n if incident[field]:\n field_value = incident[field][\"value\"]\n else:\n field_value = ''\n opt_tag = field_name + \":\" + field_value\n if opt_tag not in opt_tags:\n opt_tags.append(opt_tag)\n\n priority = \"p\" + incident[\"priority\"][\"value\"]\n sys_created_on = incident[\"sys_created_on\"][\"display_value\"]\n incident_id = incident[\"sys_id\"][\"value\"]\n incident_state = incident[\"state\"][\"value\"] \n now = datetime.now(timezone.utc)\n sco_date = datetime.strptime(\n sys_created_on,\n self.time_format\n ).replace(tzinfo=timezone.utc)\n time_delta = now - sco_date\n time_delta_seconds = time_delta.total_seconds() / 3600\n caller_is_vip = \"false\"\n incident_sla_breached = 0\n assignment_group = incident[\"assignment_group\"][\"display_value\"]\n\n if not assignment_group:\n assignment_group_name = \"none\"\n else:\n assignment_group_name = assignment_group\n\n if assignment_group_name not in incident_store:\n incident_store[assignment_group_name] = {}\n incident_store[assignment_group_name][priority] = {}\n incident_store[assignment_group_name][priority][incident_state] = {\n \"vip\": 0,\n \"breached\": {},\n \"total\": 0,\n }\n elif priority not in incident_store[assignment_group_name]:\n incident_store[assignment_group_name][priority] = {}\n incident_store[assignment_group_name][priority][incident_state] = {\n \"vip\": 0,\n \"breached\": {},\n \"total\": 0,\n }\n elif incident_state not in incident_store[assignment_group_name][priority]:\n incident_store[assignment_group_name][priority][incident_state] = {\n \"vip\": 0,\n \"breached\": {},\n \"total\": 0\n }\n\n self.log.info(\n 
\"ServicenowCheck.check_itsm() --> processing slas\")\n\n if task_sla_cache.get(incident_id, False):\n sla = task_sla_cache[incident_id]\n\n if sla[\"sla\"]:\n sla_id = sla[\"sla\"][\"value\"]\n else:\n sla_id = 0\n\n sla_stage = sla[\"stage\"]\n sla_end_time = sla[\"end_time\"]\n sla_start_time = sla[\"start_time\"]\n sla_complete_time = 0\n\n if sla_end_time:\n sla_start_time_fmt = datetime.strptime(\n sla_start_time,\n self.time_format\n )\n sla_end_time_fmt = datetime.strptime(\n sla_end_time,\n self.time_format\n )\n sla_complete_time = sla_end_time_fmt - sla_start_time_fmt\n sla_complete_time = sla_complete_time.total_seconds() / 3600\n\n sla_def = self.get_or_update_cache(\n sla_id,\n \"contract_sla\",\n sla_name_cache\n )\n\n sla_name = sla_def.get(\"name\", \"undefined\")\n sla_target = sla_def.get(\"target\", \"undefined\")\n\n sla_tags = base_tags + [\n \"servicenow_assignment_group:\" + assignment_group_name,\n \"servicenow_priority:\" + priority,\n \"sla_name:\" + sla_name,\n \"sla_incident_number:\" + incident[\"number\"][\"value\"],\n \"sla_target:\" + sla_target,\n \"sla_stage:\" + sla_stage,\n ]\n\n if sla[\"stage\"] == \"in_progress\":\n sla_percentage = sla[\"percentage\"]\n if float(sla_percentage) > 100:\n sla_percentage = 100\n self.gauge(\n \"rapdev.servicenow.sla\", sla_percentage, tags=sla_tags\n )\n\n if sla_complete_time:\n self.gauge(\n \"rapdev.servicenow.sla_elapsed_time\",\n sla_complete_time,\n tags=sla_tags,\n )\n\n if sla[\"stage\"] == \"in_progress\" and sla[\"has_breached\"] == \"true\":\n # CHANGE\n some_value = incident_store[assignment_group_name][priority][incident_state][\"breached\"]\n if sla_name not in some_value:\n incident_store[assignment_group_name][priority][incident_state][\"breached\"][\n sla_target\n ] = {\"total\": 0}\n\n incident_store[assignment_group_name][priority][incident_state][\"breached\"][\n sla_target\n ][\"total\"] += 1\n\n caller = self.get_or_update_cache(\n caller_id,\n \"sys_user\",\n user_cache\n )\n\n if caller:\n caller_name = caller[\"name\"]\n else:\n caller_name = \"none\"\n\n # update open inc totals\n if int(incident_state) in self.open_incident_states:\n if caller and caller[\"vip\"] == \"true\":\n incident_store[assignment_group_name][priority][incident_state][\"vip\"] += 1\n\n if incident_sla_breached > 0:\n incident_store[assignment_group_name][priority][incident_state][\n \"breached\"\n ] += incident_sla_breached\n\n incident_store[assignment_group_name][priority][incident_state][\"total\"] += 1\n\n assigned_to = self.get_or_update_cache(\n assigned_to_id,\n \"sys_user\",\n user_cache\n )\n\n if assigned_to:\n assigned_to_name = assigned_to[\"name\"]\n else:\n assigned_to = \"none\"\n\n location = self.get_or_update_cache(\n location_id,\n \"cmn_location\",\n location_cache\n )\n\n if location:\n location_name = location[\"name\"]\n else:\n location_name = \"none\"\n\n inc_opened = int(incident_state) in self.open_incident_states\n\n # prepare individual incident metric\n if self.opt_fields:\n incident_tags = opt_tags + base_tags + [\n \"servicenow_assignment_group:\" + assignment_group_name,\n \"servicenow_priority:\" + incident[\"priority\"][\"value\"],\n \"incident_number:\" + incident[\"number\"][\"value\"],\n \"caller:\" + caller_name,\n \"description:\" + incident[\"short_description\"][\"value\"],\n \"opened:\" + incident[\"sys_created_on\"][\"display_value\"],\n \"assigned_to:\" + assigned_to_name,\n \"location:\" + location_name,\n \"state:\" + incident_state,\n \"open:\" + str(inc_opened)\n ]\n 
else:\n incident_tags = base_tags + [\n \"servicenow_assignment_group:\" + assignment_group_name,\n \"servicenow_priority:\" + incident[\"priority\"][\"value\"],\n \"incident_number:\" + incident[\"number\"][\"value\"],\n \"caller:\" + caller_name,\n \"description:\" + incident[\"short_description\"][\"value\"],\n \"opened:\" + incident[\"sys_created_on\"][\"display_value\"],\n \"assigned_to:\" + assigned_to_name,\n \"location:\" + location_name,\n \"state:\" + incident_state,\n \"open:\" + str(inc_opened)\n ]\n # closed incidents\n if int(incident_state) not in self.open_incident_states:\n if not incident[\"closed_at\"][\"display_value\"]:\n continue\n\n incident_close_time = incident[\"closed_at\"][\"display_value\"]\n incident_open_time = incident[\"sys_created_on\"][\"display_value\"]\n incident_open_time_fmt = datetime.strptime(\n incident_open_time,\n self.time_format\n ).replace(tzinfo=timezone.utc)\n incident_close_time_fmt = datetime.strptime(\n incident_close_time,\n self.time_format\n ).replace(tzinfo=timezone.utc)\n incident_time_to_close = (\n incident_close_time_fmt - incident_open_time_fmt\n ).total_seconds()\n\n # seconds --> hours\n incident_time_to_close = incident_time_to_close / 3600\n\n self.gauge(\n \"rapdev.servicenow.incident_resolution_time\",\n incident_time_to_close,\n tags=incident_tags,\n )\n\n self.gauge(\n \"rapdev.servicenow.incident\",\n time_delta_seconds,\n tags=incident_tags)\n\n except Exception as e:\n self.log.error(\"Unknown error occurred processing incident payload\"\n + incident_id\n + \"incident metrics will not be processed\"\n + repr(e)\n )\n continue\n\n\n # Print Totals\n for ag in incident_store:\n for priority in incident_store[ag]:\n for state in incident_store[ag][priority]:\n accumulation_tags = base_tags + [\n \"servicenow_assignment_group:\" + ag,\n \"servicenow_priority:\" + priority,\n \"servicenow_state:\" + state\n ]\n\n num_vip = incident_store[ag][priority][state][\"vip\"]\n num_total = incident_store[ag][priority][state][\"total\"]\n\n self.gauge(\n \"rapdev.servicenow.vip_count\", num_vip, accumulation_tags\n )\n\n self.gauge(\n \"rapdev.servicenow.incident_count\", num_total, tags=accumulation_tags\n )\n\n for target in incident_store[ag][priority][state][\"breached\"]:\n breach_tags = accumulation_tags\n breach_tags.append(\"sla_target:\" + target)\n num_breached = incident_store[ag][priority][state][\"breached\"][target][\n \"total\"\n ]\n try:\n self.gauge(\n \"rapdev.servicenow.incident_breached\",\n num_breached,\n accumulation_tags,\n )\n except Exception as e:\n self.log.error(\"unable to gauge breach \" + repr(e))\n\n def set_encoded_query(self):\n \"\"\"\n 1.) all -open- incidents\n 2.) 
all -closed- incidents -closed- or -resolved- in the last\n 30 days\n \"\"\"\n closed_encoded_query = Constants.QUERY_CLOSED_OR_RESOLVED_LAST_30\n ag_encoded_query = \"\"\n state_ec = \"stateIN\"\n open_states = self.open_incident_states\n\n for x in range (len(open_states)):\n state_ec = state_ec + str(open_states[x])\n if x != len(open_states) -1:\n state_ec = state_ec + \",\"\n\n if self.assignment_group_names:\n for ag in self.assignment_group_names:\n encoded_ag = quote(ag)\n if ag_encoded_query:\n ag_encoded_query = ag_encoded_query + \"%5EOR\" + \\\n \"assignment_group.name%3D\" + encoded_ag\n else:\n ag_encoded_query = \"assignment_group.name%3D\" + encoded_ag\n\n open_encoded_query = state_ec\n if ag_encoded_query:\n open_encoded_query = open_encoded_query + \"^\" + ag_encoded_query\n closed_encoded_query = closed_encoded_query + \"^\" + ag_encoded_query\n\n encoded_query = open_encoded_query + \"^NQ\" + closed_encoded_query\n \n\n return encoded_query\n\n def build_gr_cache(self, table, key, fields):\n cache = {}\n url = (\n self.table_api_url\n + \"/\"\n + table\n )\n\n if fields:\n url = url + \"?sysparm_fields=\" + fields\n\n resp = self.http.get(url,extra_headers=Constants.TABLE_API_HEADERS)\n\n self.gauge_http_response(resp)\n resp.raise_for_status()\n\n records = resp.json()[\"result\"]\n\n if not len(records):\n self.log.warn(\"ServicenowCheck.build_gr_cache() -->\"\n + \" no results returned for \" + table\n )\n return {}\n\n for record in records:\n record_key = record.pop(key)\n if hasattr(record_key, \"get\") and record_key.get(\"value\", False):\n record_key = record_key.get(\"value\")\n\n cache[record_key] = record\n\n self.log.debug(\"ServicenowCheck.build_gr_cache() --> Cache set for \"\n + table\n +\"\\n\"\n + str(cache)\n )\n return cache\n\n\n def sanatize_90p(self, value):\n split = value.split(\":\")\n return (\n int(split[0]) * 86400 * 1000\n + int(split[1]) * 60 * 1000\n + float(split[2]) * 1000\n )\n\n def get_inc_this_year(self):\n base_tags = Constants.REQUIRED_TAGS\n base_tags = base_tags + [\"instance_name:\" + self.instance_name]\n try:\n response = self.http.get(\n self.agg_api_url + Constants.YEAR_SLUG,\n headers=Constants.TABLE_API_HEADERS \n )\n\n response.raise_for_status()\n\n resp = response.json()\n\n result = resp.get(\"result\")\n \n except (HTTPError, JSONDecodeError) as e:\n self.log.error(\n \"failed to get incidents for current year\" + repr(e))\n raise Exception(e)\n\n if not result:\n return\n \n \n \n stats = result.get(\"stats\")\n\n if not stats:\n return\n \n count = stats.get(\"count\") \n\n if count:\n self.gauge(\n \"rapdev.servicenow.incident_total_year\",\n count, tags=base_tags)\n \n def now_request_single_record(self, table, id):\n \"\"\"\n request a single record from the /now/table api\n and return the response\n \"\"\"\n\n base_tags = Constants.REQUIRED_TAGS\n base_tags = base_tags + [\"instance_name:\" + self.instance_name]\n\n try:\n url = self.base_url + Constants.TABLE_API_BASE_PATH + \"/\" + table + \"/\" + id\n resp = self.http.get(url,extra_headers=Constants.TABLE_API_HEADERS)\n\n resp.raise_for_status()\n\n return resp\n\n except HTTPError as e:\n self.log.warning(\n \"Servicenowcheck.now_request_single_record() -->\" +\n \"HTTPError getting \" + table +\n \"from ServiceNow\" + repr(e) + \"\\n\" + id\n )\n self.service_check(\"{}.table_api_connection\".format(\n Constants.METRIC_PREFIX), self.CRITICAL, tags=base_tags)\n raise Exception(e)\n\n finally:\n self.gauge_http_response(resp)\n\n def 
get_or_update_cache(self, sys_id, table, cache):\n if sys_id not in cache:\n resp = self.now_request_single_record(table, sys_id)\n record = resp.json()[\"result\"]\n cache[sys_id] = record\n\n return cache[sys_id]\n\n def check_statsdo(self):\n base_tags = Constants.REQUIRED_TAGS\n base_tags = base_tags + [\"instance_name:\" + self.instance_name]\n try:\n response = self.http.get(\n self.stats_url, headers=Constants.STATSDO_HEADERS)\n if response.status_code != 200:\n raise HTTPError(\n {\"code\": response.status_code, \"response\": response}\n )\n\n except HTTPError as e:\n self.log.warning(\n \"Servicenowcheck.check_statsdo() -->\" +\n \"HTTP error getting /stats.do - check cannot continue\"\n )\n self.service_check(\n \"rapdev.servicenow.statsdo_connection\",\n self.CRITICAL,\n tags=base_tags\n )\n return\n except Exception as e:\n self.log.error(\"Unknown exception checking statsdo \" + repr(e) +\n \"check cannot continue\"\n )\n return\n\n statsdo = StatsDo(response.text)\n statsdo.set_properties_from_soup(statsdo.soup, [], \"body\", \"\", \"\")\n if statsdo.title != self.stats_title:\n self.service_check(\n \"rapdev.servicenow.statsdo_connection\", self.CRITICAL)\n return\n\n self.service_check(\"rapdev.servicenow.statsdo_connection\", self.OK)\n self.gauge(\n \"rapdev.servicenow.stats.trans_time\",\n # time between sending the request and parse of the headers\n response.elapsed.total_seconds(),\n tags=base_tags,\n )\n\n for metric in statsdo.metrics:\n try:\n float(metric[\"value\"])\n except ValueError:\n continue\n\n try:\n self.gauge(\n metric[\"name\"],\n metric[\"value\"],\n tags=metric[\"tags\"]\n )\n except Exception as e:\n self.log.warning(\n \"error gauging metric \"\n + metric[\"name\"]\n + \":\"\n + metric[\"value\"]\n + \"\\n\"\n + repr(e)\n )\n","repo_name":"gjanco/mezmo-dd-marketplace","sub_path":"rapdev_servicenow/datadog_checks/rapdev_servicenow/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":28703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42876667548","text":"\"\"\" docs tests \"\"\"\n\nfrom typing import Optional, TypedDict\n\n\nfrom jinja2 import Environment, PackageLoader, select_autoescape\nimport jinja2.exceptions\nfrom loguru import logger\n\n\nfrom github.Repository import Repository\nfrom ..repolinter import RepoLinter\n\n\nclass DefaultConfig(TypedDict):\n \"\"\"default config for the test module\"\"\"\n\n contributing_file: str\n\n\nCATEGORY = \"docs\"\nDEFAULT_CONFIG: DefaultConfig = {\"contributing_file\": \".github/CONTRIBUTING.md\"}\nLANGUAGES = [\"ALL\"]\n\n\ndef check_contributing_exists(repo: RepoLinter) -> None:\n \"\"\"checks that .github/CONTRIBUTING.md exists\"\"\"\n # don't need to run this if it's archived\n repo.skip_on_archived()\n\n filepath: str = repo.config[CATEGORY][\"contributing_file\"]\n filecontents = repo.cached_get_file(filepath)\n\n if filecontents is None:\n repo.error(CATEGORY, f\"Couldn't find {filepath}\")\n return None\n logger.debug(\"Found {}\", filepath)\n return None\n\n\ndef generate_contributing_file(repo: Repository) -> Optional[str]:\n \"\"\"generates the 'CONTRIBUTING.md' file\"\"\"\n\n # start up jinja2\n jinja2_env = Environment(\n loader=PackageLoader(package_name=\"github_linter\", package_path=\".\"),\n autoescape=select_autoescape(),\n )\n try:\n template = jinja2_env.get_template(f\"fixes/{CATEGORY}/CONTRIBUTING.md\")\n return template.render(repo=repo)\n\n except jinja2.exceptions.TemplateNotFound as template_error:\n 
logger.error(\"Failed to load template: {}\", template_error)\n return None\n\n\ndef fix_contributing_exists(repo: RepoLinter) -> None:\n \"\"\"creates a templated file\"\"\"\n\n filepath = repo.config[CATEGORY][\"contributing_file\"]\n new_filecontents = generate_contributing_file(repo.repository)\n if new_filecontents is None:\n repo.error(CATEGORY, f\"Failed to generate {filepath}\")\n return\n\n oldfile = repo.cached_get_file(filepath)\n\n if (\n oldfile is not None\n and oldfile.decoded_content.decode(\"utf-8\") == new_filecontents\n ):\n logger.debug(\"Don't need to update {}\", filepath)\n return\n\n commit_url = repo.create_or_update_file(\n filepath=filepath,\n newfile=new_filecontents,\n oldfile=oldfile,\n message=f\"github-linter docs module creating {filepath}\",\n )\n repo.fix(CATEGORY, f\"Created {filepath}, commit url: {commit_url}\")\n","repo_name":"yaleman/github_linter","sub_path":"github_linter/tests/docs.py","file_name":"docs.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"27754878271","text":"# 实现voxel滤波,并加载数据集中的文件进行验证\n\nimport open3d as o3d \nimport os\nimport math\nimport numpy as np\nfrom pyntcloud import PyntCloud\nfrom pandas import DataFrame\n\n# 功能:对点云进行voxel滤波\n# 输入:\n# point_cloud:输入点云\n# leaf_size: voxel尺寸\ndef voxel_filter(point_cloud, leaf_size):\n filtered_points = []\n point_cloud = np.array([point_cloud.x,point_cloud.y,point_cloud.z]).T\n min_boundary = np.min(point_cloud,axis=0)\n max_boundary = np.max(point_cloud,axis=0)\n Dx,Dy,Dz = np.ceil((max_boundary - min_boundary)/leaf_size)\n voxel_grid_nums = int(Dx*Dy*Dz)\n print(\"voxel_grid_nums\",voxel_grid_nums)\n assert voxel_grid_nums /dev/null\")\n\t\telse:\n\t\t\tos.system(\"make test-printf &> /dev/null\")\n\t\tsubprocess.call([\"mv\", \"a.out\", f])\n\n#Generate header of a fuzzing c file.\ndef generateCFile(useMusl):\n\tcFile = '#include \\n'\n\tcFile += '#include \\n'\n\tcFile += '#include \\n'\n\tif(useMusl):\n\t\tcFile += '#include \\n'\n\t\tcFile += '#include \"musl.h\"\\n'\n\t\tcFile += '#define LEN 10000\\n'\n\t\tcFile += 'char buf[LEN];\\n'\n\tcFile += 'int main(int argc, char* argv[]){\\n'\n\tcFile += fuzzer(useMusl)\n\tcFile += 'return 0;\\n}'\n\treturn cFile\n\n#Generate 5000 print statements.\n#Uses musl-snprintf when parameter is true.\ndef fuzzer(useMusl):\n\tfuzz = \"\"\n\tfor i in range(0, 5000):\n\t\tform = createFormat()\n\t\tstring = form.get('format')\n\t\tvalue = form.get('args')\n\t\tif(useMusl):\n\t\t\tfuzz += 'musl_snprintf(buf, LEN, \"'+string+'\\\\n\", '+value+');\\n'\n\t\t\tfuzz += 'printf(\"%s\", buf);\\n'\n\t\telse:\n\t\t\tfuzz += 'printf(\"'+string+'\\\\n\", '+value+');\\n'\t\t\t\t\n\treturn fuzz\n\t\n#Runs both compiled files, tests the outputs for equality.\ndef testFiles():\n\tmusl = subprocess.check_output(\"./musl-version\")\n\tgcc = subprocess.check_output(\"./gcc-version\")\n\tif(musl != gcc):\n\t\tprint(\"File output mismatch!\")\n\telse:\n\t\tprint(\"Files match.\")\n\n#Remove all copied or generated files from current directory.\t\ndef removeFiles():\n\tif(len(sys.argv) != 2 or sys.argv[1] != \"coverage\"):\n\t\tos.system('rm *.c')\n\t\tos.system('rm *.h')\n\tos.remove('musl-version')\n\tos.remove('gcc-version')\n\ndef main():\n\tcloneFiles()\n\tgenerateFiles()\n\ttestFiles()\n\tremoveFiles()\n\nif __name__ == 
'__main__':\n\tmain()\n","repo_name":"regehr/solid_code_class","sub_path":"fuzzer/uLDominator/fuzzer.py","file_name":"fuzzer.py","file_ext":"py","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"42269948807","text":"from __future__ import division, print_function\n\nimport click\nimport numpy as np\n\nimport data\nimport util\n\n\ndef compute_mean(files, batch_size=128):\n    \"\"\"Load images in files in batches and compute mean.\"\"\"\n    m = np.zeros(3)\n    for i in range(0, len(files), batch_size):\n        images = data.load_image(files[i : i + batch_size])\n        m += images.sum(axis=(0, 2, 3))\n    return (m / len(files)).astype(np.float32)\n\n\ndef std(files, batch_size=128):\n    s = np.zeros(3)\n    s2 = np.zeros(3)\n    shape = None\n    for i in range(0, len(files), batch_size):\n        print(\"done with {:>3} / {} images\".format(i, len(files)))\n        images = np.array(data.load_image(files[i : i + batch_size]),\n                          dtype=np.float64)\n        shape = images.shape\n        s += images.sum(axis=(0, 2, 3))\n        s2 += np.power(images, 2).sum(axis=(0, 2, 3))\n    n = len(files) * shape[2] * shape[3]\n    var = (s2 - s**2.0 / n) / (n - 1)\n\n    print('mean')\n    print((s / n).astype(np.float32))\n    print('std')\n    print(np.sqrt(var))\n    #return np.sqrt(var)\n\n\n@click.command()\n@click.option('--directory', default=None)\ndef main(directory):\n\n    filenames = data.get_image_files(directory)\n\n\n    bs = 1000\n    batches = [filenames[i * bs : (i + 1) * bs]\n               for i in range(int(len(filenames) / bs) + 1)]\n\n    # compute mean and std\n    std(filenames, bs)\n\n    Us, evs = [], []\n    for batch in batches:\n        images = np.array([data.load_augment(f, 128, 128) for f in batch])\n        X = images.transpose(0, 2, 3, 1).reshape(-1, 3)\n        cov = np.dot(X.T, X) / X.shape[0]\n        U, S, V = np.linalg.svd(cov)\n        ev = np.sqrt(S)\n        Us.append(U)\n        evs.append(ev)\n\n    print('U')\n    print(np.mean(Us, axis=0))\n    print('eigenvalues')\n    print(np.mean(evs, axis=0))\n\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"learningtitans/melanoma-transfer","sub_path":"src/utils/make_pca.py","file_name":"make_pca.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"8270204840","text":"import speech_recognition as sr\nimport sqlite3\n\ndef recog(recognizer, microphone):\n    with microphone as source:\n        recognizer.adjust_for_ambient_noise(source)\n        audio = recognizer.listen(source)\n    return audio\n\nif __name__ == '__main__':\n    recognizer = sr.Recognizer()\n    microphone = sr.Microphone()\n\n    audio = recog(recognizer, microphone)\n\n    # using google speech recognition\n    try:\n        staffName = recognizer.recognize_google(audio, language='en-US')\n        print(\"You said \" + staffName)\n    except sr.UnknownValueError:\n        print(\"Error\")\n    except sr.RequestError as e:\n        print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n\n    # Connect to database (creates if not exists)\n    conn = sqlite3.connect(\"sogeti.db\")\n\n    # Create a cursor object\n    cursor = conn.cursor()\n\n    # Look up the recognized name in the employees table\n    cursor.execute('SELECT * FROM employees WHERE name LIKE ?', (staffName,))\n    if len(list(cursor)) == 1:\n        print('Yes we do have this staff in our company')\n    else:\n        print('Sorry we don`t know him or her.')\n\n    conn.commit()\n    conn.close()\n","repo_name":"Richie-Shiqi-Shu/voicereg","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7540222163","text":"#maze solver, GR Oct 2017\r\n#finds the optimal route between two points\r\n#import canvas\r\nimport random\r\n\r\nnumy=16\t\r\nnumx=16\r\n#start near the top and end near the bottom at random\r\nstart=(1,1)\r\nend=(numy-2,1)\r\n\r\nwall = -2 #block type\r\npath = -1 #block type\r\nmaze = [[path for x in range(numx)] for y in range(numy)] #generate blank maze\r\nmaze[0]=[wall for x in maze[0]] #build horizontal outer walls\r\nmaze[numy-1]=[wall for x in maze[0]]\r\nfor x in range(numy): #build vertical outer walls\r\n\tmaze[x][0]=wall\r\n\tmaze[x][numx-1]=wall\r\n\t\r\nfor y in range(numy):\r\n\tfor x in range(numx):\r\n\t\tif y==numy/2 and x>numx/4 or y==numy/4 and x<3*numx/4 or y==3*numy/4 and x<3*numx/4:\r\n\t\t\tmaze[y][x]=wall # horizontal inner walls\r\n\t\tif random.random()<0.2: #random blocks\r\n\t\t\tmaze[y][x]=wall\r\n\r\n\r\n#works as follows: start at the start point (step 0), look at adjacent blocks that are not walls, call them step 1. add all the step 1 coordinates to a list. go to each of the step blocks in that step 1 list, look at adjacent cells that are not walls and where we have not been before, then call them step 2. add all their coordinates to a list called step 2. repeat until one of the new coordinates is the end block. Now walk back from the end taking the direction where the step number is lowest and not a wall. repeat until step=0 then you are at the start. for each of these steps, add the coordinate to a new solution list. reverse that list and that is the solution on how to walk from the start to the end. \r\ndef solvemaze(maze,start,end):\r\n\tstepcounter=0\r\n\tsteps=[]#holds the list of coordinates for each step number\r\n\tsucceed=False #when this becomes true, a solution has been found. \r\n\tfail=False #when this becomes true, no solution is possible. 
\n\t#avoid start and end being inside a wall\n\tmaze[start[0]][start[1]]=path\n\tmaze[end[0]][end[1]]=path\n\tsteps.append([start]) #start at the start\n\tmaze[start[0]][start[1]]=0\n\n\twhile not succeed and not fail:\n\t\tlistofcoordinates=[] # holds all coordinates of current step number\n\t\tfor step in steps[stepcounter]: #for each of those coordinates\n\t\t\tif maze[step[0]][step[1]-1]==path: #path is left of that block, so add to list of the next step number an update the maze array\n\t\t\t\tlistofcoordinates.append((step[0],step[1]-1))\n\t\t\t\tmaze[step[0]][step[1]-1]=stepcounter+1\n\t\t\tif maze[step[0]][step[1]+1]==path: #path right\n\t\t\t\tlistofcoordinates.append((step[0],step[1]+1))\n\t\t\t\tmaze[step[0]][step[1]+1]=stepcounter+1\n\t\t\tif maze[step[0]-1][step[1]]==path: #path up\n\t\t\t\tlistofcoordinates.append((step[0]-1,step[1]))\n\t\t\t\tmaze[step[0]-1][step[1]]=stepcounter+1\n\t\t\tif maze[step[0]+1][step[1]]==path: #path left\n\t\t\t\tlistofcoordinates.append((step[0]+1,step[1]))\n\t\t\t\tmaze[step[0]+1][step[1]]=stepcounter+1\n\t\tsteps.append(listofcoordinates)\t\n\t\tif end in listofcoordinates:\n\t\t\tsucceed=True\n\t\tif listofcoordinates == []:\n\t\t\tfail=True\t\n\t\tstepcounter+=1\n\n\t#capture solution into new list by walking back from the end to start\n\tsolution=[]\n\tif succeed:\n\t\tpos=[0 for i in range(2)] #pos holds the current position of the walker\n\t\tpos[0]=end[0] # start at the end \n\t\tpos[1]=end[1]\n\t\tsolution.append(pos[:])\n\t\tfor i in range(stepcounter):\n\t\t\t# check which direction has the lowest step count, i.e. is shortest path to start. \n\t\t\tup=maze[pos[0]-1][pos[1]]\n\t\t\tdown=maze[pos[0]+1][pos[1]]\n\t\t\tleft=maze[pos[0]][pos[1]-1]\n\t\t\tright=maze[pos[0]][pos[1]+1]\n\t\t\tdirection=0 #0 is up, 1 is right etc\n\t\t\tif (right<=up or up<0) and (right<=down or down<0) and (right<=left or left<0) and right>path:\n\t\t\t\tdirection=1\n\t\t\tif (down<=up or up<0) and (down<=right or right<0) and (down<=left or left<0)and down>path:\n\t\t\t\tdirection=2\n\t\t\tif (left<=up or up<0) and (left<=down or down<0) and (left<=right or right<0) and left>path:\n\t\t\t\tdirection=3\n\t\t\t#move the position in the chosen direction\n\t\t\tif direction==0:#up\n\t\t\t\tpos[0]=pos[0]-1\n\t\t\t\tpos[1]=pos[1]\n\t\t\tif direction==1:#right\n\t\t\t\tpos[0]=pos[0]\n\t\t\t\tpos[1]=pos[1]+1\n\t\t\tif direction==2:#down\n\t\t\t\tpos[0]=pos[0]+1\n\t\t\t\tpos[1]=pos[1]\n\t\t\tif direction==3:#left\n\t\t\t\tpos[0]=pos[0]\n\t\t\t\tpos[1]=pos[1]-1\n\t\t\tsolution.append(pos[:]) #add the new position to the solution list\n\t\tsolution= solution[::-1]#reverse the list so its start to end\n\t\treturn(solution)\n\nsolution = solvemaze(maze,start,end)\n\n\n#show solution\nfor x in range(numx):\n\tfor y in range(numy):\n\t\tif solution and [y,x] in solution:\n\t\t\tprint(f'\\x1b[38;2;{255};{0};{0}m# \\x1b[0m',end='') # print # char in RGB\n\t\telse:\n\t\t\tif maze[y][x] == wall:\n\t\t\t\tprint(f'\\x1b[38;2;{255};{255};{255}m# \\x1b[0m',end='') # print # char in RGB\n\t\t\telse:\n\t\t\t\tprint(f'\\x1b[38;2;{20};{20};{20}m# \\x1b[0m',end='') # print # char in RGB\n\tprint('') # new line\n \n\n\n","repo_name":"solipsia/LED-Jumper","sub_path":"solve_maze.py","file_name":"solve_maze.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23571242321","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 08 16:20:32 2017\r\n\r\n@author: 
Suganya\r\n\"\"\"\r\nfrom itertools import groupby\r\n\r\nf = open('C-small-1-attempt0.in')\r\nf1 = open('out.txt', 'wb')\r\nlines = f.read().split(\"\\n\")\r\nnum_lines = len(lines)\r\nk = int(lines[0])\r\nfor j in range(1,k+1):\r\n line = lines[j].split(\" \")\r\n no_of_baths = int(line[0])\r\n people = int(line[1])\r\n baths = [0] * (no_of_baths+2)\r\n baths[0] = 1\r\n baths[no_of_baths+1] = 1\r\n for i in range(0, people):\r\n s = ''.join(str(y) for y in baths)\r\n groups = groupby(s)\r\n result = [(int(label), sum(1 for _ in group)) for label, group in groups]\r\n index = 0\r\n max_count = 0\r\n max_index = 0\r\n l = 0\r\n max_len = 0\r\n for a,count in result:\r\n res_len = len(result)\r\n if count > max_count and a == 0:\r\n max_count = count\r\n max_index = index\r\n max_len = l\r\n index = index + count\r\n l = l+1\r\n if(max_count % 2 != 0):\r\n baths[max_index+(max_count/2)] = 1\r\n if(max_count % 2 == 0):\r\n baths[max_index+(max_count/2 - 1)] = 1\r\n \r\n if(max_count % 2 == 0):\r\n ls = max_count / 2\r\n rs = ls - 1\r\n else:\r\n ls = max_count / 2\r\n rs = ls\r\n output = 'Case #'+ str(j) + ': ' + str(ls) + ' ' + str(rs)\r\n f1.write(output)\r\n f1.write('\\n')\r\n print(output)\r\n \r\nf.close()\r\nf1.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2742.py","file_name":"2742.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19360168243","text":"'''\r\nGiven scores of N athletes, find their relative ranks and the \r\npeople with the top three highest scores, who will be awarded \r\nmedals: \"Gold Medal\", \"Silver Medal\" and \"Bronze Medal\".\r\n\r\nExample 1:\r\nInput: [5, 4, 3, 2, 1]\r\nOutput: [\"Gold Medal\", \"Silver Medal\", \"Bronze Medal\", \"4\", \"5\"]\r\nExplanation: The first three athletes got the top three highest \r\nscores, so they got \"Gold Medal\", \"Silver Medal\" and \r\n\"Bronze Medal\". 
\r\nFor the left two athletes, you just need to output their relative \r\nranks according to their scores.\r\n'''\r\n\r\ndef RelativeRanks(nums):\r\n nums_sorted = sorted(nums, reverse=True)\r\n\r\n output = []\r\n\r\n for i in range(len(nums)):\r\n temp_rank = nums_sorted.index(nums[i]) + 1\r\n if temp_rank == 1:\r\n temp_rank = 'Gold Medal'\r\n elif temp_rank == 2:\r\n temp_rank = 'Silver Medal'\r\n elif temp_rank == 3:\r\n temp_rank = 'Bronze Medal'\r\n output.append(str(temp_rank))\r\n return output\r\n\r\n\r\nprint (RelativeRanks([5, 4, 3, 2, 1]))\r\n ","repo_name":"newbieeashish/LeetCode_Algo","sub_path":"3rd_30_questions/RelativeRank.py","file_name":"RelativeRank.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37907304050","text":"import datetime\n\nimport pandas as pd\nfrom sqlalchemy.engine import Engine\n\nimport src.db.core as db\n\n\ndef has_twitter_db_joke(conn: Engine, hash_id: str) -> bool:\n df = db.execute_read(conn, \"select hash_id from validate_jokes where hash_id = '{}'\".format(hash_id))\n return not df.empty\n\n\ndef add_joke_to_twitter_table(conn: Engine, d_joke: dict) -> None:\n model = \"validate_jokes\"\n\n # d_joke already has the same parameters and column names as in the table\n d_joke[\"created_at\"] = datetime.datetime.now().isoformat()\n\n db.add_record(conn, model, d_joke)\n\n\ndef get_not_validated_joke() -> pd.DataFrame:\n conn = db.get_jokes_app_connection()\n return db.get_random_element(conn, \"validate_jokes\", where=\"is_joke is null\")\n\n\ndef update_joke_validation(joke_id: str, user_id: str, is_joke: bool) -> None:\n\n conn = db.get_jokes_app_connection()\n sql = \"\"\"\nupdate\n validate_jokes\nset\n is_joke = {is_joke},\n validated_by_user_id = '{validated_by_user}',\n updated_at='{updated_at}'\nwhere\n id = {joke_id}\n\"\"\".format(\n is_joke=is_joke, validated_by_user=user_id, updated_at=datetime.datetime.now().isoformat(), joke_id=joke_id\n )\n\n db.execute_update(conn, sql)\n","repo_name":"watxaut/joke-app","sub_path":"src/api/src/db/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31676465122","text":"import pickle\nimport re\nimport json\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\ndef load_model(filepath):\n \"\"\"\n\n :param filepath:\n :return:\n \"\"\"\n with open(filepath, 'rb') as file:\n model = pickle.load(file)\n return model\n\n\ndef load_parameters():\n \"\"\"\n\n :return:\n \"\"\"\n with open('article_search/ressources/nlpparams.json', 'r') as file:\n nlp_params = json.load(file)\n return nlp_params\n\n\ndef display_topics(model, feature_names, no_top_words):\n \"\"\"\n\n :param model:\n :param feature_names:\n :param no_top_words:\n \"\"\"\n for topic_idx, topic in enumerate(model.components_):\n print(\"Topic {}:\".format(topic_idx))\n print(\" \".join([feature_names[i] for i in topic.argsort()[:-no_top_words - 1:-1]]))\n\n\ndef query_preprocessing(query):\n \"\"\"\n\n :param query:\n :return:\n \"\"\"\n nlp_params = load_parameters()\n tokenizer = re.compile(nlp_params[\"tokenizer\"], re.M)\n matches = re.finditer(tokenizer, query)\n list_words = [match.group(0).replace(\")\", \"\").lower() for match in matches]\n sw = set(nlp_params[\"stop_words\"][\"en\"])\n return \" \".join([word for word in list_words if word not in sw])\n\n\ndef 
abstract_similarity(query_results):\n    tfidf_params = load_model('article_search/mlmodels/tfidf')\n    query_vec = tfidf_params[\"model\"].transform(query_results)\n    cos_d = cosine_similarity(query_vec, tfidf_params[\"X\"])\n    get_ids = [tfidf_params[\"y\"][i] for i in np.argsort(cos_d[0])[::-1]]\n    return get_ids\n\n\ndef find_best_topics(n_max=10):\n    lda_params = load_model('article_search/mlmodels/lda')\n    tf_idf = load_model('article_search/mlmodels/tfidf')\n    feature_names = tf_idf[\"model\"].get_feature_names()\n    topic_set = set()\n    for component in lda_params['model'].components_:\n        for i in component.argsort()[:-n_max-1:-1].tolist():\n            topic_set.add(feature_names[i])\n    return topic_set\n","repo_name":"sboomi/med-article-extractor","sub_path":"article-app/article_search/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11881949549","text":"import numpy as np\nimport multiprocessing as mp  # needed below: PoolProcess falls back to mp.cpu_count()\nfrom multiprocessing import Manager, Process, RawArray\nimport copy\nfrom functools import partial\nimport time\nimport pdb\nimport sys\n\n\nclass ForkedPdb(pdb.Pdb):\n    \"\"\"A Pdb subclass that may be used\n    from a forked multiprocessing child\n\n    \"\"\"\n    def interaction(self, *args, **kwargs):\n        _stdin = sys.stdin\n        try:\n            sys.stdin = open('/dev/stdin')\n            pdb.Pdb.interaction(self, *args, **kwargs)\n        finally:\n            sys.stdin = _stdin\n\n\nclass sharedNumpy(object):\n    \"\"\"A wrapper for shared numpy array\"\"\"\n    def __init__(self, A):\n        self.dtype = A.dtype\n        self.shape = A.shape\n        self.dtype = np.sctype2char(self.dtype)\n        self.arr = RawArray(self.dtype, A.size)\n        memoryview(self.arr)[:] = A.ravel()\n\n    def numpy(self): # Pytorch naming convention\n        return np.reshape(np.frombuffer(self.arr, dtype=self.dtype), self.shape)\n\n\nclass monteCarlo(object):\n    \"\"\"A class that supports monte-carlo style simulation\"\"\"\n    def __init__(self, f, *args, **kwargs):\n        self.fun = f\n        self.args = args\n        self.kwargs = kwargs\n        self.rst = None\n\n    def __call__(self, i=None, dct=None):\n        self.rst = self.fun(*self.args, **self.kwargs)\n        # ForkedPdb().set_trace()\n        if i is not None and dct is not None:\n            dct[i] = self.rst\n        else:\n            return self.rst\n\n\nclass mulProcess(object):\n    \"\"\"A class for multi processing\"\"\"\n    def __init__(self, fun, lsti, nProcess=None, *args, **kwargs):\n        if nProcess is not None:\n            assert len(lsti) == nProcess\n        else:\n            nProcess = len(lsti)\n        self.MCs = []\n        for i in range(nProcess):\n            argsin = [lsti[i]]\n            if args is not None:\n                argsin.extend(args)\n            self.MCs.append(monteCarlo(fun, *argsin, **kwargs))\n        manager = Manager()\n        self.fun = fun\n        self.nProcess = nProcess\n        self.listi = lsti\n        self.args = args\n        self.kwargs = kwargs\n        self.return_dict = manager.dict()\n        self.enable_pid = False\n\n    def enablePID(self):\n        \"\"\"Enable PID such that the id of the process will be passed automatically\"\"\"\n        self.enable_pid = True\n\n    def run(self, **kwargs):\n        \"\"\"Run the simulation in multiple processes.\n\n        :param kwargs: key-word arguments, user can specify wait time by wait=0.1\n        :return: a list of return values from each process\n        \"\"\"\n        allproc = [Process(target=mc, args=(i, self.return_dict)) for i, mc in enumerate(self.MCs)]\n        for proc in allproc:\n            proc.start()\n        if 'wait' in kwargs:\n            time.sleep(kwargs['wait'])\n        for proc in allproc:\n            proc.join()\n        results = []\n        for i in range(self.nProcess):\n            try:\n                toappend = self.return_dict[i]\n                results.append(toappend)\n            except:\n                print('Error
occurs at %d' % i)\n return results\n\n def debug(self):\n \"\"\"Run the simulation in debug mode.\n\n In debug mode, we call the function directly in current process so pdb can add breakpoints.\n \"\"\"\n self.fun(self.listi[0], *self.args, **self.kwargs)\n\n\nclass ProcessWrap(object):\n \"\"\"Define a process.\"\"\"\n def __init__(self, f, *args, **kwargs):\n \"\"\"Generally this function takes f, execute with a queue, and additional args and kwargs\"\"\"\n self.fun = f\n self.args = args\n self.kwargs = kwargs\n\n def __call__(self, q, i, dct):\n \"\"\"Call this function.\"\"\"\n dct[i] = self.fun(q, *self.args, **self.kwargs)\n\n\nclass PoolProcess(object):\n \"\"\"A pool-style multiprocessing library.\"\"\"\n def __init__(self, nProcess, fun, iterable, with_id, *args, **kwargs):\n \"\"\"Initialize the Pool by specifying process number, function to evaluate, iterable to use, and additional arguments.\"\"\"\n self.fun = fun\n if nProcess is None:\n nProcess = mp.cpu_count()\n self.nProcess = nProcess\n # create process wrapper\n self.proc = []\n for i in range(nProcess):\n if with_id:\n use_args = [i]\n use_args.extend(args)\n else:\n use_args = args\n self.proc.append(ProcessWrap(fun, *use_args, **kwargs))\n # create shared objects\n manager = Manager()\n self.return_dict = manager.dict()\n self.queue = manager.Queue(2 * nProcess)\n self.iterable = iterable\n self.with_id = with_id\n self.args = use_args\n self.kwargs = kwargs\n\n def run(self, **kwargs):\n \"\"\"Run the simulation in multiple processes.\n\n :param kwargs: key-word arguments, user can specify wait time by wait=0.1\n :return: a list of return values from each process\n \"\"\"\n allproc = [Process(target=proc, args=(self.queue, i, self.return_dict)) for i, proc in enumerate(self.proc)]\n for proc in allproc:\n proc.start()\n if 'wait' in kwargs:\n time.sleep(kwargs['wait'])\n # deal with queue here\n for stuff in self.iterable:\n self.queue.put(stuff) # blocks until q below its max size\n for _ in range(self.nProcess): # tell workers we're done\n self.queue.put(None)\n for proc in allproc:\n proc.join()\n results = []\n for i in range(self.nProcess):\n try:\n toappend = self.return_dict[i]\n results.append(toappend)\n except:\n print('Error occurs at %d' % i)\n return results\n\n def debug(self):\n \"\"\"Run the simulation in debug mode.\n\n In debug mode, we call the function directly in current process so pdb can add breakpoints.\n \"\"\"\n for i, stuff in enumerate(self.iterable):\n self.queue.put(stuff)\n if i >= self.nProcess - 1:\n break\n for i in range(self.nProcess):\n self.queue.put(None)\n self.fun(self.queue, *self.args, **self.kwargs)\n\n\ndef getTaskSplit(num, nProcess):\n \"\"\"Return a split of task.\n\n :param num: int, total number of task\n :param nProcess: int, number of process\n \"\"\"\n tmp = np.linspace(0, num, nProcess + 1, dtype=int)\n return [(tmp[i], tmp[i + 1]) for i in range(nProcess)]\n\n\ndef getSharedNumpy(*args):\n \"\"\"Return the shared numpy wrapper for many numpy arrays.\n\n :param args: ndarrays\n :return: the shared numpy wrapper for each numpy array\n \"\"\"\n if len(args) == 1:\n return sharedNumpy(args[0])\n return [sharedNumpy(arg) for arg in args]\n\n\ndef getNumpy(*args):\n \"\"\"Return the numpy instance from shared numpy.\n\n :param args: sharedNumpy object\n :return: the numpy array\n \"\"\"\n if len(args) == 1:\n return args[0].numpy()\n return [arg.numpy() for arg in 
args]\n","repo_name":"paperstiger/gaolib","sub_path":"gaolib/parallel/mulProcess.py","file_name":"mulProcess.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13809433948","text":"import csv, json, sys, glob, os\n \nfileOutput = 'output.csv'\n \nwriteIt = []\ncounter = 0\n \noutputFile = open(fileOutput, 'w')\n\nfor root, dirs, files in os.walk('lastfm_subset'):\n\tfor filename in files:\n\t\tfileInput = os.path.join(root, filename)\n\t\tinputFile = open(fileInput)\n\t\tdata = json.load(inputFile)\n\t\tinputFile.close()\n\t\t\n\t\tallValues = data.values()\n\t\tallKeys = data.keys()\n\t\t\n\t\toutput = csv.writer(outputFile)\n\t\t\n\t\tif(counter == 0):\n\t\t\toutput.writerow([allKeys[3]] + [allKeys[4]] + [allKeys[1]])\n\t\t\tcounter += 1\n\t\t\n\t\twriteIt = [allValues[3].encode('utf-8')] + [allValues[4]] + [allValues[1]]\n\t\t\n\t\toutput.writerow(writeIt)\n","repo_name":"eudicot/hottt_or_not","sub_path":"lastfm_data/JSONtoCSV.py","file_name":"JSONtoCSV.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3038719744","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nfrom datetime import datetime, timedelta\nfrom urllib.parse import urlparse\nimport json\nimport math\n\nfolder = sys.argv[1]\n\n\ndef get_domain(url):\n return urlparse(url).netloc\n\ndef soft_add(dictionary, key, value):\n if key in dictionary:\n dictionary[key] += value\n else:\n dictionary[key] = value\n\ndef get_computer_usages():\n usages = {\n \"Other\": 0,\n \"Firefox\": {},\n \"URxvt\": {}\n }\n for line in open(folder + \"/log\", \"r\"):\n elements = line.strip().split(\" \")\n if len(elements) == 1:\n usages[\"Other\"] += 1.0 / 6.0 \n elif len(elements) >= 2:\n if elements[1] == \"Firefox\":\n if len(elements) == 2:\n soft_add(usages[\"Firefox\"], \"Unknown\", 1.0 / 6.0)\n else: \n domain = get_domain(elements[2]) \n soft_add(usages[\"Firefox\"], domain, 1.0 / 6.0)\n elif elements[1] == \"URxvt\":\n if len(elements) == 2:\n soft_add(usages[\"URxvt\"], \"Unknown\", 1.0 / 6.0)\n else:\n working_directory = elements[2]\n soft_add(usages[\"URxvt\"], working_directory, 1.0 / 6.0)\n else:\n soft_add(usages, elements[1], 1.0 / 6.0)\n\n return usages\n\ndef get_usage_in_minutes(filename):\n return int(open(folder + \"/\" + filename, \"r\").read())\n\ndef get_sleeping_time():\n text_waking = open(folder + \"/waking\", \"r\").read().strip() + \"AM\"\n waking_time = datetime.strptime(text_waking, \"%I:%M%p\")\n\n text_sleeping = open(folder + \"/sleeping\", \"r\").read().strip() + \"PM\"\n sleeping_time = datetime.strptime(text_sleeping, \"%I:%M%p\")\n\n sleeping_minutes = ( (60 * 60 * 24) - (sleeping_time - waking_time).total_seconds()) / 60\n\n return sleeping_minutes\n\ndef get_depth(dictionary):\n max_depth = 0\n for key, value in dictionary.items():\n if type(value) is dict:\n max_depth = max(max_depth, get_depth(value)) \n return max_depth + 1\n\ndef sum_recursive_dictionary(dictionary):\n total = 0\n for key, value in dictionary.items():\n if type(value) is dict:\n total += sum_recursive_dictionary(value)\n else:\n total += value\n\n return total\n\ndef values_at_depth(dictionary, depth):\n result = {}\n if depth == 0:\n for key, value in dictionary.items():\n if type(value) is dict:\n result[key] = sum_recursive_dictionary(value)\n else:\n if value > 0:\n result[key] = 
value\n else:\n for key, value in dictionary.items():\n if type(value) is dict:\n result.update(values_at_depth(value, depth - 1))\n else:\n if value > 0:\n result[key] = value\n\n return result\n\n\ndef pi_chart_from_usages(usages):\n unaccounted = 60 * 24 - sum_recursive_dictionary(usages)\n \n usages[\"Unaccounted\"] = unaccounted\n depth = get_depth(usages)\n wedge_width = 1 / (depth + 1)\n\n fig, ax = plt.subplots()\n\n colors = []\n\n total_patches = []\n total_texts = []\n\n def on_mouse_move(event):\n for patch in total_patches:\n index = total_patches.index(patch)\n if patch.contains_point((event.x, event.y)):\n total_texts[index].set_visible(True)\n else:\n total_texts[index].set_visible(False)\n plt.draw()\n \n\n\n\n for level in range(depth):\n chart_dictionary = values_at_depth(usages, level)\n labels = list(chart_dictionary.keys())\n \n values = list(chart_dictionary.values())\n npvalues = list(chart_dictionary.values())\n levels_colors = []\n for index in range(len(values)):\n color = matplotlib.colors.hsv_to_rgb((index / len(values), 1 - level / (depth + 1), 1))\n levels_colors.append(color)\n\n patches, texts = ax.pie(npvalues, labels=labels, colors=np.array(levels_colors), radius=1-level*wedge_width, wedgeprops=dict(width=wedge_width), textprops=dict(visible=False))\n total_patches.extend(patches)\n total_texts.extend(texts)\n\n ax.set(aspect=\"equal\", title='Time allocations')\n fig.canvas.mpl_connect('motion_notify_event', on_mouse_move)\n plt.show()\n\n\nusages = {}\n\nusages.update({\n \"P\": {\n **get_computer_usages(),\n \"Teaching\": {\n \"Tutoring\": get_usage_in_minutes(\"tutoring\")\n },\n \"Meetings\": get_usage_in_minutes(\"meeting\")\n }})\nusages.update({\n \"PC\": {\n \"Reading\": {\n \"Audible\": get_usage_in_minutes(\"audible\"),\n \"Physical\": get_usage_in_minutes(\"reading\")\n },\n \"Bathroom\": {\n \"Shower\": get_usage_in_minutes(\"shower\"),\n \"Teeth\": get_usage_in_minutes(\"brushing_teeth\"),\n \"Shaving\": get_usage_in_minutes(\"shaving\")\n },\n \"Cleaning\": {\n \"Dishes\": get_usage_in_minutes(\"dishes\")\n },\n \"Exercise\": {\n \"Weightlifting\": get_usage_in_minutes(\"weightlifting\"),\n \"Running\": get_usage_in_minutes(\"running\"),\n \"Plank\": get_usage_in_minutes(\"plank\")\n },\n \"Eating\": {\n \"Breakfast\": get_usage_in_minutes(\"breakfast\"),\n \"Lunch\": get_usage_in_minutes(\"lunch\"),\n \"Dinner\": get_usage_in_minutes(\"dinner\")\n },\n \"Driving\": get_usage_in_minutes(\"driving\"),\n \"Music\": get_usage_in_minutes(\"music\"),\n \"Meditation\": get_usage_in_minutes(\"meditating\")\n },\n \"Sleeping\": get_sleeping_time()\n })\n\nprint(json.dumps(usages))\npi_chart_from_usages(usages)\n","repo_name":"Hazelfire/TimeTracker","sub_path":"genpichart.py","file_name":"genpichart.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14545912974","text":"# This is a sample Python script.\nimport connection\n\nhost = '1.1.X.1'\nport = 5000\n\nnew_connection = connection.Connection(host=host, port=port)\n\nmsg = \"abcdefghijkl\"\nnew_connection.add_message(msg=msg, save_img=False)\n\n# option 1: send bin matrix directly\nnew_connection.send_bin_matrix(selection=2)\n\n# option 2: image to bin matrix and send\nimg = new_connection.get_img_path() # ignore this if you have used the add_message function\nnew_connection.image_to_bin_matrix(img_path=img)\nnew_connection.send_bin_matrix(selection=2)\n\n# option 3: send image directly (under 
 construction)\nnew_connection.send_image(img_path=img, selection=1)","repo_name":"XueningD65/ECE532_PC","sub_path":"tcpip/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17617758533","text":"import os, sys\nif __name__ == '__main__':\n    framework = None\n    execfile(os.path.join(sys.path[0], 'framework.py'))\n\nfrom ZenModelBaseTest import ZenModelBaseTest\nfrom Products.ZenModel.IpInterface import manage_addIpInterface\nfrom Products.ZenModel.WinService import manage_addWinService\nfrom Products.ZenUtils.FakeRequest import FakeRequest\n\nfrom Products.Zuul.catalog.interfaces import IModelCatalogTool\n\nLOCATION = '/TestLoc/MyLocation'\nGROUP = '/TestGrp/MyGroup'\nSYSTEM = '/TestSys/MySystem'\nDEVCLASS = '/TestOrg/MyClass'\nOTHERDEVCLASS = '/TestOrg/MyOtherClass'\n\nMAC = '00:11:22:33:44:55'\nIPADDR = '10.1.2.3/24'\nNET = '10.1.2.0'\n\nclass TestPathIndexing(ZenModelBaseTest):\n\n    def afterSetUp(self):\n        super(TestPathIndexing, self).afterSetUp()\n        self.devcat = IModelCatalogTool(self.dmd.Devices).devices\n\n        self.devclass = self.dmd.Devices.createOrganizer(DEVCLASS)\n        self.loc = self.dmd.Locations.createOrganizer(LOCATION)\n        self.grp = self.dmd.Groups.createOrganizer(GROUP)\n        self.sys = self.dmd.Systems.createOrganizer(SYSTEM)\n\n        # Control\n        dummydev = self.dmd.Devices.createInstance('dummydev')\n\n        self.dev = self.devclass.createInstance('testdev')\n        self.dev.setLocation(LOCATION)\n        self.dev.setGroups((GROUP,))\n        self.dev.setSystems((SYSTEM,))\n\n    def testDeviceIndexOnCreation(self):\n        for org in (self.loc, self.grp, self.sys, self.devclass):\n            brains = list(self.devcat(paths='/'.join(org.getPrimaryPath())))\n            self.assertEqual(len(brains), 1)\n            self.assertEqual(brains[0].id, self.dev.id)\n            self.assertEqual(brains[0].getObject(), self.dev)\n\n    def testDeviceUnindexOnDeviceClassDelete(self):\n        \"\"\"\n        Test deviceSearch is updated when a device class is deleted. 
\n \"\"\"\n sourceOrg = self.dmd.Devices.createOrganizer('/Two/Three')\n dcmDevice = sourceOrg.createInstance('dcmDevice')\n self.dmd.Devices.Two._delObject('Three')\n brains = self.devcat()\n self.assertEqual(len(brains), 2) # dummydev and testdev\n\n def testDeviceUnindexOnRemoval(self):\n self.dev.setLocation('')\n self.dev.setGroups([])\n self.dev.setSystems([])\n\n for org in (self.loc, self.grp, self.sys):\n brains = self.devcat(paths='/'.join(org.getPrimaryPath()))\n self.assertEqual(len(brains), 0)\n\n self.dev.deleteDevice()\n brains = list(self.devcat(paths='/'.join(self.devclass.getPrimaryPath())))\n self.assertEqual(len(brains), 0)\n\n def testDeviceReindexOnMove(self):\n neworg = self.dmd.Devices.createOrganizer(DEVCLASS+'NEW')\n self.dmd.Devices.moveDevices(DEVCLASS+'NEW', self.dev.id)\n brains = list(self.devcat(paths='/'.join(neworg.getPrimaryPath())))\n self.assertEqual(len(brains), 1)\n self.assertEqual(brains[0].id, self.dev.id)\n self.assertEqual(brains[0].getObject(), self.dev)\n\n newloc = self.dmd.Locations.createOrganizer(LOCATION+'NEW')\n self.dev.setLocation(LOCATION+'NEW')\n brains = list(self.devcat(paths='/'.join(newloc.getPrimaryPath())))\n self.assertEqual(len(brains), 1)\n self.assertEqual(brains[0].id, self.dev.id)\n self.assertEqual(brains[0].getObject(), self.dev)\n\n newgrp = self.dmd.Groups.createOrganizer(GROUP+'NEW')\n self.dev.setGroups((GROUP+'NEW',))\n brains = list(self.devcat(paths='/'.join(newgrp.getPrimaryPath())))\n self.assertEqual(len(brains), 1)\n self.assertEqual(brains[0].id, self.dev.id)\n self.assertEqual(brains[0].getObject(), self.dev)\n\n newsys = self.dmd.Systems.createOrganizer(SYSTEM + 'NEW')\n self.dev.setSystems((SYSTEM+'NEW',))\n brains = list(self.devcat(paths='/'.join(newsys.getPrimaryPath())))\n self.assertEqual(len(brains), 1)\n self.assertEqual(brains[0].id, self.dev.id)\n self.assertEqual(brains[0].getObject(), self.dev)\n \n \n def testDeviceReindexOnDeviceClassMove(self):\n \"\"\"\n Test deviceSearch is updated when a device class is moved. 
\n \"\"\"\n sourceOrg = self.dmd.Devices.createOrganizer('/Two/Three')\n dcmDevice = sourceOrg.createInstance('dcmDevice')\n destOrg = self.dmd.Devices.createOrganizer('/One')\n self.dmd.Devices.moveOrganizer('/Devices/One', ['Two'])\n brains = list(self.devcat(paths='/'.join(destOrg.getPrimaryPath())))\n self.assertEqual(len(brains), 1)\n self.assertEqual(brains[0].id, dcmDevice.id)\n self.assertEqual(brains[0].getObject(), dcmDevice)\n\n\n def testNonExistentDeviceInCatalog(self):\n \"\"\"\n Verify that stale catalog entries won't result in tracebacks.\n \"\"\"\n from zExceptions import NotFound\n d = self.dmd.Devices.createInstance('catTestDevice')\n\n from zope.event import notify\n from Products.Zuul.catalog.events import IndexingEvent\n notify(IndexingEvent(d))\n\n device_count = len(self.dmd.Devices.getSubDevices())\n self.dmd.Devices.devices._objects.pop('catTestDevice')\n try:\n self.assertEqual(len(self.dmd.Devices.getSubDevices()),\n device_count - 1)\n except (NotFound, KeyError, AttributeError) as ex:\n self.assertEqual(ex, None)\n\n\nclass TestComponentIndexing(ZenModelBaseTest):\n\n def afterSetUp(self):\n super(TestComponentIndexing, self).afterSetUp()\n self.model_catalog = IModelCatalogTool(self.dmd.Devices)\n self.layer2cat = self.model_catalog.layer2\n self.layer3cat = self.model_catalog.layer3\n\n self.devclass = self.dmd.Devices.createOrganizer(DEVCLASS)\n self.devclass2 = self.dmd.Devices.createOrganizer(DEVCLASS)\n\n # Control\n dummydev = self.dmd.Devices.createInstance('dummydev')\n\n self.dev = self.devclass.createInstance('testdev')\n\n manage_addIpInterface(self.dev.os.interfaces, 'eth0', True)\n self.iface = self.dev.os.interfaces._getOb('eth0')\n self.iface._setPropValue('macaddress', MAC)\n self.iface.addIpAddress(IPADDR)\n self.ipaddress = self.iface.ipaddresses()[0]\n self.net = self.dmd.Networks.getNet(NET)\n \n manage_addWinService(self.dev.os.winservices,'wuauserv','test service')\n self.winService = self.dev.os.winservices._getOb('wuauserv')\n\n def getIdFromPath(self, path):\n \"\"\" return the last part of the path \"\"\"\n _id = None\n if path:\n splitted = path.split('/')\n _id = splitted[-1] if splitted else None\n return _id\n\n def _checkEverything(self):\n for searchcriterion in (dict(macaddress=MAC),\n dict(deviceId='/'.join(self.dev.getPrimaryPath())),\n dict(interfaceId=self.iface.getPrimaryId())):\n brains = list(self.layer2cat(query=searchcriterion))\n self.assertEqual(len(brains), 1)\n brain = brains[0]\n self.assertEqual(brain.deviceId, '/'.join(self.dev.getPrimaryPath()))\n self.assertEqual(brain.interfaceId, self.iface.getPrimaryId())\n self.assertEqual(brain.macaddress, MAC)\n self.assertEqual(brain.lanId, None)\n self.assertEqual(brain.getObject(), self.iface)\n\n for searchcriterion in (dict(deviceId=self.dev.id),\n dict(interfaceId=self.iface.id),\n dict(ipAddressId=self.ipaddress.getPrimaryId()),\n dict(networkId=self.net.getPrimaryId())\n ):\n brains = list(self.layer3cat(query=searchcriterion))\n self.assertEqual(len(brains), 1)\n brain = brains[0]\n self.assertEqual(self.getIdFromPath(brain.deviceId), self.dev.id)\n self.assertEqual(self.getIdFromPath(brain.interfaceId), self.iface.id)\n self.assertEqual(brain.ipAddressId, self.ipaddress.getPrimaryId())\n self.assertEqual(brain.networkId, self.net.getPrimaryId())\n self.assertEqual(brain.getObject(), self.ipaddress)\n\n def testComponentIndexOnCreation(self):\n \"\"\"\n Ensure that the layerN catalogs are updated when the device is created\n \"\"\"\n self._checkEverything()\n\n 
def testComponentUnindexOnDeviceDeletion(self):\n        \"\"\"\n        Ensure that the layerN catalogs are updated when the device is deleted\n        \"\"\"\n        self.dev.deleteDevice()\n\n        query = { \"deviceId\" : self.dev.getPrimaryId() }\n        brains = list(self.layer2cat(query=query))\n        self.assertEqual(len(brains), 0)\n\n        query = { \"deviceId\" : self.dev.id }\n        brains = list(self.layer3cat(query=query))\n        self.assertEqual(len(brains), 0)\n\n    def testComponentReindexOnDeviceMove(self):\n        \"\"\"\n        Ensure that the layerN catalogs are updated when the device is moved\n        \"\"\"\n        neworg = self.dmd.Devices.createOrganizer(DEVCLASS+'NEW')\n        self.dmd.Devices.moveDevices(DEVCLASS+'NEW', self.dev.id)\n        self._checkEverything()\n\n    def testLayer3LinkUnindexOnNetworkDelete(self):\n        \"\"\"\n        Ensure that the layer3 catalog is updated when a network is deleted\n        \"\"\"\n        # See that the link has been indexed properly\n        brains = list(self.layer3cat( query = {\"deviceId\": self.dev.id} ))\n        self.assertEqual(len(brains), 1)\n        brains = list(self.layer3cat( query = {\"networkId\": self.net.getPrimaryId()} ))\n        self.assertEqual(len(brains), 1)\n\n        # Delete the network\n        self.dmd.Networks._delObject(self.net.id)\n\n        # See that the link has been unindexed\n        brains = list(self.layer3cat( query = {\"deviceId\": self.dev.id} ))\n        self.assertEqual(len(brains), 0)\n        brains = list(self.layer3cat( query = {\"networkId\": self.net.getPrimaryId()} ))\n        self.assertEqual(len(brains), 0)\n    \n    def testWinServiceComponentReindexOnServiceClassZMonitorChange(self):\n        \"\"\"\n        Ensure the WinServices in the componentSearch catalog are re-indexed \n        when saveZenProperties is called on the Service Class and zMonitor is \n        changed\n        \"\"\"\n        svcClass = self.dmd.Services.WinService.serviceclasses._getOb(self.winService.id)\n        \n        winSvc = self.dev.getMonitoredComponents(type='WinService')\n        #by default monitor is off; should find nothing\n        self.assertFalse( winSvc )\n        monitored = svcClass.zMonitor\n        self.assertFalse( monitored )\n        \n        #fake request and turn zMonitor to true\n        request = FakeRequest()\n        request.form = {'zMonitor': True}\n        kwargs = {'REQUEST': request}\n        svcClass.saveZenProperties(**kwargs)\n        \n        #verify monitored flag changed and that component is now found\n        monitored = svcClass.zMonitor\n        self.assertTrue( monitored )\n        winSvc2 = self.dev.getMonitoredComponents(type='WinService')\n        self.assertTrue ( winSvc2 )\n\n        #test that changing zProperty directly does not affect catalog\n        svcClass.setZenProperty('zMonitor', False)\n        winSvc2 = self.dev.getMonitoredComponents(type='WinService')\n        #catalog will find component even though zMonitor is false\n        #because index was not updated\n        self.assertTrue ( winSvc2 )\n\n    def testWinServiceComponentReindexOnServiceOrganizerZMonitorChange(self):\n        \"\"\"\n        Ensure the WinServices in the componentSearch catalog are re-indexed \n        when saveZenProperties is called on the Service Organizer and zMonitor\n        is changed\n        \"\"\"\n\n        svcOrg = self.dmd.Services\n        winSvc = self.dev.getMonitoredComponents(type='WinService')\n        #by default monitor is off; should find nothing\n        self.assertFalse( winSvc )\n        monitored = svcOrg.zMonitor\n        self.assertFalse( monitored )\n        \n        #fake request and turn zMonitor to true\n        request = FakeRequest()\n        request.form = {'zMonitor': True}\n        kwargs = {'REQUEST': request}\n        svcOrg.saveZenProperties(**kwargs)\n        \n        #verify monitored flag changed and that component is now found\n        monitored = svcOrg.zMonitor\n        self.assertTrue( monitored )\n        winSvc2 = 
self.dev.getMonitoredComponents(type='WinService')\n self.assertTrue ( winSvc2 )\n\n #test that changing zProperty directly does not affect catalog\n svcOrg.setZenProperty('zMonitor', False)\n winSvc2 = self.dev.getMonitoredComponents(type='WinService')\n #catalog will find component even though zMonitor is false\n #because index was not updated\n self.assertTrue ( winSvc2 )\n \n def testComponentIndexOnDeviceClassMove(self):\n \"\"\"\n Test to make sure that the componentSearch catalog is updated when\n an entire device class path is moved.\n \"\"\"\n sourceOrg = self.dmd.Devices.createOrganizer('/Two/Three')\n dcmDevice = sourceOrg.createInstance('dcmDevice')\n dcmDevice.os.addFileSystem(\"/boot\", False)\n destOrg = self.dmd.Devices.createOrganizer('/One')\n self.dmd.Devices.moveOrganizer('/Devices/One', ['Two'])\n components = dcmDevice.getMonitoredComponents(type='FileSystem')\n self.assertEqual(components[0].device().id, 'dcmDevice')\n\n\n def testNonExistentComponentInCatalog(self):\n \"\"\"\n Verify that stale catalog entries won't result in tracebacks.\n \"\"\"\n from zExceptions import NotFound\n d = self.dmd.Devices.createInstance('catTestDevice')\n d.index_object()\n d.os.addIpInterface('catTestComponent', True)\n c = d.os.interfaces._getOb('catTestComponent')\n c.index_object()\n component_count = len(d.getDeviceComponents())\n d.os.interfaces._objects.pop('catTestComponent')\n try:\n self.assertEqual(len(d.getDeviceComponents()), component_count - 1)\n except (NotFound, KeyError, AttributeError) as ex:\n self.assertEqual(ex, None)\n\n\ndef test_suite():\n from unittest import TestSuite, makeSuite\n suite = TestSuite()\n suite.addTest(makeSuite(TestPathIndexing))\n suite.addTest(makeSuite(TestComponentIndexing))\n return suite\n\nif __name__==\"__main__\":\n framework()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/tests/testIndexing.py","file_name":"testIndexing.py","file_ext":"py","file_size_in_byte":14318,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"9392150043","text":"from discord.ext import commands\n\nclass general(commands.Cog):\n def __init__(self, Anna):\n self.Anna = Anna\n\n @commands.command()\n async def botsaid(self, ctx, *,message):\n await ctx.message.delete()\n await ctx.send(message)\n\n @commands.command()\n async def reply(self, ctx, reply_id, *,message):\n await ctx.message.delete()\n reply_message = await ctx.channel.fetch_message(reply_id)\n await reply_message.reply(message)\n \n @commands.has_permissions(manage_messages=True)\n @commands.command()\n async def cls(self, ctx, amount: int):\n await ctx.message.delete()\n if amount > 10:\n await ctx.send(\"訊息數量`>10`請分批執行\")\n else:\n await ctx.channel.purge(limit=amount)\n\n @commands.command()\n async def ping(self, ctx):\n await ctx.send(f\"延遲時間為: {(self.Anna.latency)*1000:.1f} ms\")\n\nasync def setup(Anna):\n await Anna.add_cog(general(Anna))\n","repo_name":"LTurret/AnnaMochizuki","sub_path":"cogs/command/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33995177873","text":"'''import argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--task')\nparser.add_argument('--arg')\nargs = parser.parse_args()\ntask = args.task\narg = args.arg'''\n\narg = input(\"Input: \")\ntask=\"vowels\"\ndef vowels(arg):\n arg = arg.lower()\n count = 0\n for i in range(0, len(arg)):\n 
if arg[i] in ['a', 'e', 'u', 'y', 'i', 'o']:\n count += 1\n\n return count\n\n\ndef perfect(arg):\n prfct = [0, 1]\n for i in range(2, 81):\n for j in range(2, 14):\n if (i ** j) < 6401:\n prfct.append(i ** j)\n sorted(prfct)\n fprf = []\n for i in sorted(prfct):\n if i not in fprf:\n fprf.append(i)\n arg=int(arg)\n return fprf[arg]\n\n\ndef lazy(arg):\n arg = int(arg)\n x = int(((arg ** 2) - arg + 2) / 2)\n return x\n\n\nif task == 'vowels':\n print(vowels(arg))\nelif task == 'lazy':\n print(lazy(arg))\nelif task == 'perfect':\n print(perfect(arg))\n","repo_name":"NadeemKabha/Vowel-counter","sub_path":"Vowel counter.py","file_name":"Vowel counter.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23928291404","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport math\n\nplt.style.use('seaborn')\nmpl.rcParams['font.family'] = 'serif'\n\n# \n\nimport scipy.stats as stats\n\n'''\nThe Weibull dis. in scipy.stats is Weibull_min.\nOne thing we shall pay attention to is that\nthe pdf of Weibull_min is slightly different from the\npdf we introduced of the pdf in the Wikipedia\nit only need to specify k, and lambda is 1\nto change the lambda(scale parameter) we shall define the scale\n'''\n\nWeibull_test = stats.weibull_min(c=1.5, scale=2)\n\n# (1) draw the graph of Weibull dis.\nx = np.linspace(Weibull_test.ppf(0.01),\n Weibull_test.ppf(0.99),100)\n\nfig, ax = plt.subplots(1, 1)\n\nax.plot(x, Weibull_test.pdf(x), 'r-')\n\n# (2) fit the Weibull dis.\n\n# get some sample data\nWeibull_test1 = stats.weibull_min(1.5)\nsample_weibull = Weibull_test1.rvs(500)\n\n# fit the parameter\nfitPar = stats.weibull_min.fit(sample_weibull)\n\n# there are three params. 
in fitPar\n# [0]-shape parameter;\n# [1]-loc parameter, \n# where the x begins, maybe draw a picture will help you understand);\n# [2]-scale parameter;\nfitPar\n\n# \n\n# (1) draw the curve\n\nfrom lifelines.datasets import load_waltons\nfrom lifelines import KaplanMeierFitter\n\n# get the sample data(provide by the lifelines.datasets library)\ndata_waltons = load_waltons()\n\nT = data_waltons[\"T\"]\nE = data_waltons[\"E\"]\ngroups = data_waltons[\"group\"]\n\nix = (groups == 'miR-137')\n\nkmf = KaplanMeierFitter()\n\nfig = plt.figure(figsize=(10, 8))\nax = fig.add_subplot(1, 1, 1)\n\n# The ~ operator is used to invert a general condition.\n# For example, here T[~ix] means data in T which are not belong to 'miR-137'\nkmf.fit(T[~ix], E[~ix], label='control')\nkmf.plot(ax=ax)\n\nkmf.fit(T[ix], E[ix], label='miR-137')\nkmf.plot(ax=ax)\n\n\n# (2) Compare two curves using logrank_test\n\nfrom lifelines.statistics import logrank_test\n\nresults = logrank_test(T[ix], T[~ix], event_observed_A=E[ix],\n event_observed_B=E[~ix])\nresults\n# if the p-value is less than 0.05, we shall reject the null hypothesis here\n# which is that the lifetime of the two groups are the same\n\n# \n'''\nyou can see the detail of this case in book 2\n'''\n\n# load the dataset and function we need\nfrom lifelines.datasets import load_rossi\nfrom lifelines import CoxPHFitter\n\nrossi_dataset = load_rossi()\ncph = CoxPHFitter()\n\n# fit the model\ncph_result = cph.fit(rossi_dataset,\n duration_col = 'week', event_col = 'arrest')\n\n# see the result\ncph_result.print_summary()\n\n# plot the CI of the parameters\nfig = plt.figure(figsize=(8, 8))\n\nax = fig.add_subplot(1, 1, 1)\n\ncph.plot(ax = ax)\n","repo_name":"alloylovemath/data_science_leanring","sub_path":"statistics/2_HT_ANOVA/5_survival.py","file_name":"5_survival.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15397456676","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################\n# 获取更多免费策略,请加入WeQuant比特币量化策略交流QQ群:519538535\n# 群主邮箱:lanjason@foxmail.com,群主微信/QQ:64008672\n# 沉迷量化,无法自拔\n###############################################################\n\nimport datetime\nimport logging\n\nfrom common.Errors import StartRunningTimeEmptyError\nfrom exchangeConnection.huobi import huobiService\nfrom exchangeConnection.huobi.util import *\nfrom exchangeConnection.okcoin.util import getOkcoinSpot\nfrom utils import helper\n\n\nclass BanZhuanStrategy(object):\n def __init__(self, startRunningTime, orderRatio, timeInterval, orderWaitingTime, dataLogFixedTimeWindow,\n coinMarketType,\n dailyExitTime=None):\n self.startRunningTime = startRunningTime\n self.orderRatio = orderRatio # 每次预计能吃到的盘口深度的百分比\n self.timeInterval = timeInterval # 每次循环结束之后睡眠的时间, 单位:秒\n self.orderWaitingTime = orderWaitingTime # 每次等待订单执行的最长时间\n self.dataLogFixedTimeWindow = dataLogFixedTimeWindow # in seconds\n self.coinMarketType = coinMarketType\n self.dailyExitTime = dailyExitTime\n self.TimeFormatForFileName = \"%Y%m%d%H%M%S%f\"\n self.TimeFormatForLog = \"%Y-%m-%d %H:%M:%S.%f\"\n self.okcoinService = getOkcoinSpot()\n self.huobiService = huobiService\n self.huobi_min_quantity = self.huobiService.getMinimumOrderQty(\n helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\"coin_type\"])\n self.huobi_min_cash_amount = self.huobiService.getMinimumOrderCashAmount()\n self.okcoin_min_quantity = self.okcoinService.getMinimumOrderQty(\n 
helper.coinTypeStructure[self.coinMarketType][\"okcoin\"][\"coin_type\"])\n # okcoin 的比特币最小市价买单下单金额是:0.01BTC*比特币当时市价\n # okcoin 的莱特币最小市价买单下单金额是:0.1LTC*莱特币当时市价\n self.last_data_log_time = None\n\n # setup timeLogger\n self.timeLogger = logging.getLogger('timeLog')\n self.timeLogger.setLevel(logging.DEBUG)\n self.timeLogHandler = logging.FileHandler(self.getTimeLogFileName())\n self.timeLogHandler.setLevel(logging.DEBUG)\n self.consoleLogHandler = logging.StreamHandler()\n self.consoleLogHandler.setLevel(logging.DEBUG)\n # 定义handler的输出格式\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n self.timeLogHandler.setFormatter(formatter)\n self.consoleLogHandler.setFormatter(formatter)\n # 给timeLogger添加handler\n self.timeLogger.addHandler(self.timeLogHandler)\n self.timeLogger.addHandler(self.consoleLogHandler)\n\n # setup dataLogger\n self.dataLogger = logging.getLogger('dataLog')\n self.dataLogger.setLevel(logging.DEBUG)\n self.dataLogHandler = logging.FileHandler(self.getDataLogFileName())\n self.dataLogHandler.setLevel(logging.DEBUG)\n self.dataLogger.addHandler(self.dataLogHandler)\n\n def getStartRunningTime(self):\n if self.startRunningTime == None:\n raise StartRunningTimeEmptyError(\"startRunningTime is not set yet!\")\n return self.startRunningTime\n\n def getTimeLogFileName(self):\n return \"log/%s_log_%s.txt\" % (\n self.__class__.__name__, self.getStartRunningTime().strftime(self.TimeFormatForFileName))\n\n def getDataLogFileName(self):\n return \"data/%s_data_%s.data\" % (\n self.__class__.__name__, self.getStartRunningTime().strftime(self.TimeFormatForFileName))\n\n def timeLog(self, content):\n self.timeLogger.info(content)\n\n def getAccuntInfo(self):\n huobiAcct = self.huobiService.getAccountInfo(helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\"market\"],\n ACCOUNT_INFO)\n huobi_cny_cash = float(huobiAcct[u'available_cny_display'])\n huobi_cny_btc = float(huobiAcct[u'available_btc_display'])\n huobi_cny_ltc = float(huobiAcct[u'available_ltc_display'])\n huobi_cny_cash_loan = float(huobiAcct[u'loan_cny_display'])\n huobi_cny_btc_loan = float(huobiAcct[u'loan_btc_display'])\n huobi_cny_ltc_loan = float(huobiAcct[u'loan_ltc_display'])\n huobi_cny_cash_frozen = float(huobiAcct[u'frozen_cny_display'])\n huobi_cny_btc_frozen = float(huobiAcct[u'frozen_btc_display'])\n huobi_cny_ltc_frozen = float(huobiAcct[u'frozen_ltc_display'])\n huobi_cny_total = float(huobiAcct[u'total'])\n huobi_cny_net = float(huobiAcct[u'net_asset'])\n\n okcoinAcct = self.okcoinService.userinfo()\n okcoin_cny_cash = float(okcoinAcct[\"info\"][\"funds\"][\"free\"][\"cny\"])\n okcoin_cny_btc = float(okcoinAcct[\"info\"][\"funds\"][\"free\"][\"btc\"])\n okcoin_cny_ltc = float(okcoinAcct[\"info\"][\"funds\"][\"free\"][\"ltc\"])\n okcoin_cny_cash_frozen = float(okcoinAcct[\"info\"][\"funds\"][\"freezed\"][\"cny\"])\n okcoin_cny_btc_frozen = float(okcoinAcct[\"info\"][\"funds\"][\"freezed\"][\"btc\"])\n okcoin_cny_ltc_frozen = float(okcoinAcct[\"info\"][\"funds\"][\"freezed\"][\"ltc\"])\n okcoin_cny_total = float(okcoinAcct[\"info\"][\"funds\"][\"asset\"][\"total\"])\n okcoin_cny_net = float(okcoinAcct[\"info\"][\"funds\"][\"asset\"][\"net\"])\n total_net = huobi_cny_net + okcoin_cny_net\n return {\n \"huobi_cny_cash\": huobi_cny_cash,\n \"huobi_cny_btc\": huobi_cny_btc,\n \"huobi_cny_ltc\": huobi_cny_ltc,\n \"huobi_cny_cash_loan\": huobi_cny_cash_loan,\n \"huobi_cny_btc_loan\": huobi_cny_btc_loan,\n \"huobi_cny_ltc_loan\": huobi_cny_ltc_loan,\n \"huobi_cny_cash_frozen\": 
 huobi_cny_cash_frozen,\n            \"huobi_cny_btc_frozen\": huobi_cny_btc_frozen,\n            \"huobi_cny_ltc_frozen\": huobi_cny_ltc_frozen,\n            \"huobi_cny_total\": huobi_cny_total,\n            \"huobi_cny_net\": huobi_cny_net,\n\n            \"okcoin_cny_cash\": okcoin_cny_cash,\n            \"okcoin_cny_btc\": okcoin_cny_btc,\n            \"okcoin_cny_ltc\": okcoin_cny_ltc,\n            \"okcoin_cny_cash_frozen\": okcoin_cny_cash_frozen,\n            \"okcoin_cny_btc_frozen\": okcoin_cny_btc_frozen,\n            \"okcoin_cny_ltc_frozen\": okcoin_cny_ltc_frozen,\n            \"okcoin_cny_total\": okcoin_cny_total,\n            \"okcoin_cny_net\": okcoin_cny_net,\n\n            \"total_net\": total_net,\n        }\n\n    def dataLog(self, content=None):\n        if content is None:\n            accountInfo = self.getAccuntInfo()\n            t = datetime.datetime.now()\n            content = \"%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s\" % \\\n                      (t.strftime(self.TimeFormatForLog),\n                       accountInfo[\"huobi_cny_cash\"],\n                       accountInfo[\"huobi_cny_btc\"],\n                       accountInfo[\"huobi_cny_ltc\"],\n                       accountInfo[\"huobi_cny_cash_loan\"],\n                       accountInfo[\"huobi_cny_btc_loan\"],\n                       accountInfo[\"huobi_cny_ltc_loan\"],\n                       accountInfo[\"huobi_cny_cash_frozen\"],\n                       accountInfo[\"huobi_cny_btc_frozen\"],\n                       accountInfo[\"huobi_cny_ltc_frozen\"],\n                       accountInfo[\"huobi_cny_total\"],\n                       accountInfo[\"huobi_cny_net\"],\n                       accountInfo[\"okcoin_cny_cash\"],\n                       accountInfo[\"okcoin_cny_btc\"],\n                       accountInfo[\"okcoin_cny_ltc\"],\n                       accountInfo[\"okcoin_cny_cash_frozen\"],\n                       accountInfo[\"okcoin_cny_btc_frozen\"],\n                       accountInfo[\"okcoin_cny_ltc_frozen\"],\n                       accountInfo[\"okcoin_cny_total\"],\n                       accountInfo[\"okcoin_cny_net\"],\n                       accountInfo[\"total_net\"])\n            self.last_data_log_time = t\n        self.dataLogger.info(\"%s\" % str(content))\n\n    def sell(self, security, quantity, exchange=\"huobi\"): # quantity is a string value\n        if exchange == \"huobi\":\n            self.timeLog(\"开始下达火币市价卖单...\")\n            self.timeLog(\"只保留下单数量的小数点后4位...\")\n            self.timeLog(\"原始下单数量:%s\" % quantity)\n            tmp = float(quantity)\n            tmp = helper.downRound(tmp, 4)\n            quantity = str(tmp)\n            self.timeLog(\"做完小数点处理后的下单数量:%s\" % quantity)\n            if float(quantity) < self.huobi_min_quantity:\n                self.timeLog(\n                    \"数量:%s 小于交易所最小交易数量(火币最小数量:%f),因此无法下单,此处忽略该信号\" % (quantity, self.huobi_min_quantity))\n                return None\n\n            coin_type = helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\"coin_type\"]\n            res = self.huobiService.sellMarket(coin_type, quantity, None, None,\n                                               helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\"market\"],\n                                               SELL_MARKET)\n            if u\"result\" not in res or res[u\"result\"] != u'success':\n                self.timeLog(\"下达火币市价卖单(数量:%s)失败!\" % quantity)\n                return None\n            order_id = res[u\"id\"]\n            # 查询订单执行情况\n            order_info = self.huobiService.getOrderInfo(coin_type, order_id,\n                                                        helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\n                                                            \"market\"], ORDER_INFO)\n            self.timeLog(\"下达如下火币市价卖单,数量:%s\" % quantity)\n            self.timeLog(str(order_info))\n            if order_info[\"status\"] != 2:\n                self.timeLog(\"等待%f秒直至订单完成\" % self.orderWaitingTime)\n                time.sleep(self.orderWaitingTime)\n                order_info = self.huobiService.getOrderInfo(coin_type, order_id,\n                                                            helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\n                                                                \"market\"], ORDER_INFO)\n            self.timeLog(str(order_info))\n            executed_qty = float(order_info[\"processed_amount\"])\n            self.timeLog(\n                \"火币市价卖单已被执行,执行数量:%f,收到的现金:%.2f\" % (executed_qty, executed_qty * float(order_info[\"processed_price\"])))\n            self.dataLog()\n            return executed_qty\n        elif exchange == \"okcoin\":\n            self.timeLog(\"开始下达okcoin市价卖单...\")\n            self.timeLog(\"只保留下单数量的小数点后2位...\")\n            self.timeLog(\"原始下单数量:%s\" % quantity)\n            tmp = float(quantity)\n            tmp = 
 helper.downRound(tmp, 2)\n            quantity = str(tmp)\n            self.timeLog(\"做完小数点处理后的下单数量:%s\" % quantity)\n            if float(quantity) < self.okcoin_min_quantity:\n                self.timeLog(\n                    \"数量:%s 小于交易所最小交易数量(okcoin最小数量:%f),因此无法下单,此处忽略该信号\" % (quantity, self.okcoin_min_quantity))\n                return None\n            res = self.okcoinService.trade(helper.coinTypeStructure[self.coinMarketType][\"okcoin\"][\"coin_type\"],\n                                           \"sell_market\", amount=quantity)\n            if \"result\" not in res or res[\"result\"] != True:\n                self.timeLog(\"下达okcoin市价卖单(数量:%s)失败\" % quantity)\n                return None\n            order_id = res[\"order_id\"]\n            # 查询订单执行情况\n            order_info = self.okcoinService.orderinfo(\n                helper.coinTypeStructure[self.coinMarketType][\"okcoin\"][\"coin_type\"], str(order_id))\n            self.timeLog(\"下达如下okcoin市价卖单,数量:%s\" % quantity)\n            self.timeLog(str(order_info))\n            if order_info[\"orders\"][0][\"status\"] != 2:\n                self.timeLog(\"等待%.1f秒直至订单完成\" % self.orderWaitingTime)\n                time.sleep(self.orderWaitingTime)\n                order_info = self.okcoinService.orderinfo(\n                    helper.coinTypeStructure[self.coinMarketType][\"okcoin\"][\"coin_type\"], str(order_id))\n                self.timeLog(str(order_info))\n            executed_qty = order_info[\"orders\"][0][\"deal_amount\"]\n            self.timeLog(\"okcoin市价卖单已被执行,执行数量:%f,收到的现金:%.2f\" % (\n                executed_qty, executed_qty * order_info[\"orders\"][0][\"avg_price\"]))\n            self.dataLog()\n            return executed_qty\n\n    def buy(self, security, cash_amount, exchange=\"huobi\", sell_1_price=None): # cash_amount is a string value\n        if exchange == \"huobi\":\n            self.timeLog(\"开始下达火币市价买单...\")\n            self.timeLog(\"只保留下单数量的小数点后2位...\")\n            self.timeLog(\"原始下单金额:%s\" % cash_amount)\n            tmp = float(cash_amount)\n            tmp = helper.downRound(tmp, 2)\n            cash_amount = str(tmp)\n            self.timeLog(\"做完小数点处理后的下单金额:%s\" % cash_amount)\n\n            if float(cash_amount) < self.huobi_min_cash_amount:\n                self.timeLog(\"金额:%s 小于交易所最小交易金额(火币最小金额:%.2f元),因此无法下单,此处忽略该信号\" % (cash_amount, self.huobi_min_cash_amount))\n                return None\n\n            coin_type = helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\"coin_type\"]\n            res = self.huobiService.buyMarket(coin_type, cash_amount, None, None,\n                                              helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\"market\"],\n                                              BUY_MARKET)\n            if u\"result\" not in res or res[u\"result\"] != u'success':\n                self.timeLog(\"下达火币市价买单(金额:%s)失败!\" % cash_amount)\n                return None\n            order_id = res[u\"id\"]\n            # 查询订单执行情况\n            order_info = self.huobiService.getOrderInfo(coin_type, order_id,\n                                                        helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\n                                                            \"market\"], ORDER_INFO)\n            self.timeLog(\"下达如下火币市价买单,金额:%s\" % cash_amount)\n            self.timeLog(str(order_info))\n            if order_info[\"status\"] != 2:\n                self.timeLog(\"等待%f秒直至订单完成\" % self.orderWaitingTime)\n                time.sleep(self.orderWaitingTime)\n                order_info = self.huobiService.getOrderInfo(coin_type, order_id,\n                                                            helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\n                                                                \"market\"], ORDER_INFO)\n                self.timeLog(str(order_info))\n            executed_qty = float(order_info[\"processed_amount\"]) / float(order_info[\"processed_price\"])\n            self.timeLog(\"火币市价买单已被执行,执行数量:%f,花费的现金:%.2f\" % (executed_qty, float(order_info[\"processed_amount\"])))\n            self.dataLog()\n            return executed_qty\n        elif exchange == \"okcoin\":\n            if sell_1_price is None:\n                raise ValueError(\"处理okcoin市价买单之前,需要提供当前Okcoin卖一的价格信息,请检查传入的sell_1_price参数是否完备!\")\n            self.timeLog(\"开始下达okcoin市价买单...\")\n            self.timeLog(\"只保留下单数量的小数点后2位...\")\n            self.timeLog(\"原始下单金额:%s\" % cash_amount)\n            tmp = float(cash_amount)\n            tmp = helper.downRound(tmp, 2)\n            cash_amount = str(tmp)\n            self.timeLog(\"做完小数点处理后的下单金额:%s\" % cash_amount)\n\n            if float(cash_amount) < 
self.okcoin_min_quantity * sell_1_price:\n self.timeLog(\n \"金额:%s 不足以购买交易所最小交易数量(okcoin最小数量:%f,当前卖一价格%.2f,最小金额要求:%.2f),因此无法下单,此处忽略该信号\" % (\n cash_amount, self.okcoin_min_quantity, sell_1_price, self.okcoin_min_quantity * sell_1_price))\n return None\n res = self.okcoinService.trade(helper.coinTypeStructure[self.coinMarketType][\"okcoin\"][\"coin_type\"],\n \"buy_market\", price=cash_amount)\n\n if \"result\" not in res or res[\"result\"] != True:\n self.timeLog(\"下达okcoin市价买单(金额:%s)失败\" % cash_amount)\n return None\n order_id = res[\"order_id\"]\n # 查询订单执行情况\n order_info = self.okcoinService.orderinfo(\n helper.coinTypeStructure[self.coinMarketType][\"okcoin\"][\"coin_type\"], str(order_id))\n self.timeLog(\"下达如下okcoin市价买单,金额:%s\" % cash_amount)\n self.timeLog(str(order_info))\n if order_info[\"orders\"][0][\"status\"] != 2:\n self.timeLog(\"等待%.1f秒直至订单完成\" % self.orderWaitingTime)\n time.sleep(self.orderWaitingTime)\n order_info = self.okcoinService.orderinfo(\n helper.coinTypeStructure[self.coinMarketType][\"okcoin\"][\"coin_type\"], str(order_id))\n self.timeLog(str(order_info))\n executed_qty = order_info[\"orders\"][0][\"deal_amount\"]\n self.timeLog(\"okcoin市价买单已被执行,执行数量:%f,花费的现金:%.2f\" % (\n executed_qty, executed_qty * order_info[\"orders\"][0][\"avg_price\"]))\n self.dataLog()\n return executed_qty\n\n def go(self):\n self.timeLog(\"日志启动于 %s\" % self.getStartRunningTime().strftime(self.TimeFormatForLog))\n self.dataLog(\n content=\"time|huobi_cny_cash|huobi_cny_btc|huobi_cny_ltc|huobi_cny_cash_loan|huobi_cny_btc_loan|huobi_cny_ltc_loan|huobi_cny_cash_frozen|huobi_cny_btc_frozen|huobi_cny_ltc_frozen|huobi_cny_total|huobi_cny_net|okcoin_cny_cash|okcoin_cny_btc|okcoin_cny_ltc|okcoin_cny_cash_frozen|okcoin_cny_btc_frozen|okcoin_cny_ltc_frozen|okcoin_cny_total|okcoin_cny_net|total_net\")\n self.dataLog()\n\n while (True):\n # check whether current time is after the dailyExitTime, if yes, exit\n if self.dailyExitTime is not None and datetime.datetime.now() > datetime.datetime.strptime(\n datetime.date.today().strftime(\"%Y-%m-%d\") + \" \" + self.dailyExitTime,\n \"%Y-%m-%d %H:%M:%S\"):\n self.timeLog(\"抵达每日终结时间:%s, 现在退出.\" % self.dailyExitTime)\n break\n\n self.timeLog(\"等待 %d 秒进入下一个循环...\" % self.timeInterval)\n time.sleep(self.timeInterval)\n\n # calculate the net asset at a fixed time window\n time_diff = datetime.datetime.now() - self.last_data_log_time\n if time_diff.seconds > self.dataLogFixedTimeWindow:\n self.dataLog()\n\n # 获取当前账户信息\n accountInfo = self.getAccuntInfo()\n\n # 查询huobi第一档深度数据\n huobiDepth = self.huobiService.getDepth(helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\"coin_type\"],\n helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\"market\"],\n depth_size=1)\n # 查询okcoin第一档深度数据\n okcoinDepth = self.okcoinService.depth(helper.coinTypeStructure[self.coinMarketType][\"okcoin\"][\"coin_type\"])\n\n huobi_sell_1_price = huobiDepth[\"asks\"][0][0]\n huobi_sell_1_qty = huobiDepth[\"asks\"][0][1]\n huobi_buy_1_price = huobiDepth[\"bids\"][0][0]\n huobi_buy_1_qty = huobiDepth[\"bids\"][0][1]\n\n okcoin_sell_1_price = okcoinDepth[\"asks\"][0][0]\n okcoin_sell_1_qty = okcoinDepth[\"asks\"][0][1]\n okcoin_buy_1_price = okcoinDepth[\"bids\"][0][0]\n okcoin_buy_1_qty = okcoinDepth[\"bids\"][0][1]\n\n if huobi_buy_1_price > okcoin_sell_1_price: # 获利信号:OKcoin买,huobi卖\n self.timeLog(\"发现信号\")\n self.timeLog(\"火币深度:%s\" % str(huobiDepth))\n self.timeLog(\"okcoin深度:%s\" % str(okcoinDepth))\n\n # 每次只能吃掉一定ratio的深度\n Qty = helper.downRound(min(huobi_buy_1_qty, 
 okcoin_sell_1_qty) * self.orderRatio, 4)\n                # 每次搬砖前要检查是否有足够security和cash\n                Qty = min(Qty, accountInfo[helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\"coin_str\"]],\n                          helper.downRound(accountInfo[helper.coinTypeStructure[self.coinMarketType][\"okcoin\"][\n                              \"market_str\"]] / okcoin_sell_1_price, 4))\n                Qty = helper.downRound(Qty, 4)\n                Qty = helper.getRoundedQuantity(Qty, self.coinMarketType)\n\n                if Qty < self.huobi_min_quantity or Qty < self.okcoin_min_quantity:\n                    self.timeLog(\n                        \"数量:%f 小于交易所最小交易数量(火币最小数量:%f, okcoin最小数量:%f),因此无法下单并忽略该信号\" % (\n                            Qty, self.huobi_min_quantity, self.okcoin_min_quantity))\n                    continue\n                else:\n                    # step1: 先处理卖\n                    executed_qty = self.sell(self.coinMarketType, str(Qty), exchange=\"huobi\")\n                    if executed_qty is not None:\n                        # step2: 再执行买\n                        Qty2 = min(executed_qty, Qty)\n                        Qty2 = max(helper.getRoundedQuantity(Qty2, self.coinMarketType), self.okcoin_min_quantity)\n\n                        if Qty2 < self.okcoin_min_quantity * 1.05:\n                            self.buy(self.coinMarketType, str(Qty2 * okcoin_sell_1_price * 1.05), exchange=\"okcoin\",\n                                     sell_1_price=okcoin_sell_1_price)\n                        else:\n                            self.buy(self.coinMarketType, str(Qty2 * okcoin_sell_1_price), exchange=\"okcoin\",\n                                     sell_1_price=okcoin_sell_1_price)\n\n            elif okcoin_buy_1_price > huobi_sell_1_price: # 获利信号:OKcoin卖,huobi买\n                self.timeLog(\"发现信号\")\n                self.timeLog(\"火币深度:%s\" % str(huobiDepth))\n                self.timeLog(\"okcoin深度:%s\" % str(okcoinDepth))\n\n                # 每次只能吃掉一定ratio的深度\n                Qty = helper.downRound(min(huobi_sell_1_qty, okcoin_buy_1_qty) * self.orderRatio, 4)\n                # 每次搬砖前要检查是否有足够security和cash\n                Qty = min(Qty, accountInfo[helper.coinTypeStructure[self.coinMarketType][\"okcoin\"][\"coin_str\"]],\n                          helper.downRound(accountInfo[helper.coinTypeStructure[self.coinMarketType][\"huobi\"][\n                              \"market_str\"]] / huobi_sell_1_price, 4))\n                Qty = helper.getRoundedQuantity(Qty, self.coinMarketType)\n\n                if Qty < self.huobi_min_quantity or Qty < self.okcoin_min_quantity:\n                    self.timeLog(\"数量:%f 小于交易所最小交易数量(火币最小数量:%f, okcoin最小数量:%f),因此无法下单并忽略该信号\" % (\n                        Qty, self.huobi_min_quantity, self.okcoin_min_quantity))\n                    continue\n                else:\n                    # step1: 先处理卖\n                    executed_qty = self.sell(self.coinMarketType, str(Qty), exchange=\"okcoin\")\n                    if executed_qty is not None:\n                        # step2: 再执行买\n                        Qty2 = min(executed_qty, Qty)\n                        self.buy(self.coinMarketType, str(Qty2 * huobi_sell_1_price), exchange=\"huobi\")\n","repo_name":"wequant-org/liveStrategyEngine","sub_path":"banZhuan/banZhuanStrategy.py","file_name":"banZhuanStrategy.py","file_ext":"py","file_size_in_byte":24365,"program_lang":"python","lang":"en","doc_type":"code","stars":612,"dataset":"github-code","pt":"61"} +{"seq_id":"15601756800","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 14 21:44:34 2020\r\n\r\n@author: abanbur\r\n\"\"\"\r\n\r\n\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom model import network\r\nimport cv2\r\nimport torch\r\nfrom torch.autograd import Variable\r\n\r\ncmap_Nyud = np.load('Data/cmap_nyud.npy')\r\ncmap_Kitti = np.load('Data/cmap_kitti.npy')\r\ndepth_Coeff_Nyud = 5000. 
# to convert into metres\r\ndepth_Coeff_Kitti = 800.\r\nhas_Cuda = torch.cuda.is_available()\r\nimg_Scale = 1./255\r\nimg_Mean = np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3))\r\nimg_Std = np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3))\r\nmax_Depth_Nyud = 8.\r\nmin_Depth_Nyud = 0.\r\nmax_Depth_Kitti = 80.\r\nmin_Depth_Kitti = 0.\r\nnum_CLASSES = 46\r\nnum_CLASSES_NYUD = 40\r\nnum_CLASSES_KITTI = 6\r\nnum_TASKS = 2 # segm + depth\r\n\r\ndef pre_Processing(img):\r\n return (img * img_Scale - img_Mean) / img_Std\r\n\r\nmodel_Object = network(num_classes = num_CLASSES, num_tasks = num_TASKS)\r\nif has_Cuda:\r\n _ = model_Object.cuda()\r\n_ = model_Object.eval()\r\n\r\ncheck_Point = torch.load('Weights/ExpNYUDKITTI_joint.ckpt')\r\nmodel_Object.load_state_dict(check_Point['state_dict'])\r\n\r\n# NYUD\r\nimg_path = 'Examples/Example_NYUDv2_Segm_Depth/000464.png'\r\nimg_nyud = np.array(Image.open(img_path))\r\ngt_segm_nyud = np.array(Image.open('Examples/Example_NYUDv2_Segm_Depth/segm_gt_000464.png'))\r\n\r\n# KITTI\r\nimg_path = 'Examples/Example_KITTI_Segm_Depth/000099.png'\r\nimg_kitti = np.array(Image.open(img_path))\r\ngt_segm_kitti = np.array(Image.open('Examples/Example_KITTI_Segm_Depth/segm_gt_000099.png'))\r\n\r\nwith torch.no_grad():\r\n # nyud\r\n img_var = Variable(torch.from_numpy(pre_Processing(img_nyud).transpose(2, 0, 1)[None]), requires_grad=False).float()\r\n if has_Cuda:\r\n img_var = img_var.cuda()\r\n segm, depth = model_Object(img_var)\r\n segm = cv2.resize(segm[0, :(num_CLASSES_NYUD)].cpu().data.numpy().transpose(1, 2, 0), img_nyud.shape[:2][::-1], interpolation=cv2.INTER_CUBIC)\r\n depth = cv2.resize(depth[0, 0].cpu().data.numpy(), img_nyud.shape[:2][::-1], interpolation=cv2.INTER_CUBIC)\r\n segm_nyud = cmap_Nyud[segm.argmax(axis=2) + 1].astype(np.uint8)\r\n depth_nyud = np.abs(depth)\r\n # kitti\r\n img_var = Variable(torch.from_numpy(pre_Processing(img_kitti).transpose(2, 0, 1)[None]), requires_grad=False).float()\r\n if has_Cuda:\r\n img_var = img_var.cuda()\r\n segm, depth = model_Object(img_var)\r\n segm = cv2.resize(segm[0, (num_CLASSES_NYUD):(num_CLASSES_NYUD + num_CLASSES_KITTI)].cpu().data.numpy().transpose(1, 2, 0), img_kitti.shape[:2][::-1], interpolation=cv2.INTER_CUBIC)\r\n depth = cv2.resize(depth[0, 0].cpu().data.numpy(), img_kitti.shape[:2][::-1], interpolation=cv2.INTER_CUBIC)\r\n segm_kitti = cmap_Kitti[segm.argmax(axis=2)].astype(np.uint8)\r\n depth_kitti = np.abs(depth)\r\n\r\nplt.figure(figsize=(18, 12))\r\nplt.subplot(141)\r\nplt.imshow(img_nyud)\r\nplt.title('NYUD: img')\r\nplt.axis('off')\r\nplt.subplot(142)\r\nplt.imshow(cmap_Nyud[gt_segm_nyud + 1])\r\nplt.title('NYUD: gt segm')\r\nplt.axis('off')\r\nplt.subplot(143)\r\nplt.imshow(segm_nyud)\r\nplt.title('NYUD: pred segm')\r\nplt.axis('off')\r\nplt.subplot(144)\r\nplt.imshow(depth_nyud, cmap='plasma', vmin=min_Depth_Nyud, vmax=max_Depth_Nyud)\r\nplt.title('NYUD: pred depth')\r\nplt.axis('off')\r\nplt.figure(figsize=(18,12))\r\nplt.subplot(141)\r\nplt.imshow(img_kitti)\r\nplt.title('KITTI: img')\r\nplt.axis('off')\r\nplt.subplot(142)\r\nplt.imshow(gt_segm_kitti)\r\nplt.title('KITTI: gt segm')\r\nplt.axis('off')\r\nplt.subplot(143)\r\nplt.imshow(segm_kitti)\r\nplt.title('KITTI: pred segm')\r\nplt.axis('off')\r\nplt.subplot(144)\r\nplt.imshow(depth_kitti, cmap='plasma', vmin=min_Depth_Kitti, vmax=max_Depth_Kitti)\r\nplt.title('KITTI: pred 
depth')\r\nplt.axis('off');\r\n\r\n\r\n\r\n\r\n","repo_name":"AleksBanbur/EE8204---Real-Time-Multi-Task-Learning","sub_path":"eval_KITTI_NYUD.py","file_name":"eval_KITTI_NYUD.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"}
{"seq_id":"14534187194","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\n# Create your views here.\nfrom core.models import Casa\n\n\ndef Login(request):\n return render(request, 'login.html', {'titulo':'Login'})\n\ndef SubmitLogin(request):\n if request.POST:\n user = request.POST.get('user')\n password = request.POST.get('pass')\n usuario = authenticate(username=user, password=password)\n \n if usuario is not None:\n login(request, usuario)\n return redirect('/home/')\n else:\n messages.error(request,'Usuario ou senha invalido')\n\n return redirect('/login/')\n\ndef Logout(request):\n logout(request) \n return redirect('/')\n\n@login_required(login_url = '/login/')\ndef Home(request):\n return render(request, 'home.html', {'titulo':'Home'})\n\n\ndef ListarCasas(request):\n casas = Casa.objects.all().order_by('nome')\n dados = {\n 'titulo':'Lista de casas',\n 'casas':casas\n }\n return render(request, 'listar.html', dados)\n\n\ndef Menu(request):\n id = request.GET.get('id')\n casa = None\n if id:\n casa = Casa.objects.get(id=id)\n\n dados = {\n 'titulo': 'Visualizar casa',\n 'casa': casa,\n }\n\n return render(request, 'menu.html', dados)","repo_name":"reduPKR/projeto_casa_python","sub_path":"projeto_casa/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"42831639503","text":"import random\n\nfrom neuron import Neuron\nfrom bias import Bias\n\nclass Network:\n def __init__(self, layers=[2,2,1]):\n self.layers = []\n for layer_number in range(len(layers)):\n if layer_number == 0:\n self.create_layer(layer_number, 'input', layers[layer_number])\n elif layer_number == len(layers)-1:\n self.create_layer(layer_number, 'output', layers[layer_number])\n else:\n self.create_layer(layer_number, 'hidden', layers[layer_number])\n self.trained = 0\n\n def train_network(self, learning_rate, training_set):\n \"\"\"\n learning_rate: rate of learning\n training_set: [ ([input], oracle) ]\n \"\"\"\n reversed_layers = self.layers[::-1]\n for training_tuple in training_set:\n #setting the input of the input nodes\n for x in range(len(reversed_layers[-1]) -1):\n reversed_layers[-1][x].input = training_tuple[0][x]\n\n only_neurons = list(filter(lambda x: isinstance(x, Neuron), reversed_layers[0]))\n try:\n deltalist = [[only_neurons[x].delta_update(learning_rate, training_tuple[1][x]) for x in range(len(only_neurons))]]\n except (TypeError, IndexError):\n deltalist = [[x.delta_update(learning_rate, training_tuple[1]) for x in only_neurons]]\n\n\n for layer in reversed_layers[1:]:\n only_neurons = list(filter(lambda x: isinstance(x, Neuron), layer))\n #removing bias nodes because they don't need their delta calculated\n deltalist.append(\n [x.delta_update(learning_rate, training_tuple[1]) for x in only_neurons]\n )\n\n
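 # (added note) write the freshly computed weights back into the\n # network; deltalist[k] was built from the Neuron entries of\n # reversed_layers[k] in the same order, so the indices line up\n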
 for layer_index in range(len(reversed_layers)):\n only_neurons = list(filter(lambda x: isinstance(x, Neuron), reversed_layers[layer_index]))\n for neuron_index in range(len(only_neurons)):\n only_neurons[neuron_index].input_weights = deltalist[layer_index][neuron_index]\n self.trained += 1\n\n def run(self, inputs):\n assert len(inputs) == len(self.layers[0]) - 1\n\n for x in range(len(self.layers[0])-1):\n self.layers[0][x].input = inputs[x]\n\n return tuple([z.get_output() for z in self.layers[-1]])\n\n\n def create_neuron(self, layer_number, neuron_type):\n if neuron_type == 'input':\n return Neuron()\n else: #if hidden or output or special snow flake\n return Neuron(self.layers[layer_number-1], [random.uniform(-1, 1) for x in range(len(self.layers[layer_number -1]))], neuron_type)\n\n def create_layer(self, layer_number, neuron_type, amount_neurons):\n self.layers.append([self.create_neuron(layer_number, neuron_type) for x in range(amount_neurons)])\n if neuron_type != \"output\":\n self.layers[-1].append(Bias())\n\n def create_graph(self):\n from graphviz import Graph\n dot = Graph()\n\n for i in range(len(self.layers)):\n for y in self.layers[i]:\n dot.node(str(id(y)),str(y))\n if i > 0:\n if not isinstance(y,Bias):\n for x in range(len(self.layers[i-1])):\n dot.edge(\n str(id(self.layers[i-1][x])),\n str(id(y)),\n label=str(round(y.input_weights[x],2))\n )\n return dot\n","repo_name":"Bob-Thomas/TCTI-VKAAI-17","sub_path":"oo-neural/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23631952931","text":"crypt = {1:\"ejp mysljylc kd kxveddknmc re jsicpdrysi\",2:\"rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd\",0:\"de kr kd eoya kw aej tysr re ujdr lkgc jv\"};\r\ndecrypt = {1:\"our language is impossible to understand\",2:\"there are twenty six factorial possibilities\",0:\"so it is okay if you want to just give up\"};\r\nrules \t= {\"q\" : \"z\", \"z\" : \"q\"};\r\n\r\nfor i in range(3):\r\n\tprint(i);\r\n\t\r\n\tfor j in range(len(crypt[i])):\r\n\t\trules[crypt[i][j]]\t= decrypt[i][j]\r\n\t\t\r\n\t\r\nprint(rules);\r\nprint(len(rules));\t\r\ncount = int(input())\r\nfor i in range(count):\t\r\n\tres = \"\".join(list(map(lambda y:rules[y],input())))\r\n\tprint(\"Case #\"+str(i+1)+\": \"+res)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/1861.py","file_name":"1861.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"33686021548","text":"from time import sleep\nfrom turtle import * \n\nscreen = Screen()\nscreen.addshape(\"ball.gif\")\n\ncolor(\"red\", \"red\")\nshape(\"ball.gif\")\nshapesize(1,1,1)\n\nx = 0\ny = -200\npenup()\n\nwhile y < 200:\n y = y+10\n goto(x,y)\n # Slow down the movement\n sleep(0.1)\n\n","repo_name":"open-education-polito/esercizi-python","sub_path":"Programmi_turtle/Palloncino.py","file_name":"Palloncino.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"1771864180","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 10 21:02:57 2021\n\n@author: james\n\"\"\"\n\n#%% Import Libraries\n#Import function from experiment lib file\nimport my_rp_library as mrpc\n\n\n#%%\n# Plot ZNPHI Dataset as Bargraph\nznphi_data_frame = mrpc.make_my_provintial_totals(mrpc.my_znphi_data_set())\nmrpc.make_monthly_bar_graph_x(znphi_data_frame,['Province','Count','Cases By Province'])\n\n#%%\n## Plot Pie charts from ZNPHI Dataset\n#znphi_data_frame = 
mrpc.make_my_provintial_totals(mrpc.my_znphi_data_set())\ncolors = mrpc.get_color_group(11)\npostfixtitles = ' at Provicial Level'\npiechart_data = {'infected':[],'deaths':[],'recovered':[],'labels':[],'explodelist':[]}\nfor i in znphi_data_frame.keys():\n piechart_data['labels'].append(i)\n piechart_data['infected'].append(sum(znphi_data_frame[i]['infected'][1]))\n piechart_data['deaths'].append(sum(znphi_data_frame[i]['deaths'][1]))\n piechart_data['recovered'].append(sum(znphi_data_frame[i]['recorvered'][1]))\n piechart_data['explodelist'].append(0.90)\n\nfor j in list(piechart_data.keys())[:3]:\n title = str(j).capitalize() + postfixtitles\n labels = piechart_data['labels']\n sizes = piechart_data[j] \n explodelist = piechart_data['explodelist']\n print(title)\n mrpc.make_me_a_pie_chart(labels,title,sizes,colors[0],explodelist,3)\n \n \n#%%\n## Plot Line Graph of I,R,D from ECDC and HDX Datasets \nX = mrpc.go_get_me_my_eltd_dataset()\ndatesx = X['Contenious']['date']\nydata = {'L':[],'I':[],'R':[],'D':[]}\nzxc = mrpc.breakup_dates_to_months_array_index(datesx,X['Contenious']['infected'],X['Contenious']['deaths'],X['Contenious']['recorvered'],X['Contenious']['recorvered'])\nfor i in zxc:\n ydata['L'].append(zxc[i]['Label'])\n ydata['I'].append(sum(zxc[i]['infected'][1]))\n ydata['R'].append(sum(zxc[i]['recorvered'][1]))\n ydata['D'].append(sum(zxc[i]['deaths'][1]))\n\ndatesx = ydata['L']\nxdata = [ydata['I'],ydata['R'],ydata['D']]\nsizes = [12,8]\nlabels = ['Infected','Recovered','Deaths']\ncolors = ['orange','green','red']\nmarkers = ['x','x','x']\nlst_lab = ['Cases Per Month','Month','Cases']\nmrpc.plot_simple_line_graph(datesx,sizes,xdata,labels,colors,markers,lst_lab)\n\n\n#%%\n## Plot Line Graph of I,R,D from ECDC and HDX Datasets with bounds\nlx1 = [0, 0 ,0, 120, 240, 360, 480, 600, 720, 840, 960, 1080 ]\nlx2 = [0, 0, 400, 800, 1200, 1600, 2000, 2400, 2800, 3200, 3600, 4000]\nX = mrpc.go_get_me_my_eltd_dataset()\ndatesx = X['Contenious']['date']\nydata = {'L':[],'I':[],'R':[],'D':[]}\nzxc = mrpc.breakup_dates_to_months_array_index(datesx,X['Contenious']['infected'],X['Contenious']['deaths'],X['Contenious']['recorvered'],X['Contenious']['recorvered'])\nfor i in zxc:\n ydata['L'].append(zxc[i]['Label'])\n ydata['I'].append(sum(zxc[i]['infected'][1]))\n ydata['R'].append(sum(zxc[i]['recorvered'][1]))\n ydata['D'].append(sum(zxc[i]['deaths'][1]))\n\ndatesx = ydata['L']\nxdata = [ydata['I'],ydata['R'],ydata['D'],lx1 , lx2]\nsizes = [12,8]\nlabels = ['Infected','Recovered','Deaths','Lower Bound','Upper Bound']\ncolors = ['orange','green','red','grey','cyan']\nmarkers = ['x','x','x','x','x']\nlst_lab = ['Cases Per Month','Month','Cases']\nmrpc.plot_simple_line_graph(datesx,sizes,xdata,labels,colors,markers,lst_lab)\n\n#%%\n### Plot ECDC and HDX Datasets. 
Daywise and Cumulative\nimport numpy as np\nX = mrpc.go_get_me_my_eltd_dataset()\ndatesx = []\nsizes = [12,8]\nlabels = ['Infected','Recovered','Fatalities']\ncolors = ['orange','green','red']\nmarkers = ['x','x','x']\nA = 70\nB = 300\n\nlst_lab = ['Daywise Cases','Day','Count']\nxdata = [X['Contenious']['infected'][A:B],X['Contenious']['recorvered'][A:B],X['Contenious']['deaths'][A:B]]\nmrpc.plot_simple_line_graph(datesx,sizes,xdata,labels,colors,markers,lst_lab)\n\nlst_lab = ['Cumulative Cases','Day','Count']\nxdata = [X['Contenious']['infected_cummulative'][A:B],X['Contenious']['recorvered_cummulative'][A:B],X['Contenious']['deaths_cummulative'][A:B]]\nmrpc.plot_simple_line_graph(datesx,sizes,xdata,labels,colors,markers,lst_lab)\n\n#%%\nimport numpy as np\nX = mrpc.go_get_me_my_eltd_dataset()\ndatesx = []\nsizes = [8,6]\nlabels = ['Actual Infected','Projected Infected','Actual Recovered','Projected Recovered','Actual Deaths','Projected Deaths']\ncolors = ['orange','green','black','gray','blue','pink']\nmarkers = ['x','o','x','o','x','o']\n\nA = list(X['Contenious']['infected_cummulative'][56:])[0]\nB = list(X['Contenious']['infected_cummulative'][56:])[-1]\nC = len(X['Contenious']['infected_cummulative'][56:])\nD = np.linspace(A, B, C)\n\nE = np.min(np.nonzero(X['Contenious']['recorvered_cummulative'][56:]))\nF = list(X['Contenious']['recorvered_cummulative'][56:])[-1]\nG = np.linspace(E, F, C)\n\nH = np.min(np.nonzero(X['Contenious']['deaths_cummulative'][56:]))\nI = list(X['Contenious']['deaths_cummulative'][56:])[-1]\nJ = np.linspace(H, I, C)\n\nlst_lab = ['Cumulative Cases','Day','Count']\nxdata = [X['Contenious']['infected_cummulative'][56:],D,X['Contenious']['recorvered_cummulative'][56:],G]\nmrpc.plot_simple_line_graph_dashed(datesx,sizes,xdata,labels,colors,markers,lst_lab)\n","repo_name":"jamesjsakala/jsdk_research","sub_path":"Analysis_Code-Piecharts.py","file_name":"Analysis_Code-Piecharts.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"18710248172","text":"# The 5 Love Languages\n\ndef love_language(partner, weeks):\n for i in LOVE_LANGUAGES:\n positive = 0\n neutral = 0\n for j in range(weeks):\n if partner.response(i)==\"positive\":\n positive += 1\n else:\n neutral += 1 \n if positive > neutral:\n return i\n","repo_name":"supvolume/codewars_solution","sub_path":"6kyu/love_language.py","file_name":"love_language.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23835588498","text":"from tyssue import History, Sheet, SheetGeometry, config\nfrom tyssue.dynamics import PlanarModel\nfrom tyssue.generation import three_faces_sheet\nfrom tyssue.solvers.viscous import EulerSolver\n\n\ndef test_euler():\n geom = SheetGeometry\n model = PlanarModel\n sheet = Sheet(\"3\", *three_faces_sheet())\n geom.update_all(sheet)\n sheet.settings[\"threshold_length\"] = 0.1\n\n sheet.update_specs(config.dynamics.quasistatic_plane_spec())\n sheet.face_df[\"prefered_area\"] = sheet.face_df[\"area\"].mean()\n history = History(sheet)\n solver = EulerSolver(sheet, geom, model, history=history, auto_reconnect=True)\n sheet.vert_df[\"viscosity\"] = 0.1\n\n sheet.edge_df.loc[[0, 17], \"line_tension\"] *= 2\n sheet.edge_df.loc[[1], \"line_tension\"] *= 8\n l0 = sheet.edge_df.loc[0, \"length\"]\n _ = solver.solve(0.2, dt=0.05)\n assert sheet.edge_df.loc[0, \"length\"] < l0\n
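 # (added note) solve(0.2, dt=0.05) should take 4 Euler steps; the history\n # presumably records the initial state plus one entry per step, hence 5\n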
 assert len(solver.history) == 5\n","repo_name":"DamCB/tyssue","sub_path":"tests/solvers/test_viscous.py","file_name":"test_viscous.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"61"}
{"seq_id":"37746600824","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport time\nimport hashlib\nfrom bson import ObjectId\nfrom com.distribution_api import distributionApi\nfrom com.douyin_openapi import douyinApi\nfrom com.kuaishou_openapi import kuaishouApi\nfrom com.filter import dateformat\n\nBLUENAME = 'distribution_api_platform'\nfrom flask import Blueprint\nmod = Blueprint(BLUENAME, __name__)\n\nfrom flask import session\n#session.permanent = True\n\nfrom flask import request, render_template, redirect, url_for\nfrom com.db import SequoiaDB\nfrom com.minerequest import MineAuthentic\nfrom com.sf import StreamFileLocal\nfrom com import utils\nimport shutil, config\n\n\nappid = 20310\n_var = locals()\n\n# 获取各大平台扫码授权链接\n@mod.route('/authUrlList',methods=['get'])\ndef platformList():\n userinfo=distributionApi().auth()\n if userinfo == False:\n return distributionApi().error('token error',{},-2)\n list= {\n \"douyin\":douyinApi().DouyinGetAuthUrl(request.args['token']),\n \"kuaishou\": kuaishouApi().kuaishouGetAuthUrl(request.args['token']),\n }\n return distributionApi().success(list)\n# 用抖音授权码换accesstoken\n\n\n@mod.route('/douyinGetCode',methods=['get'])\ndef douyinGetCode():\n base = distributionApi()\n userinfo = base.auth()\n if userinfo == 
False:\n return distributionApi().error('token error',{},-2)\n db = SequoiaDB()\n where={'parent_id':userinfo.id}\n type = request.args.get('type',type=str,default='0')\n if type != '0':\n where[\"type\"]=type\n orderby={\"addtime\":-1}\n select={'oid':'','avatar':'','nickname':'','type':'','province':'','city':'','country':'','openid':''}\n query =db.query('medianumbers',select,where,orderby)\n list=[]\n for i in query:\n list.append({\n 'avatar':i.avatar,\n 'nickname': i.nickname,\n 'type': i.type,\n 'province': i.province,\n 'city': i.city,\n 'country': i.country,\n 'openid':i.openid,\n 'id':i.id\n })\n return distributionApi().success({\"list\":list})\n\n# 任务列表\n@mod.route('/publishList',methods=['get'])\ndef publishList():\n userinfo = distributionApi().auth()\n if userinfo == False:\n return distributionApi().error('token error',{},-2)\n db = SequoiaDB()\n where = {\n 'userid': userinfo.id,\n 'openid':request.args.get('openid',type=str,default='')\n }\n video_status = request.args.get('video_status', type=int, default=0)\n if video_status != 0:\n where['video_status'] = video_status\n # publishType = request.args.get('publishType', type=int, default=0)\n #\n # if publishType ==2:\n # where['error_code']=''\n # elif publishType ==3:\n # where['error_code']={ \"$ne\": '' }\n where['platform'] = {\"$ne\": \"1\"}\n page = request.args.get('page', type=int, default=1)\n pageSize = request.args.get('pageSize', type=int, default=10)\n orderby = {\"addtime\": -1}\n select = {\n 'userid':'',\n 'openid':'',\n 'type':'',\n 'addtime':'',\n 'source_id':'',\n 'video_id':'',\n 'err_code':'',\n 'description':'',\n 'title':'',\n 'video_status':''\n }\n query = db.pagebar('publishsource', select, where, orderby,{},[pageSize,page])\n list = []\n for q in query.data:\n list.append({\n # 'userid':q.userid,\n # 'openid':q.openid,\n 'type':q.type,\n 'addtime':q.addtime.strftime('%Y-%m-%d %H:%M:%S'),\n 'err_code':q.err_code,\n 'description':q.description,\n 'title':q.title,\n 'id':q.id,\n 'video_status': q.video_status\n\n\n })\n return distributionApi().success({\"list\":list,\"count\":query.size})\n\n# 删除任务\n@mod.route('/publishDel', methods=['get'])\ndef publishDel():\n Mine = request.Mine()\n # 2. 
获取被编辑对象的id\n id = (not request.form.get('id', u'') == u'') and request.form.get('id', u'') or request.args.get('id', u'')\n Query = None\n db = SequoiaDB()\n select = {}\n where = {\"_id\": (id,)}\n\n for q in db.query('publishsource', select, where, {}, limit=1):\n Query = q\n break\n if Query is None: return distributionApi().error('数据不存在')\n desc='误差数据删除'\n Query.remove(request, event_desc=(999, desc))\n return distributionApi().success({},'删除成功')\n\n# 平台视频列表\n\n@mod.route('/videoList', methods=['get'])\ndef videoList():\n userinfo = distributionApi().auth()\n if userinfo == False:\n return distributionApi().error('token error', {}, -2)\n openid=request.args.get('openid',type=str,default='')\n if openid == '':\n return distributionApi().error('openid 不能为空')\n page=request.args.get('cursor',type=str,default='0')\n pageSize = request.args.get('pageSize', type=str, default='10')\n last_cursor= request.args.get('cursor_curr',type=str,default='')\n db=SequoiaDB()\n where={\n 'openid':openid,\n 'parent_id':userinfo.id\n }\n select={\"openid\":'','access_token':'','type':''}\n query=db.query('medianumbers',select,where,{},limit=1)\n if len(query)== 0:\n return distributionApi().error('账号不存在')\n row=query[0]\n video_status = request.args.get('video_status', type=int, default=0)\n if video_status!=0:\n select={\n 'is_reviewed':'',\n 'video_status':'',\n 'title':'',\n 'cover':'',\n 'share_url':'',\n 'is_top':'',\n 'create_time':'',\n 'item_id':'',\n 'statistics':''\n\n }\n where={\"openid\":openid,\"userid\":userinfo.id,'video_status':video_status }\n hasLast = False\n if(page!='0'):\n hasLast = True\n list=[];\n skip= int(page)*int(pageSize)\n query=db.query('publishsource',select,where,{\"create_time\":-1},limit=pageSize,skip= skip)\n count = db.count('publishsource',where)\n for i in query:\n list.append({\n 'is_reviewed':i.is_reviewed,\n 'video_status':i.video_status,\n 'title':i.title,\n 'cover':i.cover,\n 'share_url':i.share_url,\n 'is_top':i.is_top,\n 'create_time':i.create_time,\n 'item_id':i.item_id,\n 'statistics':i.statistics\n\n })\n data={\n 'list':list,\n 'description':'',\n 'has_more':True if skip + int(pageSize) < count else False,\n 'cursor':int(page)+1,\n 'error_code':0,\n 'has_last':hasLast\n }\n return distributionApi().success(data)\n else:\n #抖音\n if row.type == '1':\n douyin = douyinApi()\n res=douyin.DouyinGetVideoList(openid,row.access_token,page,pageSize)\n if res['extra']['error_code'] != 0:\n return distributionApi().error(res['extra']['description'])\n else:\n data = res['data']\n video_website='tiktok'\n #快手\n elif row.type == '2':\n kuaishou=kuaishouApi()\n res = kuaishou.KuaishouGetVideoList(row.access_token,page,pageSize)\n if res['result'] != 1:\n if(res['error']=='access_denied'):\n return distributionApi().error('授权已过期,请重新扫码授权',{},3)\n else:\n return distributionApi().error(res)\n else:\n if len(res['video_list'])== 0:\n return distributionApi().success({},'没有更多了')\n list=[]\n for item in res['video_list']:\n list.append({\n 'statistics':{\n 'forward_count':0,\n 'digg_count':item['like_count'],\n 'play_count':item['view_count'],\n 'comment_count':item['comment_count'],\n 'share_count':0,\n 'download_count':0\n },\n 'is_reviewed':item['pending'],\n 'video_status':4 if item['pending']==True else 1,\n 'title':item['caption'],\n 'cover':item['cover'],\n 'share_url':item['play_url'],\n 'is_top':False,\n 'create_time':item['create_time']/1000,\n 'item_id':item['photo_id']\n })\n 
nextPage=kuaishou.KuaishouGetVideoList(row.access_token,res['video_list'][-1]['photo_id'],'1')\n data={\n 'list':list,\n 'cursor':res['video_list'][-1]['photo_id'],\n 'has_more': len(nextPage['video_list']) > 0,\n 'error_code':'0'\n }\n video_website = 'kwai'\n\n #其他\n else:\n return distributionApi().error('尚未接入该平台')\n for i in data['list']:\n cols = {\n 'type':row.type,\n 'cover': i['cover'],\n 'title': i['title'],\n 'share_url': i['share_url'],\n 'video_status': i['video_status'],\n 'create_time': (i['create_time']),\n 'is_top': i['is_top'],\n 'userid': userinfo.id,\n 'openid': openid,\n 'statistics':i['statistics'],\n 'video_website': video_website,\n 'item_id':i['item_id'],\n }\n where={\n 'item_id':i['item_id'],\n 'userid':userinfo.id\n }\n res = db.query('publishsource',where, where, {}, limit=1)\n if res:\n res[0].update(cols,request,event_desc=(1000, u'数据更新[%s]' % res[0].id))\n else:\n cols['platform']='1'\n db.insert(request, 'publishsource', cols)\n data['has_last'] = last_cursor != ''\n return distributionApi().success(data)\n\n# 平台视频图表详情\n@mod.route('/videoDetailChart', methods=['post'])\ndef videoDetailChart():\n userinfo = distributionApi().auth()\n if userinfo == False:\n return distributionApi().error('token error', {}, -2)\n openid=request.form.get('openid',type=str,default='')\n itemid = request.form.get('item_id', type=str, default='')\n datatype = request.form.get('datatype', type=str, default='7')\n res=distributionApi().checkRequestParams(request,['openid','item_id'])\n if res !=False:\n return distributionApi().error(res+'不能为空')\n where={\n 'openid':openid,\n 'parent_id':userinfo.id\n }\n select={\"openid\":'','access_token':'','type':''}\n db = SequoiaDB()\n query=db.query('medianumbers',select,where,{},limit=1)\n if len(query)== 0:\n return distributionApi().error('账号不存在')\n row=query[0]\n #抖音\n if row.type == '1':\n douyin = douyinApi()\n res_like=douyin.DouyinGetItemLike(openid,row.access_token,datatype,itemid)\n res_comment = douyin.DouyinGetItemComment(openid, row.access_token, datatype,itemid)\n res_play = douyin.DouyinGetItemPlay(openid, row.access_token, datatype,itemid)\n res_share = douyin.DouyinGetItemShare(openid, row.access_token, datatype,itemid)\n if res_like['extra']['error_code'] != 0:\n return distributionApi().error(res_like['extra']['description'])\n else:\n like=[]\n comment=[]\n date=[]\n play=[]\n share=[]\n for i in res_like['data']['result_list']:\n like.append(i['like'])\n date.append(i['date'])\n for i in res_comment['data']['result_list']:\n comment.append(i['comment'])\n for i in res_play['data']['result_list']:\n play.append(i['play'])\n for i in res_share['data']['result_list']:\n share.append(i['share'])\n data=[\n {'name':'点赞数','data':like,'type':'line'},\n {'name': '播放数', 'data': play,'type':'line'},\n {'name': '评论数', 'data': comment,'type':'line'},\n {'name': '分享数', 'data': share,'type':'line'},\n ]\n fansChart=distributionApi().getChart(date,data)\n return distributionApi().success({'chart':fansChart})\n else:\n return distributionApi().success({'chart': []})\n\n# 平台视频详情\n@mod.route('/videoDetailComment', methods=['post'])\ndef videoDetailComment():\n userinfo = distributionApi().auth()\n if userinfo == False:\n return distributionApi().error('token error', {}, -2)\n openid=request.form.get('openid',type=str,default='')\n itemid = request.form.get('item_id', type=str, default='')\n page = request.form.get('page', type=str, default='0')\n pageSize = request.form.get('pageSize', type=str, 
default='10')\n res=distributionApi().checkRequestParams(request,['openid','item_id'])\n if res !=False:\n return distributionApi().error(res+'不能为空')\n where={\n 'openid':openid,\n 'parent_id':userinfo.id\n }\n select={\"openid\":'','access_token':'','type':''}\n db = SequoiaDB()\n query=db.query('medianumbers',select,where,{},limit=1)\n if len(query)== 0:\n return distributionApi().error('账号不存在')\n row=query[0]\n #抖音\n if row.type == '1':\n douyin = douyinApi()\n res=douyin.DouyinGetItemBase(openid,row.access_token,itemid)\n res_comment = douyin.DouyinGetItemCommentList(openid, row.access_token, itemid , page , pageSize)\n if res['extra']['error_code'] != 0:\n return distributionApi().error(res['extra']['description'])\n else:\n data={\n 'baseInfo':res['data'],\n 'commentInfo':res_comment['data']\n }\n return distributionApi().success(data)\n #快手\n elif row.type == '2':\n kuaishou=kuaishouApi()\n res = kuaishou.KuaishouGetVideoList(row.access_token,page,pageSize)\n if res['result'] != 1:\n return distributionApi().error(res)\n else:\n list=[]\n for item in res['video_list']:\n list.append({\n 'statistics':{\n 'forward_count':0,\n 'digg_count':item['like_count'],\n 'play_count':item['view_count'],\n 'comment_count':item['comment_count'],\n 'share_count':0,\n 'download_count':0\n },\n 'is_reviewed':item['pending'],\n 'video_status':item['pending'],\n 'title':item['caption'],\n 'cover':item['cover'],\n 'share_url':item['play_url'],\n 'is_top':False,\n 'create_time':item['create_time'],\n 'item_id':item['photo_id']\n })\n return distributionApi().success({'list':list})\n #其他\n else:\n return distributionApi().error('尚未接入该平台')\n\n# 授权后首页echart+最近视频\n@mod.route('/index', methods=['get'])\ndef index():\n userinfo = distributionApi().auth()\n if userinfo == False:\n return distributionApi().error('token error', {}, -2)\n openid=request.args.get('openid',type=str,default='')\n res=distributionApi().checkRequestParams(request,['openid'])\n if res !=False:\n return distributionApi().error(res+'不能为空')\n where={\n 'openid':openid,\n 'parent_id':userinfo.id\n }\n select={\"openid\":'','access_token':'','type':''}\n db = SequoiaDB()\n query=db.query('medianumbers',select,where,{},limit=1)\n if len(query)== 0:\n return distributionApi().error('账号不存在')\n row=query[0]\n #抖音\n if row.type == '1':\n douyin = douyinApi()\n res_fans=douyin.DouyinGetFans(openid,row.access_token,'30')\n res_item = douyin.DouyinGetItem(openid, row.access_token, '30')\n if res_fans['extra']['error_code'] != 0:\n return distributionApi().error(res_fans['extra']['description'])\n if res_item['extra']['error_code'] != 0:\n return distributionApi().error(res_item['extra']['description'])\n else:\n newFans=[]\n totalFans=[]\n date=[]\n new_issue=[]\n new_play=[]\n total_issue=[]\n for i in res_fans['data']['result_list']:\n newFans.append(i['new_fans'])\n totalFans.append(i['total_fans'])\n date.append(i['date'])\n for i in res_item['data']['result_list']:\n new_issue.append(i['new_issue'])\n new_play.append(i['new_play'])\n total_issue.append(i['total_issue'])\n data = [\n {'name': '每日新粉丝数', 'data': newFans,'type':'line'},\n {'name': '每日总粉丝数', 'data': totalFans,'type':'line'},\n {'name': '每日新增内容数', 'data': new_issue,'type':'line'},\n {'name': '每日新增播放量', 'data': new_play,'type':'bar'},\n {'name': '每日内容总数', 'data': total_issue,'type':'bar'},\n ]\n fansChart = distributionApi().getChart(date, data)\n row.update({\n \"total_fans\":res_fans['data']['result_list'][-1]['total_fans'],\n 
\"total_issue\":res_item['data']['result_list'][-1]['total_issue'],\n },request)\n return distributionApi().success({\n 'chart':fansChart,\n 'totalFans':res_fans['data']['result_list'][-1]['total_fans'],\n 'total_issue': res_item['data']['result_list'][-1]['total_issue'],\n })\n elif row.type =='2':\n row.update({\n \"total_fans\": \"暂不支持\",\n \"total_issue\": \"暂不支持\",\n },request)\n return distributionApi().success({\n 'chart': [],\n 'totalFans': '暂不支持',\n 'total_issue': '暂不支持',\n })\n #其他\n else:\n return distributionApi().error('尚未接入该平台')\n\n@mod.route('/billboard', methods=['get'])\ndef billboard():\n userinfo = distributionApi().auth()\n if userinfo == False:\n return distributionApi().error('token error', {}, -2)\n openid=request.args.get('openid',type=str,default='')\n if openid == '':\n return distributionApi().error('openid 不能为空')\n page='0'\n pageSize = '20'\n db=SequoiaDB()\n where={\n 'openid':openid,\n 'parent_id':userinfo.id\n }\n select={\"openid\":'','access_token':'','type':''}\n query=db.query('medianumbers',select,where,{},limit=1)\n if len(query)== 0:\n return distributionApi().error('账号不存在')\n row=query[0]\n #抖音\n if row.type == '1':\n douyin = douyinApi()\n res=douyin.DouyinGetVideoList(openid,row.access_token,page,pageSize)\n if res['extra']['error_code'] != 0:\n return distributionApi().error(res['extra']['description'])\n else:\n list = res['data']['list']\n\n\n #快手\n elif row.type == '2':\n kuaishou=kuaishouApi()\n res = kuaishou.KuaishouGetVideoList(row.access_token,'','4')\n if res['result'] != 1:\n return distributionApi().error(res)\n else:\n list=[]\n for item in res['video_list']:\n list.append({\n 'statistics':{\n 'forward_count':0,\n 'digg_count':item['like_count'],\n 'play_count':item['view_count'],\n 'comment_count':item['comment_count'],\n 'share_count':0,\n 'download_count':0\n },\n 'is_reviewed':item['pending'],\n 'video_status':item['pending'],\n 'title':item['caption'],\n 'cover':item['cover'],\n 'share_url':item['play_url'],\n 'is_top':False,\n 'create_time':item['create_time'],\n 'item_id':item['photo_id']\n })\n #其他\n else:\n return distributionApi().error('尚未接入该平台')\n\n digg_count = sorted(list, key = lambda k: k['statistics']['digg_count'])\n digg_count.reverse()\n comment_count = sorted(list, key=lambda k: k['statistics']['comment_count'])\n comment_count.reverse()\n play_count = sorted(list, key=lambda k: k['statistics']['play_count'])\n play_count.reverse()\n return distributionApi().success({\n 'comment':comment_count[:4],\n 'play':play_count[:4],\n 'digg':play_count[:4]\n })\n\n","repo_name":"zzppff321/openslide-webuploder-flask","sub_path":"distribution/api_platform.py","file_name":"api_platform.py","file_ext":"py","file_size_in_byte":23478,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"7189470374","text":"\n########################################################################\n# Object Detection training script\n# Modified by: Jannik Zuern (zuern@informatik.uni-freiburg.de)\n########################################################################\n\nimport argparse\nimport os\nimport numpy as np\nimport torch\nimport yaml\nfrom torch.utils.data import DataLoader\nfrom tqdm.autonotebook import tqdm\nfrom model.efficientdet.backbone import EfficientDetBackbone\nfrom model.efficientdet.dataset import collater\nfrom model.efficientdet.loss import FocalLoss\nfrom utils import CustomDataParallel, get_last_weights, init_weights, boolean_string\nfrom 
dataloader.freicar_dataloader import FreiCarDataset\nfrom utils import VisdomLinePlotter, visualize_imgs_with_gt_bbox\n\n\nclass Params:\n def __init__(self, project_file):\n self.params = yaml.safe_load(open(project_file).read())\n\n def __getattr__(self, item):\n return self.params.get(item, None)\n\n\ndef get_args():\n parser = argparse.ArgumentParser('Yet Another EfficientDet Pytorch: SOTA object detection network - Zylo117')\n parser.add_argument('-p', '--project', type=str, default='freicar-detection', help='project file that contains parameters')\n parser.add_argument('-c', '--compound_coef', type=int, default=0, help='coefficients of efficientdet')\n parser.add_argument('-n', '--num_workers', type=int, default=8, help='num_workers of dataloader')\n parser.add_argument('--batch_size', type=int, default=12, help='The number of images per batch among all devices')\n parser.add_argument('--lr', type=float, default=1e-4)\n parser.add_argument('--num_epochs', type=int, default=100)\n parser.add_argument('--val_interval', type=int, default=1, help='Number of epoches between valing phases')\n parser.add_argument('--save_interval', type=int, default=500, help='Number of steps between saving')\n parser.add_argument('--es_min_delta', type=float, default=0.0, help='Early stopping\\'s parameter: minimum change loss to qualify as an improvement')\n parser.add_argument('--es_patience', type=int, default=0,\n help='Early stopping\\'s parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.')\n parser.add_argument('-w', '--load_weights', type=str, default=None,\n help='whether to load weights from a checkpoint, set None to initialize, set \\'last\\' to load last checkpoint')\n parser.add_argument('--saved_path', type=str, default='logs/')\n\n # Visualization\n parser.add_argument('--visualize_gt', type=boolean_string, default=False,\n help='whether to visualize the GT bounding boxes in the training loop/')\n\n args = parser.parse_args()\n return args\n\n\ndef train(opt):\n\n global plotter\n plotter = VisdomLinePlotter(env_name='FreiCar Object Detection')\n\n params = Params(f'projects/{opt.project}.yml')\n\n if params.num_gpus == 0:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n if torch.cuda.is_available():\n torch.cuda.manual_seed(42)\n else:\n torch.manual_seed(42)\n\n opt.saved_path = opt.saved_path + f'/{params.project_name}/'\n os.makedirs(opt.saved_path, exist_ok=True)\n\n # define paramteters for model training\n training_params = {'batch_size': opt.batch_size,\n 'shuffle': True,\n 'drop_last': True,\n 'collate_fn': collater,\n 'num_workers': opt.num_workers}\n\n # define paramteters for model evaluation\n val_params = {'batch_size': opt.batch_size,\n 'shuffle': False,\n 'drop_last': True,\n 'collate_fn': collater,\n 'num_workers': opt.num_workers}\n\n # get training dataset\n training_set = FreiCarDataset(data_dir=\"./dataloader/data/\",\n padding=(0, 0, 12, 12),\n split='training',\n load_real=True)\n\n # and make data generator from dataset\n training_generator = DataLoader(training_set, **training_params)\n\n # get validation dataset\n val_set = FreiCarDataset(data_dir=\"./dataloader/data/\",\n padding=(0, 0, 12, 12),\n split='validation',\n load_real=False)\n\n # and make data generator from dataset\n val_generator = DataLoader(val_set, **val_params)\n\n # Instantiation of the EfficientDet model\n model = EfficientDetBackbone(num_classes=len(params.obj_list),\n compound_coef=opt.compound_coef,\n 
ratios=eval(params.anchors_ratios),\n scales=eval(params.anchors_scales))\n\n # load last weights if training from checkpoint\n if opt.load_weights is not None:\n if opt.load_weights.endswith('.pth'):\n weights_path = opt.load_weights\n else:\n weights_path = get_last_weights(opt.saved_path)\n try:\n last_step = int(os.path.basename(weights_path).split('_')[-1].split('.')[0])\n except Exception:\n last_step = 0\n\n try:\n ret = model.load_state_dict(torch.load(weights_path), strict=False)\n except RuntimeError as e:\n print(f'[Warning] Ignoring {e}')\n print('[Warning] Don\\'t panic if you see this, this might be because you load a pretrained weights with '\n 'different number of classes. The rest of the weights should be loaded already.')\n\n print(f'[Info] loaded weights: {os.path.basename(weights_path)}, resuming checkpoint from step: {last_step}')\n else:\n last_step = 0\n print('[Info] initializing weights...')\n init_weights(model)\n\n if params.num_gpus > 0:\n model = model.cuda()\n if params.num_gpus > 1:\n model = CustomDataParallel(model, params.num_gpus)\n\n optimizer = torch.optim.AdamW(model.parameters(), opt.lr)\n\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)\n\n best_loss = 1e5\n best_epoch = 0\n step = max(0, last_step)\n\n # Define training criterion\n criterion = FocalLoss()\n\n # Set model to train mode\n model.train()\n\n num_iter_per_epoch = len(training_generator)\n\n print('Started Training')\n\n # Train loop\n for epoch in range(opt.num_epochs):\n last_epoch = step // num_iter_per_epoch\n if epoch < last_epoch:\n continue\n\n epoch_loss = [] # here we append new total losses for each step\n\n progress_bar = tqdm(training_generator)\n for iter, data in enumerate(progress_bar):\n if iter < step - last_epoch * num_iter_per_epoch:\n progress_bar.update()\n continue\n\n ##########################################\n # Training step: forward pass, focal loss, backward pass, update\n # Made by DavideRezzoli\n ##########################################\n optimizer.zero_grad()\n _, reg, clas, anchor = model(data['img'].cuda())\n cls_loss, reg_loss = criterion(clas, reg, anchor, data['annot'].cuda())\n loss = cls_loss.mean() + reg_loss.mean()\n loss.backward()\n optimizer.step()\n\n epoch_loss.append(float(loss))\n
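\n # (added note) the block above runs one optimisation step per batch; the\n # progress line below then reports the per-term losses for monitoring\n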
 progress_bar.set_description(\n 'Step: {}. Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Total loss: {:.5f}'.format(\n step, epoch, opt.num_epochs, iter + 1, num_iter_per_epoch, cls_loss.item(),\n reg_loss.item(), loss.item()))\n\n plotter.plot('Total loss', 'train', 'Total loss', step, loss.item())\n plotter.plot('Regression_loss', 'train', 'Regression_loss', step, reg_loss.item())\n plotter.plot('Classification_loss', 'train', 'Classification_loss', step, cls_loss.item())\n\n # log learning_rate\n current_lr = optimizer.param_groups[0]['lr']\n plotter.plot('learning rate', 'train', 'learning rate', step, current_lr)\n\n # increment step counter\n step += 1\n\n if step % opt.save_interval == 0 and step > 0:\n save_checkpoint(model, f'efficientdet-d{opt.compound_coef}_{epoch}_{step}.pth')\n print('saved checkpoint...')\n\n # adjust learning rate via learning rate scheduler\n scheduler.step(np.mean(epoch_loss))\n\n if epoch % opt.val_interval == 0:\n\n print('Evaluating model')\n\n model.eval()\n loss_regression_ls = [] # here we append new regression losses for each step\n loss_classification_ls = [] # here we append new classification losses for each step\n\n for iter, data in enumerate(val_generator):\n\n with torch.no_grad():\n ##########################################\n # Validation step: forward pass and focal loss only\n # Made by Davide Rezzoli\n #########################################\n _, reg, clas, anchor = model(data['img'].cuda())\n cls_loss, reg_loss = criterion(clas, reg, anchor, data['annot'].cuda())\n\n loss_classification_ls.append(cls_loss.item())\n loss_regression_ls.append(reg_loss.item())\n\n cls_loss = np.mean(loss_classification_ls)\n reg_loss = np.mean(loss_regression_ls)\n loss = cls_loss + reg_loss\n\n # LOGGING\n print(\n 'Val. Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. Total loss: {:1.5f}'.format(\n epoch, opt.num_epochs, cls_loss, reg_loss, loss))\n\n plotter.plot('Total loss', 'val', 'Total loss', step, loss.item())\n plotter.plot('Regression_loss', 'val', 'Regression_loss', step, reg_loss.item())\n plotter.plot('Classification_loss', 'val', 'Classification_loss', step, cls_loss.item())\n\n # Save model checkpoint if new best validation loss\n if loss + opt.es_min_delta < best_loss:\n best_loss = loss\n best_epoch = epoch\n\n save_checkpoint(model, f'efficientdet-d{opt.compound_coef}_{epoch}_{step}.pth')\n\n model.train()\n\n # Early stopping\n if epoch - best_epoch > opt.es_patience > 0:\n print('[Info] Stop training at epoch {}. The lowest loss achieved is {}'.format(epoch, best_loss))\n break\n
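\n\n# (added note) the early-stopping test above relies on a chained comparison:\n# 'epoch - best_epoch > opt.es_patience > 0' only fires when patience is\n# enabled (> 0) and exceeded; a standalone restatement, not in the original:\ndef _should_stop_early(epoch, best_epoch, patience):\n return epoch - best_epoch > patience > 0\n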
\n\ndef save_checkpoint(model, name):\n if isinstance(model, CustomDataParallel):\n torch.save(model.module.model.state_dict(), os.path.join(opt.saved_path, name))\n else:\n torch.save(model.state_dict(), os.path.join(opt.saved_path, name))\n\n\nif __name__ == '__main__':\n opt = get_args()\n train(opt)\n","repo_name":"Forrest-Z/prefinal","sub_path":"ros_node/freicar_bb_box_sr/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"2369262129","text":"\"\"\"Environment builder.\"\"\"\n# Used by Ray to register and build environments\n# %% Imports\n# Standard Library Imports\nimport json\nimport warnings\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom pathlib import Path\n\n# Third Party Imports\nimport gymnasium as gym\nfrom gymnasium import Wrapper\nfrom gymnasium import wrappers as gym_wrappers\n\n# Punch Clock Imports\nfrom punchclock.common.utilities import array2List\nfrom punchclock.environment import (\n info_wrappers,\n misc_wrappers,\n obs_wrappers,\n reward_wrappers,\n)\nfrom punchclock.environment.env import SSAScheduler, SSASchedulerParams\n\n# %% Functions\n\n\n# An environment builder function is required by RLlib. It simply takes in a dict and\n# outputs an environment. In the case of SSAScheduler, we use SSASchedulerParams\n# to build the environment, so the environment builder function here acts as a\n# wrapper around SSASchedulerParams.\ndef buildEnv(env_config: dict) -> gym.Env:\n \"\"\"Build SSAScheduler environment from config dict.\n\n Args:\n env_config (`dict`): A dict with the following structure:\n {\n \"horizon\" (`int`): See `SSASchedulerParams`,\n \"agent_params\" (`dict`): See `SSASchedulerParams`,\n \"filter_params\" (`dict`): See `SSASchedulerParams`,\n \"time_step\" (`float`, optional): See `SSASchedulerParams`,\n \"seed\" (`int`, optional): See `SSASchedulerParams`,\n \"constructor_params\" (`dict`, optional):\n {\n \"wrappers\" (`list[dict]`, optional): List of wrappers and their\n associated configs to apply to base environment. Wrappers are\n applied in the order in which they appear in the list. Each\n entry is in the following format:\n {\n \"wrapper\": wrapper_name (`str`),\n \"wrapper_config\": A `dict` containing optional arguments\n for wrapper. Optional, defaults to {}.\n }\n }\n }\n\n Returns:\n `env`: Gym environment.\n\n Example wrappers input:\n constructor_params = {\n \"wrappers\": [\n {\n \"wrapper\": \"filter_observation\",\n \"wrapper_config\": {\"filter_keys\": [\"vis_map_est\", \"num_tasked\"]},\n },\n {\"wrapper\": \"float_obs\"},\n {\n \"wrapper\": \"action_mask\",\n \"action_mask_on\": False,\n },\n ]\n }\n \"\"\"\n # %% Default constructor params\n # Create constructor params if not specified in arg. 
Set \"wrappers\" to [] if\n # not specified in arg; this makes other argument checking easier later.\n if \"constructor_params\" not in env_config.keys():\n env_config[\"constructor_params\"] = {}\n\n if \"wrappers\" not in env_config[\"constructor_params\"].keys():\n env_config[\"constructor_params\"][\"wrappers\"] = []\n\n if \"rescale_dict_obs\" in [\n wpr[\"wrapper\"] for wpr in env_config[\"constructor_params\"][\"wrappers\"]\n ]:\n warnings.warn(\n \"\"\" Replace 'rescale_dict_obs' with 'linscale_dict_obs';\n 'rescale_dict_obs' will be deprecated.\"\"\"\n )\n\n # If an observation target_filter was set, make sure vis_map_est won't get filtered\n # out (if vis_map_est was not provided in the list of states to wrapper).\n wrapper_names = [\n a[\"wrapper\"] for a in env_config[\"constructor_params\"][\"wrappers\"]\n ]\n if \"FilterObservation\" in wrapper_names:\n filt_obs = [\n a\n for a in env_config[\"constructor_params\"][\"wrappers\"]\n if a[\"wrapper\"] == \"FilterObservation\"\n ][0]\n if \"vis_map_est\" not in filt_obs[\"wrapper_config\"][\"filter_keys\"]:\n filt_obs[\"wrapper_config\"][\"filter_keys\"].append(\"vis_map_est\")\n warnings.warn(\n \"\"\"'vis_map_est' not included in FilterObservation config.\n Appending to list of filters.\"\"\"\n )\n # %% Build base environment\n # separate target_filter config from env config\n scheduler_config = deepcopy(env_config)\n scheduler_config.pop(\"constructor_params\")\n\n env_params = SSASchedulerParams(**scheduler_config)\n env = SSAScheduler(env_params)\n\n # %% Wrap environment\n # Iterate along list of input wrappers and wrap the env according to the order\n # of the inputs. Order of wrappers matters.\n # Wrapper names must match the wrapper class name in the relevant module.\n for wrapper_dict in env_config[\"constructor_params\"][\"wrappers\"]:\n wrapper_name = wrapper_dict[\"wrapper\"]\n wrapper = getWrapper(wrapper_name)\n\n # Use blank dict for unprovided wrapper configs. Not all wrappers even\n # have configs, so need to have a default kwargs.\n kwargs = wrapper_dict.get(\"wrapper_config\", {})\n env = wrapper(env, **kwargs)\n\n return env\n\n\ndef getWrapper(wrapper_name: str) -> Wrapper:\n \"\"\"Get a Gymnasium wrapper class from a str of the wrapper.\n\n Args:\n wrapper_name (str): Name of wrapper.\n\n Returns:\n Wrapper: See Gymnasium documentation.\n \"\"\"\n # Wrapper names must match the wrapper class name in the relevant module.\n # Try 3 modules to get wrappers.\n wrapper_modules = [\n gym_wrappers,\n obs_wrappers,\n reward_wrappers,\n misc_wrappers,\n info_wrappers,\n ]\n for wm in wrapper_modules:\n try:\n wrapper = getattr(wm, wrapper_name, {})\n except Exception:\n pass\n\n if wrapper != {}:\n break\n\n if wrapper == {}:\n # If wrapper not found in wrapper_modules, raise error\n raise ValueError(f\"Wrapper '{wrapper_name}' not found.\")\n\n return wrapper\n\n\ndef genConfigFile(\n config_dir: str | Path,\n config_file_name: str = None,\n num_cpus: int = None,\n trainable: str = None,\n param_space: dict = None,\n tune_config: dict = None,\n run_config: dict = None,\n) -> dict:\n \"\"\"Generate a config file to be used by buildTuner.\n\n By default, file is saves as 'config_YY-MM-DD_HH-MM-SS.json'.\n\n Args:\n config_dir (`str | Path`): Directory to save config file.\n config_file_name (`str`, optional): The name of the config file. Do not include\n path or file extension. Defaults to 'config_YY-MM-DD_HH-MM-SS'.\n num_cpus (`int`, optional): Number of CPUs to use in training run. 
Use\n None to use maximum available on machine. Defaults to None.\n trainable (`str`, optional): Defaults to None.\n param_space (`dict`): Search space of the tuning job. Must include the following\n keys:\n {\n \"env_config\": (`dict`) See `SSASchedulerParams`.\n }\n tune_config (`dict`, optional): Defaults to {}.\n run_config (`dict`, optional): Defaults to None.\n\n Returns:\n `dict`: Contains all of the arguments except `config_dir` and `config_file_name`.\n \"\"\"\n # Check if environment config is in parameter space; we don't care about the contents\n # of env config, only that it exists, to stop really egregious arguments.\n if \"env_config\" not in param_space:\n raise ValueError(\"'env_config' is not in param_space\")\n\n if isinstance(config_dir, str):\n config_dir = Path(config_dir)\n\n if tune_config is None:\n tune_config = {}\n\n # assemble data in dict\n config_data = {\n \"num_cpus\": num_cpus,\n \"trainable\": trainable,\n \"param_space\": param_space,\n \"tune_config\": tune_config,\n \"run_config\": run_config,\n }\n\n # Convert dict to json. The default setting is how to handle certain datatypes.\n json_object = json.dumps(config_data, default=array2List)\n\n if config_file_name is None:\n # set file path for default file name\n # get time stamp for file name\n now = datetime.now()\n date_time = now.strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_path = config_dir.joinpath(\"config_\" + date_time).with_suffix(\n \".json\"\n )\n else:\n # set file path for custom file name\n file_path = config_dir.joinpath(config_file_name).with_suffix(\".json\")\n\n # save json\n with open(str(file_path), \"w\") as outfile:\n outfile.write(json_object)\n\n print(f\"Config file saved to {file_path} \\n\")\n return config_data\n","repo_name":"dylan906/clockpunch","sub_path":"punchclock/ray/build_env.py","file_name":"build_env.py","file_ext":"py","file_size_in_byte":8457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32879903873","text":"\"\"\"Codage de Huffman.\nCette verion est purement pédagogique et ne pourrait pas servir de mise en\nproduction efficace.\nChaque étape donne lieu à des objets intermédiares qui peuvent être affichés,\nvisualisés, contrôlés.\nCette version code des fichiers en UTF8 dont certains caractères sont codés sur\nun, deux, trois ou quatre octets.\"\"\"\n\ndef get_n_for_sort(l):\n return l[0][1]\n\ndef construit_liste_ss_arbres_caracteres_nombres(fichier, affiche = True):\n \"\"\"Pour chaque caractère c du fichier, constuit une liste :\n [(c,n), None, None] où n est le nombre de fois que c est présent dans le\n fichier. 
Une telle liste sera vue plus tard comme une feuille.\n Si affiche == True, afficher les paires (c,n) dans l'ordre croissant de n.\n \"\"\"\n dico_occurrences = {}\n list_of_leaves = []\n with open(fichier, \"r\", encoding=\"utf8\") as f:\n for ligne in f:\n for carac in ligne:\n if dico_occurrences.get(carac) is None:\n dico_occurrences[carac] = 0\n dico_occurrences[carac] += 1\n for carac, nb_occurrences in dico_occurrences.items():\n list_of_leaves.append([(carac, nb_occurrences), None, None])\n\n if affiche:\n print(sorted(list_of_leaves, key= get_n_for_sort))\n return list_of_leaves\n\ndef construit_arbre_huffman_depuis_liste(liste_car_nbre):\n \"\"\"À partir de la liste composée de listes du type [(c,n), None, None],\n construit et retourne l'arbre de Huffman suivant l'algorithme classique.\n Le résultat (l'arbre) est une liste composée de listes du type :\n [(c,n), a_1, a_2] avec :\n + n un entier.\n + c un caractère ; dans ce cas a_1 et a_2 sont None et c'est une feuille\n ou c est None ; dans ce cas c'est un noeud interne et a_1 et a_2 sont\n des sous-arbres. Par convention, a_1 est le sous-arbre gauche codant 0\n et a_2 le sous-arbre droit codant 1.\"\"\"\n arbre_huffman = liste_car_nbre.copy()\n while len(arbre_huffman) > 1:\n first_node = min(arbre_huffman, key= get_n_for_sort)\n # ou first_node = min(arbre_huffman, key= lambda x:x[0][1])\n arbre_huffman.remove(first_node)\n second_node = min(arbre_huffman, key= get_n_for_sort)\n arbre_huffman.remove(second_node)\n new_node = [(None, first_node[0][1] + second_node[0][1]), first_node, second_node]\n arbre_huffman.append(new_node)\n return arbre_huffman[0]\n\n# Correction\ndef construit_table_codage_depuis_arbre_huffman(arbre):\n \"\"\"Construit la table de codage à partir de l'arbre de Huffman.\n Le résultat est un dictionnaire dont les clés sont les caractères et les\n valeurs sont les codes binaires correspondants issus de l'arbre. Un code\n binaire est retourné ici sous forme de chaine de caractères de '0' et '1'.\n \"\"\"\n def iter_rec_chaines_binaires(arbre, chaine_courante):\n if arbre[0][0] is not None:\n yield arbre[0][0], chaine_courante\n else:\n yield from iter_rec_chaines_binaires(arbre[1], chaine_courante + \"0\")\n yield from iter_rec_chaines_binaires(arbre[2], chaine_courante + \"1\")\n\n table = {}\n it = iter_rec_chaines_binaires(arbre, \"\")\n for (carac, code) in it:\n table[carac] = code\n return table\n\n\ndef code_fichier(fichier, table_codage):\n \"\"\"Code chaque caractère du fichier avec la table de codage dont les clés\n sont les caractères et les valeurs les codes binaires sous forme de chaines\n de '0' et de '1'.\n Le résultat est une chaine de caractères de '0' et de '1'.\"\"\"\n message_code = \"\"\n with open(fichier, \"r\", encoding=\"utf8\") as f:\n for ligne in f:\n for carac in ligne:\n message_code += table_codage[carac]\n return message_code\n\n
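\n# Exemple indicatif (ajout, hors fichier d'origine) : sur un arbre réduit à\n# deux feuilles, la table de codage et le codage d'un petit message.\ndef _exemple_codage():\n # arbre hypothétique : 'a' à gauche (code \"0\"), 'b' à droite (code \"1\")\n arbre = [(None, 4), [('a', 2), None, None], [('b', 2), None, None]]\n table = construit_table_codage_depuis_arbre_huffman(arbre)\n # table == {'a': '0', 'b': '1'} ; \"abba\" se code alors en \"0110\"\n return \"\".join(table[c] for c in \"abba\")\n\n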
Retourne le décodage sous forme d'une chaine de\n    caractères.\"\"\"\n    message_decode = \"\"\n    arbre_courant = arbre\n    for bit in message_binaire:\n        # On descend dans l'arbre selon le bit lu...\n        if bit == '0':\n            arbre_courant = arbre_courant[1]\n        elif bit == '1':\n            arbre_courant = arbre_courant[2]\n        # ... et si on atteint une feuille, on émet son caractère puis on repart\n        # de la racine (corrige la perte du dernier caractère décodé).\n        if arbre_courant[0][0] is not None:\n            message_decode += arbre_courant[0][0]\n            arbre_courant = arbre\n    return message_decode\n\n# Parcours en profondeur : concatène les codes binaires des feuilles.\ndef tree_to_dfs(arbre):\n    def tree_to_dfs_rec(arbre, chemin):\n        if arbre[0][0] is not None:\n            return chemin\n        return (tree_to_dfs_rec(arbre[1], chemin + \"0\")\n                + tree_to_dfs_rec(arbre[2], chemin + \"1\"))\n\n    return tree_to_dfs_rec(arbre, \"\")\n\n#----- Manipulations de ces fonctions.\n\n# Partie codage du fichier : \nfichier = \"FICHIER_ESSAI_HUFFMAN.txt\"\n\n# fichier = \"test_file.txt\"\n\n#fichier = \"Exo_Codage-Huffman-Simple.py\" # Pour coder le fichier source...\nliste_feuilles = construit_liste_ss_arbres_caracteres_nombres(fichier, False)\narbre = construit_arbre_huffman_depuis_liste(liste_feuilles)\nprint(arbre)\ntable = construit_table_codage_depuis_arbre_huffman(arbre)\nmessage_codé = code_fichier(fichier, table) # Codage Huffman en bin. du fichier \n\nprint(f\"Le message codé est :\\n{message_codé}\")\nprint(10*\"---\")\nprint(f\"La taille du message codé est de : {len(message_codé)} bits, soit \" +\n      f\"{len(message_codé)/8} octets.\")\nprint(10*\"---\")\n\n# Partie décodage :\n\nmessage_décodé = decode_message(message_codé, arbre)\nprint(f\"Le message décodé est : \\n{message_décodé}\")\n\nprint(\"DFS de l'arbre\")\nprint(tree_to_dfs(arbre))","repo_name":"MateoAllegre/travaux-mrp","sub_path":"Huffman/Exo_Codage-Huffman-Simple.py","file_name":"Exo_Codage-Huffman-Simple.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15325558296","text":"from django.db import models\n\nfrom api.models.room.room import Room\n\n\nclass RoomAd(models.Model):\n    room = models.ForeignKey(Room, on_delete=models.PROTECT)\n    click = models.IntegerField(default=0)  # 10x click\n    expired_date = models.DateTimeField()\n    start_date = models.DateTimeField(auto_now_add=True)\n    active = models.BooleanField(default=True)\n","repo_name":"lukaskris/rent_backend","sub_path":"api/models/ads/room_ad.py","file_name":"room_ad.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71766833475","text":"hora = input('Digite a hora: ')\n\nif hora.isdigit():\n    hora = int(hora)\n    if hora < 0 or hora > 23:\n        print('horario deve estar entre 0 e 23 ')\n    else:\n        if hora <= 11:\n            print('Bom dia')\n        elif hora <= 17:\n            print('Boa tarde')\n        else:\n            print('Boa noite')\nelse:\n    print('por favor digite um horario entre 0 e 23')\n\n\n","repo_name":"YuriScorsolino/CursoPython","sub_path":"desafio2/hora.py","file_name":"hora.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34511932247","text":"class ChildrenCamp:\r\n    def __init__(self, child_id, chocolates_received):\r\n        self.child_id = child_id\r\n        self.chocolates_received = chocolates_received\r\n\r\n    def calculate_total_chocolates(self):\r\n        return sum(self.chocolates_received)\r\n\r\n    def reward_child(self, child_id_rewarded, extra_chocolates):\r\n        if extra_chocolates < 1:\r\n            return \"Extra chocolates is less than 1\"\r\n        if child_id_rewarded not in self.child_id:\r\n            return \"Child id is invalid\"\r\n        
index = self.child_id.index(child_id_rewarded)\r\n        self.chocolates_received[index] += extra_chocolates\r\n        return self.chocolates_received\r\n\r\nchild_id = (10, 20, 30, 40, 50)\r\nchocolates_received = [12, 5, 3, 4, 6]\r\n\r\ncamp = ChildrenCamp(child_id, chocolates_received)\r\nprint(camp.calculate_total_chocolates())\r\n\r\nprint(camp.reward_child(30, 2))\r\n\r\nprint(camp.calculate_total_chocolates())\r\n\r\nprint(camp.reward_child(60, 2))\r\n\r\nprint(camp.reward_child(40, 0))\r\n","repo_name":"SAPNESWAR/DS-ALGO-PYTHON","sub_path":"Day 6/ChildrenCamp.py","file_name":"ChildrenCamp.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25894299295","text":"import utime\n\nfrom components.uart_handler import UARTHandler\nfrom components.settings import *\n\nclass ESP8266:\n    def __init__(self, pin, baudrate):\n        self.__uart_handler = UARTHandler(pin, baudrate)\n    \n    \"\"\" HELPERS \"\"\"\n    def __get_timestamp(self):\n        # utime.localtime() -> (year, month, mday, hour, minute, second, weekday, yearday);\n        # distinct names avoid the original bug where 'm' was bound twice (month, then minute)\n        y, mo, md, h, mi, s, wd, yd = utime.localtime()\n        return \"{}/{}/{} {}:{}:{}\".format(y, mo, md, h, mi, s)\n    \n    def __log(self, msg):\n        print(\"[ESP8266] {}\\t- {}\".format(\n            self.__get_timestamp(), msg))\n    \n    \"\"\"\n    BASIC COMMANDS\n    \"\"\"\n    def start(self):\n        response = self.__uart_handler.send_receive_cmd(STARTUP)\n        if \"OK\" in response: self.__log(\"Started\")\n        else: self.restart()\n\n    def restart(self):\n        response = self.__uart_handler.send_receive_cmd(RESTART, timeout=5000)\n        if \"OK\" in response: self.__log(\"Restarted\")\n        else: self.__log(\"Failed to restart\")\n    \n    \"\"\"\n    WI-FI COMMANDS\n    \"\"\"\n    def get_mode(self):\n        response = self.__uart_handler.send_receive_query(WIFI_MODE)\n        mode = -1\n        if \"OK\" in response:\n            mode = response.split(\"\\n\")[0].split(\":\")[1]\n            self.__log(\"Works in {} mode\".format(\n                WIFI_MODES.get(int(mode))))\n        else:\n            self.__log(\"Failed to query WiFi mode\")\n        return WIFI_MODES.get(int(mode))\n    \n    def set_mode(self, mode):\n        response = self.__uart_handler.send_receive_cmd(WIFI_MODE, mode, timeout=2000)\n        if \"OK\" in response: self.__log(\"WiFi mode is set to {}\".format(WIFI_MODES.get(mode)))\n        else: self.__log(\"Failed to set the WiFi mode to {}\".format(WIFI_MODES.get(mode)))\n    \n    def list_access_points(self):\n        response = self.__uart_handler.send_receive_cmd(LIST_APS, timeout=10000)\n        for access_point in response.split(\"\\n\"):\n            ap_entries = access_point.split(\",\")\n            if len(ap_entries) > 1:\n                ap_name = ap_entries[1]\n                self.__log(\"Access Point {} is in range.\".format(ap_name))\n                utime.sleep_ms(250)\n    \n    def join_access_point(self, ssid, pwd):\n        ssid, pwd = \"\\\"{}\\\"\".format(ssid), \"\\\"{}\\\"\".format(pwd)\n        response = self.__uart_handler.send_receive_cmd(\n            WIFI_CONNECTION, ssid, pwd, timeout=5000)\n        if \"OK\" in response: self.__log(\"Joined to {}\".format(ssid))\n        else: self.__log(\"Failed to join to {}\".format(ssid))\n    \n    def get_access_point(self):\n        response = self.__uart_handler.send_receive_query(WIFI_CONNECTION)\n    \n    def quit_access_point(self):\n        response = self.__uart_handler.send_receive_cmd(WIFI_DISCONNECT)\n        if \"OK\" in response: self.__log(\"Quit from access point\")\n        else: self.__log(\"Failed to quit from access point\")\n    \n    def get_static_ip(self):\n        response = self.__uart_handler.send_receive_query(GET_SET_IP)\n        addrs = dict()\n        if \"OK\" in response:\n            for entry in response.split(\"\\n\")[0:-2]:\n                entry_parts = entry.split(\":\")\n                if len(entry_parts) > 1:\n                    name = entry.split(\":\")[1]\n                    value = 
entry.split(\":\")[2]\n                    addrs[name] = value\n                    self.__log(\"{}: {}\".format(name.upper(), value))\n                    utime.sleep_ms(250)\n        else:\n            self.__log(\"Failed to get static addresses\")\n        return addrs.get(\"ip\")\n    \n    def set_static_ip(self, ip):\n        ip = \"\\\"{}\\\"\".format(ip)\n        response = self.__uart_handler.send_receive_cmd(GET_SET_IP, ip, timeout=2000)\n        if \"OK\" in response: self.__log(\"IP Address is set to {}\".format(ip))\n        else: self.__log(\"Failed to set the IP Address to {}\".format(ip))\n    \n    \"\"\"\n    TCP COMMANDS\n    \"\"\"\n    def get_status(self):\n        response = self.__uart_handler.send_receive_cmd(CONN_STATUS)\n        status = -1\n        if \"OK\" in response:\n            status = response.split(\"\\n\")[0]\n            status = status.split(\":\")[1]\n            self.__log(CONNECTION_STATUS.get(int(status)))\n        else:\n            self.__log(\"Failed to get connection status.\")\n        return CONNECTION_STATUS.get(int(status))\n    \n    def start_connection(self, conn_type, remote_ip, remote_port):\n        conn_type, remote_ip = \"\\\"{}\\\"\".format(conn_type), \"\\\"{}\\\"\".format(remote_ip)\n        response = self.__uart_handler.send_receive_cmd(\n            CONN_START, conn_type, remote_ip, str(remote_port), timeout=2000)\n        if \"OK\" in response:\n            self.__log(\"Connected to {} at port {}.\".format(\n                remote_ip, remote_port))\n        else:\n            self.__log(\"Failed to create a connection with {} at port {}.\".format(\n                remote_ip, remote_port))\n    \n    def close_connection(self):\n        response = self.__uart_handler.send_receive_cmd(CONN_CLOSE)\n        if \"OK\" in response: self.__log(\"Disconnected\")\n        else: self.__log(\"Failed to disconnect\")\n    \n    def get_connection_mode(self):\n        # note: a trailing comma here originally turned `response` into a 1-tuple\n        response = self.__uart_handler.send_receive_query(CONN_MODE)\n        mode = -1\n        if \"OK\" in response: \n            mode = response.split(\"\\n\")[0].split(\":\")[1]\n            self.__log(\"Connection mode is {}.\".format(\n                CONNECTION_MODES.get(int(mode))))\n        else:\n            self.__log(\"Failed to query the connection mode.\")\n        return CONNECTION_MODES.get(int(mode))\n    \n    def set_connection_mode(self, mode):\n        response = self.__uart_handler.send_receive_cmd(TCP_MODE, mode, timeout=2000)\n        if \"OK\" in response: self.__log(\"Connection mode is set to {}\".format(\n            CONNECTION_MODES.get(mode)))\n        else: self.__log(\"Failed to set the connection mode to {}\".format(\n            CONNECTION_MODES.get(mode)))\n    \n    def send_receive_data(self, data):\n        response = self.__uart_handler.send_receive_cmd(SEND, len(data), timeout=2000)\n        if \"OK\" in response:\n            response = self.__uart_handler.send_receive_data(data)\n            print(response)\n            self.__log(\"Data is sent and response received\")\n        else:\n            self.__log(\"Failed to send data and receive response\")\n","repo_name":"canbatuhan/pico-talks","sub_path":"components/esp8266.py","file_name":"esp8266.py","file_ext":"py","file_size_in_byte":6190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"27799007594","text":"import threading\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\n\r\n# start driver and chromedriver.exe\r\ndriver = webdriver.Chrome()\r\ndriver.get(\"https://www.tetralark.com/ClickerJs/\")\r\n\r\n# thread of click me button\r\nclickme_button = driver.find_element(By.XPATH, \"//button[text()='Click me']\")\r\n\r\n\r\ndef clickme(button, boolean):\r\n    while boolean:\r\n        button.click()\r\n\r\ndef new_upgrade():\r\n    while True:\r\n        ask = int(input(\"Which upgrade do you want to improve ? 
\"))-1\r\n        ntimes = int(input(\"How many times? \"))\r\n        new_upgrade = driver.find_element(By.CSS_SELECTOR, f\"button[data-reactid='.0.3.0.${ask}.${ask}.${ask}']\")\r\n        thread = threading.Thread(target=buy_upgrade, args=(new_upgrade, ntimes))\r\n        thread.start()\r\n\r\n\r\ndef buy_upgrade(upgrade_button, times):\r\n    actual = 0\r\n    while actual < times:\r\n        try:\r\n            if upgrade_button.is_enabled():\r\n                upgrade_button.click()\r\n                actual += 1\r\n        except:\r\n            continue\r\n\r\n\r\n# buy_upgrade is defined above so the threads started below can resolve it safely\r\nthread = threading.Thread(target=clickme, args=(clickme_button, True))\r\nthread.start()\r\n\r\nthread = threading.Thread(target=new_upgrade)\r\nthread.start()\r\n\r\n\r\n# buy upgrades\r\nupgrade_number = 0\r\n\r\nwhile True: # .0.3.0.$x.$x.$x\r\n\r\n    try:\r\n        upgrade = driver.find_element(By.CSS_SELECTOR, f\"button[data-reactid='.0.3.0.${upgrade_number}.${upgrade_number}.${upgrade_number}']\")\r\n        upgrade.click()\r\n        thread = threading.Thread(target=buy_upgrade, args=(upgrade, 1000))\r\n        thread.start()\r\n        upgrade_number += 1\r\n\r\n    except:\r\n        continue\r\n\r\nprint(\"GOOD !\")\r\ninput()\r\n\r\ndriver.quit()\r\n","repo_name":"KataKatis/clicker_bot_game","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23744626278","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass TreeNode(object):\n    def __init__(self, key):\n        self.key = key\n        self.left = None\n        self.right = None\n        # note: no self.height attribute here - it would shadow the height() method below\n        self.inorder_pos = 0\n\n    # changes to keep the tree balanced - tree rotations \n    # Case1 : height(node.left) = height(node.right) + 2\n    #   - Case1a: height(node.left.left) > height(node.left.right) \n    #       - height(node.left.left) = h + 1\n    #       - clockwise rotation about node makes both left and right height of h + 1\n    #\n    #   - Case1b: height(node.left.left) < height(node.left.right)\n    #       - more annoying because a rotation about node will not balance this\n    #       - both of the children of node.left.right have a height of <= h\n    #       - counter clockwise rotation about node.left makes height(node.left.left) > height(node.left.right)\n    #       - turns into case1a\n\n    def add(self, key):\n        ret = self\n        if key < self.key:\n            if self.left:\n                self.left.add(key)\n            else:\n                self.left = TreeNode(key)\n        elif key > self.key:\n            if self.right:\n                self.right.add(key)\n            else:\n                self.right = TreeNode(key)\n\n        # rebalance \n\n        #return\n\n\n    def height(self):\n        # treat a missing child as height -1 so a leaf has height 0\n        left_height = self.left.height() if self.left else -1\n        right_height = self.right.height() if self.right else -1\n        return 1 + max(left_height, right_height)\n    \n    def inorder(self, num, key_list):\n        \"\"\"\n        Parameters\n        ----------\n        num: list\n            List of a single element which keeps \n            track of the number I'm at\n        \"\"\"\n        if self.left:\n            self.left.inorder(num, key_list)\n        self.inorder_pos = num[0]\n        key_list.append(self.key)\n        num[0] += 1\n        if self.right:\n            self.right.inorder(num, key_list)\n    \n    def draw(self, y):\n        x = self.inorder_pos\n        plt.scatter([x], [y], 50, 'k')\n        plt.text(x+0.2, y, \"{}\".format(self.key))\n        y_next = y-1\n        if self.left:\n            x_next = self.left.inorder_pos\n            plt.plot([x, x_next], [y, y_next])\n            self.left.draw(y_next)\n        if self.right:\n            x_next = self.right.inorder_pos\n            plt.plot([x, x_next], [y, y_next])\n            self.right.draw(y_next)\n\n    def remove(self, node):\n        if node > self.key and self.right:\n            self.right = self.right.remove(node)\n            return self\n        elif node < self.key and self.left:\n            self.left = self.left.remove(node)\n            return self\n        else:\n            if self.left and self.right:\n                
# get the highest number in the left sub tree\n                cursor = self.left\n                while cursor.right:\n                    cursor = cursor.right\n                # replace the node we want to remove with that \n                self.key = cursor.key\n                # remove the one we replaced with \n                self.left = self.left.remove(cursor.key)\n                return self\n            else:\n                if self.left:\n                    return self.left\n                else: \n                    return self.right\n\n\nclass BinaryTree(object):\n    def __init__(self):\n        self.root = None\n    \n    def inorder(self):\n        key_list = []\n        if self.root:\n            self.root.inorder([0], key_list)\n        return key_list\n    \n    def draw(self):\n        self.inorder()\n        if self.root:\n            self.root.draw(0)\n    \n    def add(self, key):\n        if self.root:\n            self.root.add(key)\n        else:\n            self.root = TreeNode(key)\n    \n    def remove(self, node):\n        if self.root:\n            # remove() returns the replacement subtree, so reassign the root\n            self.root = self.root.remove(node)\n\n    def height(self, node):\n        return node.height()\n\ndef make_tree():\n    T = BinaryTree()\n    for val in [10, 7, 16, 3, 9, 11, 20, 14, 17, 13, 12, 6, 5, 17, 15, 4, 8]:\n        T.add(val)\n    return T\n\nT = make_tree()\nT.draw()\nprint(T.height(T.root))\n\n\n","repo_name":"Duntron1000/CS271","sub_path":"classNotes/fri-11-3.py","file_name":"fri-11-3.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9836683365","text":"from slackbot.bot import respond_to\nfrom slackbot.bot import listen_to\nfrom slackbot.bot import idle\nimport re\nimport time\nimport datetime  # needed by respondtime/listentime below\n\n@respond_to('stat$', re.IGNORECASE)\n@respond_to('stat (.*) (.*)', re.IGNORECASE)\ndef stats(message, start_date=None, end_date=None):\n    print(\"stats!\")\n    message.reply(\"okay stats\")\n    message.reply(start_date)\n    message.reply(end_date)\n\n\n@respond_to('respondtime', re.IGNORECASE)\ndef respondtime(message):\n    message.reply(\"respond: current time =\" + str(datetime.datetime.now()))\n\n@listen_to('listentime', re.IGNORECASE)\ndef listentime(message):\n    message.reply(\"listen: current time =\" + str(datetime.datetime.now()))\n\n@respond_to(\"who?\", re.IGNORECASE)\ndef who(message):\n    message.reply('I am a friendly bot.')\n    details = \"\"\n    details += \"text = '{}'\\n\".format(message.body[\"text\"])\n    details += \"ts = '{}'\\n\".format(message.body[\"ts\"])\n    details += \"user id = '{}'\\n\".format(message.body[\"user\"])\n    details += \"user name = '{}'\\n\".format(message._client.users.get(message.body[\"user\"])[\"name\"])\n    details += \"team id = '{}'\\n\".format(message.body[\"team\"])\n    details += \"type = '{}'\\n\".format(message.body[\"type\"])\n    details += \"channel = '{}'\\n\".format(message.body[\"channel\"])\n    message.reply('```{}```'.format(details))\n","repo_name":"dantaeyoung/messy.io","sub_path":"_DEVELOPMENT/messybot/plugins/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27185079653","text":"# Function for nth Fibonacci number with user input\n\nn = (int(input(\"Enter A Whole Number Greater Than Zero For The Fibonacci Sequence, Please. Then, Press Enter: \")))\n\ndef fibonacci(n):\n    if n <= 0:\n        print(\"Sorry! 
Please enter a number greater than zero.\")\n # First Fibonacci number is 0\n elif n == 1:\n return 0\n # Second Fibonacci number is 1\n elif n == 2:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)\n\n# print(fibonacci(n))","repo_name":"larryquinto/STG-Python-Challenges","sub_path":"challenge4/fibonacci_sequence.py","file_name":"fibonacci_sequence.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3673842130","text":"import unittest\nfrom pprint import pprint\n\nimport numpy\nimport pandas\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n\nfrom pytolemaic.analysis_logic.model_analysis.sensitivity.sensitivity_reports import SensitivityFullReport\nfrom pytolemaic.pytrust import PyTrust\nfrom pytolemaic.utils.constants import CLASSIFICATION, REGRESSION\nfrom pytolemaic.utils.dmd import DMD\nfrom pytolemaic.utils.general import GeneralUtils\nfrom pytolemaic.utils.metrics import Metrics\n\n\nclass TestSensitivity(unittest.TestCase):\n\n def get_data(self, is_classification, seed=0):\n rs = numpy.random.RandomState(seed)\n x = rs.rand(200, 3)\n x[:, 1] = 0\n # 1st is double importance, 2nd has no importance\n y = numpy.sum(x, axis=1) + 2 * x[:, 0]\n if is_classification:\n y = numpy.round(y, 0).astype(int)\n return DMD(x=x, y=y,\n columns_meta={DMD.FEATURE_NAMES: ['f_' + str(k) for k in\n range(x.shape[1])]})\n\n def get_model(self, is_classification):\n if is_classification:\n estimator = RandomForestClassifier\n else:\n estimator = RandomForestRegressor\n\n model = GeneralUtils.simple_imputation_pipeline(\n estimator(random_state=0, n_estimators=3))\n\n return model\n\n def get_pytrust(self, is_classification):\n if is_classification:\n metric = Metrics.recall.name\n else:\n metric = Metrics.mae.name\n\n model = self.get_model(is_classification)\n\n train = self.get_data(is_classification)\n model.fit(train.values, train.target.ravel())\n\n test = self.get_data(is_classification, seed=1)\n pytrust = PyTrust(\n model=model,\n xtrain=train.values, ytrain=train.target,\n xtest=test.values, ytest=test.target,\n sample_meta_train=None, sample_meta_test=None,\n columns_meta={DMD.FEATURE_NAMES: ['f' + str(k) for k in\n range(train.n_features)]},\n metric=metric)\n\n return pytrust\n\n def test_pytrust_sensitivity_classification(self):\n is_classification = True\n metric = Metrics.recall.name\n\n model = self.get_model(is_classification)\n\n train = self.get_data(is_classification)\n model.fit(train.values, train.target.ravel())\n\n test = self.get_data(is_classification, seed=1)\n pytrust = PyTrust(\n model=model,\n xtrain=train.values, ytrain=train.target,\n xtest=test.values, ytest=test.target,\n sample_meta_train=None, sample_meta_test=None,\n columns_meta={DMD.FEATURE_NAMES: ['f' + str(k) for k in\n range(train.n_features)]},\n metric=metric)\n\n sensitivity_report = pytrust.sensitivity_report\n print(sensitivity_report.to_dict())\n self.assertTrue(isinstance(sensitivity_report, SensitivityFullReport))\n for key, value in sensitivity_report.__dict__.items():\n if key.startswith('_'):\n continue\n print(key)\n self.assertTrue(value is not None)\n\n pytrust = PyTrust(\n model=model,\n xtrain=pandas.DataFrame(train.values),\n ytrain=pandas.DataFrame(train.target),\n xtest=pandas.DataFrame(test.values),\n ytest=pandas.DataFrame(test.target),\n sample_meta_train=None, sample_meta_test=None,\n columns_meta={DMD.FEATURE_NAMES: ['f' + str(k) for k in\n 
range(train.n_features)]},\n metric=metric)\n\n sensitivity_report2 = pytrust.sensitivity_report\n pprint(sensitivity_report.to_dict())\n self.maxDiff = None\n self.assertEqual(sensitivity_report2.shuffle_report.sensitivities, sensitivity_report.shuffle_report.sensitivities)\n self.assertEqual(sensitivity_report2.missing_report.sensitivities, sensitivity_report.missing_report.sensitivities)\n\n def test_pytrust_scoring_report(self):\n\n pytrust = self.get_pytrust(is_classification=True)\n\n scoring_report = pytrust.scoring_report\n\n for metric in Metrics.supported_metrics().values():\n if metric.ptype == CLASSIFICATION:\n metric_report = scoring_report.metric_scores[metric.name]\n score_value = metric_report.value\n ci_low = metric_report.ci_low\n ci_high = metric_report.ci_high\n\n self.assertTrue(ci_low < score_value < ci_high)\n\n pytrust = self.get_pytrust(is_classification=False)\n\n scoring_report = pytrust.scoring_report\n for metric in Metrics.supported_metrics().values():\n if metric.ptype == REGRESSION:\n metric_report = scoring_report.metric_scores[metric.name]\n score_value = metric_report.value\n ci_low = metric_report.ci_low\n ci_high = metric_report.ci_high\n\n self.assertTrue(ci_low < score_value < ci_high)\n\n def test_pytrust_report(self):\n pytrust = self.get_pytrust(is_classification=True)\n\n pprint(pytrust.report.insights())\n pprint(pytrust.report.to_dict())\n pprint(pytrust.report.to_dict_meaning())\n pprint(pytrust.report.plot())\n","repo_name":"Broundal/Pytolemaic","sub_path":"tests/integrative_tests/pytrust/test_pytrust.py","file_name":"test_pytrust.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"40096426116","text":"import re\nfrom nltk.corpus import stopwords\n\n\ndef review_to_wordlist(plots):\n\n review_text = re.sub(\"[^a-zA-Z]\", \" \", plots)\n words = review_text.lower().split()\n stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n\n return words\n\n\ndef review_to_sentences_list(plots, tokenizer):\n\n raw_sentences = tokenizer.tokenize(plots.strip())\n sentences = []\n for raw_sentence in raw_sentences:\n if len(raw_sentence) > 0:\n sentences.append(review_to_wordlist(raw_sentence))\n\n return sentences\n\n\ndef review_to_wordsentence(reviews):\n l = list()\n for review in reviews:\n review_text = re.sub(\"[^a-zA-Z]\", \" \", review)\n words = review_text.lower().split()\n stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n l.append(words)\n\n return l","repo_name":"ksenyavasina/Cell-Phones-and-Accessories-","sub_path":"function_review_NLP.py","file_name":"function_review_NLP.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26545798592","text":"import re\n\npaths = [\"tool_support_common.tex\", \"wait_id.tex\", \"frames.tex\", \"thread_states.tex\", \"tool_support_entrypoints.tex\", \"start_tool.tex\", \"tool_support_callbacks.tex\",]\npaths.extend([\"tool_support_debug.tex\", \"ompd_dll.tex\", \"ompd/ompd_activating_a_third_party_tool.tex\", \"ompd/ompd_introduction.tex\", \"ompd/ompd_third_party_callback_interface.tex\", \"ompd/ompd_data_types_for_third_party_tools.tex\", \"ompd/ompd_runtime_entry_points.tex\", \"ompd/ompd_third_party_tool_interface_routines.tex\"])\n\n# These type definitions are used in other definitions and must be placed at the beginning of 
the header file\nimportant = [\"typedef void \\(\\*ompt_interface_fn_t\\)\",\n \"typedef union ompt_data_t\",\n \"typedef struct ompt_frame_t\",\n \"typedef void \\(\\*ompt_callback_t\\)\",\n \"typedef void ompt_device_t\",\n \"typedef void ompt_buffer_t\",\n \"typedef void \\(\\*ompt_callback_buffer_request_t\\)\",\n \"typedef void \\(\\*ompt_callback_buffer_complete_t\\)\",\n \"typedef void \\(\\*ompt_finalize_t\\)\",\n \"typedef int \\(\\*ompt_initialize_t\\)\",\n ]\n\nlast = [\"typedef struct ompt_record_ompt_t\", \"typedef ompt_record_ompt_t \\*\\(\\*ompt_get_record_ompt_t\\)\"]\n# Read all files\ncontent = \"\"\nfor path in paths:\n with open(\"../../tool_support/\"+path) as file:\n content+=file.read()\n\nsplitted = content.split(\"{ccppspecific}\");\ndefs = [splitt for i,splitt in enumerate(splitted) if i%2 == 1 and re.search(\"begin{omp\", splitt) != None]\nsplitted = content.split(\"{cspecific}\");\ndefs += [splitt for i,splitt in enumerate(splitted) if i%2 == 1 and re.search(\"begin{omp\", splitt) != None]\n\n# Remove unwanted parts\nfor i,deff in enumerate(defs):\n deff = re.sub(r\"^.*\\\\begin{[^}]*}\", \"\", deff, flags=re.DOTALL)\n defs[i] = re.sub(r\"\\\\end.*$\", \"\", deff, flags=re.DOTALL)\n\n# Split into the definitions that must come first and the remaining ones \nenum_defs = list(filter(lambda x: re.search(\"typedef (enum|uint64_t)\", x) != None, defs))\nfirst_defs = [filter(lambda x: re.search(i, x) != None, defs)[0] for i in important]\nlast_defs = [filter(lambda x: re.search(i, x) != None, defs)[0] for i in last] \nrem_defs = [d for d in defs if d not in first_defs and d not in enum_defs and d not in last_defs]\n\ndefs = \"\\n\".join(enum_defs+first_defs+rem_defs+last_defs)\n\ndefs = \"\"\"\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\"\"\" + defs + \"\"\"\n#define ompt_id_none 0\n#define ompt_data_none {0}\n#define ompt_time_none 0\n#define ompt_hwid_none 0\n#define ompt_addr_none ~0\n#define ompt_mutex_impl_none 0\n#define ompt_wait_id_none 0\n\n#define ompd_segment_none 0\n\n#ifdef __cplusplus\n} // extern \"C\"\n#endif\n\"\"\"\n\n# Remove unwanted parts and newlines\ndefs = re.sub(r\"\\\\plc{([^}]*)}\", r\"\\1\", defs)\ndefs = re.sub(r\"\\n\\n\\n\", \"\\n\\n\", defs)\ndefs = re.sub(r\"^\\n\", \"\", defs)\n\n# Write to file\nwith open(\"omp-tools.h\", \"w+\") as file:\n file.write(str(defs))\n","repo_name":"gregrodgers/spec.old","sub_path":"util/omp-tools-header-generator/header_extract.py","file_name":"header_extract.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30464108960","text":"from Interpreter.tokenizer import Tokenizer\n\n\nif __name__ == \"__main__\":\n\t# t = Tokenizer(\" 45 + 6 \")\n\tt = Tokenizer(\" 44.54 + 66 \")\n\tprint(\"Initial expression:\", t)\n\n\ttokens = t.get_tokens()\n\tprint(\"\\nTokenization result:\")\n\tfor i in tokens:\n\t\tprint(i)\n","repo_name":"MS-17/programming_languages","sub_path":"my_interpreter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11511883431","text":"def solution(answers):\n p1 = [1,2,3,4,5]\n p2 = [2,1,2,3,2,4,2,5]\n p3 = [3,3,1,1,2,2,4,4,5,5]\n correct_1 = []\n correct_2 = []\n correct_3 = []\n answer = []\n\n for i in range(len(answers)):\n if answers[i] == p1[i%5]:\n correct_1.append(answers[i])\n if answers[i] == p2[i%8]:\n correct_2.append(answers[i])\n if 
answers[i] == p3[i%10]:\n            correct_3.append(answers[i])\n\n        a = len(correct_1)\n        b = len(correct_2)\n        c = len(correct_3)\n\n        x = [a,b,c]\n\n        if max(x) == a:\n            answer.append(1)\n        if max(x) == b:\n            answer.append(2)\n        if max(x) == c:\n            answer.append(3)\n        return answer\n","repo_name":"updaun/PythonBasic","sub_path":"Programmers/full_exploration.py","file_name":"full_exploration.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29945323754","text":"# This is the class you derive to create a plugin\nfrom airflow.plugins_manager import AirflowPlugin\n\nfrom flask import Blueprint\nfrom flask_admin import BaseView, expose\n\n\nclass Longitudinal(BaseView):\n    @expose('/')\n    def main(self):\n        # Connect to the database (MySQL) that is being run locally (VM).\n\n        # JSON queries\n\n        return self.render(\"mm_data_over_time/trends.html\",\n                           content=\"Hello Longitudinal Quality Statistics!\")\n\n\nv = Longitudinal(category=\"Trends\", name=\"Longitudinal Visualization\")\n\n# Creating a flask blueprint to integrate the templates and static folder\nbp = Blueprint(\n    \"data_over_time\", __name__,\n    # registers airflow/plugins/templates as a Jinja template folder\n    template_folder='templates',\n    static_folder='static',\n    static_url_path='/static/test_plugin')\n\n\n# Defining the plugin class\nclass AirflowTestPlugin(AirflowPlugin):\n    name = \"test_plugin\"\n    admin_views = [v]\n    flask_blueprints = [bp]\n    # menu_links = [ml]\n","repo_name":"kehv1n/Airflow-Custom-DAGS","sub_path":"plugins/mm_data_trend.py","file_name":"mm_data_trend.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22643676394","text":"import os, time\n\n# shows how to set up a pipe between two python scripts\n\n# run with\n# python pipe-background.py &\n# and then run\n# python pipe-foreground.py\n#\n# ctrl-c to exit foreground\n#\n# ps -aux | grep pipe\n# lists pid\n# sudo kill \n\n\npipe_name = \"/tmp/mypipe\"\n\npipe = open(pipe_name, 'w', 1)\npipe.write(\"opening pipe\\n\")\n\ni = 0\nwhile True:\n    time.sleep(1)\n\n    pipe.write(\"sending message \" + str(i) + \"\\n\")\n    i += 1\n","repo_name":"dumbo25/smart-doorbell","sub_path":"pipe_background.py","file_name":"pipe_background.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32701099635","text":"#!/usr/bin/python\n\nimport datetime\nfrom operator import itemgetter\n\nimport apiclient.discovery\nimport apiclient.errors\nfrom googleapiclient.discovery import build\nfrom oauth2client.tools import argparser\nimport urllib.request\nimport urllib.parse  # quote_plus is used below; import it explicitly\nimport json\nfrom collections import OrderedDict\nimport pprint\n\nAIM = \"a\"\n\ndef nico_search(qu, time, con, view):\n    qu = qu.replace(\"\\n\", \"\")\n    q = urllib.parse.quote_plus(qu, encoding='utf-8')\n    u = urllib.request.urlopen('http://api.search.nicovideo.jp/api/v2/video/contents/search?q='+q+'&targets=tags&fields=contentId,title,viewCounter,startTime&filters[startTime][gte]='+time+'&_sort=-viewCounter&_offset=0&_limit=20&_context=apiguide')\n    t = u.read()\n    e = json.loads(t)\n    youso = []\n    count = 0\n\n    for i in e[\"data\"]:\n        count += int(i[\"viewCounter\"])\n        if i[\"contentId\"] not in con:\n            youso.append([qu, i[\"title\"], i[\"contentId\"], int(i[\"viewCounter\"]), i[\"startTime\"]])\n            con.append(i[\"contentId\"])\n    
view.append(count)\n u.close()\n return youso, view\n\ndef main(aim):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n now_list = now.split(\"-\")\n today = datetime.date.today()\n\n if aim == \"week\":\n time = today - datetime.timedelta(days = 7)\n elif aim == \"month\":\n time = today - datetime.timedelta(days = 30)\n else:\n time = today - datetime.timedelta(days = 90)\n\n time_list = (time.strftime('%Y/%m/%d')).split(\"/\")\n time_aim = time_list[0] + \"-\" + time_list[1] + \"-\" + time_list[2] + \"T\" + now_list[3] + \":\" +now_list[4]\n\n with open('nicotag.txt', 'r') as fr:\n data = fr.readlines()\n record_list = []\n con = []\n view = []\n for q in data:\n record, view = nico_search(q, time_aim, con, view)\n record_list+=record\n #print(q)\n record_list.sort(key=itemgetter(3), reverse=True)\n #print(view)\n return record_list\n\nif __name__ == \"__main__\":\n a = main(AIM)\n for i in range(10):\n print(i+1, a[i])\n","repo_name":"hikarusuzukicloud/webcomp_t","sub_path":"nicovideo_ranking.py","file_name":"nicovideo_ranking.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32196689539","text":"from typing import List, Optional\n\nfrom ..bits import align\nfrom ..pe import PE\nfrom .binmem import ProcessMemoryBinary\nfrom .procmem import ProcessMemoryBuffer\nfrom .region import Region\n\n__all__ = [\"ProcessMemoryPE\", \"procmempe\"]\n\n\nclass ProcessMemoryPE(ProcessMemoryBinary):\n \"\"\"\n Representation of memory-mapped PE file\n\n Short name: `procmempe`\n\n :param buf: A memory object containing the PE to be loaded\n :type buf: bytes, mmap, memoryview, bytearray or :py:meth:`MemoryBuffer` object\n\n :param base: Virtual address of the region of interest (or beginning of buf when no regions provided)\n :type base: int, optional (default: 0)\n\n :param image: The memory object is a dump of memory-mapped PE\n :type image: bool, optional (default: False)\n\n :param detect_image: Try to automatically detect if the input buffer is memory-mapped PE using some heuristics\n :type detect_image: bool, optional (default: False)\n\n File `memory_dump` contains a 64bit memory-aligned PE dumped from address `0x140000000`, in order to load it\n into procmempe and access the `pe` field all we have to do is initialize a new object with the file data:\n\n .. code-block:: python\n\n from malduck import procmempe\n\n with open(\"memory_dump\", \"rb\") as f:\n data = f.read()\n\n pe_dump = procmempe(buf=data, base=0x140000000, image=True)\n print(pe_dump.pe.is64bit)\n\n\n PE files can also be read directly using inherited :py:meth:`ProcessMemory.from_file` with `image` argument set\n (look at :py:meth:`from_memory` method).\n\n .. 
code-block:: python\n\n pe_dump = procmempe.from_file(\"140000000_1d5bdc3dbe71a7bd\", image=True)\n print(pe_dump.pe.sections)\n \"\"\"\n\n __magic__ = b\"MZ\"\n\n def __init__(\n self,\n buf: ProcessMemoryBuffer,\n base: int = 0,\n regions: Optional[List[Region]] = None,\n image: bool = False,\n detect_image: bool = False,\n ) -> None:\n self._pe: Optional[PE] = None\n super(ProcessMemoryPE, self).__init__(\n buf, base=base, regions=regions, image=image, detect_image=detect_image\n )\n\n def _pe_direct_load(self, fast_load: bool = True) -> PE:\n offset = self.v2p(self.imgbase)\n if offset is None:\n raise ValueError(\"imgbase out of regions\")\n # Expected m type: bytearray\n m = bytearray(self.readp(offset))\n pe = PE(data=m, fast_load=fast_load)\n return pe\n\n def _reload_as_image(self) -> None:\n # Load PE data from imgbase offset\n pe = self._pe_direct_load(fast_load=False)\n # If mmap: close all descriptors or\n # nullify references if mmap is not owned by current object\n self.close()\n # Set memory to the pe.data buffer\n self.memory = bytearray(pe.data)\n self.imgbase = pe.optional_header.ImageBase\n # Reset regions\n self.regions = [Region(self.imgbase, pe.headers_size, 0, 0, 0, 0)]\n # Load image sections\n for section in pe.sections:\n if section.SizeOfRawData > 0:\n self.regions.append(\n Region(\n self.imgbase + section.VirtualAddress,\n section.SizeOfRawData,\n 0,\n 0,\n 0,\n section.PointerToRawData,\n )\n )\n\n def is_valid(self) -> bool:\n if self.readv(self.imgbase, 2) != self.__magic__:\n return False\n pe_offs = self.uint32v(self.imgbase + 0x3C)\n if pe_offs is None:\n return False\n if self.readv(self.imgbase + pe_offs, 2) != b\"PE\":\n return False\n try:\n PE(self)\n return True\n except Exception:\n return False\n\n def is_image_loaded_as_memdump(self) -> bool:\n \"\"\"\n Checks whether memory region contains image incorrectly loaded as memory-mapped PE dump (image=False).\n\n .. code-block:: python\n\n embed_pe = procmempe.from_memory(mem)\n if not embed_pe.is_image_loaded_as_memdump():\n # Memory contains plain PE file - need to load it first\n embed_pe = procmempe.from_memory(mem, image=True)\n \"\"\"\n pe = self._pe_direct_load(fast_load=True)\n # If import table is corrupted - possible dump\n if not pe.validate_import_names():\n return False\n # If resources are corrupted - possible dump\n if not pe.validate_resources():\n return False\n # If first 4kB seem to be zero-padded - possible dump\n if not pe.validate_padding():\n return False\n # No errors, so it must be PE file\n return True\n\n @property\n def pe(self) -> PE:\n \"\"\"Related :class:`PE` object\"\"\"\n if self._pe is None:\n self._pe = PE(self)\n return self._pe\n\n @property\n def imgend(self) -> int:\n \"\"\"Address where PE image ends\"\"\"\n section = self.pe.sections[-1]\n return self.imgbase + section.VirtualAddress + section.Misc_VirtualSize\n\n def store(self) -> bytes:\n \"\"\"\n Store ProcessMemoryPE contents as PE file data.\n\n :rtype: bytes\n \"\"\"\n data = []\n current_offs = self.pe.headers_size\n # Read headers (until first section page in raw data)\n pe = PE(self.readv(self.imgbase, current_offs), fast_load=True)\n\n for idx, section in enumerate(pe.sections):\n # Find corresponding region\n section_region = self.addr_region(self.imgbase + section.VirtualAddress)\n # No corresponding region? 
BSS.\n if not section_region:\n continue\n # Get possible section size\n section_size = max(section.Misc_VirtualSize, section.SizeOfRawData)\n # Align to section alignment (usually 0x1000)\n section_alignment = max(0x1000, pe.optional_header.SectionAlignment)\n section_size = align(section_size, section_alignment)\n # Sometimes real region size is less than virtual size (image=True)\n section_size = min(section_region.size, section_size)\n # Align to file alignment (usually 0x200)\n file_alignment = max(0x200, pe.optional_header.FileAlignment)\n section_size = align(section_size, file_alignment)\n # Read section data including appropriate padding\n section_data = self.readv(\n self.imgbase + section.VirtualAddress, section_size\n )\n section_data += (section_size - len(section_data)) * b\"\\x00\"\n data.append(section_data)\n # Fix section values\n section.PointerToRawData, section.SizeOfRawData = current_offs, section_size\n current_offs += section_size\n\n pe.optional_header.ImageBase = self.imgbase\n\n # Generate header data\n pe_data = b\"\".join([bytes(pe.pe.write())] + data)\n\n # Return PE file data\n return pe_data\n\n\nprocmempe = ProcessMemoryPE\n","repo_name":"CERT-Polska/malduck","sub_path":"malduck/procmem/procmempe.py","file_name":"procmempe.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","stars":284,"dataset":"github-code","pt":"61"} +{"seq_id":"1980323099","text":"from fastapi import FastAPI, HTTPException\nfrom pydantic import BaseModel\nfrom uuid import uuid4 as uuid\n\napp = FastAPI()\n\nposts = [\n\n]\n\n\nclass User(BaseModel):\n id_usuario: int\n nombre_usuario: str\n apellido_usuario: str\n edad: int\n\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Hello World\"}\n\n\n@app.get(\"/posts\")\ndef obtener_usuarios():\n return posts\n\n\n@app.post(\"/posts\")\ndef guardar_usuario(post: User):\n post.id_usuario = str(uuid()) # esto le dara un id aleatorio y unico\n posts.append(post.dict())\n return posts[-1] # me devolverá la última entrada o post.\n\n\n@app.get(\"/posts/{post_id_usuario}\") # obtener el post mediante un id\ndef obtener_post(post_id_usuario: str):\n print(post_id_usuario)\n for post in posts:\n if post[\"id_usuario\"] == post_id_usuario:\n return post\n raise HTTPException(status_code=404,\n detail=\"post not found\") # usa http exepciton para dar una explicacion de error\n\n\n@app.delete(\"/posts/{post_id_usuario}\")\ndef borrar_post(post_id_usuario: str):\n for i, post in enumerate(posts):\n if post[\"id_usuario\"] == post_id_usuario:\n posts.pop(i)\n return {\"message\": \"la publicacion fue borrada\"}\n return \"received\"\n\n\n@app.put(\"/posts/{post_id_usuario}\")\ndef actualizar_post(post_id_usuario: str, actualizacion_post: User):\n for i, post in enumerate(posts):\n if post[\"id_usuario\"] == post_id_usuario:\n posts[i][\"nombre_usuario\"] = actualizacion_post.nombre_usuario\n posts[i][\"apellido_usuario\"] = actualizacion_post.apellido_usuario\n posts[i][\"edad\"] = actualizacion_post.edad\n return {\"message\": \"la publicacion fue actualizada\"}\n raise HTTPException(status_code=404,\n detail=\"post not found\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"number948/fastApiProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34841728874","text":"\nwith open('updatedWithBlanks.tsv','r') as in_file, open('final_updated.tsv','w') 
as out_file:\n count = 0\n for line in in_file:\n count += 1\n if (count >= 2289):\n items = line.split(\"\\t\")\n for i in range(len(items)):\n if items[i] == \"n/a\":\n items[i] = 0\n newline = \"\\t\".join([str(item) for item in items])\n out_file.write(newline)\n","repo_name":"NeerajRattehalli/legal.io","sub_path":"format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"74959842435","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# @file apriltag.py\n# @brief\n# @author QRS\n# @version 1.0\n# @date 2023-03-20 15:47\n\n\nimport cv2\nfrom apriltags_eth import make_default_detector\n\nCOLUMNS = 6\nSIZE = 3.52 / 100\nSPACING = 1.056 / 100\n\nimg = cv2.resize(cv2.imread('1679033209275314432.png'), (640, 480))\n\n\ndef get_tag_corners_for_id(tag_id):\n # col: x row: y\n # ll lr\n # ul ur\n a = SIZE\n b = SPACING\n tag_row, tag_col = (tag_id) // COLUMNS, (tag_id) % COLUMNS\n left = bottom = lambda i: i * (a + b)\n right = top = lambda i: (i + 1) * a + (i) * b\n return [(left(tag_col), bottom(tag_row)),\n (right(tag_col), bottom(tag_row)),\n (right(tag_col), top(tag_row)), (left(tag_col), top(tag_row))]\n\n\ndetector = make_default_detector()\nids = detector.extract_tags(img)\nids.sort(key=lambda x: x.id)\nfor tag in ids:\n if tag.id in (0, 2, 7, 9):\n print(get_tag_corners_for_id(tag.id))\n print(tag.id, tag.corners)\n for corner in tag.corners:\n x, y = corner \n img = cv2.circle(img, (int(x), int(y)), 2, (0, 0, 255), -1)\n break\n\ncv2.imwrite('out.png', img)\n\n# from aprilgrid import AprilGrid\n# import cv2\n# \n# grid = AprilGrid(7, 6, 2.0/100, 0.5/100)\n# \n# im = cv2.imread('/tmp/april.png')\n# res = grid.compute_observation(im)\n# \n# for image_point, tgt_point in zip(res.image_points, res.target_points):\n# x = int(image_point[0])\n# y = int(image_point[1])\n# tx = tgt_point[0]\n# ty = tgt_point[1]\n# print tgt_point\n# cv2.circle(im, (x, y), 5, (255 - tx/0.1205*200, 255, ty/0.1406*200), -1)\n# \n# cv2.imwrite('/tmp/out.png', im)\n","repo_name":"qrsforever/vision-mcs","sub_path":"test/camera/apriltag.py","file_name":"apriltag.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38309152323","text":"from CSV_UI import ui\r\nfrom Sort_CSV import sort_csv\r\nfrom FileCreater384 import fileCreater\r\n# Press the green button in the gutter to run the script.\r\nif __name__ == '__main__':\r\n file1=ui()\r\n file2=ui()\r\n file3 =ui()\r\n file4 =ui()\r\n\r\n screeningSubString = \"COVIDSourcePlates\"\r\n boolScreening1=0\r\n boolScreening2 = 0\r\n boolScreening3 = 0\r\n boolScreening4 = 0\r\n\r\n\r\n if screeningSubString in file1[0]:\r\n boolScreening1=1\r\n if screeningSubString in file2[0]:\r\n boolScreening2=1\r\n if screeningSubString in file3[0]:\r\n boolScreening3=1\r\n if screeningSubString in file4[0]:\r\n boolScreening4=1\r\n\r\n newFile1=sort_csv(file1[0])\r\n newFile2=sort_csv(file2[0])\r\n newFile3 = sort_csv(file3[0])\r\n newFile4 = sort_csv(file4[0])\r\n\r\n fileCreater(newFile1,newFile2,newFile3,newFile4,boolScreening1,boolScreening2,boolScreening3,boolScreening4)\r\n\r\n# See PyCharm help at 
https://www.jetbrains.com/help/pycharm/\r\n","repo_name":"tonester40/Multiplexing-Validation-Scripts","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21742962175","text":"import os\nfrom linebot import LineBotApi\nfrom linebot.models import TextSendMessage\nfrom linebot.exceptions import LineBotApiError\n\ndef send_line_message(line_user_data, stalking_data_dict, line_message_info, last_sent_time, now_datetime_string, line_bot_api):\n\n send_message_num = 0 #メッセージを送った数\n inactive_user = 0 #友達登録してるけどストーキングリストに登録していない人\n \n for line_user_item in line_user_data:\n send_message = \"\"\n line_user_id = line_user_item[\"line_user_id\"]\n user_id = int(line_user_item[\"user_id\"]) #Decimal型をint型に変更\n\n try:\n for stalking_user in stalking_data_dict[user_id]:\n try:\n if(line_message_info[stalking_user][\"new_ac\"] > 0):\n send_message += (stalking_user + \"さんが\" + str(line_message_info[stalking_user][\"new_ac\"]) + \"問ACしました!\\n合計\"+ str(line_message_info[stalking_user][\"accepted_count\"]) + \"問AC! \" + '{:,}'.format(line_message_info[stalking_user][\"rated_point_sum\"]) + \"RPSを獲得!\" + \"\\n\\n\")\n except KeyError as e:\n print(\"catch KeyError\" + str(e))\n send_message = send_message[:-2] #最後の改行文字を削除\n\n #LINEにメッセージ送信\n if(send_message != \"\"):\n try:\n now_datetime_string = now_datetime_string[5:] # 2020/06/07 01:19 → 06/07 01:19\n #send_message = last_sent_time + \"から\" + now_datetime_string + \"までの間\\n\" + send_message\n send_message = last_sent_time + \"から今までの間に\\n\" + send_message\n line_bot_api.push_message(line_user_id, TextSendMessage(text=send_message))\n print(str(user_id) + \"さんにLINEメッセージを送信しました。\")\n send_message_num += 1\n except LineBotApiError as e:\n print(e)\n\n except KeyError as e:\n print(str(user_id) + \"さんはストーキングリストに誰も登録していません。\")\n inactive_user += 1\n\n try:\n admin_send_message = str(send_message_num) + \"通のメッセージを送信しました\\nInactiveユーザーは\" + str(inactive_user) + \"人です\"\n line_bot_api.push_message(os.environ[\"ADMIN_USER_ID\"], TextSendMessage(text=admin_send_message))\n print(\"Admin Messageを送信しました\")\n except LineBotApiError as e:\n print(e)","repo_name":"zaurus-yusya/AtCoderStalker","sub_path":"src/pushmessage/commonfunctions/send_line_message.py","file_name":"send_line_message.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"ja","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"31827987502","text":"import logging\nfrom os import path\nfrom tempfile import mkdtemp\n\nLOG = logging.getLogger(__name__)\n\nUPDATE_FIELD = \"/types/actioninvocation/fields/{}\"\nGET_FIELD = \"/types/actioninvocation/fields/{}?include_principals=true\"\n\nclass SoarHelper():\n def __init__(self, rest_client):\n self.rest_client = rest_client\n\n def temp_attach(self, inc_id, incident_attachment_list, requested_attachments):\n \"\"\"[match attachments found in an incident with the list provided by the function call and\n prep for sending email]\n\n Args:\n inc_id ([int]):\n incident_attachment_list ([json]): [results of api call to get all incident attachments ]\n requested_attachments ([list]): [list of attachments to include, or '*' for all]\n\n Returns:\n [set]: [file paths of attachments to send]\n \"\"\"\n remaining_attachment_list = requested_attachments[:]\n attachment_path = []\n tempdir = mkdtemp()\n\n for incident_attachment in incident_attachment_list:\n file_name = 
incident_attachment[\"name\"]\n if file_name in requested_attachments:\n remaining_attachment_list.remove(file_name)\n\n if incident_attachment['type'] == 'incident':\n file_contents = self.rest_client.get_content(f\"/incidents/{inc_id}/attachments/{incident_attachment['id']}/contents\")\n else:\n file_contents = self.rest_client.get_content(f\"/tasks/{incident_attachment['task_id']}/attachments/{incident_attachment['id']}/contents\")\n file_path = path.join(tempdir, file_name)\n with open(file_path, \"wb+\") as temp_file:\n temp_file.write(file_contents)\n attachment_path.append(file_path)\n\n # send warnings when attachments are not found\n if remaining_attachment_list:\n LOG.warning(\"Unable to find the following attachments: %s\", \",\".join(remaining_attachment_list))\n\n return set(attachment_path)\n\n def process_attachments(self, inc_id, attachments):\n \"\"\"[return a list of filepaths to include as attachments ]\n\n Args:\n inc_id ([int]): [incident id]\n attachments ([str]): [comma separated attachment list or '*' for all]\n\n Returns:\n [set]: [file paths for attachments]\n \"\"\"\n if not attachments:\n return None\n\n incident_attachment_result = self.rest_client.post(f\"/incidents/{inc_id}/attachments/query?include_tasks=true\", None)\n incident_attachment_list = incident_attachment_result['attachments']\n # convert the list of requested attachments\n if attachments and attachments == \"*\":\n # include all incident attachments\n attachment_list = [incident_attachment[\"name\"] for incident_attachment in incident_attachment_list]\n else:\n attachment_list = [attach.strip() for attach in split_string(attachments)] \\\n if attachments else []\n\n all_attach = self.temp_attach(inc_id, incident_attachment_list, attachment_list)\n all_attach and LOG.debug(\"Attachments to include: %s\", \",\".join(all_attach))\n\n return all_attach\n\n def get_incident_data(self, mail_incident_id):\n return self.rest_client.get(f\"/incidents/{mail_incident_id}?handle_format=names\")\n\n def get_artifact_data(self, mail_incident_id):\n return self.rest_client.post(f\"/incidents/{mail_incident_id}/artifacts/query_paged?include_related_incident_count=true\", payload={})\n\n def get_note_data(self, mail_incident_id):\n return self.rest_client.post(f\"/incidents/{mail_incident_id}/comments/query?include_tasks=false\", payload={})\n\n def update_select_list(self, field_name, selection_list):\n \"\"\"\n Update values in a select field\n :param field_name: Activity field name\n :param selection_list: list of items to replace in the field_name selection list\n :return: True/False if the operation is successful\n \"\"\"\n\n try:\n payload = self.rest_client.get(GET_FIELD.format(field_name))\n\n if type(payload) == list or payload.get(\"input_type\") != \"select\":\n return None\n\n # Put payload with no values to delete old values\n #del payload[\"values\"]\n #self.rest_client.put(UPDATE_FIELD.format(field_name), payload)\n\n # Add values to the payload\n payload[\"values\"] = [\n {\"label\": str(value), \"enabled\": True, \"hidden\": False}\n for value in selection_list\n ]\n\n # update the selection list\n self.rest_client.put(UPDATE_FIELD.format(field_name), payload)\n return True\n except Exception as err_msg:\n LOG.error(\"Action failed for field: %s error: %s\", field_name, str(err_msg))\n return False\n\n\ndef split_string(in_string):\n return in_string.split(',') if in_string else 
[]\n","repo_name":"calvinwynne/resilient-community-apps","sub_path":"fn_outbound_email/fn_outbound_email/lib/soar_helper.py","file_name":"soar_helper.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"40497607267","text":"'''\nThere are n cities. Some of them are connected, while some are not. If city a is connected directly with city b, and city b is connected directly with city c, then city a is connected indirectly with city c.\n\nA province is a group of directly or indirectly connected cities and no other cities outside of the group.\n\nYou are given an n x n matrix isConnected where isConnected[i][j] = 1 if the ith city and the jth city are directly connected, and isConnected[i][j] = 0 otherwise.\n\nReturn the total number of provinces. \n'''\n\n\nclass Solution(object):\n def findCircleNum(self, isConnected):\n \"\"\"\n :type isConnected: List[List[int]]\n :rtype: int\n \"\"\"\n provinceCount = 0\n visited = set()\n\n def dfs(neighbourCities) :\n for neighbour, connected in enumerate(neighbourCities) :\n if connected and neighbour not in visited :\n visited.add(neighbour)\n dfs(isConnected[neighbour])\n\n for city, neighbourCities in enumerate(isConnected) :\n if city not in visited :\n dfs(neighbourCities)\n provinceCount += 1 \n \n return provinceCount","repo_name":"lonebots/python-programming","sub_path":"leet-code/547-no-of-provinces.py","file_name":"547-no-of-provinces.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29495606862","text":"import unittest\n\nfrom rnamake import motif_graph, motif_topology, motif_state_graph\nfrom rnamake import sequence_optimizer, settings, util\nimport is_equal\nfrom rnamake import resource_manager as rm\nimport build\n\nclass SequenceOptimizerUnittests(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_simple_hairpin(self):\n mg = motif_graph.MotifGraph()\n for i in range(10):\n mg.add_motif(m_name=\"HELIX.IDEAL\")\n mg.add_motif(m_name=\"HAIRPIN.1GID.0\")\n\n so = sequence_optimizer.SequenceOptimizer3D()\n scorer = sequence_optimizer.ExternalTargetScorer(\n mg.get_node(9).data.ends[1].state().copy(), 8, 1)\n sols = so.get_optimized_sequences(mg, scorer)\n bp1 = mg.get_node(9).data.ends[1].copy()\n mg.replace_helix_sequence(seq=sols[0].sequence)\n\n self.failUnless(mg.sequence() == sols[0].sequence)\n bp2 = mg.get_node(8).data.ends[1]\n self.failUnless(abs(sols[0].dist_score - bp1.diff(bp2)) < 0.2)\n\n def test_simple_hairpin_2(self):\n mg = motif_graph.MotifGraph()\n for i in range(10):\n mg.add_motif(m_name=\"HELIX.IDEAL\")\n mg.add_motif(m_name=\"HAIRPIN.1GID.0\")\n so = sequence_optimizer.SequenceOptimizer3D()\n scorer = sequence_optimizer.ExternalTargetScorer(\n mg.get_node(9).data.ends[1].state().copy(), 8, 1)\n mg_opt = so.get_optimized_mg(mg, scorer)\n\n mg.replace_helix_sequence(seq=mg_opt.sequence())\n\n for n in mg:\n self.failUnless(n.data.name == mg_opt.get_node(n.index).data.name)\n\n atoms1 = mg.get_structure().structure.atoms()\n atoms2 = mg_opt.get_structure().structure.atoms()\n\n for i in range(len(atoms1)):\n diff = util.distance(atoms1[i].coords, atoms2[i].coords)\n self.failUnless(diff < 0.1)\n\n def test_minittr(self):\n path = settings.UNITTEST_PATH + \"/test_problems/mini_ttr/\"\n f = open(path+\"sol.mg\")\n lines = f.readlines()\n f.close()\n\n mg = motif_graph.MotifGraph(mg_str=lines[0])\n 
mg.replace_ideal_helices()\n scorer = sequence_optimizer.InternalTargetScorer(11, 1, 19, 1)\n so = sequence_optimizer.SequenceOptimizer3D()\n mg_opt = so.get_optimized_mg(mg, scorer)\n\n mg.replace_helix_sequence(seq=mg_opt.sequence())\n\n for n in mg:\n self.failUnless(n.data.name == mg_opt.get_node(n.index).data.name)\n\n atoms1 = mg.get_structure().structure.atoms()\n atoms2 = mg_opt.get_structure().structure.atoms()\n\n for i in range(len(atoms1)):\n diff = util.distance(atoms1[i].coords, atoms2[i].coords)\n self.failUnless(diff < 0.1)\n\n def test_chip_only(self):\n base_dir = settings.UNITTEST_PATH + \"resources/motif_graph/\"\n f = open(base_dir+\"tecto_chip_only.mg\")\n l = f.readline()\n f.close()\n\n mg = motif_graph.MotifGraph(mg_str=l)\n #for n in mg:\n # print n.index, n.data.name\n\n scorer = sequence_optimizer.InternalTargetScorer(22, 1, 19, 1)\n so = sequence_optimizer.SequenceOptimizer3D()\n so.option('return_lowest', 1)\n so.option('max_steps', 100)\n mg_opt = so.get_optimized_mg(mg, scorer)\n\n mg.replace_helix_sequence(seq=mg_opt.sequence())\n\n mg.write_pdbs()\n mg_opt.write_pdbs(\"opt\")\n\n self.failUnless(mg_opt.sequence() == mg.sequence())\n\n for n in mg:\n self.failUnless(n.data.name == mg_opt.get_node(n.index).data.name)\n\n\n\n\ndef main():\n unittest.main()\n\nif __name__ == '__main__':\n main()\n","repo_name":"zhuoyuzhang/RNAMake","sub_path":"rnamake/unittests/sequence_optimizer_unittests.py","file_name":"sequence_optimizer_unittests.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30767463071","text":"##Power of Two\n##Given an integer, write a function to determine if it is a power of two.\n##\n##2015年8月21日 16:04:17 AC\n##zss\n\nclass Solution(object):\n def isPowerOfTwo(self, n):\n \"\"\"\n :type n: int\n :rtype: bool\n \"\"\"\n if n==2 or n==1:\n return True\n if n<1 or n %2 !=0:\n return False\n return self.isPowerOfTwo(n//2)\n","repo_name":"zingzheng/LeetCode_py","sub_path":"231Power of Two.py","file_name":"231Power of Two.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36576803153","text":"import bpy\nimport mathutils\nfrom .. 
import utils\n\n\nclass SOURCEOPS_OT_RigSimulation(bpy.types.Operator):\n '''Duplicate rigid body objects from a collection and rig the copies, then make the bones follow the rigid bodies'''\n bl_options = {'REGISTER', 'UNDO'}\n bl_idname = 'sourceops.rig_simulation'\n bl_label = 'Rig Simulation'\n\n @classmethod\n def poll(cls, context):\n\n # Make sure input and output collections are set\n sourceops = utils.common.get_globals(context)\n input_collection = sourceops.simulation_input\n output_collection = sourceops.simulation_output\n return input_collection and output_collection\n\n def execute(self, context):\n\n # Switch to object mode\n bpy.ops.object.mode_set(mode='OBJECT')\n\n # Get input and output collections\n sourceops = utils.common.get_globals(context)\n input_collection = sourceops.simulation_input\n output_collection = sourceops.simulation_output\n\n # Iterate through output objects\n for output_obj in output_collection.all_objects:\n if output_obj.type not in {'MESH', 'ARMATURE'}:\n continue\n\n # Store object type and data\n obj_type = output_obj.type\n obj_data = output_obj.data\n\n # Remove object\n bpy.data.objects.remove(output_obj)\n\n # Remove data\n if obj_type == 'MESH':\n bpy.data.meshes.remove(obj_data)\n elif obj_type == 'ARMATURE':\n bpy.data.armatures.remove(obj_data)\n\n # Create armature\n name = f'{output_collection.name} Armature'\n arm_data = bpy.data.armatures.new(name=name)\n arm_obj = bpy.data.objects.new(name=name, object_data=arm_data)\n output_collection.objects.link(arm_obj)\n arm_obj.show_in_front = True\n\n # Iterate through input objects\n for input_obj in input_collection.all_objects:\n if input_obj.type != 'MESH':\n continue\n\n # Duplicate to output collection\n output_obj = input_obj.copy()\n output_obj.data = input_obj.data.copy()\n output_obj.name = f'Rigged {input_obj.name}'\n output_obj.data.name = output_obj.name\n output_obj.animation_data_clear()\n output_collection.objects.link(output_obj)\n\n # Reset transforms\n output_obj.location = (0, 0, 0)\n output_obj.rotation_euler = (0, 0, 0)\n output_obj.scale = (1, 1, 1)\n\n # Remove rigid body properties\n if output_obj.rigid_body:\n context.view_layer.objects.active = output_obj\n bpy.ops.rigidbody.object_remove()\n\n # Add vertex group\n output_obj.vertex_groups.clear()\n group = output_obj.vertex_groups.new(name=output_obj.name)\n indices = list(range(len(output_obj.data.vertices)))\n group.add(index=indices, weight=1.0, type='REPLACE')\n\n # Add armature modifier\n mod = output_obj.modifiers.new('Armature', 'ARMATURE')\n mod.use_vertex_groups = True\n mod.object = arm_obj\n\n # Switch to armature edit mode\n context.view_layer.objects.active = arm_obj\n bpy.ops.object.mode_set(mode='EDIT')\n\n # Iterate through output objects\n for output_obj in output_collection.all_objects:\n if output_obj.type != 'MESH':\n continue\n\n # Add bone\n bone = arm_data.edit_bones.new(output_obj.name)\n bone.tail.z = output_obj.dimensions[2]\n\n # Switch to armature pose mode\n context.view_layer.objects.active = arm_obj\n bpy.ops.object.mode_set(mode='POSE')\n\n # Iterate through bones\n for bone in arm_obj.pose.bones:\n\n # Add bone constraint\n constraint = bone.constraints.new('CHILD_OF')\n name = bone.name.replace('Rigged ', '', 1)\n constraint.target = bpy.data.objects[name]\n constraint.inverse_matrix = mathutils.Matrix.Identity(4)\n\n # Switch to object mode\n bpy.ops.object.mode_set(mode='OBJECT')\n\n # Report to user and finish\n self.report({'INFO'}, f'Rigged Simulation')\n return 
{'FINISHED'}\n","repo_name":"bonjorno7/SourceOps","sub_path":"addon/ops/rig_simulation.py","file_name":"rig_simulation.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"61"} +{"seq_id":"9659780764","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import csv\n#from numpy import loadtxt\nimport statistics\nimport pylab\n\n\n\n\n#引入TXT\nf = open('TemperatureofThreecities.txt','r')\ntext = []\nfor line in f:\n text.append(line.rstrip('\\n').split(','))\n#擷取台南的資訊\nTainan_T = text[1:10]\n#存入各年分\nTainan2013 = list(map(float,Tainan_T[0]))\nTainan2014 = list(map(float,Tainan_T[1]))\nTainan2015 = list(map(float,Tainan_T[2]))\nTainan2016 = list(map(float,Tainan_T[3]))\nTainan2017 = list(map(float,Tainan_T[4]))\nTainan2018 = list(map(float,Tainan_T[5]))\nTainan2019 = list(map(float,Tainan_T[6]))\nTainan2020 = list(map(float,Tainan_T[7]))\nTainan2021 = list(map(float,Tainan_T[8]))\n#建立一個array儲存台南個年份及月分資訊\nTem_array = np.zeros([9,12])\n#存入個年份資訊\nTem_array[0] = Tainan2013[1:13]\nTem_array[1] = Tainan2014[1:13]\nTem_array[2] = Tainan2015[1:13]\nTem_array[3] = Tainan2016[1:13]\nTem_array[4] = Tainan2017[1:13]\nTem_array[5] = Tainan2018[1:13]\nTem_array[6] = Tainan2019[1:13]\nTem_array[7] = Tainan2020[1:13]\nTem_array[8] = Tainan2021[1:13]\n\n#print(Tem_array)\n\n\n# In[2]:\n\n\n#1\n\nplt.figure(figsize = (12, 8))\n#建立X軸LIST\nx_diff = [1,2,3,4,5,6,7,8,9,10,11,12]\n#畫出個年份月份溫度變化\nfor i in range (2013,2022):\n plt.plot(x_diff,Tem_array[i - 2013],linewidth=\"2\",label = i)\nplt.xticks(range(1,13,1))\nplt.title(\"Tainan Monthly Mean Temperature From 2013 To 2021\") \nplt.legend(loc = \"lower center\")\nplt.xlabel(\"Month\")\nplt.xlim(1,12,1)\nplt.ylabel(\"Temperature in Degree C\")\nplt.show()\n\n\n\n# In[3]:\n#2\n\nmean_list = []\n#把九年各月分別平均\nfor i in range(0,12,1):\n mean =(Tem_array[0][i]+Tem_array[1][i]+Tem_array[2][i]+Tem_array[3][i]+Tem_array[4][i]+Tem_array[5][i]+Tem_array[6][i]+Tem_array[7][i]+Tem_array[8][i])/9\n mean_list.append(mean)\nmean_year = statistics.mean(mean_list)\n#九年總平均\nmean_year_list = []\nfor i in range(12):\n mean_year_list.append(mean_year)\n\nplt.figure(figsize = (12, 8))\n#建立X軸LIST\nx_diff = [1,2,3,4,5,6,7,8,9,10,11,12]\nplt.plot(x_diff,mean_list,linestyle = '-',marker = \"o\",color =\"r\")\nplt.plot(x_diff,mean_list,color = \"b\")\nplt.plot(x_diff,mean_year_list,linestyle = \"--\",color =\"r\",label = \"Mean of 9 years\")\n#標記每一個點溫度\nfor i in range(12):\n plt.annotate(str(mean_list[i].round(2)),(i+1,mean_list[i]))\nplt.annotate(str(mean_year.round(2)),(1,mean_year))\nplt.xlim(0,12,1)\nplt.xticks(range(1,13,1))\nplt.title(\"Tainan Monthly Mean Temperature From 2013 To 2021\")\nplt.xlabel(\"Month\")\nplt.ylabel(\"Temperature in Degree C\")\npylab.legend(loc =\"upper right\")\nplt.show()\n\n\n# In[136]:\n#3\n\nmonth_mean = []\nmean_year_list_2 = []\n\nfor i in range(9):\n mean_year_list_2.append(mean_year)\n#建立X軸LIST\nx_diff =[2013,2014,2015,2016,2017,2018,2019,2020,2021]\n#建立label的list\nlabels =['Mean of Jan','Mean of Feb','Mean of Mar','Mean of Apr','Mean of May','Mean of Jun','Mean of Jul','Mean of Aug','Mean of Set','Mean of Oct','Mean of Nov','Mean of Dec']\n#轉至原本的矩陣\nTem_mean_T = Tem_array.transpose()\nplt.figure(figsize = (12, 8))\n\nfor i in range (12):\n plt.plot(x_diff,Tem_mean_T[i],label =labels[i])\nplt.plot(x_diff,mean_year_list_2,linestyle = \"--\",color =\"r\",label = \"Mean of 9 years\") 
\nplt.annotate(str(mean_year.round(2)),(2013,mean_year))\nplt.title(\"Tainan Mean Temperature Of Month And Total Year Mean From 2013 To 2021\") \nplt.legend(loc =\"lower center\") \nplt.xlabel(\"Years\")\nplt.ylabel(\"Temperature in Degree C\")\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"kuku000/-COMPUTATIONAL-THINKING-AND-PROBLEM-SOLVING","sub_path":"E94084032_林耕澤_HW_temperature/Tainan_temperature.py","file_name":"Tainan_temperature.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9471983863","text":"from plyer import notification\nimport os\nimport configparser\nimport shutil\n\nconfig = configparser.ConfigParser()\nconfig.read(os.getcwd() + '\\\\config.ini','UTF-8')\n\n\nflag = os.path.exists(config['Dir']['copydir'] + '/' + config['Dir']['target'])\n\nif not flag:\n copyfiles = os.listdir(os.getcwd() + '\\\\files')\n for file in copyfiles:\n print(os.getcwd() + '\\\\files\\\\' + file + \"から\" + config['Dir']['copydir'] + \"にコピー\")\n shutil.copy(os.getcwd() + '\\\\files\\\\' + file, config['Dir']['copydir'])\n \n notification.notify(\n title = config['setting']['title'],\n message = config['setting']['msg'],\n app_name = \"自動ファイル追加\"\n )\nelse:\n notification.notify(\n title = config['setting']['title'],\n message = config['setting']['nomsg'],\n app_name = \"自動ファイル追加\"\n )\n\nos.system('powershell -Command start dmmgameplayer://umamusume/cl/general/umamusume')","repo_name":"ishida-shunya/Umamusume_copyfileauto","sub_path":"umafile.py","file_name":"umafile.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5066377247","text":"from numpy import genfromtxt, zeros\n\nfrom math import pi\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n\nmap_from_file = genfromtxt('../data/healpix_map.dat')\n\nN = 512\nnpix = 786432\nprojection = 'moll' # 'cyl', 'moll', 'ortho'\nsave_as_png = False\nsave_as_svg = False\n\ninside_map = zeros((int(npix + 1), 3))\ny = zeros((int(npix + 1), 3))\nx = zeros((int(npix + 1), 3))\n\nfor i in range(0, npix):\n y[i][0] = map_from_file[i][0]\n x[i][0] = map_from_file[i][1]\n\nfor i in range(0, npix):\n inside_map[i][0] = map_from_file[i][2]\n\nrad = 180.0 / pi\n\nfig = plt.figure(figsize=(8, 4))\nfig.subplots_adjust(\n left=0.0, right=1.0, top=1.0, bottom=0.0, wspace=0.0, hspace=0.0)\n\nax = fig.add_axes([0.0, 0.0, 1.0, 1.0])\nax.axis('off')\n\ncmbmap = Basemap(projection=projection, lon_0=0, resolution='l')\ncmbmap.contourf(x * rad, y * rad, inside_map, 512, cmap=plt.cm.jet, latlon=True)\n\nif save_as_png:\n plt.savefig('out.png', dpi=300)\nif save_as_svg:\n plt.savefig('out.svg')\n\nplt.show()\n","repo_name":"heyfaraday/CMBcpp","sub_path":"test/test1/py/vis_3.py","file_name":"vis_3.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33711442113","text":"from aiogram.types import Message\nfrom aiogram import Router\nfrom FSM.states import Simp\nfrom create_bot import bot\nfrom config import admin_id\nfrom aiogram.fsm.context import FSMContext\nfrom db.db_api import check_profile\n\nsimp_router = Router()\n\n@simp_router.message(Simp.simp)\nasync def simpp(message: Message,state: FSMContext) -> None:\n id = message.from_user.id\n if message.text==\"👎\":\n await state.clear()\n return\n elif message.text==\"👍\":\n data = await 
state.get_data()\n profile = check_profile(id)\n if profile==None:\n bot.send_message(id,\"У вас еще нет анкеты.\")\n await state.clear()\n return\n await bot.send_photo(int(data[\"id\"]),caption=f\"{profile[5]},{profile[1]},{profile[4]}\\n{profile[6]}\\n\\n{message.from_user.username}\",photo=profile[7])\n await state.clear() \n elif message.text==\"/report\":\n data = await state.get_data()\n await bot.send_message(admin_id,f\"Поступила жалоба на пользователя:{data['id']}\")\n await bot.send_message(id,f\"Жалоба была отправлена.\")\n await state.clear()\n","repo_name":"Olegggg12/acquaintance_bot","sub_path":"FSM/simp.py","file_name":"simp.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"19055028223","text":"import math\nimport numpy as np\nfrom scipy.spatial.transform import Rotation\n\n\ndef normalize_angle(angle):\n \"\"\"Normalize angle\n \"\"\"\n a = (angle + math.pi) % (2.0 * math.pi)\n if a < 0.0:\n a += (2.0 * math.pi)\n return a - math.pi\n\n\ndef get_transform_matrix(quat, vector):\n \"\"\"Get transform matrix from quaternion and vector\n\n Args:\n quat: [qx, qy, qz, qw]\n vector: [x, y, z]\n \"\"\"\n rotation_matrix = Rotation.from_quat(quat)\n transform_vector = np.array(vector).reshape(-1, 1)\n rotation = rotation_matrix.as_matrix()\n transform_matrix = np.concatenate([rotation, transform_vector], axis=1)\n bottom = np.array([0, 0, 0, 1]).reshape(1, -1)\n transform_matrix = np.concatenate([transform_matrix, bottom], axis=0)\n return transform_matrix\n\n\ndef get_quat_and_vector(transform_matrix):\n \"\"\"Get quaternion and vector from transform matrix\n\n Args:\n transform_matrix: transformation matrix\n \"\"\"\n rotation_mat = transform_matrix[0:3, 0:3]\n vector = transform_matrix[0:3, 3]\n rotation = Rotation.from_matrix(rotation_mat)\n quat = rotation.as_quat()\n return quat, vector\n\n\ndef euler_to_rotation_matrix(theta1, theta2, theta3, order='xyz'):\n \"\"\"Euler to rotation matrix\n \"\"\"\n c1 = np.cos(theta1)\n s1 = np.sin(theta1)\n c2 = np.cos(theta2)\n s2 = np.sin(theta2)\n c3 = np.cos(theta3)\n s3 = np.sin(theta3)\n\n if order == 'xzx':\n matrix = np.array([[c2, -c3*s2, s2*s3],\n [c1*s2, c1*c2*c3-s1*s3, -c3*s1-c1*c2*s3],\n [s1*s2, c1*s3+c2*c3*s1, c1*c3-c2*s1*s3]])\n elif order == 'xyx':\n matrix = np.array([[c2, s2*s3, c3*s2],\n [s1*s2, c1*c3-c2*s1*s3, -c1*s3-c2*c3*s1],\n [-c1*s2, c3*s1+c1*c2*s3, c1*c2*c3-s1*s3]])\n elif order == 'yxy':\n matrix = np.array([[c1*c3-c2*s1*s3, s1*s2, c1*s3+c2*c3*s1],\n [s2*s3, c2, -c3*s2],\n [-c3*s1-c1*c2*s3, c1*s2, c1*c2*c3-s1*s3]])\n elif order=='yzy':\n matrix = np.array([[c1*c2*c3-s1*s3, -c1*s2, c3*s1+c1*c2*s3],\n [c3*s2, c2, s2*s3],\n [-c1*s3-c2*c3*s1, s1*s2, c1*c3-c2*s1*s3]])\n elif order=='zyz':\n matrix = np.array([[c1*c2*c3-s1*s3, -c3*s1-c1*c2*s3, c1*s2],\n [c1*s3+c2*c3*s1, c1*c3-c2*s1*s3, s1*s2],\n [-c3*s2, s2*s3, c2]])\n elif order=='zxz':\n matrix = np.array([[c1*c3-c2*s1*s3, -c1*s3-c2*c3*s1, s1*s2],\n [c3*s1+c1*c2*s3, c1*c2*c3-s1*s3, -c1*s2],\n [s2*s3, c3*s2, c2]])\n elif order=='xyz':\n matrix = np.array([[c2*c3, -c2*s3, s2],\n [c1*s3+c3*s1*s2, c1*c3-s1*s2*s3, -c2*s1],\n [s1*s3-c1*c3*s2, c3*s1+c1*s2*s3, c1*c2]])\n elif order=='xzy':\n matrix = np.array([[c2*c3, -s2, c2*s3],\n [s1*s3+c1*c3*s2, c1*c2, c1*s2*s3-c3*s1],\n [c3*s1*s2-c1*s3, c2*s1, c1*c3+s1*s2*s3]])\n elif order=='yxz':\n matrix = np.array([[c1*c3+s1*s2*s3, c3*s1*s2-c1*s3, c2*s1],\n [c2*s3, c2*c3, -s2],\n [c1*s2*s3-c3*s1, c1*c3*s2+s1*s3, c1*c2]])\n elif 
order=='yzx':\n matrix = np.array([[c1*c2, s1*s3-c1*c3*s2, c3*s1+c1*s2*s3],\n [s2, c2*c3, -c2*s3],\n [-c2*s1, c1*s3+c3*s1*s2, c1*c3-s1*s2*s3]])\n elif order=='zyx':\n matrix = np.array([[c1*c2, c1*s2*s3-c3*s1, s1*s3+c1*c3*s2],\n [c2*s1, c1*c3+s1*s2*s3, c3*s1*s2-c1*s3],\n [-s2, c2*s3, c2*c3]])\n elif order=='zxy':\n matrix = np.array([[c1*c3-s1*s2*s3, -c2*s1, c1*s3+c3*s1*s2],\n [c3*s1+c1*s2*s3, c1*c2, s1*s3-c1*c3*s2],\n [-c2*s3, s2, c2*c3]])\n return matrix\n\n\ndef is_rotation_matrix(R):\n \"\"\"Determine whether R is rotation matrix\n \"\"\"\n Rt = np.transpose(R)\n should_be_identity = np.dot(Rt, R)\n I = np.identity(3, dtype = R.dtype)\n n = np.linalg.norm(I - should_be_identity)\n return n < 1e-6\n\n\ndef rotation_matrix_to_euler(R):\n \"\"\"Transform rotation matrix to euler\n \"\"\"\n assert(is_rotation_matrix(R))\n sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n if not singular :\n x = math.atan2(R[2,1] , R[2,2])\n y = math.atan2(-R[2,0], sy)\n z = math.atan2(R[1,0], R[0,0])\n else :\n x = math.atan2(-R[1,2], R[1,1])\n y = math.atan2(-R[2,0], sy)\n z = 0\n return np.array([x, y, z])\n\n\nclass Euler(object):\n def __init__(self, roll, pitch, yaw) -> None:\n \"\"\"Init\n \"\"\"\n self.roll = roll\n self.pitch = pitch\n self.yaw = yaw\n\n def to_quaternion(self):\n \"\"\"Transform euler to quaternion\n \"\"\"\n cr = math.cos(self.roll * 0.5)\n sr = math.sin(self.roll * 0.5)\n cp = math.cos(self.pitch * 0.5)\n sp = math.sin(self.pitch * 0.5)\n cy = math.cos(self.yaw * 0.5)\n sy = math.sin(self.yaw * 0.5)\n qw = cr * cp * cy + sr * sp * sy\n qx = sr * cp * cy - cr * sp * sy\n qy = cr * sp * cy + sr * cp * sy\n qz = cr * cp * sy - sr * sp * cy\n return Quaternion(qw, qx, qy, qz)\n\n\nclass Quaternion(object):\n def __init__(self, w, x, y, z) -> None:\n \"\"\"Init\n \"\"\"\n self.w = w\n self.x = x\n self.y = y\n self.z = z\n\n def to_euler(self):\n \"\"\"Quaternion to euler\n \"\"\"\n t0 = 2 * (self.w * self.x + self.y * self.z)\n t1 = 1 - 2 * (self.x * self.x + self.y * self.y)\n roll = math.atan2(t0, t1)\n\n t2 = 2 * (self.w * self.y - self.z * self.x)\n pitch = math.asin(t2)\n\n t3 = 2 * (self.w * self.z + self.x * self.y)\n t4 = 1 - 2 * (self.y * self.y + self.z * self.z)\n yaw = math.atan2(t3, t4)\n return Euler(roll, pitch, yaw)\n\n def __mul__(self, other):\n \"\"\"Quaternion multiplication\n \"\"\"\n w = self.w * other.w - self.x * other.x - self.y * other.y - self.z * other.z\n x = self.w * other.x + self.x * other.w + self.y * other.z - self.z * other.y\n y = self.w * other.y - self.x * other.z + self.y * other.w + self.z * other.x\n z = self.w * other.z + self.x * other.y - self.y * other.x + self.z * other.w\n return Quaternion(w, x, y, z)\n\n def __str__(self) -> str:\n \"\"\"Transform quaternion to string\n \"\"\"\n return \"w: {}, x: {}, y: {}, z: {}\".format(self.w, self.x, self.y, self.z)\n","repo_name":"YuqiHuai/BaiduApollo","sub_path":"modules/tools/adataset/adataset/apolloscape/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":6132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"18058165205","text":"'''\nCreated on 28 nov. 
2016\n\n@author: Adnene\n'''\nfrom statistics.statisticsExtraction3D import workflowStage_histogramGenerator3D\nfrom statistics.statisticsExtraction2D import workflowStage_histogramGenerator2D\n\nPOSSIBLE_HISTOGRAMS_MAPS={\n 'histograms2D':workflowStage_histogramGenerator2D,\n 'histograms3D':workflowStage_histogramGenerator3D\n}\n\n\ndef workflowStage_histogramGenerator(\n inputs={},\n configuration={},\n outputs={},\n workflow_stats={}\n ):\n \n '''\n {\n \n 'id':'stage_id',\n 'type':'histograms_visualization',\n 'inputs': {\n 'dataset':[],\n 'destinationRepository' : 'repository_path' # care the repository path will be deleted first before histograms are created !\n },\n 'configuration': {\n 'numberOfDimensions'=3\n 'dimension1'='GROUPE_ID',\n 'dimension2'='NATIONAL_PARTY',\n 'dimension3'='COUNTRY',\n #or 2 ?\n },\n 'outputs':{\n 'statistics': \n }\n }\n '''\n \n \n localConfiguration={}\n localConfiguration['numberOfDimensions']=configuration.get('numberOfDimensions',3)\n \n if localConfiguration['numberOfDimensions']==2 :\n POSSIBLE_HISTOGRAMS_MAPS['histograms2D'](inputs, configuration, outputs,workflow_stats)\n elif localConfiguration['numberOfDimensions']==3 :\n POSSIBLE_HISTOGRAMS_MAPS['histograms3D'](inputs, configuration, outputs,workflow_stats)\n \n return outputs\n ","repo_name":"Adnene93/DEBuNk","sub_path":"Synthetic/CODE/statistics/statisticsHistogramWorkflowStage.py","file_name":"statisticsHistogramWorkflowStage.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"34793179821","text":"from sys import argv\nimport sys\n\n#command line check\nif len(sys.argv)==1:\n exit(\"ERROR : You forgot the input file. \")\nelse:\n F = open(argv[1],\"r\")\n lines = F.readlines()\n \n for i in range(len(lines)):\n stack = []\n A = lines[i].rstrip().split( )\n for i in A:\n #times operation\n if i == \"*\":\n a = stack.pop()\n b = stack.pop()\n stack.append(float(b) * float(a))\n # subtract operation\n elif i == \"-\":\n a = stack.pop()\n b = stack.pop()\n stack.append(float(b) - float(a))\n #division operation\n elif i == \"/\":\n a = stack.pop()\n b = stack.pop()\n stack.append(float(b)/float(a))\n #plus operation\n elif i == \"+\":\n a = stack.pop()\n b = stack.pop()\n stack.append(float(b)+float(a))\n #number \n else:\n stack.append(float(i))\n print(\"intput: \" + ' '.join(A))\n print(\"result: \" + str (stack[0]) + \"\\n\" )\n \n","repo_name":"DepN20/CIS205","sub_path":"ReversePolishNotation/TreeLab.py","file_name":"TreeLab.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15413879129","text":"from collections import namedtuple\n\n_ConnexionRequest = namedtuple('SwaggerRequest', [\n 'url', 'method', 'path_params', 'query', 'headers',\n 'form', 'body', 'json', 'files', 'context'\n])\n\n\nclass ConnexionRequest(_ConnexionRequest):\n\n def __new__(cls, url, method, path_params=None, query=None, headers=None,\n form=None, body=None, json=None, files=None, context=None):\n return _ConnexionRequest.__new__(\n cls, url, method,\n path_params=path_params or {},\n query=query or {},\n headers=headers or {},\n form=form or {},\n body=body,\n json=json,\n files=files,\n context=context or {}\n 
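# mapping-style fields fall back to empty dicts; body, json and files pass through unchanged\n            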
)\n","repo_name":"hjalves/connexion","sub_path":"connexion/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"9062434113","text":"def main():\n n = int(input(\"Entre com o valor de n:\"))\n x = 1\n soma = 0\n while x <= n:\n soma = soma + 0\n x = x + 1\n print(\"O somatorio e\", soma)\n\nmain()\n \n","repo_name":"HelloWounderworld/Review-Python","sub_path":"O-que-Fiz-Na-Faculdade/MAC1/Numeros-Inteiros-Com-Python/Somatorio.py","file_name":"Somatorio.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22557911129","text":"import numpy as np\nimport pygame\nfrom pygame import Vector2\n\nfrom py_man.utils.enums.direction.joypad.joypad import JoyPad\nfrom py_man.utils.enums.display.character.character import Character\nfrom py_man.utils.enums.display.display import Display\n\n\nclass Node(object):\n def __init__(self, x, y):\n self.position = Vector2(x, y)\n self.joypad = JoyPad()\n self.name = Character()\n self.neighbors = {\n self.joypad.UP: None,\n self.joypad.RIGHT: None,\n self.joypad.DOWN: None,\n self.joypad.LEFT: None,\n self.joypad.PORTAL: None,\n }\n access_list = [\n self.name.PY_MAN,\n self.name.BLINKY,\n self.name.PINKY,\n self.name.INKY,\n self.name.CLYDE,\n self.name.FRUIT,\n ]\n self.access = {direction: access_list.copy() for direction in self.neighbors}\n\n def block_direction(self, direction, entity):\n if entity.name in self.access[direction]:\n self.access[direction].remove(entity.name)\n\n def allow_direction(self, direction, entity):\n if entity.name not in self.access[direction]:\n self.access[direction].append(entity.name)\n\n def render(self, screen):\n for neighbor in filter(None, self.neighbors.values()):\n inception_line = (int(self.position.x), int(self.position.y))\n final_line = (int(neighbor.position.x), int(neighbor.position.y))\n pygame.draw.line(screen, \"white\", inception_line, final_line, 4)\n pygame.draw.circle(screen, \"red\", inception_line, 12)\n\n\nclass NodeGroup(object):\n def __init__(self, level, spawn_point_key=None):\n self.level = level\n self.display = Display()\n self.node_symbols_array = [\"+\", \"P\", \"n\"]\n self.node_dictionary = {}\n self.joypad = JoyPad()\n self.path_symbols_array = [\".\", \"-\", \"|\", \"p\"]\n data = self.load(level)\n self.generate(data)\n self.link_horizontally(data)\n self.link_vertically(data)\n self.spawn_point_key = spawn_point_key\n\n @staticmethod\n def load(maze_text_file):\n return np.loadtxt(maze_text_file, dtype=\" create a new group\n/leave leave this group\n/invite send a group invite\n/accept accept a group invite\n/users show users in this group\n/banned show ban list\n/ban ban user\n/kick kick user from this group\n/help show commands\n\nclient commands handled by client:\n/w whisper user\n/switch switch to another group\n/color change color of this group\n/quit or /exit exit\n/help show commands\n\"\"\"\n\n\ndef send_system_message_async(msg: str):\n for user in users:\n user.send_system_message_async(\n ServerMessage.to_client(\n target_context=ServerMessage.CONTEXT_USER,\n sender_context=ServerMessage.CONTEXT_SYSTEM,\n sender=system_user,\n target=user,\n content=msg,\n report=True\n )\n )\n\n\ndef format_user_group(group: Group, this_user: ServerUser, user: ServerUser):\n txt = user.name\n if this_user == user:\n txt = colored(txt, 'green')\n if group.admin == user:\n 
txt += colored('[ADMIN]', 'yellow')\n if user in this_user.ban_list:\n txt += colored('[BANNED]', 'red')\n return txt\n\n\ndef handle_client(socket: sockets.socket, full_address: str):\n # socket.settimeout(SEND_TIMEOUT)\n input_stream = BufferedSocketStream(socket)\n this_user = ServerUser(system_user, senders, socket)\n this_user.print_network = True\n try:\n this_user.send_system_message(\"choose a username\")\n this_user.send_system_message(\"/req username\")\n\n while True:\n try:\n uname = ServerMessage.from_client(input_stream, report_from=this_user).content.strip().lower()\n except ConnectionError as err:\n thread_print(f\"user {this_user.name} disconnected, cause: {err}\")\n return\n set_uname = True\n with server_state_lock.for_read():\n for user in users:\n if user.name == uname:\n this_user.send_system_message(f\"username {uname} already taken\")\n set_uname = False\n break\n if not re.match(r'^[a-z][a-z0-9_-]*[a-z0-9]$', uname):\n this_user.send_system_message(\n f\"name must begin with a-z letter and contain a-z0-9 '_' or '-' and end with a-z0-9\")\n set_uname = False\n if not set_uname or not len(uname) > 0:\n continue\n this_user.name = uname\n break\n\n with server_state_lock.for_write():\n users.append(this_user)\n global_group.users.append(this_user)\n this_user.groups.append(global_group)\n this_user.send_system_message(f\"/set username {this_user.name}\")\n global_group.send_system_message_async(f\"{this_user.name} has connected\")\n\n while True:\n try:\n message = ServerMessage.from_client(input_stream, report_from=this_user)\n except ConnectionError as err:\n thread_print(f\"user {this_user.name} disconnected, cause: {err}\")\n break\n message.target = None\n with server_state_lock.for_read():\n for group in groups:\n if group.name == message.target_str:\n message.target = group\n break\n if not message.target:\n for user in users:\n if user.name == message.target_str:\n message.target = user\n break\n if not message.target:\n if message.target_context == Message.CONTEXT_GROUP:\n this_user.send_system_message_async(f\"group {message.target_str} does not exist, or not subscribed\")\n this_user.send_system_message(f\"/switch {global_group.name}\")\n elif message.target_context == Message.CONTEXT_USER:\n this_user.send_system_message_async(f\"user {message.target_str} does not exist\")\n else:\n this_user.send_system_message_async(f\"target {message.target_str} does not exist\")\n continue\n if message.content.startswith('/create '):\n group_name = message.content[8:].strip()\n if len(group_name) <= 0:\n this_user.send_system_message_async(\"no group name provided try /help command\")\n continue\n exists = False\n with server_state_lock.for_read():\n for group in groups:\n if group.name == group_name:\n exists = True\n break\n if group_name in reserved_names:\n exists = True\n if exists:\n this_user.send_system_message_async(f\"{group_name} name is taken\")\n continue\n group = Group(group_name, system_user, senders)\n with server_state_lock.for_write():\n group.join_user(this_user, f\"you have created the group {group.name}\")\n group.admin = this_user\n groups.append(group)\n this_user.send_system_message(f\"/switch {group.name}\")\n elif message.content == '/lock':\n if not isinstance(message.target, Group):\n this_user.send_system_message_async(\"target is not a group\")\n continue\n if not message.target:\n this_user.send_system_message_async(f\"group {message.target_str} does not exist, or not subscribed\")\n this_user.send_system_message(f\"/switch 
{global_group.name}\")\n continue\n if message.target.admin is not this_user:\n this_user.send_system_message_async(\"you are not the group admin\")\n continue\n if message.target.locked:\n this_user.send_system_message_async(\"group is already locked\")\n continue\n with server_state_lock.for_write():\n message.target.lock()\n for i in reversed(range(len(message.target.pending_invites))):\n if message.target.pending_invites[i].invited_by is not this_user:\n message.target.pending_invites.pop(i)\n elif message.content == '/unlock':\n if not isinstance(message.target, Group):\n this_user.send_system_message_async(\"target is not a group\")\n continue\n if not message.target:\n this_user.send_system_message_async(f\"group {message.target_str} does not exist, or not subscribed\")\n continue\n if message.target.admin is not this_user:\n this_user.send_system_message_async(\"you are not the group admin\")\n continue\n if not message.target.locked:\n this_user.send_system_message_async(\"group is not locked\")\n continue\n with server_state_lock.for_write():\n message.target.unlock()\n elif message.content == '/leave':\n if not isinstance(message.target, Group):\n this_user.send_system_message_async(\"target is not a group\")\n continue\n if not message.target or this_user not in message.target.users:\n this_user.send_system_message_async(f\"group {message.target_str} does not exist, or not subscribed\")\n continue\n with server_state_lock.for_write():\n message.target.remove_user(this_user, f\"{this_user.name} has left\")\n\n if message.target == global_group:\n global_group.pending_invites.append(Invite(this_user, system_user))\n this_user.send_system_message_async(\n f\"you have unsubscribed from the global group use \\\"/accept {global_group.name}\\\" to come back\")\n else:\n this_user.send_system_message_async(f\"you left the group {message.target.name}\")\n elif message.content == '/users':\n if not isinstance(message.target, Group):\n this_user.send_system_message_async(\"target is not a group\")\n continue\n if not message.target or this_user not in message.target.users:\n this_user.send_system_message_async(f\"group {message.target_str} does not exist, or not subscribed\")\n continue\n with server_state_lock.for_read():\n this_user.send_bytes_async(\n ServerMessage.to_client(\n target_context=Message.CONTEXT_GROUP,\n sender_context=Message.CONTEXT_SYSTEM,\n sender=system_user,\n target=message.target, # it is sent only to this_user\n content=f'users in {message.target.name}:\\n' +\n '\\n'.join(format_user_group(message.target, this_user, user) for user in message.target.users),\n report=True\n )\n )\n elif message.content == '/banned':\n with server_state_lock.for_read():\n this_user.send_bytes_async(\n ServerMessage.to_client(\n target_context=Message.CONTEXT_GROUP,\n sender_context=Message.CONTEXT_SYSTEM,\n sender=system_user,\n target=message.target, # it is sent only to this_user\n content=f'banned users:\\n' +\n '\\n'.join(user.name for user in this_user.ban_list),\n report=True\n )\n )\n elif message.content == '/help':\n this_user.send_system_message_async('''chat commands:\n/create create a new group\n/leave leave this group\n/invite send a group invite\n/accept accept a group invite\n/users show users in this group\n/banned show ban list\n/ban ban user\n/kick kick user from this group\n/help show commands\n''')\n continue\n elif message.content.startswith('/invite '):\n user_name = message.content[8:].strip()\n if not isinstance(message.target, Group):\n 
this_user.send_system_message_async(\"target is not a group\")\n continue\n group = message.target\n if len(user_name) <= 0:\n this_user.send_system_message_async(\"no username provided try /help command\")\n continue\n\n if not group:\n this_user.send_system_message_async(\"group no longer exists\")\n this_user.send_system_message(f\"/switch {global_group.name}\")\n continue\n if group.locked and group.admin is not this_user:\n this_user.send_system_message_async(\n \"you can't send invites, this group is locked and you are not the admin\")\n continue\n user = None\n with server_state_lock.for_write():\n for u in users:\n if u.name == user_name:\n user = u\n break\n if user_name in reserved_names:\n user = None\n if not user:\n this_user.send_system_message_async(f\"user not found:{user_name}\")\n continue\n if user == this_user:\n this_user.send_system_message_async(\n f\"you can't invite yourself, you're already in group {group.name}\")\n continue\n if user in this_user.ban_list:\n this_user.send_system_message_async(f\"{user.name} is in your ban list\")\n continue\n group.pending_invites.append(Invite(user=user, invited_by=this_user))\n user.send_system_message_async(\n f\"you was invited by {user.name} to join group {group.name} type \\\"/accept {group.name}\\\" to join\")\n this_user.send_system_message_async(f\"invite was sent to {user.name}\")\n elif message.content.startswith('/kick '):\n user_name = message.content[6:].strip()\n sep = user_name.find(' ')\n reason = ''\n if sep != -1:\n user_name = user_name[:sep]\n reason = 'reason: ' + message.content[6:].strip()[sep:].strip()\n if not isinstance(message.target, Group):\n this_user.send_system_message_async(\"target is not a group\")\n continue\n group = message.target\n if len(user_name) <= 0:\n this_user.send_system_message_async(\"no username provided try /help command\")\n continue\n\n if not group:\n this_user.send_system_message_async(\"group no longer exists\")\n this_user.send_system_message(f\"/switch {global_group.name}\")\n continue\n user = None\n with server_state_lock.for_write():\n if group.admin is not this_user:\n this_user.send_system_message_async(\n f\"can't kick {user_name} you are not the admin of {group.name}\")\n continue\n for u in group.users:\n if u.name == user_name:\n user = u\n break\n if user is None:\n this_user.send_system_message_async(f\"{user_name} is not in your group {group.name}\")\n continue\n elif user == this_user:\n this_user.send_system_message_async(\"you can't kick your self, use /leave\")\n continue\n group.remove_user(user, f\"{user.name} was kicked from the group\")\n user.send_system_message_async(f\"you was kicked by the admin from group {group.name} {reason}\")\n user.send_system_message_async(f\"/switch {global_group.name}\")\n this_user.send_system_message_async(f\"{user.name} was kicked\")\n elif message.content.startswith('/ban '):\n user_name = message.content[4:].strip()\n if not isinstance(message.target, Group):\n this_user.send_system_message_async(\"target is not a group\")\n continue\n if len(user_name) <= 0:\n this_user.send_system_message_async(\"no username provided try /help command\")\n continue\n\n with server_state_lock.for_write():\n user = None\n for u in users:\n if u.name == user_name:\n user = u\n break\n if not user:\n this_user.send_system_message_async(f\"user {user.name} does not exist\")\n continue\n for group in this_user.groups:\n if this_user == group.admin and this_user in group.users and user in group.users:\n group.remove_user(user, 
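\n                            # banned users are also removed from every group this admin runs\n                            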
f\"{user.name} was banned by the admin\")\n user.send_system_message_async(f\"you was kicked from group {group.name}, because the admin banned you\")\n this_user.ban_list.append(user)\n this_user.send_system_message_async(f\"{user.name} is now in your ban list\")\n elif message.content.startswith('/accept '):\n group_name = message.content[8:].strip()\n if len(group_name) <= 0:\n this_user.send_system_message_async(\"no group name provided try /help command\")\n continue\n group = None\n with server_state_lock.for_write():\n for gr in groups:\n if gr.name == group_name:\n group = gr\n break\n invite: Optional[Invite] = None\n if group:\n for i in reversed(range(len(group.pending_invites))):\n if group.pending_invites[i].user == this_user:\n if invite is None:\n invite = group.pending_invites[i]\n elif group.pending_invites[i].invited_by == group.admin:\n invite = group.pending_invites[i]\n # TODO: clear invite list in kick command or leave command\n group.pending_invites.pop(i) # consume all invites\n invalid = group is None or invite is None or (group.locked and invite.invited_by is not group.admin)\n\n if invalid:\n this_user.send_system_message_async(\"invite expired or group does not exist\")\n continue\n if this_user in group.admin.ban_list:\n this_user.send_system_message(f\"You are banned by the group admin and can't join {group.name}\")\n continue\n group.join_user(this_user, f\"{user.name} has entered the group\" if group is not global_group else f\"{user.name} has re-entered the group\")\n this_user.send_system_message(f\"/switch {group.name}\")\n else:\n with server_state_lock.for_read():\n if isinstance(message.target, Group) and this_user not in message.target.users:\n this_user.send_system_message_async(f\"group {message.target_str} does not exist, or not subscribed\")\n continue\n if isinstance(message.target, ServerUser):\n this_user.send_system_message_async(\n f\"You're whispering to {message.target.name}: {message.content}\")\n if isinstance(message.target, ServerUser) and this_user in message.target.ban_list:\n this_user.send_system_message_async(f\"you are banned by {message.target.name}\")\n continue\n if isinstance(message.target, ServerUser) and message.target in this_user.ban_list:\n this_user.send_system_message_async(f\"you banned {message.target.name}\")\n continue\n if isinstance(message.target, Group) and this_user in message.target.admin.ban_list:\n this_user.send_system_message_async(f\"you are banned by {message.target.name}'s admin\")\n continue\n if message.content.strip() == '':\n this_user.send_system_message_async(\"empty message\")\n continue\n # forward to target(s)\n message.target.send_bytes_async(\n ServerMessage.to_client(\n target_context=message.target_context,\n sender_context=Message.CONTEXT_USER,\n target=message.target,\n sender=this_user,\n content=message.content,\n report=True\n )\n )\n except BaseException:\n thread_print(f\"error in handler for {full_address} ({this_user.name}), cause: {traceback.format_exc()}\")\n finally:\n with server_state_lock.for_write():\n if this_user in users:\n users.remove(this_user)\n for j in reversed(range(len(groups))):\n removed = False\n if this_user in groups[j].users:\n groups[j].remove_user(this_user, f\"{this_user.name} has disconnected\")\n if len(groups[j].users) == 0 and groups[j] is not global_group:\n thread_print(f\"abandoned group was removed: {groups[j].name}\")\n groups.pop(j)\n removed = True\n if not removed:\n for i in reversed(range(len(groups[j].pending_invites))):\n if 
groups[j].pending_invites[i].user == this_user:\n groups[j].pending_invites.pop(i)\n socket.close()\n\n\ndef server():\n import sys\n args = parse_args(sys.argv[1:])\n host = args.get('host', '0.0.0.0') # default all networks\n try:\n port = int(args.get('port', 50600))\n except ValueError as e:\n print(\"port parse failed, expected integer\")\n raise e\n\n server_socket = sockets.socket(sockets.AF_INET, sockets.SOCK_STREAM)\n server_socket.setsockopt(sockets.SOL_SOCKET, sockets.SO_REUSEADDR, 1)\n\n server_socket.bind((host, port))\n\n server_socket.listen(5)\n print(\"chat server is listening in {}:{} press Ctrl+C to stop\".format(host, port))\n max_users = 30 # protect the server\n\n try:\n while True:\n client_socket, address = server_socket.accept()\n with server_state_lock.for_write():\n if len(users) > max_users:\n client_socket.send(\"SERVER_FULL\".encode(\"utf-8\"))\n client_socket.close()\n continue\n print('Accepted:', address[0], ':', address[1])\n # Start a new thread and return its identifier\n c_thread = threading.Thread(target=handle_client,\n args=(client_socket, address[0] + ':' + str(address[1])))\n c_thread.daemon = True\n c_thread.name = 'client-loop'\n c_thread.start()\n finally:\n server_socket.close()\n\n\ndef main():\n t = threading.Thread(target=server)\n t.daemon = True\n t.start()\n soft_join(t)\n print('\\n! Received keyboard interrupt, server will stop, client threads will be dropped.\\n')\n\n\nif __name__ == '__main__':\n if os.name == 'nt':\n import colorama as colorama\n\n colorama.init() # force colors\n main()\n","repo_name":"Eboubaker/chat","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":24706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23514835648","text":"# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, val=0, left=None, right=None):\r\n# self.val = val\r\n# self.left = left\r\n# self.right = right\r\n\r\nclass Solution:\r\n def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:\r\n \r\n # if the node is None on both trees\r\n if t1 == None and t2 == None:\r\n # then the node is None on the new tree\r\n return None\r\n \r\n # if the node only exists on one tree\r\n # then the value of the new node is the value of the existing node\r\n if t1 == None and t2 != None:\r\n return t2\r\n \r\n elif t1 != None and t2 == None:\r\n return t1\r\n \r\n # if the node exists on both trees\r\n else:\r\n # then the value of the new node is their sum\r\n t1.val += t2.val\r\n t1.left = self.mergeTrees(t1.left, t2.left)\r\n t1.right = self.mergeTrees(t1.right, t2.right)\r\n return t1\r\n \r\n \r\n \r\n","repo_name":"alexpsimone/leetcode","sub_path":"easy/617_merge_two_binary_trees.py","file_name":"617_merge_two_binary_trees.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41502903311","text":"import mysql.connector as mysql\r\nfrom random import randint\r\n\r\nfrom AirplaneReservationSystem import *\r\nfrom Subway import *\r\n\r\n'''\r\ndeclaring global variables of Airplane Seat Reservation ---------------------------------------------------------------\r\n'''\r\n# global variables\r\nsignup = False\r\nlogin = False\r\nbook_a_flight = False\r\ntoday_date = ''\r\ntravel_date = ''\r\ntoday_time = ''\r\nusername = \"\"\r\nsource_city = \"\"\r\ndestination_city = \"\"\r\ndate_diff = ''\r\ntravel_date_string = ''\r\nflights = 
{\"Bengaluru_to_Delhi\": 9,\r\n \"Bengaluru_to_Kolkata\": 10,\r\n \"Bengaluru_to_Mumbai\": 11,\r\n \"Bengaluru_to_Chennai\": 12,\r\n\r\n \"Delhi_to_Kolkata\": 7,\r\n \"Delhi_to_Mumbai\": 8,\r\n \"Delhi_to_Chennai\": 16,\r\n \"Delhi_to_Bengaluru\": 15,\r\n\r\n \"Kolkata_to_Mumbai\": 15,\r\n \"Kolkata_to_Chennai\": 18,\r\n \"Kolkata_to_Delhi\": 12,\r\n \"Kolkata_to_Bengaluru\": 16,\r\n\r\n \"Mumbai_to_Chennai\": 12,\r\n \"Mumbai_to_Delhi\": 13,\r\n \"Mumbai_to_Bengaluru\": 17,\r\n \"Mumbai_to_Kolkata\": 21,\r\n\r\n \"Chennai_to_Bengaluru\": 18,\r\n \"Chennai_to_Delhi\": 22,\r\n \"Chennai_to_Mumbai\": 18,\r\n \"Chennai_to_Kolkata\": 0}\r\n\r\npercentage = 10\r\noccupied_seats = list()\r\nall_seats = [\"1A\", \"1F\", \"2A\", \"2F\", \"3A\", \"3B\", \"3E\", \"3F\", \"4A\", \"4B\", \"4E\", \"4F\", \"5A\", \"5B\", \"5E\", \"5F\",\r\n \"6A\", \"6B\", \"6C\", \"6D\", \"6E\", \"6F\", \"7A\", \"7B\", \"7C\", \"7D\", \"7E\", \"7F\", \"8A\", \"8B\", \"8C\", \"8D\", \"8E\", \"8F\",\r\n \"9A\", \"9B\", \"9C\", \"9D\", \"9E\", \"9F\", \"10A\", \"10B\", \"10C\", \"10D\", \"10E\", \"10F\",\r\n \"11A\", \"11B\", \"11C\", \"11D\", \"11E\", \"11F\"]\r\n\r\n# declaring global variables to accept the seat from the user --------------------------------------------\r\nnumber_of_seats_booked = 0\r\nusers_seat = list(\"\")\r\nall_seats_index = 0\r\nterminator = 0\r\nname = list()\r\nemail = list()\r\nphone_number = list()\r\ncurrent_user_number = 0\r\n\r\n'''\r\nGLOBAL VARIABLES OF SUBWAY ------------------------------------------------------------------------------------------\r\n'''\r\n# declaring global variables [lists]\r\nfood_list = list()\r\nfood_cost = list()\r\nfood = list()\r\n\r\n'''\r\nGLOBAL VARIABLES OF HOTEL ROOM RESERVATION ---------------------------------------------------------------------------\r\n'''\r\n# declaring global variables\r\nsingle_bed = [1, 2, 3, 4, 5]\r\ndouble_bed = [6, 7, 8, 9, 10]\r\ncost_hotel_room_reservation = 0.0\r\nroom_choice = list()\r\nroom_cost = list()\r\nroom = list()\r\n\r\n'''\r\nMAIN ----------------------------------------------------------------------------------------------------------------\r\n'''\r\n\r\n# ----- AIRPLANE SEAT RESERVATION -----\r\n# __main__\r\n# python, mysql connectivity\r\nmycon = mysql.connect(host=\"localhost\", user=\"root\", port='3306', password=\"Akshat@2005\", database=\"class12project\")\r\ncursor = mycon.cursor()\r\n\r\n# displaying welcome message to the user and prompting for signup and login\r\nwhile True:\r\n display_welcome()\r\n choice = int(input(f'{bcolors.OKBLUE}Please enter your choice: {bcolors.ENDC}'))\r\n if choice == 1:\r\n username, signup = create_user(mycon, cursor)\r\n elif choice == 2:\r\n username, login = login_user(cursor)\r\n break\r\n else:\r\n print(f'{bcolors.FAIL}INVALID CHOICE{bcolors.ENDC}')\r\n\r\n# menu for Booking a flight and checking for booked flights\r\nif login:\r\n print(f\"{bcolors.WARNING}[1]{bcolors.ENDC} {bcolors.HEADER}BOOK A FLIGHT{bcolors.ENDC}\\n\"\r\n f\"{bcolors.WARNING}[2]{bcolors.ENDC} {bcolors.HEADER}SEE BOOKED FLIGHTS{bcolors.ENDC}\\n\")\r\n c = input()\r\n if c == '1':\r\n book_a_flight = True\r\n source_city, destination_city = choose_city()\r\n key = source_city + \"_to_\" + destination_city\r\n date_diff, travel_date_string = date_choice(key, flights)\r\n elif c == '2':\r\n show_booked_flights(cursor, username)\r\n # I need to use a goto here to take this back to the beginning of the menu (book flight and see booked flights)\r\n\r\nelif signup:\r\n print(f'{bcolors.HEADER}BOOK A 
FLIGHT{bcolors.ENDC}')\r\n book_a_flight = True\r\n source_city, destination_city = choose_city()\r\n key = source_city + \"_to_\" + destination_city\r\n date_diff, travel_date_string = date_choice(key, flights)\r\n\r\n# creating the airplane seat booking\r\nif book_a_flight:\r\n if date_diff == '0':\r\n percentage = 3\r\n elif date_diff == '1':\r\n percentage = 5\r\n elif date_diff == '2':\r\n percentage = 7\r\n else:\r\n percentage = 10\r\n\r\n for i in range(0, len(all_seats)):\r\n x = randint(0, 10)\r\n if 0 < x < percentage:\r\n occupied_seats.append(False)\r\n else:\r\n occupied_seats.append(True)\r\n\r\n # seat booking\r\n total_number_of_seats = input(\"Total number of reservations: \")\r\n available_number_of_seats = 0\r\n\r\n for i in range(0, len(occupied_seats)): # finding the available number of seats\r\n if occupied_seats[i]:\r\n available_number_of_seats += 1\r\n while True:\r\n if int(total_number_of_seats) == 0:\r\n total_number_of_seats = input(f\"{bcolors.WARNING}ZERO? Please enter the proper value: {bcolors.ENDC}\")\r\n elif 0 >= int(total_number_of_seats):\r\n total_number_of_seats = input(f\"{bcolors.WARNING}You have entered a negative value... Please enter the \"\r\n f\"number of reservations as a positive value: {bcolors.ENDC}\")\r\n elif int(total_number_of_seats) >= available_number_of_seats:\r\n total_number_of_seats = input(f\"{bcolors.WARNING}We don't have that many seats available... \"\r\n f\"Please enter a lower value: {bcolors.ENDC}\")\r\n else:\r\n break\r\n\r\n # declaring the value of terminator\r\n terminator = int(total_number_of_seats)\r\n terminator = terminator - 1\r\n display_aeroplane_seat(occupied_seats, all_seats)\r\n accept_users_seat(number_of_seats_booked, users_seat, all_seats_index,\r\n occupied_seats, terminator, name, email, phone_number, current_user_number, all_seats,\r\n room, room_cost, room_choice,\r\n food, food_list, food_cost)\r\n\r\n seeker_rooms = 0\r\n seeker_food = 0\r\n for i in range(0, len(name)):\r\n n = name[i]\r\n u = users_seat[i]\r\n p = phone_number[i]\r\n e = email[i]\r\n insert_string = ''\r\n\r\n if room[i] and food[i]:\r\n f_choice = food_list[seeker_food][0] + ', '\r\n for j in range(1, len(food_list[seeker_food])):\r\n for k in range(0, len(food_list[seeker_food][j])):\r\n f_choice = f_choice + food_list[seeker_food][j][k] + ', '\r\n f_cost = food_cost[seeker_food]\r\n r_choice = room_choice[seeker_rooms]\r\n r_cost = room_cost[seeker_rooms]\r\n insert_string = f\"insert into booked_flight_details values('{username}', '{source_city}',\" \\\r\n f\" '{destination_city}', '{travel_date_string}', '{n}', '{u}', {p}, '{e}', NULL,\" \\\r\n f\"'{r_choice}', {r_cost}, '{f_choice}', {f_cost})\"\r\n seeker_food += 1\r\n seeker_rooms += 1\r\n\r\n elif not room[i] and not food[i]:\r\n insert_string = f\"insert into booked_flight_details values('{username}', '{source_city}',\" \\\r\n f\" '{destination_city}', '{travel_date_string}', '{n}', '{u}', {p}, '{e}', NULL, \" \\\r\n f\"'NO ROOM BOOKED', 0.0, 'NO FOOD ORDERED', 0.0)\"\r\n\r\n elif room[i] and not food[i]:\r\n r_choice = room_choice[seeker_rooms]\r\n r_cost = room_cost[seeker_rooms]\r\n insert_string = f\"insert into booked_flight_details values('{username}', '{source_city}',\" \\\r\n f\" '{destination_city}', '{travel_date_string}', '{n}', '{u}', {p}, '{e}', NULL,\" \\\r\n f\"'{r_choice}', {r_cost}, 'NO FOOD ORDERED', 0.0)\"\r\n seeker_rooms += 1\r\n\r\n elif not room[i] and food[i]:\r\n f_choice = food_list[seeker_food][0] + ', '\r\n for j in range(1, 
len(food_list[seeker_food])):\r\n for k in range(0, len(food_list[seeker_food][j])):\r\n f_choice = f_choice + food_list[seeker_food][j][k] + ', '\r\n f_cost = food_cost[seeker_food]\r\n print(f_choice, f_cost)\r\n insert_string = f\"insert into booked_flight_details values('{username}', '{source_city}',\" \\\r\n f\" '{destination_city}', '{travel_date_string}', '{n}', '{u}', {p}, '{e}', NULL,\" \\\r\n f\"'NO ROOM BOOKED', 0.0, '{f_choice}', {f_cost})\"\r\n seeker_food += 1\r\n\r\n cursor.execute(insert_string)\r\n mycon.commit()\r\n","repo_name":"voyager2005/Travelocity-Python","sub_path":"30thDecember2022/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44290636390","text":"'''\nhttps://cocook.tistory.com/113 참조 1\nhttps://www.youtube.com/watch?v=ZsVVTEfZee8 동영상 강의 시청\n'''\n\n\ndef solution(N, number):\n # 1. dp[i]는 N을 i 번만 사용해서 만들수 있는 집합.,,?\n # 예시) 5,55,555,5555,55555,555555...\n # 정답을 봐도 모르겠따,,,,,,,,,,,,,,,,,,,,,,,,,,\n dp = [set([N * int('1' * i)]) for i in range(1, 9)]\n print(dp)\n for i in range(8): # N을 사용한 횟수\n for j in range(i):\n for num1 in dp[j]: # i 번 사용해서 나타낼 수 있는 수\n for num2 in dp[i - j - 1]: # N-i번 사용해서 나타낼 수 있는 수\n # 사칙연산\n dp[i].add(num1 + num2)\n dp[i].add(num1 - num2)\n dp[i].add(num1 * num2)\n if num2 != 0:\n dp[i].add(num1 // num2)\n\n # 위 과정을 끝내면 N을 i번 사용해서 나타낼 수있는 수가 dp[i]에 저장된다.\n # 만약 그 집합안에 'number'가 있으면\n if number in dp[i]:\n return i + 1 # 정답 출력\n return -1\n\n\ndef solution2(N, number):\n answer = 0\n # 큰 문제 - > 여러개의 작은 문제들로 쪼개어 접근?\n _li = [set() for i in range(8)]\n\n for i in range(len(_li)):\n _li[i].add(int(str(N)) * (i + 1))\n\n for i in range(1, 8): # 1부터 시작하는 이유는, 첫번째는 {5}와 같이 연산 할 것이 없어서\n for j in range(i):\n for op1 in _li[j]:\n for op2 in _li[i - j - 1]:\n _li[i].add(op1 + op2)\n _li[i].add(op1 - op2)\n _li[i].add(op1 * op2)\n if op2 != 0:\n _li[i].add(op1 // op2)\n if number in _li[i]:\n answer = i + 1\n break\n return answer\n\n# print(solution(5, 12))\nprint(solution2(5, 12))\n","repo_name":"Slowth-KIM/crewcrew-coding-test-study","sub_path":"최강헌/코딩테스트 고득점 kit/N으로 표현.py","file_name":"N으로 표현.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"ko","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"23571384891","text":"import queue\n\ndef bathroomStalls(n, k):\n if n == k:\n return 0, 0\n stallQueue = queue.PriorityQueue(n)\n stallQueue.put((-n, n))\n openStalls, newStalls, otherStalls = 0, 0, 0\n for i in range(0, k):\n openStalls = stallQueue.get()[1] - 1\n newStalls = openStalls // 2\n otherStalls = openStalls - newStalls\n stallQueue.put((-otherStalls, otherStalls))\n stallQueue.put((-newStalls, newStalls))\n\n return otherStalls, newStalls\n\ndef main():\n t = int(input())\n for i in range(1, t + 1):\n n, m = [int(s) for s in input().split(\" \")]\n y, z = bathroomStalls(n ,m)\n print (\"Case #{}: {} {}\".format(i, y, z))\n\nmain()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2790.py","file_name":"2790.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35499482293","text":"from turtle import Turtle, Screen\nimport random\n\nturt_lisy = []\nfor i in range(0,6):\n turt_lisy.append(Turtle(shape=\"turtle\"))\ncolor = [\"red\",\"green\",\"yellow\",\"blue\",\"brown\",\"orange\"]\nflag = 1\nscr = 
Screen()\nscr.setup(width=500,height=400)\nbid = scr.textinput(\"BID\",\"Who do you think will win\")\nflag = 0\nind = 0\nfor i in range(90,-90,-30):\n    turt_lisy[ind].penup()\n    turt_lisy[ind].goto(-230,i)\n    turt_lisy[ind].color(color[ind])\n    ind+=1\n\nwhile flag == 0:\n    \n    for i in range(0,6):\n        turt_lisy[i].forward(random.randint(1,10))\n        if turt_lisy[i].xcor() > 230:\n            flag = 1\n            if turt_lisy[i].pencolor() == bid: # pencolor is a method, so it must be called to get the colour string\n                print(\"You won\")\n            else:\n                print(turt_lisy[i].pencolor())\nscr.exitonclick()","repo_name":"sanskarlather/100-days-of-code-with-python","sub_path":"Day 18/turtle_race.py","file_name":"turtle_race.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3046596126","text":"import logging\nfrom typing import Optional\n\nfrom uvicorn.logging import DefaultFormatter\n\nfrom mlup.utils.oop import MetaSingleton\n\n\nclass SwitchFormatter(DefaultFormatter, metaclass=MetaSingleton):\n    def __init__(self, fmt: Optional[str] = None, *args, **kwargs):\n        self._fmts = {'default': fmt}\n        kwargs_without_fmt = {}\n        for arg, val in kwargs.items():\n            if arg.startswith('fmt_'):\n                self._fmts[arg.split('_', 1)[1]] = val\n            else:\n                kwargs_without_fmt[arg] = val\n\n        super().__init__(fmt=fmt, *args, **kwargs_without_fmt)\n\n    def set_fmt(self, fmt_name: str = 'default'):\n        fmt = self._fmts[fmt_name]\n        self._style = logging.PercentStyle(fmt)\n        # This \"if\" for python3.7-\n        if hasattr(self._style, 'validate'):\n            self._style.validate()\n        self._fmt = self._style._fmt\n\n\ndef configure_logging_formatter(formatter_name: str = 'default'):\n    SwitchFormatter().set_fmt(formatter_name)\n","repo_name":"nxexox/pymlup","sub_path":"mlup/utils/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"6911004330","text":"import time\nfrom datetime import date\nfrom selenium import webdriver\nfrom selenium.webdriver import Keys\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\n\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option(\"detach\", True)\nservice_obj = Service(\"C:\\\\Users\\\\ananthamahesh.yeruva\\\\Downloads\\\\chromedriver_win32\\\\chromedriver\")\ndriver = webdriver.Chrome(options=options, service=service_obj)\ndriver.get(\"https://console.horse.mycoachapp.org\")\ndriver.maximize_window()\n\n#================================================================================================================#\n# Valid login\n#================================================================================================================#\ndriver.find_element(By.XPATH, \"//input[@ng-model='ctrl.email']\").send_keys(\"ananthamahesh.yeruva@zenq.com\")\ndriver.find_element(By.XPATH, \"//input[@ng-model='ctrl.password']\").send_keys(\"Letmein@123\")\ndriver.find_element(By.XPATH, \"//button[@type='submit']\").click()\ntime.sleep(10) # Sleep command to enter 2FA code\ndriver.find_element(By.XPATH, \"//span[@class='ng-scope']\").click()\ntime.sleep(5)\nprint(driver.title)\n\n# ================================================================================================================#\n# Status filtering\n# ================================================================================================================#\ndriver.find_element(By.XPATH, \"//a[@aria-label='Events']\").click() # 
Events\ndriver.find_element(By.XPATH, \"//a[@aria-label='Tips']\").click() # Tips\ntime.sleep(5)\n\n#Calendar View\ndriver.find_element(By.XPATH, \"//md-input-container[@class='flex-none hide show-gt-sm']//button[@aria-label='Calendar View'][normalize-space()='Calendar View']\").click()\ntime.sleep(3)\n","repo_name":"Mahesh1605/SeleniumPython-CoachConsole","sub_path":"Console/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27627463667","text":"import urllib.request\nfrom zipfile import ZipFile\nimport subprocess\nimport json\nfrom skrybautils import util\n\n\npackages_file = util.get_packages_file_name()\nvuln_file = util.get_vuln_file_name()\n\npackages_json_file = util.get_packages_json_file_name()\nvuln_json_file = util.get_vuln_json_file_name()\n\nruntime_commands = util.get_runtime_commands()\ndefault_version = '3.8.0'\n\n\ndef parse(p, v):\n with open(f'{util.get_tmp_lambda_package_path()}/{packages_json_file}', 'a+') as pj, open(f'{util.get_tmp_lambda_package_path()}/{vuln_json_file}', 'a+') as vj:\n p.seek(0)\n pobj = []\n packages = json.load(p)\n try:\n if(len(packages) > 0):\n for package in packages:\n if(package.get('name', '') != '' and package.get('version', '') != ''):\n pobj.append(\n {'packageName': package.get('name', ''), 'version': package.get('version', '')})\n except:\n pass\n print(pobj)\n pj.write(json.dumps(pobj))\n\n v.seek(0)\n vobj = []\n vuln_file = json.load(v)\n try:\n if(len(vuln_file) > 0):\n for vuln in vuln_file:\n if(len(vuln) >= 4):\n vobj.append(\n {'packageName': vuln[0], 'affected': vuln[1], 'severity': '', 'references': [vuln[3]]})\n except:\n pass\n print(vobj)\n vj.write(json.dumps(vobj))\n\n\ndef process(code_url, bucket_name, fn_name, scan_time, region):\n filename = '/tmp/lambda.zip'\n urllib.request.urlretrieve(code_url, filename)\n with ZipFile(filename, 'r') as zipObj:\n zipObj.extractall(path='/tmp/scanner')\n with open(f'{util.get_tmp_lambda_package_path()}/{packages_file}', 'a+') as p, open(f'{util.get_tmp_lambda_package_path()}/{vuln_file}', 'a+') as v:\n try:\n inventory_command = ['pip3', 'list', '--format', 'json']\n subprocess.check_call(inventory_command, stdout=p)\n install_vuln_scanner_command = ['pip3', 'install', 'safety']\n subprocess.check_call(install_vuln_scanner_command, stdout=subprocess.DEVNULL)\n vuln_command = ['safety', 'check', '--json']\n subprocess.check_call(vuln_command, stdout=v)\n print('ran report and inventory...')\n parse(p, v)\n util.send_files(bucket_name, f'{scan_time}/{fn_name}-{region}')\n except Exception as e:\n print(e)\n finally:\n cleanup_command = ['rm', '-rf',\n util.get_tmp_lambda_package_path()]\n subprocess.call(cleanup_command)\n","repo_name":"allenmichael/skryba","sub_path":"scanner/processors/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28474100817","text":"import math # DOUBT\n\ndef f():\n t = 0.0\n ymid = 1\n yRK = 1\n while t<=1:\n print('(%s): t=%.2f midpoint:%.20f RK:%.20f'%(mode, t, ymid, yRK))\n t+=h\n ymid=midpointstep(t, ymid, h)\n yRK=RKstep(t, yRK, h)\n return\n\ndef midpointstep(t, y, h):\n return y+h*ydot(t+h/2, y+ydot(t, y)*h/2)\n\ndef RKstep(t, y, h):\n s1 = ydot(t, y)\n s2 = ydot(t+h/2, y+s1*h/2)\n s3 = ydot(t+h/2, y+s2*h/2)\n s4 = ydot(t+h, y+h*s3)\n return y+(s1+2*s2+2*s3+s4)*h/6\n\nmode = 
'6.18'\nh=0.1\n\ndef ydot(t,y):\n if mode == '6.18':\n return t*y+(t**3)\n if mode == 'a':\n return t\n if mode == 'b':\n return t*t*y\n if mode == 'c':\n return 2*(t+1)*y\n if mode == 'd':\n return 5*(t**4)*y\n if mode == 'e':\n return 1/(y**2)\n if mode == 'f':\n return (t**3)/(y**2)\n\nif __name__ == \"__main__\":\n f()\n for h in [0.1]:#, 0.05, 0.025]:\n for i in range(97,103):\n mode = chr(i)\n f()","repo_name":"Guanxy-baolitu/NumericalAnalysis","sub_path":"6_4_1.py","file_name":"6_4_1.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34822802168","text":"from lib.ModelWrapper import ModelWrapper\nfrom tensorboardX import SummaryWriter\nimport torch\nfrom torchvision import transforms, datasets\nfrom archs.mnist import fc, LeNet\nimport os\nfrom lib import utils\n\nlr = 1.2e-3\ndata_name = 'mnist'\nmodel_name = 'fc'\n# train\ntrain_batch_size = 60\ntrain_itrs = 8000\n# eval\neval_batch_size = 500\neval_itrs = 200\n# init recorder\ninit_record_itr = 1\n\n\ndataset = datasets.MNIST\ntrain_transform = transforms.Compose([transforms.ToTensor()])\neval_transform = transforms.Compose([transforms.ToTensor()])\nif model_name == 'fc':\n model = fc.FC()\nelif model_name == 'lenet':\n model = LeNet.LeNet()\nelse:\n raise Exception(\"No such model!\")\n\nmodel.apply(utils.weight_init_method)\n\n# load data\ntrain_data = dataset('D:/Datasets', train=True, download=True, transform=train_transform)\ntest_data = dataset('D:/Datasets', train=False, transform=eval_transform)\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=train_batch_size, shuffle=True, num_workers=0,\n drop_last=False)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=eval_batch_size, shuffle=False, num_workers=0,\n drop_last=False)\n\n# build model\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else 'cpu')\nmodel = model.to(device)\n\ncriterion = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-4)\nwrapper = ModelWrapper(model, optimizer, criterion, device)\n\n# train the model\nsave_path = os.path.join('runs', data_name, model_name)\nif not os.path.exists(save_path):\n os.makedirs(save_path)\nwriter = SummaryWriter(logdir=os.path.join(save_path, \"log\"))\n\nitr_index = 1\nwrapper.train()\n\nwhile itr_index <= train_itrs:\n\n if itr_index == init_record_itr: # record the init weights.\n state = {\n 'net': model.state_dict(),\n 'itr': itr_index,\n }\n torch.save(state, os.path.join(save_path, \"init_{}.pkl\".format(itr_index)))\n\n # train loop\n for (inputs, targets) in train_loader:\n loss, acc, _ = wrapper.train_on_batch(inputs, targets)\n writer.add_scalar(\"train acc\", acc, itr_index)\n writer.add_scalar(\"train loss\", loss, itr_index)\n print(\"itr: {}/{}, loss={}, acc={}\".format(itr_index, train_itrs, loss, acc))\n if itr_index % eval_itrs == 0:\n wrapper.eval()\n test_loss, test_acc = wrapper.eval_all(test_loader)\n print(\"testing...\")\n print(\"itr: {}/{}, loss={}, acc={}\".format(itr_index, train_itrs, test_loss, test_acc))\n writer.add_scalar(\"test acc\", test_acc, itr_index)\n writer.add_scalar(\"test loss\", test_loss, itr_index)\n state = {\n 'net': model.state_dict(),\n 'acc': test_acc,\n 'itr': itr_index,\n }\n torch.save(state, os.path.join(save_path, \"ckpt.pkl\"))\n writer.flush()\n # return to train state.\n wrapper.train()\n itr_index += 1\n if itr_index > train_itrs:\n 
break\n\nwriter.close()\n","repo_name":"ZhangXiao96/Lottery-Ticket-Hypothesis-for-DNNs","sub_path":"example/train_mnist.py","file_name":"train_mnist.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"25819320991","text":"import classes.Bill_Payments\r\nimport classes.Options\r\nfrom database.Bills_db import paybill_fetchtable,get_bal,set_bal_bill,delete_row,set_bal_Bank\r\ndef payBills(User_ID):\r\n print('Your existing billers:')\r\n list_pay=paybill_fetchtable(User_ID)\r\n choice2=input('--Press any other key for Back--\\nChoose the biller: ')\r\n if(choice2.isdigit() and int(choice2)<=len(list_pay)):\r\n print(list_pay[int(choice2)-1])\r\n print('Your current balance is: '+str(get_bal(User_ID)))\r\n amount=int(input('Enter amount you want to pay: '))\r\n if (amount<=get_bal(User_ID)):\r\n choice3=input('Confirm(Y/N): ')\r\n if(choice3.upper()=='Y'):\r\n try:\r\n if(amount==int((list_pay[int(choice2)-1])[2])):\r\n delete_row(User_ID,(list_pay[int(choice2)-1])[0],amount)\r\n elif(amount torch.Tensor :\n assert torch.is_tensor(x);\n shape = x.shape;\n oh = torch.zeros(\n (shape[0], n_classes) + shape[1:], \n device=x.device, \n dtype=x.dtype, \n );\n\n return oh.scatter_(1, x.unsqueeze(1).long(), 1);","repo_name":"humansensinglab/dfcil-hgr","sub_path":"losses/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"11172287066","text":"import sys\ninput = sys.stdin.readline\n\nN , S = map(int,input().split())\ndata = list(map(int,input().split()))\n\nstart = 0\nend = 1\nlength = 2147483647\nsum = data[0]\n\nwhile (end < N+1):\n\n if sum >= S and end-start < length :\n length = end - start\n sum = sum - data[start]\n start = start + 1\n\n elif sum >= S :\n sum = sum - data[start]\n start = start + 1\n\n elif end == N and sum int:\n preSum = 0\n for i in range(k):\n preSum += cardPoints[i]\n ans = preSum\n rare = len(cardPoints) - 1\n front = k - 1\n for _ in range(k):\n preSum = preSum - cardPoints[front] + cardPoints[rare]\n ans = max(ans, preSum)\n front -= 1\n rare -= 1\n return ans","repo_name":"Sol-cito/LeetCoding","sub_path":"1423-maximum-points-you-can-obtain-from-cards/1423-maximum-points-you-can-obtain-from-cards.py","file_name":"1423-maximum-points-you-can-obtain-from-cards.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17711889579","text":"import requests\nimport tkinter\nfrom tkinter import *\n\n\n# Acess api\nlink = 'https://api.adviceslip.com/advice'\nrequest = requests.get(link)\nrequest_json = request.json()\nadvice = request_json.get('slip').get('advice')\n#print(advice)\n\n\n# Window\nmaster = Tk()\nmaster.title('Minuts of wisdom')\nmaster.geometry('490x560+650+200')\nmaster.minsize(490, 560)\nmaster.maxsize(490, 560)\nbackground = PhotoImage(file='images/master.png')\nbackground_master = Label(master, image=background)\nbackground_master.pack()\n\n\n# Content\n\ncont = len(advice)\n\ntitle = Label(master, text='Minuts of wisdom', font=('Verdana', 24, 'bold'), fg='#FFFFFF', background='black')\ntitle.place(width=315, height=30, x=90, y=10)\n\ndef click_advice():\n\n result = Label(master, text=advice[0:46], font=('Verdana', 12, 'bold'), justify=CENTER, fg='#FFFFFF', background='black')\n result.place(width=472, height=40, x=10, 
y=220)\n result_comp = Label(master, text=advice[46:96], font=('Verdana', 12, 'bold'), justify=CENTER, fg='#FFFFFF', background='black')\n result_comp.place(width=472, height=40, x=10, y=250)\n\n button_close = Button(master, text='Close', font=('Verdana', 12, 'bold'), command=master.destroy)\n button_close.place(width=100, height=30, x=204, y=300)\n\n\n\nbutton = Button(master, text='Click here to your advice!', font=('Verdana', 12, 'bold'), command=click_advice)\nbutton.place(width=305, height=30, x=94, y=180)\n\n\nmaster.mainloop()","repo_name":"lucasaclaro/api-advices","sub_path":"advices/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"429172199","text":"from petereport.settings import MARTOR_UPLOAD_PATH, MARTOR_MEDIA_URL\n\nimport re, os, base64, mimetypes\nfrom pathlib import Path\n\nlocal_media_file_regex = re.compile(r'.*\\((' + MARTOR_UPLOAD_PATH + '.+\\.(?:png|gif|jpg|jpeg))\\).*')\nmimetypes.init()\n\ndef replace_media_url_local_base64(markdown):\n return extract_local_media_files(\n markdown.replace('(' + os.path.join(MARTOR_MEDIA_URL, 'uploads'), '(' + MARTOR_UPLOAD_PATH)\n )\n\ndef extract_local_media_files(markdown):\n mediafiles = re.findall(local_media_file_regex, markdown)\n if len(mediafiles) > 0:\n mediafiles = list(dict.fromkeys(mediafiles))\n for media in mediafiles:\n markdown = markdown.replace(media, local_file_to_base64(media))\n return markdown\n\ndef local_file_to_base64(mediafile):\n path = Path(mediafile)\n if path.is_file():\n with open(path, 'rb') as fh:\n mt = mimetypes.guess_type(path, strict=True)\n if mt:\n mt = mt[0]\n else:\n mt = 'application/octet-stream'\n return 'data:' + mt + ';base64,' + base64.b64encode(fh.read()).decode('utf-8')\n else:\n return mediafile","repo_name":"1modm/petereport","sub_path":"app/preport/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":407,"dataset":"github-code","pt":"61"} +{"seq_id":"11450750612","text":"from PIL import Image, ImageDraw\nimport random\nimport os.path\nimport numpy\nimport glob\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt\n\n# CONDITIONS FOR USE:\n# - all pictures in pictures folder are .png images, names with their corresponding label as the first character in the filename. 
Example: 0.png, 0 (copy).png, 00.png will all have the same class 0.\n# - pictures in backgrounds folder are .jpg and larger than all the .png images\n# - change parameters marked with \"////\"\n\n#//// change lowest and highest scaling of image\nLOWEST_SCALE = 0.05\nHIGHEST_SCALE = 1.0\n\n#//// View images?\nVIEW_PREVIEW_IMAGES = False\n\n#//// change amount of sets desired\nTOTAL_SET_NUMBER = 2\n\n\nclass DatasetCreator:\n\n def __init__(self):\n script_dir = os.path.dirname(os.path.abspath(__file__))\n backgroundsFolderPath = script_dir +'/backgrounds'\n picturesFolderPath = script_dir +'/subjectPics'\n backgroundsPath = glob.glob(backgroundsFolderPath + '/*.jpg')\n picturesPathList = glob.glob(picturesFolderPath + '/*.png')\n self.savepath = script_dir + \"/createdImages\"\n\n self.labelList = []\n for i in range(len(picturesPathList)):\n self.labelList.append(Path(picturesPathList[i]).stem[0])\n\n #onlyfiles = [f for f in os.listdir(backgroundsFolderPath) if os.path.isfile(os.path.join(backgroundsFolderPath, f))]\n\n self.backg_array = numpy.array( [numpy.array(Image.open(img)) for img in backgroundsPath] )\n self.picture_array = numpy.array([numpy.array(Image.open(img)) for img in picturesPathList])\n\n def placePictureRandom(self, label, background, picture, i, j):\n\n pic = Image.fromarray(picture, 'RGBA')\n\n randomScalingFactor = random.uniform(LOWEST_SCALE,HIGHEST_SCALE)\n pic = pic.resize((int(randomScalingFactor*pic.width),int(randomScalingFactor*pic.height)), Image.ANTIALIAS)\n\n picCoor = pic.getbbox()\n picCrop = pic.crop(picCoor)\n\n x1 = random.randint(0, background.shape[1]-picCrop.size[0]) #picture.shape[1]\n y1 = random.randint(0, background.shape[0]-picCrop.size[1]) #picture.shape[0]\n x2 = x1 + picCrop.size[0]\n y2 = y1 + picCrop.size[1]\n\n # Normalizing for YOLO-label format\n width = float(picCrop.size[0]) / float(background.shape[1])\n height = float(picCrop.size[1]) / float(background.shape[0])\n \n x = (((x2-x1) / 2) + x1) / float(background.shape[1])\n y = (((y2-y1) / 2) + y1) / float(background.shape[0])\n\n backg = Image.fromarray(background, 'RGB')\n \n backg.paste(picCrop, (x1, y1), picCrop)\n draw = ImageDraw.Draw(backg)\n\n if (VIEW_PREVIEW_IMAGES == True):\n draw.rectangle([x1,y1,x2,y2], outline=(255,0,0))\n #draw.line([((x-width/2)*background.shape[1],y*background.shape[0]), ((x+width/2)*background.shape[1],y*background.shape[0])], fill=(0,0,255), width = 3)\n #draw.line([(x*background.shape[1],(y-height/2)*background.shape[0]), (x*background.shape[1],(y+height/2)*background.shape[0])], fill=(0,0,255), width = 3)\n backg.show()\n \n \n self.save_to_folder(label,x,y,width,height,backg,i,j)\n\n\n\n def save_to_folder(self,label,x,y,width,height,image,i, j):\n string = str(label) + \" \" + str(x) + \" \" + str(y) + \" \" + str(width) + \" \" + str(height)\n randomInt = str(random.randint(0,1000))\n file=open(self.savepath + \"/\" + \"image\" + str(i+10) + \"-\" + str(j) + \"-\" + randomInt + \".txt\", \"w\")\n file.write(string)\n file.close()\n image.save(self.savepath + \"/\" + \"image\" + str(i+10) + \"-\" + str(j) + \"-\" + randomInt + \".jpg\") \n \n \n\n def createEntireSet(self):\n #print(self.picture_array.shape)\n for i in range(self.picture_array.shape[0]):\n for j in range(self.backg_array.shape[0]):\n self.placePictureRandom(self.labelList[i], self.backg_array[j], self.picture_array[i], i, j)\n\n\ndata = DatasetCreator()\n\nfor i in range(TOTAL_SET_NUMBER):\n 
data.createEntireSet()\n\n","repo_name":"siljesu/DatasetCreator","sub_path":"DatasetCreator/DatasetCreator_single.py","file_name":"DatasetCreator_single.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74661328835","text":"from django.test import TestCase, TransactionTestCase, SimpleTestCase\nimport vcr\nfrom .models import Tweet, NGram, Parser, NovelParagraph, Document\nfrom .models import InvalidSourceException\nfrom chineseroom.tweets import views\nfrom django.http import QueryDict\n\nimport ipdb\n\n\nclass TwitterApiTest(TransactionTestCase):\n def test_oldest_for_user(self):\n assert Tweet._oldest_for_user('c_alan_zoppa') == None\n Tweet.objects.create(\n message=\"arbitrary\",\n user='c_alan_zoppa',\n twitter_id='2'\n )\n Tweet.objects.create(\n message=\"arbitrary\",\n user='c_alan_zoppa',\n twitter_id='1'\n )\n assert Tweet._oldest_for_user('c_alan_zoppa') == '1'\n\n\n def test_gather_for_user(self):\n with vcr.use_cassette(\n 'tweets/vcr_cassettes/page_in_history.yml',\n record_mode='new_episodes'\n ):\n Tweet._gather_for_user('c_alan_zoppa')\n assert Tweet.objects.count() == 315\n\n\nclass ParserSimpleTest(SimpleTestCase):\n def test_merge_leading_chars(self):\n merged = Parser._merge_leading_chars([\n ('@', 'AAA'),\n ('herbert', 'CCC'),\n ('this', 'AAA'),\n ('is', 'AAA'),\n ('a', 'AAA'),\n ('#', 'AAA'),\n ('message', 'BBB'),\n ('for', 'AAA'),\n ('#', 'AAA'),\n ('you', 'BBB'),\n ], ('#', '@'))\n\n assert merged == [\n ('@herbert', '@+CCC'),\n ('this', 'AAA'),\n ('is', 'AAA'),\n ('a', 'AAA'),\n ('#message', '#+BBB'),\n ('for', 'AAA'),\n ('#you', '#+BBB'),\n ]\n\n def test_twitter_parse(self):\n test_string = '@herbert this is a #message for #you'\n output = Parser._twitter_transform_sentence(test_string)\n\n expected = [\n ('@herbert', '@+NN'),\n ('this', 'DT'),\n ('is', 'VBZ'),\n ('a', 'DT'),\n ('#message', '#+NN'),\n ('for', 'IN'),\n ('#you', '#+PRP')\n ]\n\n assert output == expected\n assert Parser.twitter_parse(test_string) == [expected]\n\n\nclass TwitterNGramTest(TransactionTestCase):\n\n def setUp(self):\n self.twitter_sentence = [\n ('@herbert', '@+NN'),\n ('this', 'DT'),\n ('is', 'VBZ'),\n ('a', 'DT'),\n ('#message', '#+NN'),\n ('for', 'IN'),\n ('#you', '#+PRP')\n ]\n\n def test_params_from_list(self):\n params = NGram._params_from_list(\n [\n ('@herbert', '@+NN'),\n ('this', 'DT'),\n ('is', 'VBZ'),\n ],\n source='c_alan_zoppa@twitter',\n sentence_starter=True,\n sentence_terminator=False\n )\n\n for key, value in {\n 'tag_three': 'VBZ',\n 'tag_two': 'DT',\n 'token_three': 'is',\n 'token_two': 'this',\n 'sentence_starter': True,\n 'sentence_terminator': False,\n 'tag_one': '@+NN',\n 'token_one': '@herbert'\n }.items():\n assert params[key] == value\n assert 'twitter_user_id' in params\n\n def test_ngramify_twitter_sentence(self):\n NGram.new_ngrams_from_parsed_sentences( \n [self.twitter_sentence],\n 'c_alan_zoppa@twitter'\n )\n first = NGram.objects.first()\n assert NGram.objects.count() == 5\n assert first.token_one == '@herbert'\n assert first.token_two == 'this'\n assert first.token_three == 'is'\n assert first.tag_one == '@+NN'\n assert first.tag_two == 'DT'\n assert first.tag_three == 'VBZ'\n\n\nclass EndToEndGatherTest(TransactionTestCase):\n def test_gather_history_for(self):\n with vcr.use_cassette(\n 'tweets/vcr_cassettes/page_in_history.yml',\n record_mode='new_episodes'\n ):\n Tweet.gather_history_for('c_alan_zoppa') \n assert NGram.objects.count() == 
1152\n\n\nclass NovelParagraphTests(TransactionTestCase):\n def setUp(self):\n sentence = Parser.twitter_parse(\n \"The quick brown fox jumped over a lazy dog #blessed\"\n )\n NGram.new_ngrams_from_parsed_sentences(sentence, 'fake_user@twitter')\n sentence = Parser.twitter_parse(\n \"As if one could kill time without injuring eternity.\"\n )\n NGram.new_ngrams_from_parsed_sentences(sentence, 'hd_thoreau@twitter')\n\n def test_start_sentence_marker(self):\n first = NGram.objects.get(token_one='The')\n assert first.sentence_starter\n\n def test_end_sentence_marker(self):\n last = NGram.objects.get(token_three='#blessed')\n assert last.sentence_terminator\n\n def test_basics(self):\n nov = NovelParagraph(('fake_user@twitter', 1))\n nov.append_sentence()\n assert nov.human_readable_sentences() == (\n \"The quick brown fox jumped over a lazy dog #blessed.\"\n )\n\n def test_initialize_with_bad_data(self):\n try:\n nov = NovelParagraph(('invalid@twitter', 1))\n passed = False\n except InvalidSourceException:\n passed = True\n assert passed\n \n\n def test_compound(self):\n nov = NovelParagraph(\n ('fake_user@twitter', .5),\n ('hd_thoreau@twitter', .5)\n )\n nov.append_sentence()\n nov.append_sentence()\n\n\nclass DocumentTests(TransactionTestCase):\n def setUp(self):\n self.test_document = Document.objects.create(\n name='Psalm 63',\n text=(\n \"But those that seek my soul, to destroy it, shall go into \"\n \"the lower parts of the earth. They shall fall by the sword: \"\n \"they shall be a portion for foxes.\"\n )\n )\n\n def test_document_parse(self):\n parsed = Parser.document_parse(self.test_document.text)\n assert parsed == [\n [\n ('But', 'CC'), ('those', 'DT'), ('that', 'WDT'),\n ('seek', 'VBP'), ('my', 'PRP$'), ('soul', 'NN'), (',', ','),\n ('to', 'TO'), ('destroy', 'VB'), ('it', 'PRP'), (',', ','),\n ('shall', 'MD'), ('go', 'VB'), ('into', 'IN'), ('the', 'DT'),\n ('lower', 'JJR'), ('parts', 'NNS'), ('of', 'IN'),\n ('the', 'DT'), ('earth', 'NN'), ('.', '.')\n ],\n [\n ('They', 'PRP'), ('shall', 'MD'), ('fall', 'VB'), ('by', 'IN'),\n ('the', 'DT'), ('sword', 'NN'), (':', ':'), ('they', 'PRP'),\n ('shall', 'MD'), ('be', 'VB'), ('a', 'DT'), ('portion', 'NN'),\n ('for', 'IN'), ('foxes', 'NNS'), ('.', '.')\n ]\n ]\n\n def test_rebuild_ngrams_signal(self):\n Document.objects.create(\n name='arbitrary',\n text=\"This is just to test the signal.\"\n )\n assert NGram.objects.filter(document__name='arbitrary').count() == 6\n\n def test_rebuild_ngrams(self):\n source_name = 'document:'+self.test_document.name\n NGram.objects.filter(document__name=self.test_document.name).delete()\n self.test_document.rebuild_ngrams()\n assert NGram.objects.filter(\n document__name=self.test_document.name\n ).count() == 32\n\n\nclass SentencePostprocessing(TransactionTestCase):\n\n def setUp(self):\n example = (\n \"@joe has an example! Take a look; it's at \"\n \"http://www.example.com. 
Hurry, or you'll miss it.\"\n )\n\n sentence = Parser.twitter_parse(example)\n NGram.new_ngrams_from_parsed_sentences(sentence, 'fake_user@twitter')\n\n def test_humanize_sentence(self):\n nov = NovelParagraph(('fake_user@twitter', 1))\n for i in range(0,100):\n nov.append_sentence()\n humanized = nov.human_readable_sentences()\n for i in [\n \"@joe has an example!\",\n \"Take a look; it's at http://www.example.com.\",\n \"Hurry, or you'll miss it.\"\n ]:\n assert i in humanized\n\n\nclass ReckonSymmetricalTokens(TransactionTestCase):\n def setUp(self):\n example = 'Single path with \"only one (quotation mark.'\n sentence = Parser.twitter_parse(example)\n NGram.new_ngrams_from_parsed_sentences(sentence, 'fake_user@twitter') \n\n def test_quotations(self):\n nov = NovelParagraph(('fake_user@twitter', 1))\n nov.append_sentence()\n humanized = nov.human_readable_sentences()\n assert humanized == \"Single path with ``only one (quotation mark)''.\"\n\n\nclass ViewTests(TransactionTestCase):\n def test_extract_probabilities(self):\n post_data = QueryDict(\n \"csrfmiddlewaretoken=4gyNjAssiJBZ4aEOUr5EHTFFJkggI12i\"\n \"&source-c_alan_zoppa%40twitter=0\"\n \"&source-document%3AHarry+Potter+and+the+Chamber+of+Secrets=20\"\n \"&source-document%3APsalms=80\"\n \"&source-sapinker%40twitter=0\"\n )\n generated = views._extract_probabilities(post_data)\n for expected in [\n ('document:Harry Potter and the Chamber of Secrets', 0.2),\n ('document:Psalms', 0.8)\n ]:\n assert expected in generated\n assert len(generated) == 2\n","repo_name":"alanzoppa/chineseroom","sub_path":"chineseroom/tweets/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17912644409","text":"# -*- mode: python -*-\n# vi: set ft=python :\n\n\"\"\"\nIdentifies the C/C++ compiler by examining the presence or values of various\npredefined C preprocessor macros. 
Identifies any compiler capable of compiling\nC++ code that is supported by CMake 3.12.0.\n\nNote that there are constraint_values @bazel_tools//tools/cpp:clang and\n@bazel_tools//tools/cpp:gcc that could potentially distinguish between the\nClang and GCC compilers as an alternative to this approach, but as of Bazel\n0.14.1, they appear not to be compatible with the autogenerated toolchain.\n\nExample:\n load(\"@drake//tools/workspace/cc:repository.bzl\", \"cc_repository\")\n cc_repository(name = \"cc\")\n\n foo.bzl:\n load(\"@cc//:compiler.bzl\", \"COMPILER_ID\")\n\n if \"COMPILER_ID\" == \"AppleClang\":\n # Do something...\n\n if \"COMPILER_ID\" == \"Clang\":\n # Do something...\n\n if \"COMPILER_ID\" == \"GNU\":\n # Do something...\n\nArgument:\n name: A unique name for this rule.\n\"\"\"\n\nload(\"@bazel_tools//tools/cpp:unix_cc_configure.bzl\", \"find_cc\")\n\ndef _impl(repository_ctx):\n file_content = \"\"\"# -*- python -*-\n\n# DO NOT EDIT: generated by cc_repository()\n\n# This file exists to make our directory into a Bazel package, so that our\n# neighboring *.bzl file can be loaded elsewhere.\n\"\"\"\n\n repository_ctx.file(\n \"BUILD.bazel\",\n content = file_content,\n executable = False,\n )\n\n # https://github.com/bazelbuild/bazel/blob/0.14.1/tools/cpp/cc_configure.bzl\n if repository_ctx.os.environ.get(\"BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN\", \"0\") == \"1\": # noqa\n fail(\"Could NOT identify C/C++ compiler because CROSSTOOL is empty.\")\n\n if repository_ctx.os.name == \"mac os x\" and repository_ctx.os.environ.get(\"BAZEL_USE_CPP_ONLY_TOOLCHAIN\", \"0\") != \"1\": # noqa\n # https://github.com/bazelbuild/bazel/blob/0.14.1/tools/cpp/osx_cc_configure.bzl\n cc = repository_ctx.path(Label(\"@local_config_cc//:cc_wrapper.sh\"))\n\n else:\n # https://github.com/bazelbuild/bazel/blob/0.14.1/tools/cpp/unix_cc_configure.bzl\n cc = find_cc(repository_ctx, overriden_tools = {})\n\n executable = repository_ctx.path(\"identify_compiler\")\n result = repository_ctx.execute([\n cc,\n repository_ctx.path(\n Label(\"@drake//tools/workspace/cc:identify_compiler.cc\"),\n ),\n \"-o\",\n executable,\n ])\n if result.return_code != 0:\n fail(\n \"Could NOT identify C/C++ compiler because compilation failed.\",\n result.stderr,\n )\n\n result = repository_ctx.execute([executable])\n if result.return_code != 0:\n fail(\"Could NOT identify C/C++ compiler.\", result.stderr)\n\n compiler_id = result.stdout.strip()\n\n if repository_ctx.os.name == \"mac os x\":\n supported_compilers = [\"AppleClang\"]\n else:\n supported_compilers = [\"Clang\", \"GNU\"]\n\n if compiler_id not in supported_compilers:\n print(\"WARNING: {} is NOT a supported C/C++ compiler.\".format(\n compiler_id,\n ))\n\n file_content = \"\"\"# -*- python -*-\n\n# DO NOT EDIT: generated by cc_repository()\n\nCOMPILER_ID = \"{}\"\n\n\"\"\".format(compiler_id)\n\n repository_ctx.file(\n \"compiler.bzl\",\n content = file_content,\n executable = False,\n )\n\ncc_repository = repository_rule(\n environ = [\n \"BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN\",\n \"BAZEL_USE_CPP_ONLY_TOOLCHAIN\",\n \"CC\",\n ],\n implementation = _impl,\n)\n","repo_name":"ruanmiao/Boussinesq","sub_path":"tools/workspace/cc/repository.bzl","file_name":"repository.bzl","file_ext":"bzl","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71571738115","text":"from enum import unique\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\ndb = SQLAlchemy()\n\ndef 
connect_db(app):\n db.app = app\n db.init_app(app)\n\nDEFAULT_IMAGE_URL = 'https://www.kindpng.com/picc/m/24-248253_user-profile-default-image-png-clipart-png-download.png'\n\nclass User(db.Model):\n __tablename__= 'users'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n first_name = db.Column(db.Text, nullable=False)\n last_name = db.Column(db.Text, nullable=False)\n image_url = db.Column(db.Text, default=DEFAULT_IMAGE_URL)\n\n posts = db.relationship('Post', backref='user')\n\n def __repr__(self):\n p = self\n return f''\n\n @property\n def full_name(self):\n return f'{self.first_name} {self.last_name}'\n\nclass Post(db.Model):\n __tablename__= 'posts'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n title = db.Column(db.Text, nullable=False)\n content = db.Column(db.Text, nullable=False)\n created_at = db.Column(db.DateTime, default=datetime.now)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n\nclass Tag(db.Model):\n __tablename__= 'tags'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.Text, unique=True, nullable=False)\n\n posts = db.relationship('Post', secondary='posts_tags', backref='tags')\n\n\nclass Post_Tag(db.Model):\n __tablename__= 'posts_tags'\n\n post_id = db.Column(db.Integer, db.ForeignKey('posts.id'), primary_key=True)\n tag_id = db.Column(db.Integer, db.ForeignKey('tags.id'), primary_key=True)\n\n","repo_name":"andidietz/blogly_part_three","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12353307484","text":"import re\nimport numpy as np\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Read a file\")\nparser.add_argument(\"--file\", type=str, help=\"File to read\", default=\"output.txt\")\n\nargs = parser.parse_args()\nfile_to_read = \"results/\" + args.file\n\n# Initialize empty lists\nsequential_times = []\nparallel_times = []\n\nlower_percentile = 5\nhigher_percentile = 95\nla = 1\n\n# Open and read the file\nwith open(file_to_read, 'r') as file:\n lines = file.readlines()\n\n # Regular expressions to match the lines with times\n sequential_re = re.compile(r'Time used\\(ms\\) for Original Version: (\\d+) ms.')\n parallel_re = re.compile(r'Time used\\(ms\\) for Parallel Version: (\\d+) ms.')\n\n # Loop through lines\n for line in lines:\n # If the line matches the regular expression for sequential time, add the time to the list\n match_sequential = sequential_re.search(line)\n if match_sequential:\n sequential_times.append(int(match_sequential.group(1)))\n\n # If the line matches the regular expression for parallel time, add the time to the list\n match_parallel = parallel_re.search(line)\n if match_parallel:\n parallel_times.append(int(match_parallel.group(1)))\n\ndef filter_to_confidence_interval(numbers, lower_percentile, higher_percentile):\n # calculate 5th and 95th percentile to create 90% confidence interval\n lower_bound = np.percentile(numbers, lower_percentile)\n upper_bound = np.percentile(numbers, higher_percentile)\n\n # filter list to only numbers within the confidence interval\n filtered_numbers = [num for num in numbers if lower_bound <= num <= upper_bound]\n outside_interval = [num for num in numbers if num not in filtered_numbers]\n\n return filtered_numbers, outside_interval\n\n# # print the average times:\n# time1 = sum(sequential_times)/len(sequential_times)\n# time2 = 
sum(parallel_times)/len(parallel_times)\n# print(\"Sequential times:\", time1)\n# print(\"Parallel times:\", time2)\n#\n\nfiltered_sequential_times, outside_sequential_times = filter_to_confidence_interval(sequential_times, lower_percentile, higher_percentile)\nfiltered_parallel_times, outside_parallel_times = filter_to_confidence_interval(parallel_times, lower_percentile, higher_percentile)\n# print the average times:\ntime1 = sum(sequential_times)/len(sequential_times)\ntime2 = sum(parallel_times)/len(parallel_times)\nprint(\"Sequential times:\", time1)\nprint(\"Parallel times:\", time2)\nprint(\"enhanced: \", (time1-time2)*100/time1, \"%\")\nprint()\n\n# After filtered\ntime3 = sum(filtered_sequential_times)/len(sequential_times)\ntime4 = sum(filtered_parallel_times)/len(parallel_times)\nprint(\"90% of data for Sequential times:\", time3)\nprint(\"90% of data for Parallel times:\", time4)\nprint(\"enhanced: \", (time3-time4)*100/time3, \"%\")\nprint()\n\nimport matplotlib.pyplot as plt\n# ============================================================================================================\n# NOTICE: Draw all the points and 90% of these points\n\n# def plot_points(plt, numbers, la, color):\n# # create a list with 'la' repeated as many times as there are elements in 'numbers'\n# x_values = [la] * len(numbers)\n#\n# # create the scatter plot\n# plt.scatter(x_values, numbers, color=color)\n#\n# # optionally set labels for x and y axis\n# plt.xlabel('la')\n# plt.ylabel('Times(ms)')\n#\n#\n# all_sequential_times = [filtered_sequential_times, filtered_sequential_times, filtered_sequential_times]\n# for filtered_sequential_times in all_sequential_times:\n# plot_points(plt, filtered_sequential_times, la, 'blue')\n# plot_points(plt, outside_sequential_times, la, 'red')\n# la = la + 1\n#\n# upper_bounds = [max(filtered_sequential_times) for filtered_sequential_times in all_sequential_times]\n# plt.plot([1,2,3], upper_bounds, color='red', label='Upper bound')\n#\n# lower_bounds = [min(filtered_sequential_times) for filtered_sequential_times in all_sequential_times]\n# plt.plot([1,2,3], lower_bounds, color='orange', label='Lower bound')\n#\n# # plot_points(plt, filtered_parallel_times, la, 'orange')\n# # plot_points(plt, outside_parallel_times, la, 'green')\n#\n# # display the plot\n#\n# # plt.show()\n#\n# plt.legend()\n# # plt.show()\n\n# ============================================================================================================\n# NOTICE: Draw the spots and the curve\n\n# y_coords = [16, 28, 32, 45, 59, 45, 34, 32, 21, 12]\n# x_coords = [1.3, 1.5, 1.7, 1.9, 2.1, 2.3, 2.5, 2.7, 2.9, 3.1]\n#\n# # use numpy interpolation over the points to obtain a fitted curve\n# curve = np.poly1d(np.polyfit(x_coords, y_coords, 3))\n#\n# # define the x values used to draw the curve\n# x_curve = np.linspace(min(x_coords), max(x_coords), 1000)\n# y_curve = curve(x_curve)\n#\n# # plot the points\n# plt.scatter(x_coords, y_coords, c='red')\n# # plot the curve\n# plt.plot(x_curve, y_curve, c='blue')\n#\n# plt.title('The influence of density')\n# plt.xlabel('density(td)')\n# plt.ylabel('time(ms)')\n#\n# plt.show()\n# ============================================================================================================\n# plot the graph of points, where each two points are linked by a line\n\n# Data points\n# x = list(range(6))\n# y1 = [42.52, 51.34, 65.46, 69.95, 55.43, 48.97]\n# y2 = [40.52, 55.34, 62.46, 67.95, 50.43, 42.97]\n#\n# # Plotting the first set of data points\n# plt.scatter(x, y1, color='blue', marker='o', label=None)\n# plt.plot(x, y1, color='red', label='td=1.3')\n#\n# # 
Plotting the second set of data points\n# plt.scatter(x, y2, color='green', marker='x', label=None)\n# plt.plot(x, y2, color='purple', label='td=1.3 (2nd set)')\n#\n# # Labelling the x and y axes\n# plt.xlabel(\"Index\")\n# plt.ylabel(\"Value\")\n#\n# # Displaying the title\n# plt.title(\"Graph of Given Points\")\n#\n# # Displaying the legend\n# plt.legend(loc='upper left')\n#\n# # Displaying the graph\n# # plt.grid(True)\n# plt.tight_layout()\n# plt.show()\n\n","repo_name":"Alfred0325/Diss","sub_path":"RABIT250/sortData.py","file_name":"sortData.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27850558818","text":"import numpy as np\n\nfor c in range(1):#for test\n class network:\n def __init__(self,epoch,lr,middle1_num,middle2_num,output_num,a):\n self.epoch = epoch \n self.lr = lr #learning rate\n self.x_train = x_train\n self.middle1_num = middle1_num #hidden layer 1\n self.middle2_num = middle2_num #hidden layer 2\n self.output_num = output_num #output layer\n self.a = a #sigmoid's constant\n \n #add bias to input\n def add_bias(self,x):\n return np.append(x,1)\n \n #sigmoid function \n def sigmoid(self,x):\n return 1 / (1 + np.exp(self.a * (-x)))\n \n #derived sigmoid function \n def sigmoid_d(self,x):\n return x * (1 - self.a * x)\n \n #ReLU function\n def ReLU(self,x):\n for i in range(len(x)):\n if(x[i][0] > 0):\n pass\n else:\n x[i][0] = 0\n return x\n \n #derived ReLU function \n def ReLU_d(self,x):\n if(x > 0):\n x = 1\n else:\n x = 0 \n \n return x\n \n #calculating dot\n def calculate_u(self,w,x):\n dot = np.dot(w,x)\n return dot\n \n #update weights \n def update(self,d_1,d_2,d_3,z_0,z_1,z_2):\n \n for i in range(self.middle1_num):\n for j in range(self.input_num):\n self.w_1[i][j] -= self.lr * d_1[0][i] * z_0[j]\n \n for i in range(self.middle2_num):\n for j in range(self.middle1_num):\n self.w_2[i][j] -= self.lr * d_2[0][i] * z_1[j][0]\n \n for i in range(self.output_num):\n for j in range(self.middle2_num):\n self.w_3[i][j] -= self.lr * d_3[0][i] * z_2[j][0]\n \n #prediction from test_data \n def predict(self,x):\n z_0 = self.add_bias(x)\n \n u_1 = self.calculate_u(self.w_1,z_0.reshape(3,1))\n z_1 = self.ReLU(u_1)\n \n u_2 = self.calculate_u(self.w_2,z_1)\n z_2 = self.sigmoid(u_2)\n \n u_3 = self.calculate_u(self.w_3,z_2)\n z_3 = self.sigmoid(u_3)\n \n return z_3\n \n #learning process\n def learning(self,x_train,y_train):\n \n #output1 and teaching data\n X = x_train\n t = y_train\n \n #output of 1st layer\n z_i = [self.add_bias(x) for x in X]\n self.input_num = len(z_i[0])\n \n #initialize weight\n self.w_1 = np.random.uniform(-0.15,0.15,(self.middle1_num,self.input_num))\n self.w_2 = np.random.uniform(-0.15,0.15,(self.middle2_num,self.middle1_num))\n self.w_3 = np.random.uniform(-0.15,0.15,(self.output_num,self.middle2_num))\n \n #forward process\n for j in range(self.epoch):\n for z,t in zip(z_i,y_train):\n z_0 = np.array(z) \n \n u_1 = self.calculate_u(self.w_1,z_0.reshape(3,1))\n z_1 = self.ReLU(u_1)\n \n u_2 = self.calculate_u(self.w_2,z_1)\n z_2 = self.sigmoid(u_2)\n \n u_3 = self.calculate_u(self.w_3,z_2)\n z_3 = self.sigmoid(u_3)\n \n #calculating delta\n d_3 = np.empty((1,self.output_num),float)\n d_3[0][0] = z_3 - t\n \n d_2 = np.empty((1,self.middle2_num),float)\n d_2 = self.w_3 * d_3[0][0] * self.sigmoid_d(u_2).T\n \n d_1 = np.empty((1,self.middle1_num),float)\n for i in range(self.middle1_num):\n d_1[0][i] = np.dot(d_2,self.w_2[:,i].T) * 
self.ReLU_d(u_1[i])\n \n self.update(d_1,d_2,d_3,z_0,z_1,z_2)\n \n #dataset\n x_train = [[0,0],[1,1],[1,0],[0,1]]\n y_train = [0,0,1,1]\n \n #parameters\n epoch = 10000\n lr = 0.01\n middle1_num = 15 \n middle2_num = 15\n output_num = 1\n a = 2\n \n mlp = network(epoch,lr,middle1_num,middle2_num,output_num,a)\n \n #learning process\n mlp.learning(x_train,y_train)\n \n #prediction\n print(\"phase\",c + 1)\n result = [mlp.predict(x) for x in x_train]\n [print(x, \":\", p) for p, x in zip(result, x_train)]\n print(\"\")\n\n\n","repo_name":"2boonvip/classify_XOR","sub_path":"multilayer.py","file_name":"multilayer.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"639210155","text":"import nltk\nnltk.download(\"stopwords\")\nnltk.download(\"punkt\")\nimport pandas as pd\nimport numpy as np\nimport re\nimport string\nimport spacy\nimport pickle\nimport requests\nfrom io import BytesIO\nimport random\nrandom.seed(100)\nnp.random.seed(100)\n\n\ndef pickle_load(link):\n with BytesIO(requests.get(link).content) as f:\n return pickle.load(f)\n\n\ndef data_load():\n dataset = pd.read_csv(\"https://raw.githubusercontent.com/maanassiraj/Sentiment_based_product_recommendation_system/master/sample30.csv\")\n data = dataset[[\"name\", \"reviews_text\", \"reviews_title\"]]\n data.drop_duplicates(inplace=True)\n data = data.loc[~(data[\"name\"].isnull() | data[\"reviews_text\"].isnull() | data[\"reviews_title\"].isnull())]\n data[\"reviews_text\"] = data[\"reviews_text\"] + \" \" + data[\"reviews_title\"]\n data.drop(columns= \"reviews_title\", inplace= True)\n return data\n\n\n\ndef text_preprocessing_1(review):\n unwanted_text = \"this review was collected as part of a promotion.\"\n review = review.lower().replace(unwanted_text, \"\").strip()\n review = re.sub(\"\\.{2,}\", \" \", review)\n review = re.sub(\"[^a-z'\\s]\", \"\", review)\n for ch in string.ascii_lowercase:\n if ch != \"l\":\n review = re.sub(f\"{ch}\" + \"{3,}\", f\"{ch}\", review)\n else:\n review = re.sub(f\"{ch}\" + \"{3,}\", f\"{ch}\" + f\"{ch}\", review)\n return review\n\n\n\ndef text_preprocessing_2(reviews, nlp):\n return [\" \".join([token.lemma_ for token in review if token.pos_ not in [\"PROPN\", \"NOUN\"]]) for review in nlp.pipe(reviews)]\n\n \ndef text_preprocessing_3(review):\n stop_words = [word for word in nltk.corpus.stopwords.words(\"english\") if word not in [\"don't\", \"doesn't\", \"do\", \"not\", \"did\"]]\n review = \" \".join([word for word in nltk.tokenize.word_tokenize(review) if word not in stop_words])\n review = review.replace(\"-PRON-\", \"\")\n return review\n\n\ndef generate_perc_pos_reviews(prod_name, data, nlp, tf_idf_vectorizer, sent_model):\n prod_reviews = data.loc[data[\"name\"] == prod_name, \"reviews_text\"]\n text_preproc_1 = np.vectorize(text_preprocessing_1)\n prod_reviews = pd.Series(text_preproc_1(prod_reviews))\n prod_reviews = pd.Series(text_preprocessing_2(prod_reviews, nlp))\n text_preproc_3 = np.vectorize(text_preprocessing_3)\n prod_reviews = pd.Series(text_preproc_3(prod_reviews))\n prod_reviews.drop_duplicates(inplace= True)\n prod_reviews = tf_idf_vectorizer.transform(prod_reviews)\n predictions = sent_model.predict(prod_reviews)\n return np.sum(predictions) / len(predictions)\n\n\n \ndef generate_top5_prod_recom(user_name):\n nlp = spacy.load(\"en_core_web_sm\", disable = [\"parser\", \"ner\"])\n data = data_load()\n recommendation_eng = 
pickle_load(\"https://github.com/maanassiraj/Sentiment_based_product_recommendation_system/blob/master/pickle_files/recommendation_engine.pkl?raw=true\")\n predicted_rat = recommendation_eng.loc[:, user_name].sort_values(ascending= False)\n top_20_recom = predicted_rat[predicted_rat > 0].iloc[:20].index\n recom = pd.DataFrame({\"prod_recommendations\": top_20_recom})\n tf_idf_vectorizer = pickle_load(\"https://github.com/maanassiraj/Sentiment_based_product_recommendation_system/blob/master/pickle_files/tf_idf_vectorizer.pkl?raw=true\")\n sent_model = pickle_load(\"https://github.com/maanassiraj/Sentiment_based_product_recommendation_system/blob/master/pickle_files/sentiment_model.pkl?raw=true\")\n recom[\"perc_pos_reviews\"] = recom[\"prod_recommendations\"].apply(generate_perc_pos_reviews, args=(data, nlp, tf_idf_vectorizer, sent_model))\n return list(recom.sort_values(by=\"perc_pos_reviews\", ascending=False)[\"prod_recommendations\"].iloc[:5])","repo_name":"maanassiraj/Sentiment_based_product_recommendation_system","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32934042746","text":"from django.contrib import admin\nfrom . import models\nfrom django.utils.safestring import mark_safe\n\n\n# Register your models here.\n\n\nclass TagAdmin(admin.ModelAdmin):\n list_display = (\n 'titre',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'statut',\n 'date_add',\n 'date_update',\n )\n search_fields = (\n 'titre',\n )\n list_per_pages = 50\n date_hierarchy = 'date_add'\n\n fieldsets = [\n ('Info ', {'fields': ['titre', ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass CategorieAdmin(admin.ModelAdmin):\n list_display = (\n 'titre',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'statut',\n 'date_add',\n 'date_update',\n )\n search_fields = (\n 'titre',\n )\n list_per_pages = 50\n date_hierarchy = 'date_add'\n\n fieldsets = [\n ('Info ', {'fields': ['titre', ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass ProduitAdmin(admin.ModelAdmin):\n list_display = (\n 'titre',\n 'affiche_image',\n\n\n 'categorie',\n 'tag',\n 'old_prix',\n 'new_prix',\n 'resume',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'categorie',\n 'statut',\n 'tag'\n )\n search_fields = (\n 'titre',\n )\n list_per_pages = 50\n date_hierarchy = 'date_add'\n readonly_fields = ['affiche_image']\n\n fieldsets = [\n ('Info ', {'fields': [\n 'titre',\n 'categorie',\n 'tag',\n 'old_prix',\n 'new_prix',\n 'description',\n 'resume',\n\n ]\n }),\n ('Image', {'fields': ['cover', 'affiche_image',]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n def affiche_image(self, obj):\n return mark_safe(''.format(url=obj.cover.url))\n\n\n\nclass ReviewAdmin(admin.ModelAdmin):\n\n\n list_display = ('produit','titre', 'nom', 'email', 'review', 'statut', 'date_add', 'date_update',)\n list_filter = ('nom', 'statut', 'date_add',)\n search_fields = ('name',)\n date_hierarchy = 'date_add'\n fieldsets = (\n ('Info', {\n 'fields': [\n\n 'produit','titre', 'nom', 'email', 'review']\n\n ,\n }),\n ('Statut et Activations', {'fields': ['statut', ]}),\n )\n\n\n\nclass CartAdmin(admin.ModelAdmin):\n list_display = (\n 'auteur',\n 'total',\n 'active',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'statut',\n 'date_add',\n 'date_update',\n )\n search_fields = (\n 'titre',\n )\n 
list_per_page = 50\n    date_hierarchy = 'date_add'\n\n    fieldsets = [\n        ('Info ', {'fields': ['auteur','produit','total','active', ]}),\n        ('Statut et Activations', {'fields': ['statut', ]}),\n    ]\n\n\n\ndef _register(model, admin_class):\n    admin.site.register(model, admin_class)\n\n\n_register(models.Produit, ProduitAdmin)\n_register(models.Review, ReviewAdmin)\n_register(models.Tag, TagAdmin)\n_register(models.Categorie, CategorieAdmin)\n_register(models.Cart, CartAdmin)","repo_name":"paulemxx/Orgo","sub_path":"shop/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"hi","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74691684035","text":"import argparse\nimport os.path\nimport sys\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description=\"Append a null byte to end of input file\")\n    parser.add_argument(\n        \"input_file\", help=\"Path to file null byte will be appended to\")\n    args = parser.parse_args()\n\n    if not os.path.isfile(args.input_file):\n        print(\"file '{0}' doesn't exist\".format(args.input_file))\n        return 1  # return error code\n\n    try:\n        fd = open(args.input_file, \"ab\")\n    except Exception as e:\n        print(\"Error opening file: \" + str(e))\n        return 2  # return error code\n\n    try:\n        fd.write(b\"\\x00\")\n    except Exception as e:\n        print(\"Error writing to file: \" + str(e))\n        return 3  # return error code\n\n    return 0\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n","repo_name":"codeplaysoftware/oneapi-construction-kit","sub_path":"scripts/append_null_byte.py","file_name":"append_null_byte.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"61"} +{"seq_id":"14189025292","text":"import nltk #pip install nltk\r\nfrom nltk.stem.lancaster import LancasterStemmer\r\nimport tflearn #pip install tflearn\r\nimport tensorflow #pip install tensorflow\r\nimport random\r\nimport pickle\r\nimport numpy\r\nfrom tensorflow.python.framework import ops\r\n\r\nstemmer = LancasterStemmer()\r\nimport json\r\n\r\nwords = []\r\nlabels = []\r\ndocs_x = []\r\ndocs_y = []\r\nwith open(\"intents.json\") as file:\r\n    data = json.load(file)\r\n\r\nfor intent in data[\"intents\"]:\r\n    for pattern in intent[\"patterns\"]:\r\n        wrds = nltk.word_tokenize(pattern)\r\n        words.extend(wrds)\r\n        docs_x.append(wrds)\r\n        docs_y.append(intent[\"tag\"])\r\n        if intent[\"tag\"] not in labels:\r\n            labels.append(intent[\"tag\"])\r\nwords = [stemmer.stem(w.lower()) for w in words if w != \"?\" and w != \"!\"]\r\nwords = sorted(list(set(words)))\r\n\r\nlabels = sorted(labels)\r\n\r\ntraining = []\r\noutput = []\r\n\r\nout_empty = [0 for _ in range(len(labels))]\r\n\r\nfor x, doc in enumerate(docs_x):\r\n    bag = []\r\n    wrds = [stemmer.stem(w) for w in doc]\r\n\r\n    for w in words:\r\n        if w in wrds:\r\n            bag.append(1)\r\n        else:\r\n            bag.append(0)\r\n    output_row = out_empty[:]\r\n    output_row[labels.index(docs_y[x])] = 1\r\n    training.append(bag)\r\n    output.append(output_row)\r\ntraining = numpy.array(training)\r\nout_empty = numpy.array(output)\r\n\r\nwith open(\"data.pickle\", \"wb\") as f:\r\n    pickle.dump((words, labels, training, output), f)\r\n\r\n#print(training, output)\r\n\r\nops.reset_default_graph()\r\n\r\nnet = tflearn.input_data(shape=[None, len(training[0])])\r\nnet = tflearn.fully_connected(net, 8)\r\nnet = tflearn.fully_connected(net, 8)\r\nnet = tflearn.fully_connected(net, len(output[0]), activation=\"softmax\")\r\nnet = tflearn.regression(net)\r\n\r\nmodel 
= tflearn.DNN(net)\r\n\r\nmodel.fit(training, output, n_epoch=2000, batch_size=8, show_metric=True)\r\nmodel.save(\"model.tflearn\")\r\n\r\n\r\ndef bag_of_words(s, words):\r\n bag = [0 for _ in range(len(words))]\r\n\r\n s_words = nltk.word_tokenize(s)\r\n s_words = [stemmer.stem(word.lower()) for word in s_words]\r\n\r\n for se in s_words:\r\n for i, w in enumerate(words):\r\n if w == se:\r\n bag[i] = 1\r\n\r\n return numpy.array(bag)\r\ndef chat():\r\n print(\"Start talking with the bot!\")\r\n\r\n while True:\r\n inp = input(\"Tell me what is on your heart: \")\r\n if inp.lower() == \"quit\":\r\n break\r\n results = model.predict([bag_of_words(inp, words)])[0]\r\n results_index = numpy.argmax(results)\r\n if results[results_index] > 0.8:\r\n tag = labels[results_index]\r\n for tg in data[\"intents\"]:\r\n if tg['tag'] == tag:\r\n responses = tg[\"responses\"]\r\n print(random.choice(responses))\r\n else:\r\n print(\"I don't understand. Try Again !\")\r\nchat()","repo_name":"absterjr/Chat-Bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"73448352193","text":"from flask import Flask,request,render_template, jsonify\nfrom flask_cors import CORS, cross_origin\nfrom scraper import run_code\nimport json \nimport requests\nimport sys\n\n\napp = Flask(__name__)\nCORS(app)\n@app.route('/scrape', methods=[\"POST\"])\ndef _get_data():\n json_ = request.json\n for x in json_:\n url=x[\"URL\"]\n result = run_code(url)\n string_result=\"\"\n \n for val in result:\n if len(val['Other Product Details'])>1:\n for sent in val['Other Product Details']:\n string_result+=sent\n else:\n string_result=val['Other Product Details'] \n\n try:\n response = requests.post(\"http://127.0.0.1:12345/prediction\", data={\"Statement\": string_result})\n return response.json()\n except requests.exceptions.HTTPError as err:\n return err.response.text\n\n\nif __name__ == \"__main__\":\n try:\n port = int(sys.argv[1])\n except:\n port = 5000\n \n app.run(port=port, debug=True)\n","repo_name":"ishita-kumar/Conscious-Choice","sub_path":"Back_End/scraperapi.py","file_name":"scraperapi.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16924763295","text":"import itertools\nimport os\nimport pathlib\nimport tempfile\nimport zipfile\nfrom collections import deque\nfrom xml.dom import minidom\n\nfrom .utils import Textractor\nfrom pdfminer.high_level import extract_text\n\n\ndef get_pdf_text(input_):\n try:\n text = extract_text(input_)\n except Exception as exc:\n print(\"Could not extract PDF text:\", exc)\n if isinstance(input_, str):\n print(repr(input_))\n else:\n print(\"Binary pdf\")\n return None\n return text\n\n\ndef get_epub_text(input_):\n IGNORE_FILES = set(\n [\n \"titlepage.xhtml\",\n \"halftitlepage.xhtml\",\n \"imprint.xhtml\",\n \"colophon.xhtml\",\n \"copyright.xhtml\",\n \"uncopyright.xhtml\",\n ]\n )\n ret = \"\"\n try:\n with tempfile.TemporaryDirectory() as tmpdir, zipfile.ZipFile(input_, \"r\") as z:\n os.chdir(tmpdir)\n cwd = pathlib.Path.cwd()\n container = z.open(\"META-INF/container.xml\")\n container_root = minidom.parseString(container.read())\n\n # locate the rootfile\n elem = container_root.getElementsByTagName(\"rootfile\")[0]\n rootfile_path = elem.getAttribute(\"full-path\")\n\n # open the rootfile\n rootfile = z.open(rootfile_path)\n rootfile_root 
= minidom.parseString(rootfile.read())\n\n spine = rootfile_root.getElementsByTagName(\"spine\")\n manifest_items = {\n item.getAttribute(\"id\"): item\n for item in rootfile_root.getElementsByTagName(\"manifest\")[\n 0\n ].getElementsByTagName(\"item\")\n }\n stuff = []\n\n def get_attrs(n):\n href = None\n _id = None\n for i in range(0, n.attributes.length):\n if n.attributes.item(i).name == \"href\":\n href = n.attributes.item(i).value\n elif n.attributes.item(i).name == \"id\":\n _id = n.attributes.item(i).value\n return {\"href\": href, \"id\": _id}\n\n if spine and len(spine) > 0:\n spine = spine[0]\n stuff = [\n item[\"id\"]\n for item in map(\n get_attrs,\n (\n manifest_items[item.getAttribute(\"idref\")]\n for item in spine.getElementsByTagName(\"itemref\")\n ),\n )\n if item[\"id\"].lower() not in IGNORE_FILES\n ]\n members = list(\n itertools.filterfalse(\n lambda m: not m.filename.endswith(\"html\")\n and m.filename not in stuff,\n z.infolist(),\n )\n )\n members = z.extractall(members=members)\n file_queue = deque([])\n parser = Textractor()\n for c in cwd.iterdir():\n file_queue.append(c)\n while len(file_queue) > 0:\n p = file_queue.pop()\n if p.is_dir():\n for c in p.iterdir():\n file_queue.append(c)\n else:\n try:\n if p.suffix.endswith(\"html\"):\n parser.feed(p.read_text(errors=\"strict\"))\n print(\"parser output is \", len(parser.output), \"bytes\")\n ret += parser.output.strip()\n parser.reset()\n except Exception as exc:\n print(\"Exception:\", exc)\n except Exception as exc:\n print(\"Could not extract epub text:\", exc)\n if isinstance(input_, str):\n print(repr(input_))\n else:\n print(\"Binary epub\")\n return None\n return ret\n","repo_name":"epilys/bibliothecula","sub_path":"bumblebat/bibliothecula/text_extract.py","file_name":"text_extract.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"61"} +{"seq_id":"24034952527","text":"import re\nimport time\nimport backoff\nimport requests\nfrom requests.exceptions import RequestException\nimport singer\nimport singer.utils as singer_utils\nfrom singer import metadata, metrics\n\nfrom tap_salesforce.salesforce.bulk import Bulk\nfrom tap_salesforce.salesforce.rest import Rest\nfrom tap_salesforce.salesforce.exceptions import (\n TapSalesforceException,\n TapSalesforceQuotaExceededException)\nfrom tap_salesforce.salesforce.credentials import SalesforceAuth\n\n\nLOGGER = singer.get_logger()\n\nBULK_API_TYPE = \"BULK\"\nREST_API_TYPE = \"REST\"\n\nSTRING_TYPES = set([\n 'id',\n 'string',\n 'picklist',\n 'textarea',\n 'phone',\n 'url',\n 'reference',\n 'multipicklist',\n 'combobox',\n 'encryptedstring',\n 'email',\n 'complexvalue', # TODO: Unverified\n 'masterrecord',\n 'datacategorygroupreference',\n 'base64'\n])\n\nNUMBER_TYPES = set([\n 'double',\n 'currency',\n 'percent'\n])\n\nDATE_TYPES = set([\n 'datetime',\n 'date'\n])\n\nBINARY_TYPES = set([\n 'byte'\n])\n\nLOOSE_TYPES = set([\n 'anyType',\n\n # A calculated field's type can be any of the supported\n # formula data types (see https://developer.salesforce.com/docs/#i1435527)\n 'calculated'\n])\n\n\n# The following objects are not supported by the bulk API.\nUNSUPPORTED_BULK_API_SALESFORCE_OBJECTS = set(['AssetTokenEvent',\n 'AttachedContentNote',\n 'EventWhoRelation',\n 'QuoteTemplateRichTextData',\n 'TaskWhoRelation',\n 'SolutionStatus',\n 'ContractStatus',\n 'RecentlyViewed',\n 'DeclinedEventRelation',\n 'AcceptedEventRelation',\n 'TaskStatus',\n 'PartnerRole',\n 
'TaskPriority',\n 'CaseStatus',\n 'UndecidedEventRelation',\n 'OrderStatus'])\n\n# The following objects have certain WHERE clause restrictions so we exclude them.\nQUERY_RESTRICTED_SALESFORCE_OBJECTS = set(['Announcement',\n 'ContentDocumentLink',\n 'CollaborationGroupRecord',\n 'Vote',\n 'IdeaComment',\n 'FieldDefinition',\n 'PlatformAction',\n 'UserEntityAccess',\n 'RelationshipInfo',\n 'ContentFolderMember',\n 'ContentFolderItem',\n 'SearchLayout',\n 'SiteDetail',\n 'EntityParticle',\n 'OwnerChangeOptionInfo',\n 'DataStatistics',\n 'UserFieldAccess',\n 'PicklistValueInfo',\n 'RelationshipDomain',\n 'FlexQueueItem',\n 'NetworkUserHistoryRecent',\n 'FieldHistoryArchive',\n 'RecordActionHistory',\n 'FlowVersionView',\n 'FlowVariableView',\n 'AppTabMember',\n 'ColorDefinition',\n 'IconDefinition',])\n\n# The following objects are not supported by the query method being used.\nQUERY_INCOMPATIBLE_SALESFORCE_OBJECTS = set(['DataType',\n 'ListViewChartInstance',\n 'FeedLike',\n 'OutgoingEmail',\n 'OutgoingEmailRelation',\n 'FeedSignal',\n 'ActivityHistory',\n 'EmailStatus',\n 'UserRecordAccess',\n 'Name',\n 'AggregateResult',\n 'OpenActivity',\n 'ProcessInstanceHistory',\n 'OwnedContentDocument',\n 'FolderedContentDocument',\n 'FeedTrackedChange',\n 'CombinedAttachment',\n 'AttachedContentDocument',\n 'ContentBody',\n 'NoteAndAttachment',\n 'LookedUpFromActivity',\n 'AttachedContentNote',\n 'QuoteTemplateRichTextData'])\n\ndef log_backoff_attempt(details):\n LOGGER.info(\"ConnectionError detected, triggering backoff: %d try\", details.get(\"tries\"))\n\n\ndef field_to_property_schema(field, mdata):\n property_schema = {}\n\n field_name = field['name']\n sf_type = field['type']\n\n if sf_type in STRING_TYPES:\n property_schema['type'] = \"string\"\n elif sf_type in DATE_TYPES:\n date_type = {\"type\": \"string\", \"format\": \"date-time\"}\n string_type = {\"type\": [\"string\", \"null\"]}\n property_schema[\"anyOf\"] = [date_type, string_type]\n elif sf_type == \"boolean\":\n property_schema['type'] = \"boolean\"\n elif sf_type in NUMBER_TYPES:\n property_schema['type'] = \"number\"\n elif sf_type == \"address\":\n property_schema['type'] = \"object\"\n property_schema['properties'] = {\n \"street\": {\"type\": [\"null\", \"string\"]},\n \"state\": {\"type\": [\"null\", \"string\"]},\n \"postalCode\": {\"type\": [\"null\", \"string\"]},\n \"city\": {\"type\": [\"null\", \"string\"]},\n \"country\": {\"type\": [\"null\", \"string\"]},\n \"longitude\": {\"type\": [\"null\", \"number\"]},\n \"latitude\": {\"type\": [\"null\", \"number\"]},\n \"geocodeAccuracy\": {\"type\": [\"null\", \"string\"]}\n }\n elif sf_type in (\"int\", \"long\"):\n property_schema['type'] = \"integer\"\n elif sf_type == \"time\":\n property_schema['type'] = \"string\"\n elif sf_type in LOOSE_TYPES:\n return property_schema, mdata # No type = all types\n elif sf_type in BINARY_TYPES:\n mdata = metadata.write(mdata, ('properties', field_name), \"inclusion\", \"unsupported\")\n mdata = metadata.write(mdata, ('properties', field_name),\n \"unsupported-description\", \"binary data\")\n return property_schema, mdata\n elif sf_type == 'location':\n # geo coordinates are numbers or objects divided into two fields for lat/long\n property_schema['type'] = [\"number\", \"object\", \"null\"]\n property_schema['properties'] = {\n \"longitude\": {\"type\": [\"null\", \"number\"]},\n \"latitude\": {\"type\": [\"null\", \"number\"]}\n }\n elif sf_type == 'json':\n property_schema['type'] = \"string\"\n else:\n raise 
TapSalesforceException(\"Found unsupported type: {}\".format(sf_type))\n\n # The nillable field cannot be trusted\n if field_name != 'Id' and sf_type != 'location' and sf_type not in DATE_TYPES:\n property_schema['type'] = [\"null\", property_schema['type']]\n\n return property_schema, mdata\n\nclass Salesforce():\n # pylint: disable=too-many-instance-attributes,too-many-arguments\n def __init__(self,\n credentials=None,\n token=None,\n quota_percent_per_run=None,\n quota_percent_total=None,\n is_sandbox=None,\n select_fields_by_default=None,\n default_start_date=None,\n api_type=None):\n self.api_type = api_type.upper() if api_type else None\n self.session = requests.Session()\n if isinstance(quota_percent_per_run, str) and quota_percent_per_run.strip() == '':\n quota_percent_per_run = None\n if isinstance(quota_percent_total, str) and quota_percent_total.strip() == '':\n quota_percent_total = None\n\n self.quota_percent_per_run = float(quota_percent_per_run) if quota_percent_per_run is not None else 25\n self.quota_percent_total = float(quota_percent_total) if quota_percent_total is not None else 80\n self.is_sandbox = is_sandbox is True or (isinstance(is_sandbox, str) and is_sandbox.lower() == 'true')\n self.select_fields_by_default = select_fields_by_default is True or (isinstance(select_fields_by_default, str) and select_fields_by_default.lower() == 'true')\n self.default_start_date = default_start_date\n self.rest_requests_attempted = 0\n self.jobs_completed = 0\n self.data_url = \"{}/services/data/v53.0/{}\"\n self.pk_chunking = False\n\n self.auth = SalesforceAuth.from_credentials(credentials, is_sandbox=self.is_sandbox)\n\n # validate start_date\n singer_utils.strptime(default_start_date)\n\n # pylint: disable=anomalous-backslash-in-string,line-too-long\n def check_rest_quota_usage(self, headers):\n match = re.search('^api-usage=(\\d+)/(\\d+)$', headers.get('Sforce-Limit-Info'))\n\n if match is None:\n return\n\n remaining, allotted = map(int, match.groups())\n\n LOGGER.info(\"Used %s of %s daily REST API quota\", remaining, allotted)\n\n percent_used_from_total = (remaining / allotted) * 100\n max_requests_for_run = int((self.quota_percent_per_run * allotted) / 100)\n\n if percent_used_from_total > self.quota_percent_total:\n total_message = (\"Salesforce has reported {}/{} ({:3.2f}%) total REST quota \" +\n \"used across all Salesforce Applications. Terminating \" +\n \"replication to not continue past configured percentage \" +\n \"of {}% total quota.\").format(remaining,\n allotted,\n percent_used_from_total,\n self.quota_percent_total)\n raise TapSalesforceQuotaExceededException(total_message)\n elif self.rest_requests_attempted > max_requests_for_run:\n partial_message = (\"This replication job has made {} REST requests ({:3.2f}% of \" +\n \"total quota). 
Terminating replication due to allotted \" +\n \"quota of {}% per replication.\").format(self.rest_requests_attempted,\n (self.rest_requests_attempted / allotted) * 100,\n self.quota_percent_per_run)\n raise TapSalesforceQuotaExceededException(partial_message)\n\n def login(self):\n self.auth.login()\n\n @property\n def instance_url(self):\n return self.auth.instance_url\n\n # pylint: disable=too-many-arguments\n @backoff.on_exception(backoff.expo,\n requests.exceptions.ConnectionError,\n max_tries=10,\n factor=2,\n on_backoff=log_backoff_attempt)\n def _make_request(self, http_method, url, headers=None, body=None, stream=False, params=None):\n if http_method == \"GET\":\n LOGGER.info(\"Making %s request to %s with params: %s\", http_method, url, params)\n resp = self.session.get(url, headers=headers, stream=stream, params=params)\n elif http_method == \"POST\":\n LOGGER.info(\"Making %s request to %s with body %s\", http_method, url, body)\n resp = self.session.post(url, headers=headers, data=body)\n else:\n raise TapSalesforceException(\"Unsupported HTTP method\")\n\n resp.raise_for_status()\n\n if resp.headers.get('Sforce-Limit-Info') is not None:\n self.rest_requests_attempted += 1\n self.check_rest_quota_usage(resp.headers)\n\n return resp\n\n def describe(self, sobject=None):\n \"\"\"Describes all objects or a specific object\"\"\"\n headers = self.auth.rest_headers\n instance_url = self.auth.instance_url\n if sobject is None:\n endpoint = \"sobjects\"\n endpoint_tag = \"sobjects\"\n url = self.data_url.format(instance_url, endpoint)\n else:\n endpoint = \"sobjects/{}/describe\".format(sobject)\n endpoint_tag = sobject\n url = self.data_url.format(instance_url, endpoint)\n\n with metrics.http_request_timer(\"describe\") as timer:\n timer.tags['endpoint'] = endpoint_tag\n resp = self._make_request('GET', url, headers=headers)\n\n return resp.json()\n\n # pylint: disable=no-self-use\n def _get_selected_properties(self, catalog_entry):\n mdata = metadata.to_map(catalog_entry['metadata'])\n properties = catalog_entry['schema'].get('properties', {})\n\n return [k for k in properties.keys()\n if singer.should_sync_field(metadata.get(mdata, ('properties', k), 'inclusion'),\n metadata.get(mdata, ('properties', k), 'selected'),\n self.select_fields_by_default)]\n\n\n def get_start_date(self, state, catalog_entry):\n catalog_metadata = metadata.to_map(catalog_entry['metadata'])\n replication_key = catalog_metadata.get((), {}).get('replication-key')\n\n return (singer.get_bookmark(state,\n catalog_entry['tap_stream_id'],\n replication_key) or self.default_start_date)\n\n def _build_query_string(self, catalog_entry, start_date, end_date=None, order_by_clause=True):\n selected_properties = self._get_selected_properties(catalog_entry)\n\n query = \"SELECT {} FROM {}\".format(\",\".join(selected_properties), catalog_entry['stream'])\n\n catalog_metadata = metadata.to_map(catalog_entry['metadata'])\n replication_key = catalog_metadata.get((), {}).get('replication-key')\n\n if replication_key:\n where_clause = \" WHERE {} >= {} \".format(\n replication_key,\n start_date)\n if end_date:\n end_date_clause = \" AND {} < {}\".format(replication_key, end_date)\n else:\n end_date_clause = \"\"\n\n order_by = \" ORDER BY {} ASC\".format(replication_key)\n if order_by_clause:\n return query + where_clause + end_date_clause + order_by\n\n return query + where_clause + end_date_clause\n else:\n return query\n\n def query(self, catalog_entry, state):\n if self.api_type == BULK_API_TYPE:\n bulk = Bulk(self)\n 
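Fed concrete values, the query builder above produces an incremental SOQL statement like the one below; the stream, field names, and dates are illustrative:

```python
# Illustrative expansion of _build_query_string for a stream with a replication key.
selected = ["Id", "Name", "SystemModstamp"]
stream, rep_key = "Account", "SystemModstamp"
start, end = "2021-01-01T00:00:00Z", "2021-02-01T00:00:00Z"

query = "SELECT {} FROM {}".format(",".join(selected), stream)
query += " WHERE {} >= {} ".format(rep_key, start)   # trailing space as in the source
query += " AND {} < {}".format(rep_key, end)
query += " ORDER BY {} ASC".format(rep_key)
print(query)
# SELECT Id,Name,SystemModstamp FROM Account WHERE SystemModstamp >= 2021-01-01T00:00:00Z
#   AND SystemModstamp < 2021-02-01T00:00:00Z ORDER BY SystemModstamp ASC
```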
return bulk.query(catalog_entry, state)\n elif self.api_type == REST_API_TYPE:\n rest = Rest(self)\n return rest.query(catalog_entry, state)\n else:\n raise TapSalesforceException(\n \"api_type should be REST or BULK was: {}\".format(\n self.api_type))\n\n def get_blacklisted_objects(self):\n if self.api_type == BULK_API_TYPE:\n return UNSUPPORTED_BULK_API_SALESFORCE_OBJECTS.union(\n QUERY_RESTRICTED_SALESFORCE_OBJECTS).union(QUERY_INCOMPATIBLE_SALESFORCE_OBJECTS)\n elif self.api_type == REST_API_TYPE:\n return QUERY_RESTRICTED_SALESFORCE_OBJECTS.union(QUERY_INCOMPATIBLE_SALESFORCE_OBJECTS)\n else:\n raise TapSalesforceException(\n \"api_type should be REST or BULK was: {}\".format(\n self.api_type))\n\n # pylint: disable=line-too-long\n def get_blacklisted_fields(self):\n if self.api_type == BULK_API_TYPE:\n return {('EntityDefinition', 'RecordTypesSupported'): \"this field is unsupported by the Bulk API.\"}\n elif self.api_type == REST_API_TYPE:\n return {}\n else:\n raise TapSalesforceException(\n \"api_type should be REST or BULK was: {}\".format(\n self.api_type))\n","repo_name":"MeltanoLabs/tap-salesforce","sub_path":"tap_salesforce/salesforce/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":17151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8402047188","text":"import socket\nimport datetime\nimport pymysql\n\nconn = pymysql.connect(host = '127.0.0.1', user = 'bigdata', password = '12345678', db = 'mysql', charset = 'utf8')\ncursor = conn.cursor() # 커서 설정\n\nHOST = '192.168.0.3'\nPORT = 9999\n\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 객체생성\nclient_socket.connect((HOST, PORT)) # 서버에 접속\nwhile True :\n\n data = client_socket.recv(1024) # 버퍼 크기는 서버와 동일하게\n now = datetime.datetime.today() # 현재 시스템 날짜 및 시간\n nowstr = now.strftime('%Y-%m-%d %H:%M:%S') #문자열로 지정\n print(nowstr)\n rs = data.decode().split(':') # 콜론을 구분자로 모든 데이터를 분리(리스트)\n print(rs)\n sql = \"insert into tblsensor values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n cursor.execute(sql, (nowstr, rs[0], rs[1], rs[2], rs[3], rs[4], rs[5], rs[6], rs[7], rs[8], rs[9]))\n conn.commit()\n\nclient_socket.close()\ncursor.close()","repo_name":"hunsang-you/HSYOO","sub_path":"ExamApp/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70960903874","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse,FileResponse\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nimport os\r\n\r\ndef index(request):\r\n return render(request,\"evtt/index.html\")\r\n\r\n@csrf_exempt\r\ndef transcribe(request):\r\n if request.method == 'POST':\r\n f=request.FILES['file']\r\n with open('voice.m4a', 'wb+') as destination:\r\n for chunk in f.chunks():\r\n destination.write(chunk)\r\n os.system(\"vosk-transcriber -l fa -i {} -o out_text.txt\".format('voice.m4a')) \r\n #return FileResponse(open('out_text.txt','rb'),as_attachment=True)\r\n f=open('out_text.txt','r',encoding = 'utf-8') \r\n return HttpResponse(f.read());\r\n \r\n\r\n","repo_name":"epg900/pvss","sub_path":"evtt/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28821882916","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools 
import find_packages, setup\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\nsetup_requirements = [\n \"pytest-runner>=5.2\",\n]\n\ntest_requirements = [\n \"black>=19.10b0\",\n \"codecov>=2.1.4\",\n \"flake8>=3.8.3\",\n \"flake8-debugger>=3.2.1\",\n \"pytest>=5.4.3\",\n \"pytest-cov>=2.9.0\",\n \"pytest-raises>=0.11\",\n]\n\ndev_requirements = [\n *setup_requirements,\n *test_requirements,\n \"bumpversion>=0.6.0\",\n \"coverage>=5.1\",\n \"ipython>=7.15.0\",\n \"m2r>=0.2.1\",\n \"pytest-runner>=5.2\",\n \"Sphinx>=2.0.0b1,<3\",\n \"sphinx_rtd_theme>=0.4.3\",\n \"tox>=3.15.2\",\n \"twine>=3.1.1\",\n \"wheel>=0.34.2\",\n]\n\nstep_workflow_requirements = [\n \"cloudpickle>=1.5.0\", # needed for distributed\n \"boto3==1.15\", \n \"bokeh>=2.1.0\",\n \"dask[bag]>=2.21.0\",\n \"dask_jobqueue>=0.7.0\",\n # SPECIAL RELEASE OF DATASTEP TO RESOLVE MALFORMED MANIFEST\n # https://github.com/AllenCellModeling/datastep/tree/special-release\n #\n # Fortunately we left the 0.0.* series of releases empty\n # New datastep projects will by default use the 0.1.* series of releases\n \"datastep==0.0.1\",\n \"distributed>=2.21.0\",\n \"docutils<0.16\", # needed for botocore (quilt dependency)\n \"fire\",\n \"fsspec>=0.7.4\", # needed for dask[bag]\n \"partd>=1.1.0\", # needed for dask[bag]\n \"prefect>=0.12.5\",\n \"python-dateutil<=2.8.0\", # needed for quilt3 (datastep dependency)\n \"aics_dask_utils\", # needed for multiple steps, e.g., DistributedHandler\n]\n\nrequirements = [\n *step_workflow_requirements,\n # project requires\n \"numpy\",\n \"pandas\",\n \"Pillow\",\n \"pyarrow\",\n \"scipy\",\n \"tqdm\",\n \"scipy\",\n \"scikit-image\",\n \"aicsimageio\",\n \"aicsimageprocessing\",\n \"image-classifier-3d\",\n]\n\n\naics_data_requirements = [\n \"lkaccess>=1.4.20\",\n]\n\nextra_requirements = {\n \"setup\": setup_requirements,\n \"test\": test_requirements,\n \"dev\": dev_requirements,\n \"aics\": aics_data_requirements,\n \"all\": [\n *requirements,\n *dev_requirements,\n ],\n}\n\nsetup(\n author=\"Allen Institute for Cell Science\",\n author_email=\"jacksonb@alleninstitute.org\",\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: Free for non-commercial use\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n description=\"Workflow to manage processing of FOVs and Cells for the Cell Variance Analysis program.\",\n entry_points={\"console_scripts\": [\"cvapipe=cvapipe.bin.cli:cli\"]},\n install_requires=requirements,\n license=\"Allen Institute Software License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"cvapipe\",\n name=\"cvapipe\",\n packages=find_packages(exclude=[\"tests\", \"*.tests\", \"*.tests.*\"]),\n python_requires=\">=3.6\",\n setup_requires=setup_requirements,\n test_suite=\"cvapipe/tests\",\n tests_require=test_requirements,\n extras_require=extra_requirements,\n url=\"https://github.com/AllenCell/cvapipe\",\n # Do not edit this string manually, always use bumpversion\n # Details in CONTRIBUTING.rst\n version=\"0.1.0\",\n zip_safe=False,\n)\n","repo_name":"AllenCell/cvapipe","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"4853872564","text":"import 
pandas as pd\nimport geopy\nfrom geopy.geocoders import Nominatim\ndata = pd.read_csv(\"map.txt\")\nnom = Nominatim()\ncountries=data['Country']\n\n\nwith open(\"hello\"+\".txt\", 'w') as file:\n for i in countries:\n n= nom.geocode(countries[0])\n file.write(str(n.latitude)+\"\\n\")\n","repo_name":"jukomol/Python-Journey","sub_path":"Python_Projects/web_mapping/maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23255463158","text":"from PyQt6.QtGui import QIcon,QFont,QPixmap\nfrom PyQt6 import QtCore\nfrom PyQt6.QtWidgets import *\nimport sys\n\nfrom PyQt6.QtWidgets import QWidget\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n\n #CONFIGURACOES PADROES DA JANELA\n self.setGeometry(200,200,400,400) # posicao da tela \n self.setWindowTitle('Primeiro Programa') # muda titulo da tela\n self.setWindowIcon(QIcon('imagem.jpg')) # coloca a imagem de icone\n self.setMaximumWidth(1200) # maximo da janela\n self.setMinimumHeight(600) # maximo da janela\n #self.setStyleSheet('background-color: black') # altera a cor da janela\n\n #LABEL\n label = QLabel(self) # criacao de label\n label.setText('hello word') # escrevendo no label\n label.move(120,30) # move o label na tela\n label.setFont(QFont('Sanserif', 20)) # altera a fonte\n label.setStyleSheet('color: red') # altera a cor \n label.clear() # limpa o label\n #pixmap = QPixmap('endereco da imagem') # pega a imagem\n #label.setPixmap(pixmap) # coloca de fundo no label\n\n #BOTAO\n btn = QPushButton('titulo',self) \n btn.setFont('times',25) # altera a font e tamanho\n btn.setStyleSheet('background-color: black') # altera a cor do botao\n #btn.setIcon(QIcon('endereco')) # passa um icone para o botao\n btn.clicked.connect('passa o paramentro ou funcao') # pegando o envento do botao\n\n #LINE_EDIT\n line_edit = QLineEdit()\n line_edit.setFont('time') # muda a fonte do line\n line_edit.setPlaceholderText('digite seu nome') # fica de fundo do line_edit\n line_edit.setText('digite um textoo que ja vai por padrao') # fica predefinido já\n line_edit.setEchoMode(QLineEdit.EchoMode.Password) # deixa o formato com *** de senha\n \n #LAYOUT E GRID\n hbox = QBoxLayout() # coloca na horizontal os elementos\n vbox = QVBoxLayout() # coloca na vertical os elementos\n btn1 = QPushButton()\n line1 = QLineEdit()\n line2 = QLineEdit()\n hbox.addWidget(btn1) # adiciona a tela\n hbox.addWidget(line1)# adiciona a tela\n hbox.addWidget(line2)# adiciona a tela\n vbox.addWidget(btn1)# adiciona a tela\n vbox.addWidget(line1)# adiciona a tela\n vbox.addWidget(line2)# adiciona a tela\n\n # BOTAO RADIO\n rad1 = QRadioButton('whtasapp') # faz botao de selecao redondo\n rad1.toggleed.connect('funcao ') # para conectar o botao do tipo radio\n rad1.sender() #passa se esta selecionado o botao\n\n # BOTAO DE CHECKBOX\n check1 = QCheckBox() # seleciona mais de uma opcao\n check1.setText('instagram')\n check1.toggle.connect('passa funcao') # serve para vincular uma execucao\n check1.isChecked() # serve para saber se esta selecionado ou nao\n\n #SPIN BOX QUE CLICA E AUMANTA O NUMERO\n #valor inicial #valor maximo #valor minimo #prefizado na parte da frente\n self.spin = QSpinBox(self,value = 2, maximum = 10, minumun =1, prefix ='#', suffix='pizzas')\n #escrita na tela do lado\n self.spin.valueChanged.connect()# serve para toda vez que mudar o valor da spin chmar a funcao\n\n #COMBO BOX\n self.combo = QComboBox()\n 
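The geocoding script above has two snags: it geocodes `countries[0]` on every iteration instead of the loop variable, and recent geopy versions require a `user_agent`. A corrected sketch, keeping the original file names:

```python
import pandas as pd
from geopy.geocoders import Nominatim

data = pd.read_csv("map.txt")
nom = Nominatim(user_agent="country-latitudes")  # required by geopy >= 2

with open("hello.txt", "w") as file:
    for country in data["Country"]:
        location = nom.geocode(country)  # the original geocoded countries[0] every time
        if location is not None:         # geocode returns None on a miss
            file.write(f"{location.latitude}\n")
```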
self.combo.addItem('mussarela') # adiciona ao comboBox um de cada vez\n sabores = ['calabresa,catupry,frango']\n self.combo.addItems(sabores) # adiciona a lista a comboBox\n self.combo.currentTextChanged() # ele passa o sinal de qual esta selecionado\n\n #Lista\n self.list = QListWidget()\n self.list.addItem('mussarela') # adiciona a lista que sera exibido na tela\n self.list.clicked.connect() # manda o que esta selecionado\n texto =self.list.currentItem() # pega o item so nao esta em str\n texto.text() # aqui sim pega o str da lista\n\n #Table\n table = QTableWidget()\n table.setRowCount(10) # defini o tamanho da tabela das linhas\n table.setColumnCount(4) # defini o tamanho das colunas\n table.setItem(0,0, QTableWidgetItem('id')) # adiciona item a tabela\n table.setItem(0,0, QTableWidgetItem('nome')) # adiciona item a tabela\n table.setItem(0,0, QTableWidgetItem('produto')) # adiciona item a tabela\n table.setItem(0,0, QTableWidgetItem('cod_barra')) # adiciona item a tabela\n # da para utilizar o for no lugar do primeiro 0 e do segundo 0\n\n \n\n\n\n\n\n\n\n \n\n\napp = QApplication(sys.argv)\nwindow = Window() # janela\nwindow.show()\nsys.exit(app.exec())","repo_name":"otavio-schmieleski/Python","sub_path":"Interface_Grafica/QT6/comandos.py","file_name":"comandos.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32893523337","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 1 21:45:02 2020\n\n@author: nicol\n\"\"\"\n\n\n###Vidéo 19\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\n\"\"\"\niris = pd.read_csv('iris.csv')\n\nplt.figure()\nsns.pairplot(iris, hue = 'species')\n\n#sns.fonction(x, y, data, hue, size, style)\n\"\"\"\ntitanic = sns.load_dataset('titanic')\n\n\nprint(titanic.head())\n\ntitanic.drop(['alone', 'alive', 'who', 'adult_male', 'embark_town', 'class'], axis = 1, inplace =True)\ntitanic.dropna(axis = 0, inplace = True)\nprint(titanic.head())\n\nplt.figure()\n# sns.pairplot(titanic)\nplt.figure()\nsns.catplot(x = 'pclass', y ='age', data = titanic, hue = 'sex')\nplt.figure()\nsns.boxplot(x = 'pclass', y ='age', data = titanic, hue = 'sex')\nplt.figure()\nsns.distplot(titanic['fare'])\nplt.figure()\nsns.jointplot('age', 'fare', data = titanic, kind ='hex')\nplt.figure()\nsns.heatmap(titanic.corr())\n","repo_name":"NVigne-cloud/Python","sub_path":"Machine_Learnia_11.py","file_name":"Machine_Learnia_11.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44259644840","text":"from utils.crud_helpers import get_user_by_id\nfrom sqlalchemy.orm import Session\nfrom fastapi import HTTPException\nfrom hashlib import sha256\nfrom models import User as UserModel\nimport os\n\n\ndef decode_token(db: Session, token: str) -> UserModel:\n token_data = token.split(\":\")\n user_id = int(token_data[0])\n token_payload = token_data[1]\n user = get_user_by_id(db, user_id)\n if not user:\n raise HTTPException(status_code=401, detail=\"Invalid authentication credentials\")\n \n payload = user.login + os.environ[\"SECRET_SOULT\"]\n if sha256(payload.encode('utf-8')).hexdigest() == token_payload:\n return user\n raise HTTPException(status_code=401, detail=\"Invalid authentication credentials\")\n \n\ndef encode_token(user: UserModel) -> str:\n token_data = user.login + os.environ[\"SECRET_SOULT\"]\n token = str(user.id) + \":\" + 
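Several calls in the PyQt6 cheat sheet above will not run as written: `setFont('times', 25)` takes a `QFont`, the radio-button signal is `toggled` (not `toggleed`), and checkboxes connect through `stateChanged`, not `toggle.connect`. A corrected sketch of those fragments:

```python
import sys
from PyQt6.QtGui import QFont
from PyQt6.QtWidgets import QApplication, QPushButton, QRadioButton, QCheckBox

app = QApplication(sys.argv)  # widgets need a QApplication first

btn = QPushButton("titulo")
btn.setFont(QFont("Times", 25))  # setFont takes a QFont, not ('times', 25)

rad1 = QRadioButton("whatsapp")
rad1.toggled.connect(lambda checked: print("radio:", checked))  # signal is `toggled`

check1 = QCheckBox("instagram")
check1.stateChanged.connect(lambda state: print("checkbox:", state))
```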
sha256(token_data.encode('utf-8')).hexdigest()\n return token\n","repo_name":"Melekh11/todo-app","sub_path":"backend/utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42206174223","text":"\"\"\"Module for getting a relationship between channels and energies.\"\"\"\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\n\nfrom read_spe import data, CHANNELS\nfrom peaks import peaks, peak_stdevs\n\nplt.rcParams.update({'font.size': 22})\n\nline = np.polyfit(*list(zip(*peaks)), deg=1)\nenergies = np.polyval(line, CHANNELS)\n\n\ndef energy_from_channel(channel):\n \"\"\"Given a channel number, return the fit energy.\"\"\"\n return line[0]*channel + line[1]\n\n\ndef channel_from_energy(energy):\n \"\"\"\n Given an energy, return the corresponding channel number.\n\n (result may not be an integer!)\n \"\"\"\n return (energy - line[1]) / line[0]\n\n\nif __name__ == \"__main__\":\n # make a scatterplot\n\n peak_channels, peak_energies = list(zip(*peaks))\n\n plt.figure(figsize=(15, 15))\n plt.xlabel(\"Measured Channel [Mean of Gaussian Fit]\")\n plt.ylabel(\"Known Energy [keV]\")\n\n y_error = np.zeros_like(peak_channels)\n x_error = peak_stdevs\n print(x_error)\n\n plt.errorbar(\n peak_channels, peak_energies,\n xerr=x_error, yerr=y_error,\n fmt='o', ecolor='k', color='k')\n\n co_patch = mpatches.Patch(color='g', label=data.Co60.label)\n ba_patch = mpatches.Patch(color='b', label=data.Ba133.label)\n na_patch = mpatches.Patch(color='r', label=data.Na22.label)\n colors = ['g']*2 + ['b']*5 + ['r']*1\n plt.plot(CHANNELS, energies)\n plt.scatter(peak_channels, peak_energies, s=10,\n c=colors, marker=\"o\", zorder=100, linewidths=5)\n\n plt.legend(handles=[co_patch, ba_patch, na_patch])\n\n plt.savefig(\"images/energy_calibration.pdf\")\n plt.cla()\n plt.clf()\n\n # calculate channel from energy for an example energy\n print(channel_from_energy(1274.5))\n","repo_name":"callum-mccracken/GRIDS_2022","sub_path":"HPGe/energy_calibration.py","file_name":"energy_calibration.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33168855538","text":"import pyttsx3\nimport speech_recognition as sr\n\nengine = pyttsx3.init() # object creation\nvoices = engine.getProperty('voices') \nengine.setProperty('voice', voices[2].id) \nengine.setProperty('rate', 100) \n\n\n# entrada = input('Ingresa un mensaje\\n').lower().strip()\n\nr = sr.Recognizer()\n\nwith sr.Microphone() as source:\n print(\"Ingresa un mensaje:\")\n audio = r.listen(source)\n\n# if 'hola' in entrada:\n# print('Hola 😃!')\n \n# elif 'como estas' in entrada:\n# print('Muy bien, gracias! 😁')\n \n# elif 'adios' in entrada:\n# print('Adios, ten un buen día 😉!!')\n# else:\n# print('No entendí! 😣')\n\n\ntry:\n entrada = r.recognize_google(audio)\nexcept sr.UnknownValueError:\n print(\"No se pudo reconocer\")\nexcept sr.RequestError as e:\n print('No se pudo conectar al servicio')\n\n\nif 'hola' in entrada:\n engine.say('Hola, encantada de conocerte!')\n \nelif 'como estas' in entrada:\n engine.say('Muy bien, gracias! 😁')\n \nelif 'adios' in entrada:\n engine.say('Adios, ten un buen día 😉!!')\nelse:\n engine.say('No entendí! 
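A round-trip sketch of the token helpers above: the token is `<user id>:<sha256(login + SECRET_SOULT)>`, so decoding re-derives the digest from the stored login and compares. The `FakeUser` class here is a toy stand-in for the SQLAlchemy model:

```python
import os
from hashlib import sha256

os.environ["SECRET_SOULT"] = "demo-secret"  # normally set outside the process

class FakeUser:                             # stand-in for the ORM User model
    id, login = 7, "alice"

token_data = FakeUser.login + os.environ["SECRET_SOULT"]
token = f"{FakeUser.id}:" + sha256(token_data.encode("utf-8")).hexdigest()

user_id, payload = token.split(":")
expected = sha256((FakeUser.login + os.environ["SECRET_SOULT"]).encode("utf-8")).hexdigest()
print(int(user_id) == FakeUser.id and payload == expected)  # True
```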
😣')\n \nengine.runAndWait()\nengine.stop()\n\n\n\n\n# for voz in voices:\n# print(voz)","repo_name":"eastor112/prograIntro2021","sub_path":"Clase 10/09_chatbot.py","file_name":"09_chatbot.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23436459873","text":"import argparse\nimport glob\n\nimport cv2\nimport numpy as np\nimport os\n\n\ndef show_in_one(images, show_size=(300, 300), blank_size=2, window_name=\"merge\"):\n small_h, small_w = images[0].shape[:2]\n column = int(show_size[1] / (small_w + blank_size))\n row = int(show_size[0] / (small_h + blank_size))\n shape = [show_size[0], show_size[1]]\n for i in range(2, len(images[0].shape)):\n shape.append(images[0].shape[i])\n\n merge_img = np.zeros(tuple(shape), images[0].dtype)\n\n max_count = len(images)\n count = 0\n for i in range(row):\n if count >= max_count:\n break\n for j in range(column):\n if count < max_count:\n im = images[count]\n t_h_start = i * (small_h + blank_size)\n t_w_start = j * (small_w + blank_size)\n t_h_end = t_h_start + im.shape[0]\n t_w_end = t_w_start + im.shape[1]\n merge_img[t_h_start:t_h_end, t_w_start:t_w_end] = im\n count = count + 1\n else:\n break\n if count < max_count:\n print(\"ingnore count %s\" % (max_count - count))\n cv2.namedWindow(window_name)\n cv2.imshow(window_name, merge_img)\n\n\nif __name__ == '__main__':\n \n test_dir = \"/home/xinchao/dd/explore/ORB_SLAM2/prj/python/save_fail2/local_map/\"\n path = test_dir\n\n debug_images = []\n for infile in glob.glob(os.path.join(path, '*.*')):\n ext = os.path.splitext(infile)[1][1:] # get the filename extenstion\n if ext == \"png\" or ext == \"jpg\" or ext == \"bmp\" or ext == \"tiff\" or ext == \"pbm\":\n print(infile)\n img = cv2.imread(infile)\n if img is None:\n continue\n else:\n debug_images.append(img)\n\n show_in_one(debug_images)\n cv2.waitKey(0)\n cv2.destroyWindow()\n","repo_name":"amusingchao/learngit","sub_path":"code/moro_code/python/multi_images_show.py","file_name":"multi_images_show.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"2742152618","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\n\nclass EncoderLSTM(nn.Module):\n \"\"\"Encodes navigation instructions, returning hidden state context (for\n attention methods) and a decoder initial state.\"\"\"\n\n def __init__(\n self,\n args,\n embedding,\n embedding_size,\n hidden_size,\n dropout_ratio,\n bidirectional=False,\n num_layers=1,\n ):\n super(EncoderLSTM, self).__init__()\n self.args = args\n self.embedding_size = embedding_size\n self.hidden_size = hidden_size\n self.drop = nn.Dropout(p=dropout_ratio)\n if bidirectional:\n print(\"Using Bidir in EncoderLSTM\")\n self.num_directions = 2 if bidirectional else 1\n self.num_layers = num_layers\n self.embedding = embedding\n input_size = embedding_size\n self.lstm = nn.LSTM(\n input_size,\n hidden_size,\n self.num_layers,\n batch_first=True,\n dropout=dropout_ratio,\n bidirectional=bidirectional,\n )\n self.encoder2decoder = nn.Linear(\n hidden_size * self.num_directions, hidden_size * self.num_directions\n )\n\n def init_state(self, inputs):\n \"\"\" Initialize to zero cell states and hidden states.\"\"\"\n batch_size = inputs.size(0)\n h0 = Variable(\n torch.zeros(\n 
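The tiling arithmetic in `show_in_one` above is plain integer division over the canvas; a standalone check of how many tiles fit and where tile (i, j) lands:

```python
# Grid arithmetic from show_in_one, for 100x100 tiles on a 300x300 canvas.
show_h, show_w = 300, 300
small_h, small_w, blank = 100, 100, 2

column = show_w // (small_w + blank)  # 2 tiles per row (102 * 3 > 300)
row = show_h // (small_h + blank)     # 2 rows
print(row * column, "tiles fit")      # 4 tiles fit

i, j = 1, 1                           # bottom-right tile
print(i * (small_h + blank), j * (small_w + blank))  # 102 102 (its top-left corner)
```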
self.num_layers * self.num_directions, batch_size, self.hidden_size\n ),\n requires_grad=False,\n )\n c0 = Variable(\n torch.zeros(\n self.num_layers * self.num_directions, batch_size, self.hidden_size\n ),\n requires_grad=False,\n )\n\n return h0.to(self.args.device), c0.to(self.args.device)\n\n def forward(\n self,\n inputs,\n lengths,\n position_ids=None,\n token_type_ids=None,\n ):\n \"\"\"Expects input vocab indices as (batch, seq_len). Also requires a\n list of lengths for dynamic batching.\"\"\"\n\n embeds = self.embedding(\n inputs, position_ids=position_ids, token_type_ids=token_type_ids\n )\n h0, c0 = self.init_state(inputs)\n packed_embeds = pack_padded_sequence(embeds, lengths, batch_first=True)\n enc_h, (enc_h_t, enc_c_t) = self.lstm(packed_embeds, (h0, c0))\n\n if (\n self.num_directions == 2\n ): # The size of enc_h_t is (num_layers * num_directions, batch, hidden_size)\n h_t = torch.cat((enc_h_t[-1], enc_h_t[-2]), 1)\n c_t = torch.cat((enc_c_t[-1], enc_c_t[-2]), 1)\n else:\n h_t = enc_h_t[-1]\n c_t = enc_c_t[-1] # (batch, hidden_size)\n\n ctx, _ = pad_packed_sequence(enc_h, batch_first=True)\n\n self.args.sub_out = \"tanh\"\n self.args.zero_init = False\n\n if self.args.sub_out == \"max\":\n ctx_max, _ = ctx.max(1)\n decoder_init = nn.Tanh()(self.encoder2decoder(ctx_max))\n elif self.args.sub_out == \"tanh\":\n decoder_init = nn.Tanh()(self.encoder2decoder(h_t))\n else:\n assert False\n\n ctx = self.drop(ctx)\n if self.args.zero_init:\n return ctx, torch.zeros_like(decoder_init), torch.zeros_like(c_t)\n else:\n return (\n ctx,\n decoder_init,\n c_t,\n ) # (batch, seq_len, hidden_size*num_directions)\n # (batch, hidden_size)\n\n\nclass OscarEncoder(nn.Module):\n \"\"\"Encodes navigation instructions, returning hidden state context (for\n attention methods) and a decoder initial state.\"\"\"\n\n def __init__(\n self,\n args,\n bert,\n hidden_size,\n decoder_hidden_size,\n dropout_ratio,\n bidirectional=False,\n num_layers=1,\n reverse_input=False,\n ):\n super(OscarEncoder, self).__init__()\n\n self.transformer_hidden_size = 768\n self.reverse_input = reverse_input\n self.dec_hidden_size = decoder_hidden_size\n\n self.args = args\n\n self.bert = bert\n self.hidden_size = hidden_size\n self.drop = nn.Dropout(p=dropout_ratio)\n if bidirectional:\n print(\"Using Bidir in EncoderLSTM\")\n self.num_directions = 2 if bidirectional else 1\n self.num_layers = num_layers\n\n self.lstm = nn.LSTM(\n self.transformer_hidden_size,\n self.hidden_size,\n self.num_layers,\n batch_first=True,\n dropout=dropout_ratio,\n bidirectional=bidirectional,\n )\n self.encoder_lstm2decoder_ht = nn.Linear(\n hidden_size * self.num_directions, decoder_hidden_size\n )\n self.encoder_lstm2decoder_ct = nn.Linear(\n hidden_size * self.num_directions, decoder_hidden_size\n )\n\n def init_state(self, inputs):\n \"\"\" Initialize to zero cell states and hidden states.\"\"\"\n batch_size = inputs.size(0)\n h0 = Variable(\n torch.zeros(\n self.num_layers * self.num_directions, batch_size, self.hidden_size\n ),\n requires_grad=False,\n )\n c0 = Variable(\n torch.zeros(\n self.num_layers * self.num_directions, batch_size, self.hidden_size\n ),\n requires_grad=False,\n )\n\n return h0.to(self.args.device), c0.to(self.args.device)\n\n def forward(\n self,\n inputs,\n lengths,\n mask,\n position_ids=None,\n token_type_ids=None,\n ):\n \"\"\"Expects input vocab indices as (batch, seq_len). 
Also requires a\n list of lengths for dynamic batching.\"\"\"\n\n seq_max_len = mask.size(1)\n att_mask = ~mask\n\n outputs = self.bert(\n inputs,\n token_type_ids=token_type_ids,\n attention_mask=att_mask,\n position_ids=position_ids,\n )\n\n output = outputs[0]\n\n if self.reverse_input:\n reversed_output = torch.zeros(output.size()).to(output.device)\n reverse_idx = torch.arange(seq_max_len - 1, -1, -1)\n reversed_output[att_mask] = output[:, reverse_idx][att_mask[:, reverse_idx]]\n output = reversed_output\n\n h0, c0 = self.init_state(inputs)\n packed_embeds = pack_padded_sequence(output, lengths, batch_first=True)\n enc_h, (enc_h_t, enc_c_t) = self.lstm(packed_embeds, (h0, c0))\n\n if (\n self.num_directions == 2\n ): # The size of enc_h_t is (num_layers * num_directions, batch, hidden_size)\n h_t = torch.cat((enc_h_t[-1], enc_h_t[-2]), 1)\n c_t = torch.cat((enc_c_t[-1], enc_c_t[-2]), 1)\n else:\n h_t = enc_h_t[-1]\n c_t = enc_c_t[-1] # (batch, hidden_size)\n\n decoder_init = nn.Tanh()(self.encoder_lstm2decoder_ht(h_t))\n if self.hidden_size * self.num_directions != self.dec_hidden_size:\n c_t = self.encoder_lstm2decoder_ct(c_t)\n\n ctx, lengths = pad_packed_sequence(enc_h, batch_first=True)\n\n ctx = self.drop(ctx)\n\n return (\n ctx,\n decoder_init,\n c_t,\n ) # (batch, seq_len, hidden_size*num_directions)\n\n\nclass SoftDotAttention(nn.Module):\n \"\"\"Soft Dot Attention.\n\n Ref: http://www.aclweb.org/anthology/D15-1166\n Adapted from PyTorch OPEN NMT.\n \"\"\"\n\n def __init__(self, dim):\n \"\"\"Initialize layer.\"\"\"\n super(SoftDotAttention, self).__init__()\n self.linear_in = nn.Linear(dim, dim, bias=False)\n self.sm = nn.Softmax(dim=1)\n self.linear_out = nn.Linear(dim * 2, dim, bias=False)\n self.tanh = nn.Tanh()\n\n def forward(self, h, context, mask=None):\n \"\"\"Propagate h through the network.\n\n h: batch x dim\n context: batch x seq_len x dim\n mask: batch x seq_len indices to be masked\n \"\"\"\n target = self.linear_in(h).unsqueeze(2) # batch x dim x 1\n\n # Get attention\n attn = torch.bmm(context, target).squeeze(2) # batch x seq_len\n if mask is not None:\n # -Inf masking prior to the softmax\n attn.data.masked_fill_(mask.bool(), -float(\"inf\"))\n attn = self.sm(attn)\n attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x seq_len\n\n weighted_context = torch.bmm(attn3, context).squeeze(1) # batch x dim\n h_tilde = torch.cat((weighted_context, h), 1)\n\n h_tilde = self.tanh(self.linear_out(h_tilde))\n return h_tilde, attn\n\n\nclass AttnDecoderLSTM(nn.Module):\n \"\"\" An unrolled LSTM with attention over instructions for decoding navigation actions. 
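A shape walkthrough of the `SoftDotAttention` module above, with the learned `linear_in`/`linear_out` projections omitted for brevity (batch 2, sequence length 5, hidden dim 8):

```python
import torch

h = torch.randn(2, 8)           # decoder hidden state
context = torch.randn(2, 5, 8)  # encoder outputs

target = h.unsqueeze(2)                         # (2, 8, 1)
scores = torch.bmm(context, target).squeeze(2)  # (2, 5) dot products
alpha = torch.softmax(scores, dim=1)            # attention weights over seq_len
weighted = torch.bmm(alpha.unsqueeze(1), context).squeeze(1)  # (2, 8)
print(weighted.shape)  # torch.Size([2, 8])
```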
\"\"\"\n\n def __init__(\n self,\n input_action_size,\n output_action_size,\n embedding_size,\n hidden_size,\n dropout_ratio,\n feature_size=2048,\n ):\n super(AttnDecoderLSTM, self).__init__()\n self.embedding_size = embedding_size\n self.feature_size = feature_size\n self.hidden_size = hidden_size\n self.embedding = nn.Embedding(input_action_size, embedding_size)\n self.drop = nn.Dropout(p=dropout_ratio)\n self.lstm = nn.LSTMCell(embedding_size + feature_size, hidden_size)\n self.attention_layer = SoftDotAttention(hidden_size)\n self.decoder2action = nn.Linear(hidden_size, output_action_size)\n\n def forward(self, action, feature, h_0, c_0, ctx, ctx_mask=None):\n \"\"\"Takes a single step in the decoder LSTM (allowing sampling).\n\n action: batch x 1\n feature: batch x feature_size\n h_0: batch x hidden_size\n c_0: batch x hidden_size\n ctx: batch x seq_len x dim\n ctx_mask: batch x seq_len - indices to be masked\n \"\"\"\n action_embeds = self.embedding(action) # (batch, 1, embedding_size)\n action_embeds = action_embeds.squeeze()\n concat_input = torch.cat(\n (action_embeds, feature), 1\n ) # (batch, embedding_size+feature_size)\n drop = self.drop(concat_input)\n h_1, c_1 = self.lstm(drop, (h_0, c_0))\n h_1_drop = self.drop(h_1)\n h_tilde, alpha = self.attention_layer(h_1_drop, ctx, ctx_mask)\n logit = self.decoder2action(h_tilde)\n return h_1, c_1, alpha, logit\n","repo_name":"alexa/visitron","sub_path":"tasks/turn_based/agent_models.py","file_name":"agent_models.py","file_ext":"py","file_size_in_byte":10390,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"10298335654","text":"class Solution:\n def xorOperation(self, n: int, start: int) -> int:\n nums = []\n for i in range(n):\n nums.append(start+2*i)\n print(nums)\n x = 0\n for j in nums:\n x ^= j\n return(x)","repo_name":"iamnitya/LeetCode","sub_path":"1486-xor-operation-in-an-array/1486-xor-operation-in-an-array.py","file_name":"1486-xor-operation-in-an-array.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3611052208","text":"from flask_babel import gettext as _\nfrom flask_wtf import FlaskForm\nfrom flask_wtf.file import (\n FileField, \n FileAllowed, \n FileRequired, \n FileStorage\n)\nfrom flask_uploads import (\n UploadSet,\n IMAGES\n)\nfrom wtforms import (\n IntegerField,\n StringField,\n DecimalField,\n SelectField,\n TextAreaField,\n HiddenField,\n BooleanField,\n SelectMultipleField\n)\nfrom wtforms.validators import (\n Required,\n InputRequired,\n Length,\n EqualTo,\n NumberRange\n)\n\nfrom app.database import db\n\nfrom app.helpers import (\n render_template, \n log_info,\n toint\n)\n\nfrom app.helpers.date_time import current_timestamp\n\n\nfrom app.models.coupon import CouponBatch\nfrom app.forms import Form\n\nclass RechargeForm(Form):\n '''充值Form'''\n nickname = StringField(_(u'用户名'),\n render_kw={'readonly':'readonly'})\n\n avatar = FileField(_(u'头像'),\n validators=[\n FileAllowed(UploadSet('images', IMAGES), message=_(u'只允许上传图片')),\n ],\n render_kw={'class':'hide'}) \n\n uid = IntegerField(_(u'用户ID'),\n validators=[\n Required(message=_(u'请填写用户的UID')),\n NumberRange(min=0, message=_(u'不能小于0'))\n ],\n render_kw={'readonly':'readonly'}\n )\n\n recharge_amount = DecimalField(_(u'充值金额'),\n validators=[\n Required(message=_(u'请填写正确的充值金额')),\n NumberRange(min=0, message=_(u'金额不能小于0'))\n ]\n )\n\nclass CouponForm(Form):\n '''优惠券分发Form'''\n nickname = 
StringField(_(u'用户名'),\n render_kw={'readonly':'readonly'})\n\n avatar = FileField(_(u'头像'),\n validators=[\n FileAllowed(UploadSet('images', IMAGES), message=_(u'只允许上传图片')),\n ],\n render_kw={'class':'hide'}) \n\n uid = IntegerField(_(u'用户ID'),\n validators=[\n Required(message=_(u'请填写用户的UID')),\n NumberRange(min=0, message=_(u'不能小于0'))\n ],\n render_kw={'readonly':'readonly'}\n )\n\n cb_id = SelectField(_(u'优惠券'),\n coerce=int,\n validators=[\n Required(message=_(u'请选择要派发的优惠券')),\n ]\n )\n def __init__(self, *args, **kwargs):\n super(CouponForm, self).__init__(*args, **kwargs)\n\n _coupons = db.session.query(CouponBatch.cb_id, CouponBatch.coupon_name).\\\n filter(current_timestamp() <= CouponBatch.end_time).\\\n filter(CouponBatch.give_num < CouponBatch.publish_num).\\\n filter(CouponBatch.is_valid == 1).all()\n \n self.cb_id.choices = _coupons","repo_name":"kapokcloud-inc/theonestore","sub_path":"app/forms/admin/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"14409677982","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport pandas as pd\nfiles=[\"january.xlsx\",\"february.xlsx\",'march.xlsx']\n#files.append('April.xlsx')\nprint(type(files))\n\ncombined=pd.DataFrame()\n\nfor file in files:\n df=pd.read_excel(file,skiprows=3)\n combined=combined.append(df,ignore_index=True)\n \n \ncombined.to_excel(\"combined.xlsx\")\n\n\n# In[40]:\n\n\nimport pandas as pd\nfiles=input(\"Enter the months filename:\")\nprint(list(files.split()))\n\n\n# In[ ]:\n\n\nempty=pd.DataFrame()\nempty.append(jandf,febdf)\nempty.to_excel(\"requo.lxs\",index=False)\n\n\n# In[3]:\n\n\nprint(df) # this df belongs to last excel file\n\n\n# In[6]:\n\n\ndf_sel1=df['Location'].isin(['Los Angeles'])\nprint(df_sel1)\nprint()\nprint(df[df_sel1])\n\n\n# In[12]:\n\n\ndf_sel2=df['Location'].isin(['Los Angeles','Atlanta'])\nprint(df_sel2)\nprint(df[df_sel2])\n\n\n# In[13]:\n\n\n# not in\ndf[~df['Location'].isin(['Los Angeles','Atlanta'])]\n\n\n# In[15]:\n\n\n# numeric selecting\ndf[df['Bananas']>2258]# \n\n\n# In[16]:\n\n\ndf[df['Location'].map(lambda x:x.startswith('To'))]\n\n\n# In[17]:\n\n\ndf[df['Location'].map(lambda x:x.endswith('k'))]\n\n\n# In[20]:\n\n\ndf[df['Location'].map (lambda x:x.startswith('To')) & (df['Total']> 12900)]\n\n\n# In[22]:\n\n\ndf[df['Location'].str.contains('o')]\n\n\n# In[41]:\n\n\no_contain_name_customer_data=df[df['Location'].str.contains('o')]\n\n\n# In[23]:\n\n\ndf['Apples'].unique()\n\n\n# In[24]:\n\n\ndf['Apples'].nunique()\n\n\n# In[27]:\n\n\n# drop duplciates\ndf.drop_duplicates(subset=['Apples'])\n\n\n# In[30]:\n\n\n# sorting\ndf.sort_values(by=['Location'],ascending=True)\n\n\n# In[31]:\n\n\ndf.sort_values(by=['Location'],ascending=False)\n\n\n# In[35]:\n\n\ndf.sort_values(by=['Location','Apples'],ascending=[True,False])\n\n\n# In[36]:\n\n\ndf.sort_index(ascending=False)\n\n\n# In[37]:\n\n\ndf\n\n\n# In[45]:\n\n\na=10\nprint(b)\n\n\n# In[48]:\n\n\nimport pandas as pd\ndf=pd.read_excel(\"march.xlsx\")\nprint(dfmutual)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Oct052021/tutorial_01","sub_path":"Project02_excelwithpython02.py","file_name":"Project02_excelwithpython02.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29444502121","text":"import backend.llvm as llvm\nimport backend.llvm.translator as translator\nfrom dataclasses import 
dataclass\nfrom itertools import count\n\nBIN_OPS = dict((v, k) for k, v in translator.BIN_OPS.items())\n\ndef optimize_function(f: llvm.TopDef):\n \"\"\"replaces alloc/store/load statements with register operations\"\"\"\n id_gens = {}\n label2block = dict((b.label, b) for b in f.blocks)\n phi_map = dict((b, {}) for b in f.blocks)\n\n def get_live_vars(b: llvm.Block):\n dead = {}\n live = {}\n for s in b.stmts:\n if isinstance(s, llvm.StmtLoad) and not s.noopt and s.addr not in dead:\n live[s.addr] = s.type\n elif isinstance(s, llvm.StmtStore) and not s.noopt:\n dead[s.addr] = s.type\n return live\n\n class StmtLocalPhi(llvm.StmtPhi):\n \"\"\"\n used to differentiate new phi statements used for computing values of local variables\n from phi statements added earlier used for computing boolean expressions\n \"\"\"\n pass\n\n for b in f.blocks: # insert phis\n if len(b.preds) == 0:\n continue\n lvs = get_live_vars(b)\n\n # add phis for all variables live at the start of the block\n for lv, lvtype in lvs.items():\n b.stmts.insert(0, StmtLocalPhi(lv, lvtype, [(lv, p.label) for p in b.preds]))\n\n def fresh_loc(v):\n if v not in id_gens:\n id_gens[v] = count(1)\n return f'{v}.{id_gens[v].__next__()}'\n\n @dataclass\n class StmtAss:\n \"\"\"fake assignment statement (%dst := %src)\"\"\"\n dst: str\n src: object\n\n def __str__(self):\n return f'{self.dst} = {self.src}'\n\n for b in f.blocks: # ssaify\n nstmts = []\n pm = phi_map[b]\n for s in b.stmts:\n if isinstance(s, llvm.StmtStore) and not s.noopt:\n pm[s.addr] = fresh_loc(s.addr)\n nstmts.append(StmtAss(pm[s.addr], s.val))\n elif isinstance(s, StmtLocalPhi):\n pm[s.var] = fresh_loc(s.var)\n nstmts.append(StmtLocalPhi(pm[s.var], s.type, s.vals))\n elif isinstance(s, llvm.StmtLoad) and not s.noopt:\n nstmts.append(StmtAss(s.var, pm[s.addr]))\n elif isinstance(s, llvm.StmtAlloc) and not s.noopt:\n pass\n else:\n nstmts.append(s)\n b.stmts = nstmts\n\n extra_phis = dict((b, []) for b in f.blocks)\n\n def get_phi_val(b: llvm.Block, t, v):\n \"\"\"\n - recursively finds value for phi statement in block's predecessors\n - remembers extra phi statements to be added if necessary\n \"\"\"\n try:\n return phi_map[b][v]\n except KeyError:\n nv = fresh_loc(v)\n phi_map[b][v] = nv\n phi_vals = [(get_phi_val(p, t, v), p.label) for p in b.preds]\n extra_phis[b].append(StmtLocalPhi(nv, t, phi_vals))\n return nv\n\n for b in f.blocks: # fix phis\n for s in b.stmts:\n if isinstance(s, StmtLocalPhi):\n s.vals = [(get_phi_val(label2block[lbl], s.type, v), lbl) for v, lbl in s.vals]\n\n for b in f.blocks: # add extra phis\n b.stmts = extra_phis[b] + b.stmts\n\n def get_val(v):\n if v not in var_map:\n return v\n return get_val(var_map[v])\n\n def get_vals(b: llvm.Block):\n for s in b.stmts:\n if isinstance(s, llvm.StmtBinOp):\n s.arg1 = get_val(s.arg1)\n s.arg2 = get_val(s.arg2)\n elif isinstance(s, llvm.StmtCall):\n s.args = [(t, get_val(v)) for t, v in s.args]\n elif isinstance(s, llvm.StmtReturn):\n s.val = get_val(s.val)\n elif isinstance(s, llvm.StmtCondJump):\n s.cond = get_val(s.cond)\n elif isinstance(s, llvm.StmtPhi):\n s.vals = [(get_val(v), lbl) for v, lbl in s.vals]\n elif isinstance(s, llvm.StmtAllocArray):\n s.count = get_val(s.count)\n elif isinstance(s, llvm.StmtLoad):\n s.addr = get_val(s.addr)\n elif isinstance(s, llvm.StmtStore):\n s.val = get_val(s.val)\n s.addr = get_val(s.addr)\n elif isinstance(s, llvm.StmtGetElementPtr):\n s.addr = get_val(s.addr)\n s.idx = [(t, get_val(v)) for t, v in s.idx]\n\n while True:\n for b in f.blocks: # replace 
trivial phis with assignments\n nstmts = []\n for s in b.stmts:\n if isinstance(s, StmtLocalPhi):\n different_vals = set(v for v, _ in s.vals)\n if len(different_vals) == 1:\n nstmts.append(StmtAss(s.var, s.vals[0][0]))\n else:\n nstmts.append(s)\n else:\n nstmts.append(s)\n b.stmts = nstmts\n\n var_map = {}\n for b in f.blocks: # make map of assignments\n for s in b.stmts:\n if isinstance(s, StmtAss):\n var_map[s.dst] = s.src\n\n if len(var_map) == 0: # stop if there are no more assignments\n break\n\n for b in f.blocks: # eliminate assignments\n get_vals(b)\n b.stmts = list(filter(lambda s: not isinstance(s, StmtAss), b.stmts))\n\n while True:\n var_map = {}\n for b in f.blocks: # make map of constant expression values\n for s in b.stmts:\n if isinstance(s, llvm.StmtBinOp) and isinstance(s.arg1, int) and isinstance(s.arg2, int):\n if s.op == llvm.OP_DIV:\n var_map[s.var] = s.arg1 // s.arg2\n elif s.op == llvm.OP_REM:\n var_map[s.var] = (s.arg1 % s.arg2) - (s.arg2 if s.arg1 < 0 else 0)\n else:\n var_map[s.var] = int(eval(f'{s.arg1} {BIN_OPS[s.op]} {s.arg2}'))\n\n if len(var_map) == 0: # stop if there are no more constant expressions\n break\n\n for b in f.blocks: # eliminate constant expressions\n get_vals(b)\n b.stmts = list(filter(lambda s: not (isinstance(s, llvm.StmtBinOp) and s.var in var_map), b.stmts))\n\n\ndef optimize_program(p: llvm.Program):\n for d in p.topdefs:\n if isinstance(d, llvm.BuiltinFunDecl):\n continue\n optimize_function(d)\n","repo_name":"morch1/latte-compiler","sub_path":"src/backend/llvm/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":6411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42526881333","text":"# ==============================================================================================\n# long_short_baseline\n#\n# Made by:\n# ______ _ _ _____ _ ______ _\n# | _ \\ | | | | / __ \\ | | | _ \\ | |\n# | | | | _ _ | |_ ___ | |__ | / \\/ _ __ _ _ _ __ | |_ ___ | | | | __ _ __| |\n# | | | || | | || __|/ __|| '_ \\ | | | '__|| | | || '_ \\ | __|/ _ \\ | | | |/ _` | / _` |\n# | |/ / | |_| || |_| (__ | | | || \\__/\\| | | |_| || |_) || |_| (_) || |/ /| (_| || (_| |\n# |___/ \\__,_| \\__|\\___||_| |_| \\____/|_| \\__, || .__/ \\__|\\___/ |___/ \\__,_| \\__,_|\n# __/ || |\n# |___/ |_|\n# Version : 1.0\n# Date : 2023-04\n# Remarks :\n# As published, explained and tested in my Youtube video:\n#\n# Visit my site for more information:\n# Become my Patron: https://www.patreon.com/dutchalgotrading\n# -\n# -\n# ==============================================================================================\n# --- Used commands for later reference ---\n# source .env/bin/activate\n# freqtrade --version\n# freqtrade new-config\n# freqtrade new-strategy --strategy \n# freqtrade test-pairlist -c user_data/futures_config.json\n# freqtrade download-data -c user_data/futures_config.json --timerange 20170606- -t 1d 4h 1h 30m 15m 5m 1m\n# freqtrade backtesting -c user_data/futures_config.json -s --timerange=20190101-20210530 --timeframe=1d\n# freqtrade backtesting-analysis\n#\n# pragma pylint: disable=missing-docstring, invalid-name, pointless-string-statement\n# flake8: noqa: F401\n# isort: skip_file\n# --- Do not remove these libs ---\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\nfrom datetime import datetime\nfrom typing import Optional, Union\n\nfrom freqtrade.strategy import (\n BooleanParameter,\n CategoricalParameter,\n DecimalParameter,\n 
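The folding pass above evaluates binary ops whose operands are both literals; remainder (and division) are special-cased because Python's `//` and `%` round toward negative infinity while LLVM's `sdiv`/`srem` truncate toward zero. A quick illustration of the difference for negative operands:

```python
# Python floor semantics vs. LLVM truncating semantics.
a, b = -7, 2
print(a // b, a % b)          # -4 1   (Python floors the quotient)
print(int(a / b), a % b - b)  # -3 -1  (trunc quotient and srem-style remainder,
                              #         matching the OP_REM adjustment above)
```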
IntParameter,\n IStrategy,\n merge_informative_pair,\n)\n\n# --------------------------------\n# Add your lib to import here\nimport talib.abstract as ta\nimport pandas_ta as pta\nfrom technical import qtpylib\n\n\nclass long_short_baseline(IStrategy):\n # Strategy interface version - allow new iterations of the strategy interface.\n # Check the documentation or the Sample strategy to get the latest version.\n INTERFACE_VERSION = 3\n\n # Proposed timeframe for the strategy. Can be altered to your own preferred timeframe.\n timeframe = \"1d\"\n\n # Can this strategy go short?\n can_short: bool = True\n\n # Minimal ROI designed for the strategy.\n # Set to 100% since the exit signal determines the trade exit.\n minimal_roi = {\"0\": 1.0}\n\n # Optimal stoploss designed for the strategy.\n # Set to 100% since the exit signal dermines the trade exit.\n stoploss = -1.0\n\n # Trailing stoploss\n trailing_stop = False\n\n # Run \"populate_indicators()\" only for new candle.\n process_only_new_candles = True\n\n # These values can be overridden in the config.\n use_exit_signal = True\n exit_profit_only = False\n ignore_roi_if_entry_signal = False\n\n # Number of candles the strategy requires before producing valid signals\n # Set to the default of 30.\n startup_candle_count: int = 30\n\n # Optional order type mapping.\n order_types = {\n \"entry\": \"limit\",\n \"exit\": \"limit\",\n \"stoploss\": \"market\",\n \"stoploss_on_exchange\": False,\n }\n\n # Optional order time in force.\n order_time_in_force = {\"entry\": \"GTC\", \"exit\": \"GTC\"}\n\n @property\n def plot_config(self):\n return {\n # Main plot indicators (Moving averages, ...)\n \"main_plot\": {\n \"tema\": {},\n \"sar\": {\"color\": \"white\"},\n },\n \"subplots\": {\n # Subplots - each dict defines one additional plot\n \"MACD\": {\n \"macd\": {\"color\": \"blue\"},\n \"macdsignal\": {\"color\": \"orange\"},\n },\n \"RSI\": {\n \"rsi\": {\"color\": \"red\"},\n },\n },\n }\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n dataframe[\"EMA_QUICK\"] = ta.SMA(dataframe, timeperiod=7)\n dataframe[\"EMA_SLOW\"] = ta.SMA(dataframe, timeperiod=21)\n\n print(dataframe)\n return dataframe\n\n def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n dataframe.loc[\n (\n (dataframe[\"close\"] > dataframe[\"EMA_QUICK\"])\n & (dataframe[\"EMA_QUICK\"] > dataframe[\"EMA_SLOW\"])\n ),\n [\"enter_long\", \"enter_tag\"],\n ] = (1, \"Strong_long_signal\")\n\n # For short trades, use the section below\n dataframe.loc[\n (\n (dataframe[\"close\"] < dataframe[\"EMA_QUICK\"])\n & (dataframe[\"EMA_QUICK\"] < dataframe[\"EMA_SLOW\"])\n ),\n [\"enter_short\", \"enter_tag\"],\n ] = (1, \"Strong_short_signal\")\n\n return dataframe\n\n def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n dataframe.loc[\n ((dataframe[\"close\"] < dataframe[\"EMA_QUICK\"]) & (dataframe[\"volume\"] > 0)), # Guard\n [\"exit_long\", \"exit_tag\"],\n ] = (1, \"Close_below_kijun\")\n\n # For short trades, use the section below\n dataframe.loc[\n ((dataframe[\"close\"] > dataframe[\"EMA_QUICK\"]) & (dataframe[\"volume\"] > 0)), # Guard\n [\"exit_short\", \"exit_tag\"],\n ] = (1, \"Close_above_kijun\")\n\n return 
dataframe\n","repo_name":"cyberjunky/willemstijn_user_data","sub_path":"strategies/long_short_baseline.py","file_name":"long_short_baseline.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37864325982","text":"class Config:\n def __init__(self):\n # True if running multiple architectures, for finding best configuration\n # False if running HIDDEN_LAYERS from Config.\n self.MULTIPLE_ARCHITECTURES = True\n # if MULTIPLE_ARCHITECTURES is True, all of these will run instead of other Configs.\n\n self.TEST_LAYERS_MIN = 5 # NOTE, if changing this, u need to change what is written to file.\n self.TEST_LAYERS_MAX = 5 # NOTE, if changing this, u need to change what is written to file.\n self.TEST_NOTES = [128, 256]\n self.TEST_REGULARIZERS = ['none','l1']\n self.TEST_ACTIVATION_FUNCTIONS = ['relu', 'tanh']\n self.TEST_CLASS_WEIGHTS = [1., 40.]\n\n # if not testing multiple configurations, these will be used.\n self.HIDDEN_LAYERS = [128, 32] # 0.656\n self.REGULARIZER = None\n self.ACTIVATION_FUNCTION = 'relu'\n self.CLASS_WEIGHT = {0: 1., 1: 30.}\n\n # This is not the amount of epochs, but the cap (if using early stopping)\n self.EPOCHS = 200\n self.BATCH_SIZE = 20\n self.EARLY_STOPPING = True\n self.EARLY_STOPPING_PATIENCE = 2\n\n self.BIAS = True\n self.LOSS = 'binary_crossentropy'\n self.OPTIMIZER = 'adam'\n self.SHUFFLE = False\n\n self.TRAINING_CUT = 0.7\n # set to 0 for normal ratio from pickle file.\n self.NEGATIVE_SAMPLES_RATIO = 0\n\n self.FILE = 'output/full-24hours-aggr-mean-shuffled.pickle'\n","repo_name":"josef-kriz/Semester-Project-CS-IT8","sub_path":"training-data/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2647244041","text":"#DISCORDD\nimport os\nimport discord\nfrom discord.ext import commands\n\nclass GladosDiscordClient:\n def __init__(self):\n self.discord_token = os.environ.get(\"DISCORD_TOKEN\")\n self.discordbot_prefix = \"!\"\n \n self.discordintents = discord.Intents.default()\n self.discordintents.messages = True\n self.discordintents.members = True\n self.discordintents.message_content = True\n\n self.bot = commands.Bot(command_prefix=self.discordbot_prefix, intents=self.discordintents)\n self._setup_bot()\n self.msgBuffer = []\n def _setup_bot(self):\n @self.bot.event\n async def on_ready():\n print(f'Bot conectado como {self.bot.user.name} - {self.bot.user.id}')\n\n @self.bot.command(name='hola')\n async def hello(ctx):\n await ctx.send(f\"Hola, {ctx.message.author.name}!\")\n\n @self.bot.event\n async def on_message(message):\n if message.author == self.bot.user:\n return\n\n # Reaccionar a menciones\n if self.bot.user in message.mentions:\n self.msgBuffer.append(message.content)\n\n # Reaccionar a DMs\n if isinstance(message.channel, discord.DMChannel):\n self.msgBuffer.append(message.content)\n\n # Nota: Si tienes comandos, necesitas procesarlos también.\n await self.bot.process_commands(message)\n\n def run(self):\n print(\"Connecting Discord...\")\n self.bot.run(self.discord_token)\n\n async def sendMsg(self,msg, channel = \"general\"):\n print(\"Looking discord channel: \"+channel)\n print(self.bot.guilds[0].text_channels)\n ch = discord.utils.get(self.bot.guilds[0].text_channels, name=channel)\n await ch.send(msg)\n \n def getMsgs(self) : \n result = self.msgBuffer\n self.msgBuffer = []\n return 
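One inconsistency worth flagging in the strategy above: the indicator columns are named `EMA_QUICK`/`EMA_SLOW` but are computed with `ta.SMA`. A sketch of `populate_indicators` with the names and the indicator aligned (keep `ta.SMA` and rename the columns instead if simple averages were intended):

```python
import talib.abstract as ta
from pandas import DataFrame

def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
    # Use EMAs so the column names match what is actually computed.
    dataframe["EMA_QUICK"] = ta.EMA(dataframe, timeperiod=7)
    dataframe["EMA_SLOW"] = ta.EMA(dataframe, timeperiod=21)
    return dataframe
```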
result\n","repo_name":"makespacemadrid/GLaDOS","sub_path":"GLaDOS_bot/GladosDiscordClient.py","file_name":"GladosDiscordClient.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40923972332","text":"from flask import Flask, render_template, request\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom io import BytesIO\r\nimport base64\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/upload', methods=['POST'])\r\ndef upload():\r\n if request.method == 'POST':\r\n file = request.files['file']\r\n df = pd.read_csv(file)\r\n \r\n # Extract company name from the uploaded file name\r\n company_name = file.filename.split('.')[0]\r\n\r\n # Convert Date column to datetime format\r\n df['Date'] = pd.to_datetime(df['Date'])\r\n\r\n # Calculate the 20-day moving average\r\n df['20-day MA'] = df['Close'].rolling(window=20).mean()\r\n\r\n # Generate chart\r\n plt.figure(figsize=(10, 6))\r\n plt.plot(df['Date'], df['Close'], label='Close Price')\r\n plt.plot(df['Date'], df['20-day MA'], label='20-day Moving Average')\r\n plt.xlabel('Date')\r\n plt.ylabel('Price')\r\n plt.title(f'Stock Analysis for {company_name}')\r\n plt.legend()\r\n highest_price = df['Close'].max()\r\n lowest_price = df['Close'].min()\r\n highest_date = df[df['Close'] == highest_price]['Date'].iloc[0]\r\n lowest_date = df[df['Close'] == lowest_price]['Date'].iloc[0]\r\n\r\n buffer = BytesIO()\r\n plt.savefig(buffer, format='png')\r\n chart_base64 = base64.b64encode(buffer.getvalue()).decode()\r\n buffer.close()\r\n\r\n # Generate explanation based on the chart\r\n explanation = f\"This chart displays the stock's closing prices and the 20-day moving average over time. It provides insights into the price trends of {company_name} stock.\"\r\n\r\n # Render the results page with the generated chart, company name, and explanation\r\n return render_template('results.html', chart_base64=chart_base64, explanation=explanation, company_name=company_name, highest_price=highest_price, lowest_price=lowest_price,highest_date=highest_date, lowest_date=lowest_date)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"nigel-george/Company-Stock-Analysis","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73258957955","text":"from typing import Tuple, Union, Type\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modules.modulated_deform_conv import ModulatedDeformConvPack\n\n\nclass RecurrentUNet(nn.Module):\n \"\"\"\n This class implements a recurrent U-Net to perform super resolution base on the DeepFovea architecture\n \"\"\"\n\n def __init__(self,\n channels_encoding: Tuple[Tuple[int, int]] = (\n (3 * 6, 32), (32, 64), (64, 128), (128, 128), (128, 128)),\n channels_decoding: Tuple[Tuple[int, int]] = ((384, 128), (384, 128), (256, 64), (112, 16)),\n channels_super_resolution_blocks: Tuple[Tuple[int, int]] = ((48, 8), (40, 3 * 6))) -> None:\n \"\"\"\n Constructor method\n :param channels_encoding: (Tuple[Tuple[int, int]]) In and out channels in each encoding path\n :param channels_decoding: (Tuple[Tuple[int, int]]) In and out channels in each decoding path\n :param channels_super_resolution_blocks: (Tuple[Tuple[int, int]]) In and out channels in each s.r. 
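The core of the Flask analysis above is a rolling mean plus min/max lookups; the same pandas steps in isolation, with column names as in the uploaded CSV:

```python
import pandas as pd

df = pd.DataFrame({
    "Date": pd.date_range("2024-01-01", periods=5),
    "Close": [10.0, 12.0, 11.0, 13.0, 12.5],
})
df["20-day MA"] = df["Close"].rolling(window=20).mean()  # NaN until 20 rows exist

highest = df["Close"].max()
highest_date = df.loc[df["Close"] == highest, "Date"].iloc[0]
print(highest, highest_date.date())  # 13.0 2024-01-04
```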
block\n \"\"\"\n # Call super constructor\n super(RecurrentUNet, self).__init__()\n # Init decoder blocks\n self.encoder_blocks = nn.ModuleList()\n for channel in channels_encoding:\n self.encoder_blocks.append(\n ResidualBlock(in_channels=channel[0], out_channels=channel[1]))\n # Init decoder blocks\n self.decoder_blocks = nn.ModuleList()\n for channel in channels_decoding:\n self.decoder_blocks.append(TemporalBlock(in_channels=channel[0], out_channels=channel[1]))\n # Init super-resolution blocks\n self.super_resolution_blocks = nn.ModuleList()\n for index, channel in enumerate(channels_super_resolution_blocks):\n if index == len(channels_super_resolution_blocks) - 1:\n self.super_resolution_blocks.append(\n SuperResolutionBlock(in_channels=channel[0], out_channels=channel[1], final_output_channels=True))\n else:\n self.super_resolution_blocks.append(\n SuperResolutionBlock(in_channels=channel[0], out_channels=channel[1]))\n\n def reset_recurrent_tensor(self) -> None:\n \"\"\"\n Method resets the recurrent tensor which gets set by calling forward again\n \"\"\"\n for block in self.decoder_blocks:\n block.reset_recurrent_tensor()\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass\n :param input: (torch.Tensor) Input frame\n :return: (torch.Tensor) Super resolution output frame\n \"\"\"\n # Init list to store encoder outputs\n encoder_activations = []\n # Forward pass of encoder blocks\n for index, encoder_block in enumerate(self.encoder_blocks):\n input = encoder_block(input)\n if index != len(self.encoder_blocks) - 1:\n encoder_activations.append(input)\n # Forward pass of decoder blocks\n for index, decoder_block in enumerate(self.decoder_blocks):\n # Bottleneck output case\n if index == 0:\n output = decoder_block(\n torch.cat(\n (F.interpolate(input, scale_factor=2, mode='bilinear', align_corners=False),\n encoder_activations[-(index + 1)]), dim=1))\n # Normal case\n else:\n output = decoder_block(torch.cat((output, encoder_activations[-(index + 1)]), dim=1))\n # Forward pass of the super resolution blocks\n for index, super_resolution_block in enumerate(self.super_resolution_blocks):\n output = super_resolution_block(\n torch.cat((output, F.interpolate(encoder_activations[0], size=output.shape[2:], mode='bilinear',\n align_corners=False)), dim=1))\n return output\n\n\nclass ResidualBlock(nn.Module):\n\n def __init__(self, in_channels: int, out_channels: int) -> None:\n \"\"\"\n Constructor method\n :param in_channels: (int) Number of input channels\n :param out_channels: (int) Number of output channels\n \"\"\"\n # Call super constructor\n super(ResidualBlock, self).__init__()\n # Init main layers\n self.layer = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=(3, 3), padding=(1, 1),\n stride=(1, 1), bias=True),\n nn.ELU(),\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 3), padding=(1, 1),\n stride=(1, 1), bias=True),\n nn.ELU()\n )\n # Init residual mapping\n self.residual_mapping = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1),\n padding=(0, 0), stride=(1, 1), bias=True) \\\n if in_channels != out_channels else nn.Identity()\n # Init pooling operation\n self.pooling = nn.AvgPool2d(kernel_size=(2, 2))\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass\n :param input: (torch.Tensor) Input tensor of shape (batch size, in channels, height, width)\n :return: (torch.Tensor) Output tensor of shape (batch size, out 
channels, height / 2, width / 2)\n \"\"\"\n # Forward pass main layers\n output = self.layer(input)\n # Residual mapping\n output = output + self.residual_mapping(input)\n # Perform pooling\n output = self.pooling(output)\n return output\n\n\nclass TemporalBlock(nn.Module):\n\n def __init__(self, in_channels: int, out_channels: int) -> None:\n \"\"\"\n Constructor method\n :param in_channels: (int) Number of input channels\n :param out_channels: (int) Number of output channels\n \"\"\"\n # Call super constructor\n super(TemporalBlock, self).__init__()\n # Save number of output channels for residual activation\n self.out_channels = out_channels\n # Init layer\n self.convolution_1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=(3, 3), padding=(1, 1), stride=(1, 1), bias=True)\n self.layer_norm = None\n self.activation_1 = nn.ELU()\n self.convolution_2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=(3, 3),\n padding=(1, 1), stride=(1, 1), bias=True)\n self.activation_2 = nn.ELU()\n # Init residual mapping\n self.residual_mapping = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=(1, 1), padding=(0, 0), stride=(1, 1), bias=True)\n # Init upsampling layer\n self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)\n # Init previous activation\n self.previous_activation = None\n\n def reset_recurrent_tensor(self) -> None:\n \"\"\"\n Method resets the recurrent tensor which gets set by calling forward again\n \"\"\"\n self.previous_activation = None\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass\n :param input: (torch.Tensor) Input tensor\n :return: (torch.Tensor) Output tensor\n \"\"\"\n # Init recurrent activation if needed with a random tensor from N(0, 0.02)\n if self.previous_activation is None:\n self.previous_activation = torch.randn((input.shape[0], self.out_channels, input.shape[2], input.shape[3]),\n dtype=torch.float, device=input.device) * 0.02\n # Concatenate previous activation with input\n input = torch.cat((input, self.previous_activation), dim=1)\n # Perform operations\n output = self.convolution_1(input)\n # Init layer norm with shape of input if needed\n if self.layer_norm is None:\n self.layer_norm = nn.LayerNorm(output.shape[1:], elementwise_affine=True)\n # Layer to device\n self.layer_norm.to(self.convolution_1.weight.device)\n # Perform layer norm\n output = self.layer_norm(output)\n # Save activation as previous activation\n self.previous_activation = output.detach().clone()\n # Perform rest of operations\n output = self.convolution_2(output)\n output = self.activation_2(output)\n # Perform residual mapping\n output = output + self.residual_mapping(input)\n # Perform upsampling\n output = self.upsample(output)\n return output\n\n\nclass SuperResolutionBlock(nn.Module):\n \"\"\"\n This class implements a super resolution block which is used after the original recurrent U-Net\n \"\"\"\n\n def __init__(self, in_channels: int, out_channels: int, final_output_channels: int = 3 * 12,\n final_block: bool = False) -> None:\n \"\"\"\n Constructor method\n :param in_channels: (int) Number of input channels\n :param out_channels: (int) Number of output channels\n :param final_output_channels: (int) Number of output channels for the mapping to image space\n \"\"\"\n # Call super constructor\n super(SuperResolutionBlock, self).__init__()\n # Init layers\n self.layers = nn.Sequential(\n ModulatedDeformConvPack(in_channels=in_channels, 
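# deformable conv: sampling offsets and modulation masks are predicted from the input\n                                    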
out_channels=out_channels, kernel_size=(3, 3),\n padding=(1, 1), stride=(1, 1), bias=True),\n nn.ELU(),\n nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),\n ModulatedDeformConvPack(in_channels=out_channels, out_channels=out_channels, kernel_size=(3, 3),\n padding=(1, 1), stride=(1, 1), bias=True),\n nn.ELU(),\n )\n # Init residual mapping\n self.residual_mapping = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1),\n padding=(0, 0), stride=(1, 1), bias=True),\n nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)\n )\n\n # Init output layer\n self.output_layer = ModulatedDeformConvPack(in_channels=out_channels, out_channels=final_output_channels,\n kernel_size=(1, 1), padding=(0, 0), stride=(1, 1),\n bias=True) if final_block else nn.Identity()\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass\n :param input: (torch.Tensor)\n :return: (Tuple[torch.Tensor, torch.Tensor]) First, output tensor of main convolution. Second, image output\n \"\"\"\n # Perform main layers\n output = self.layers(input)\n # Perform residual mapping\n output = output + self.residual_mapping(input)\n # Make image output\n output = self.output_layer(output)\n return output\n\n\nclass AxialAttention3d(nn.Module):\n \"\"\"\n This class implements the axial attention operation for 3d volumes.\n \"\"\"\n\n def __init__(self, in_channels: int, out_channels: int, dim: int, span: int, groups: int = 8) -> None:\n \"\"\"\n Constructor method\n :param in_channels: (int) Input channels to be employed\n :param out_channels: (int) Output channels to be utilized\n :param dim: (int) Dimension attention is applied to (0 = height, 1 = width, 2 = depth)\n :param span: (int) Span of attention to be used\n :param groups: (int) Multi head attention groups to be used\n \"\"\"\n # Call super constructor\n super(AxialAttention3d, self).__init__()\n # Check parameters\n assert (in_channels % groups == 0) and (out_channels % groups == 0), \\\n \"In and output channels must be a factor of the utilized groups.\"\n assert dim in [0, 1, 2], \"Illegal argument for dimension\"\n # Save parameters\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.dim = dim\n self.span = span\n self.groups = groups\n self.group_channels = out_channels // groups\n # Init initial query, key and value mapping\n self.query_key_value_mapping = nn.Sequential(\n nn.Conv1d(in_channels=in_channels, out_channels=2 * out_channels, kernel_size=1,\n stride=1, padding=0, bias=False),\n nn.BatchNorm1d(num_features=2 * out_channels, track_running_stats=True, affine=True)\n )\n # Init output normalization\n self.output_normalization = nn.BatchNorm1d(num_features=2 * out_channels, track_running_stats=True, affine=True)\n # Init similarity normalization\n self.similarity_normalization = nn.BatchNorm2d(num_features=3 * self.groups, track_running_stats=True,\n affine=True)\n # Init embeddings\n self.relative_embeddings = nn.Parameter(torch.randn(2 * self.group_channels, 2 * self.span - 1),\n requires_grad=True)\n relative_indexes = torch.arange(self.span, dtype=torch.long).unsqueeze(dim=1) \\\n - torch.arange(self.span, dtype=torch.long).unsqueeze(dim=0) \\\n + self.span - 1\n self.register_buffer(\"relative_indexes\", relative_indexes.view(-1))\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass\n :param input: (torch.Tensor) Input tensor of the shape [batch size, in channels, h, w, d]\n :return: (torch.Tensor) Output 
tensor of the shape [batch size, out channels, h, w, d]\n \"\"\"\n # Reshape input dependent on the dimension to be utilized\n if self.dim == 0: # Attention over volume height\n input = input.permute(0, 3, 4, 1, 2) # [batch size, width, depth, in channels, height]\n elif self.dim == 1: # Attention over volume width\n input = input.permute(0, 2, 4, 1, 3) # [batch size, height, depth, in channels, width]\n else: # Attention over volume depth\n input = input.permute(0, 2, 3, 1, 4) # [batch size, height, width, in channels, depth]\n # Save shapes\n batch_size, dim_1, dim_2, channels, dim_attention = input.shape\n # Reshape tensor to the shape [batch size * dim 1 * dim 2, channels, dim attention]\n input = input.reshape(batch_size * dim_1 * dim_2, channels, dim_attention).contiguous()\n # Perform query, key and value mapping\n query_key_value = self.query_key_value_mapping(input)\n # Split tensor to get the query, key and value tensors\n query, key, value = query_key_value \\\n .reshape(batch_size * dim_1 * dim_2, self.groups, self.group_channels * 2, dim_attention) \\\n .split([self.group_channels // 2, self.group_channels // 2, self.group_channels], dim=2)\n # Get all embeddings\n embeddings = self.relative_embeddings.index_select(dim=1, index=self.relative_indexes) \\\n .view(2 * self.group_channels, self.span, self.span)\n # Split embeddings\n query_embedding, key_embedding, value_embedding = \\\n embeddings.split([self.group_channels // 2, self.group_channels // 2, self.group_channels], dim=0)\n # Apply embeddings to query, key and value\n query_embedded = torch.einsum(\"bgci, cij -> bgij\", query, query_embedding)\n key_embedded = torch.einsum(\"bgci, cij -> bgij\", key, key_embedding)\n # Matmul between query and key\n query_key = torch.einsum(\"bgci, bgcj -> bgij\", query_embedded, key_embedded)\n # Construct similarity map\n similarity = torch.cat([query_key, query_embedded, key_embedded], dim=1)\n # Perform normalization\n similarity = self.similarity_normalization(similarity) \\\n .view(batch_size * dim_1 * dim_2, 3, self.groups, dim_attention, dim_attention).sum(dim=1)\n # Apply softmax\n similarity = F.softmax(similarity, dim=3)\n # Calc attention map\n attention_map = torch.einsum(\"bgij, bgcj->bgci\", similarity, value)\n # Calc attention embedded\n attention_map_embedded = torch.einsum(\"bgij, cij->bgci\", similarity, value_embedding)\n # Construct output\n output = torch.cat([attention_map, attention_map_embedded], dim=-1) \\\n .view(batch_size * dim_1 * dim_2, 2 * self.out_channels, dim_attention)\n # Final output batch normalization\n output = self.output_normalization(output).view(batch_size, dim_1, dim_2, self.out_channels, 2,\n dim_attention).sum(dim=-2)\n # Reshape output back to original shape\n if self.dim == 0: # [batch size, width, depth, in channels, height]\n output = output.permute(0, 3, 4, 1, 2)\n elif self.dim == 1: # [batch size, height, depth, in channels, width]\n output = output.permute(0, 3, 1, 4, 2)\n else: # [batch size, height, width, in channels, depth]\n output = output.permute(0, 3, 1, 2, 4)\n return output\n\n\nclass AxialAttention2d(AxialAttention3d):\n \"\"\"\n This class implements the axial attention operation for 2d images.\n \"\"\"\n\n def __init__(self, in_channels: int, out_channels: int, dim: int, span: int, groups: int = 8) -> None:\n \"\"\"\n Constructor method\n :param in_channels: (int) Input channels to be employed\n :param out_channels: (int) Output channels to be utilized\n :param dim: (int) Dimension attention is applied to (0 = 
height, 1 = width)\n        :param span: (int) Span of attention to be used\n        :param groups: (int) Multi head attention groups to be used\n        \"\"\"\n        # Check parameters\n        assert dim in [0, 1], \"Illegal argument for dimension\"\n        # Call super constructor\n        super(AxialAttention2d, self).__init__(in_channels=in_channels, out_channels=out_channels, dim=dim, span=span,\n                                                groups=groups)\n\n    def forward(self, input: torch.Tensor) -> torch.Tensor:\n        \"\"\"\n        Forward pass\n        :param input: (torch.Tensor) Input tensor of the shape [batch size, in channels, h, w]\n        :return: (torch.Tensor) Output tensor of the shape [batch size, out channels, h, w]\n        \"\"\"\n        # Reshape tensor to use 3d axial-attention\n        input = input.unsqueeze(dim=0)\n        # Perform axial-attention\n        output = super().forward(input=input)\n        # Reshape output to get desired 2d tensor\n        output = output.squeeze(dim=0)\n        return output\n\n\nclass AxialAttention3dBlock(nn.Module):\n    \"\"\"\n    This class implements the axial attention block proposed in:\n    https://arxiv.org/pdf/2003.07853.pdf\n    \"\"\"\n\n    def __init__(self, in_channels: int, out_channels: int, span: Union[int, Tuple[int, int, int]], groups: int = 4,\n                 normalization: Type = nn.BatchNorm3d, activation: Type = nn.ReLU, downscale: bool = True,\n                 dropout: float = 0.0) -> None:\n        \"\"\"\n        Constructor method\n        :param in_channels: (int) Input channels to be employed\n        :param out_channels: (int) Output channels to be utilized\n        :param span: (Union[int, Tuple[int, int, int]]) Spans to be used in attention layers\n        :param groups: (int) Multi head attention groups to be used\n        :param normalization: (Type) Type of normalization to be used\n        :param activation: (Type) Type of activation to be utilized\n        :param downscale: (bool) If true spatial dimensions of the output tensor are downscaled by a factor of two\n        :param dropout: (float) Dropout rate to be utilized\n        \"\"\"\n        # Call super constructor\n        super(AxialAttention3dBlock, self).__init__()\n        # Span to tuple\n        span = span if isinstance(span, tuple) else (span, span, span)\n        # Init input mapping\n        self.input_mapping = nn.Sequential(\n            nn.Conv3d(in_channels=in_channels, out_channels=out_channels,\n                      kernel_size=(3, 3, 3), padding=(1, 1, 1), stride=(1, 1, 1), bias=False),\n            normalization(num_features=out_channels, affine=True, track_running_stats=True),\n            activation()\n        )\n        # Init axial attention mapping\n        self.axial_attention_mapping = nn.Sequential(\n            AxialAttention3d(in_channels=out_channels, out_channels=out_channels, dim=0, span=span[0], groups=groups),\n            AxialAttention3d(in_channels=out_channels, out_channels=out_channels, dim=1, span=span[1], groups=groups),\n            AxialAttention3d(in_channels=out_channels, out_channels=out_channels, dim=2, span=span[2], groups=groups),\n        )\n        # Init dropout layer\n        self.dropout = nn.Dropout(p=dropout, inplace=True)\n        # Init output mapping\n        self.output_mapping = nn.Sequential(\n            nn.Conv3d(in_channels=out_channels, out_channels=out_channels,\n                      kernel_size=(3, 3, 3), padding=(1, 1, 1), stride=(1, 1, 1), bias=False),\n            normalization(num_features=out_channels, affine=True, track_running_stats=True)\n        )\n        # Init residual mapping\n        self.residual_mapping = nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1, 1),\n                                          padding=(0, 0, 0), stride=(1, 1, 1),\n                                          bias=False) if in_channels != out_channels else nn.Identity()\n        # Init final activation\n        self.final_activation = activation()\n        # Init pooling layer for downscaling the spatial dimensions\n        self.pooling_layer = nn.MaxPool3d(kernel_size=(2, 
2, 2), stride=(2, 2, 2)) if downscale else nn.Identity()\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass\n :param input: (torch.Tensor) Input volume tensor of the shape [batch size, in channels, h, w, d]\n :return: (torch.Tensor) Output volume tensor of the shape [batch size, out channels, h / 2, w / 2, d / 2]\n \"\"\"\n # Perform input mapping\n output = self.input_mapping(input)\n # Perform attention\n output = self.axial_attention_mapping(output)\n # Perform dropout\n output = self.dropout(output)\n # Perform output mapping\n output = self.output_mapping(self.pooling_layer(output))\n # Perform residual mapping\n output = output + self.pooling_layer(self.residual_mapping(input))\n # Perform final activation\n output = self.final_activation(output)\n return output\n\n\nclass AxialAttention2dBlock(nn.Module):\n \"\"\"\n This class implements the axial attention block proposed in:\n https://arxiv.org/pdf/2003.07853.pdf\n \"\"\"\n\n def __init__(self, in_channels: int, out_channels: int, span: Union[int, Tuple[int, int]], groups: int = 4,\n normalization: Type = nn.BatchNorm2d, activation: Type = nn.ReLU, downscale: bool = True,\n dropout: float = 0.0) -> None:\n \"\"\"\n Constructor method\n :param in_channels: (int) Input channels to be employed\n :param out_channels: (int) Output channels to be utilized\n :param span: (Union[int, Tuple[int, int, int]]) Spans to be used in attention layers\n :param groups: (int) Multi head attention groups to be used\n :param normalization: (Type) Type of normalization to be used\n :param activation: (Type) Type of activation to be utilized\n :param downscale: (bool) If true spatial dimensions of the output tensor are downscaled by a factor of two\n :param dropout: (float) Dropout rate to be utilized\n \"\"\"\n # Call super constructor\n super(AxialAttention2dBlock, self).__init__()\n # Span to tuple\n span = span if isinstance(span, tuple) else (span, span)\n # Init input mapping\n self.input_mapping = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=(3, 3), padding=(1, 1), stride=(1, 1), bias=False),\n normalization(num_features=out_channels, affine=True, track_running_stats=True),\n activation()\n )\n # Init axial attention mapping\n self.axial_attention_mapping = nn.Sequential(\n AxialAttention2d(in_channels=out_channels, out_channels=out_channels, dim=0, span=span[0], groups=groups),\n AxialAttention2d(in_channels=out_channels, out_channels=out_channels, dim=1, span=span[1], groups=groups),\n )\n # Init dropout layer\n self.dropout = nn.Dropout(p=dropout, inplace=True)\n # Init output mapping\n self.output_mapping = nn.Sequential(\n nn.Conv2d(in_channels=out_channels, out_channels=out_channels,\n kernel_size=(3, 3), padding=(1, 1), stride=(1, 1), bias=False),\n normalization(num_features=out_channels, affine=True, track_running_stats=True)\n )\n # Init residual mapping\n self.residual_mapping = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1),\n padding=(0, 0), stride=(1, 1),\n bias=False) if in_channels != out_channels else nn.Identity()\n # Init final activation\n self.final_activation = activation()\n # Init pooling layer for downscaling the spatial dimensions\n self.pooling_layer = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)) if downscale else nn.Identity()\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass\n :param input: (torch.Tensor) Input volume tensor of the shape [batch size, in channels, h, w, 
d]\n        :return: (torch.Tensor) Output volume tensor of the shape [batch size, out channels, h / 2, w / 2]\n        \"\"\"\n        # Perform input mapping\n        output = self.input_mapping(input)\n        # Perform attention\n        output = self.axial_attention_mapping(output)\n        # Perform dropout\n        output = self.dropout(output)\n        # Perform output mapping\n        output = self.output_mapping(self.pooling_layer(output))\n        # Perform residual mapping\n        output = output + self.pooling_layer(self.residual_mapping(input))\n        # Perform final activation\n        output = self.final_activation(output)\n        return output\n\n\nclass ConvexUpsample2d(nn.Module):\n    \"\"\"\n    This class implements the 2d convex upsampling operation proposed in:\n    https://arxiv.org/pdf/2003.12039.pdf\n    \"\"\"\n\n    def __init__(self, factor: int = 2, kernel_size: Union[int, Tuple[int, int]] = (3, 3),\n                 padding: Union[int, Tuple[int, int]] = (1, 1)) -> None:\n        \"\"\"\n        Constructor method\n        :param factor: (int) Upsampling factor\n        :param kernel_size: (Union[int, Tuple[int, int]]) Convex upsampling kernel size\n        :param padding: (Union[int, Tuple[int, int]]) Padding to be applied in unfold operation\n        \"\"\"\n        # Call super constructor\n        super(ConvexUpsample2d, self).__init__()\n        # Save parameters\n        self.factor = factor\n        self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)\n        self.padding = padding if isinstance(padding, tuple) else (padding, padding)\n\n    def forward(self, input: torch.Tensor, weights: torch.Tensor) -> torch.Tensor:\n        \"\"\"\n        Forward pass\n        :param input: (torch.Tensor) Input tensor to be upscaled of the shape [batch size, channels, h, w]\n        :param weights: (torch.Tensor) Weights tensor of convex upsampling with shape\n        [batch size, channels * (kernel size)^2 * factor^2, h, w]\n        :return: (torch.Tensor) Upscaled input tensor of the shape [batch size, channels, 2 * h, 2 * w]\n        \"\"\"\n        # Save shapes of input tensor\n        batch_size, channels, height, width = input.shape\n        # Unfold input tensor\n        input = F.unfold(input=input, kernel_size=self.kernel_size, padding=self.padding, stride=(1, 1),\n                         dilation=(1, 1))\n        # Reshape unfolded input\n        input = input.reshape(batch_size, channels, self.kernel_size[0] * self.kernel_size[1], 1, 1, height, width)\n        # Reshape weights\n        weights = weights.reshape(batch_size, channels, self.kernel_size[0] * self.kernel_size[1], self.factor,\n                                  self.factor, height, width)\n        # Apply weights\n        output = (weights * input).sum(dim=2)\n        # Reshape output to the desired output resolution\n        output = output.permute(0, 1, 4, 2, 5, 3) \\\n            .reshape(batch_size, channels, self.factor * height, self.factor * width)\n        return output\n\n\nclass StandaloneConvexUpsmapling2d(nn.Module):\n    \"\"\"\n    This class implements a learnable standalone 2d convex upsampling operation.\n    \"\"\"\n\n    def __init__(self, channels: int, factor: int = 2, kernel_size: Union[int, Tuple[int, int]] = (3, 3),\n                 padding: Union[int, Tuple[int, int]] = (1, 1)) -> None:\n        # Call super constructor\n        super(StandaloneConvexUpsmapling2d, self).__init__()\n","repo_name":"ChristophReich1996/DeepFoveaPP_for_Video_Reconstruction_and_Super_Resolution","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":29449,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"39844486092","text":"import numpy as np\nimport utils\nfrom task2a import pre_process_images\nnp.random.seed(1)\n\n\ndef cross_entropy_loss(targets: np.ndarray, outputs: np.ndarray):\n    \"\"\"\n    Args:\n        
targets: labels/targets of each image of shape: [batch size, num_classes]\n outputs: outputs of model of shape: [batch size, num_classes]\n Returns:\n Cross entropy error (float)\n \"\"\"\n assert targets.shape == outputs.shape,\\\n f\"Targets shape: {targets.shape}, outputs: {outputs.shape}\"\n N = targets.shape[0]\n K = targets.shape[1]\n\n #ce = targets * np.log(outputs)\n ce = -(1/(N*K)) * np.sum(np.sum(targets*np.log(outputs)))\n\n return ce\n\n\nclass SoftmaxModel:\n\n def __init__(self, l2_reg_lambda: float):\n # Define number of input nodes\n self.I = 785\n\n # Define number of output nodes\n self.num_outputs = 10\n self.w = np.zeros((self.I, self.num_outputs))\n self.grad = None\n\n self.l2_reg_lambda = l2_reg_lambda\n\n def forward(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Args:\n X: images of shape [batch size, 785]\n Returns:\n y: output of model with shape [batch size, num_outputs]\n \"\"\"\n z = np.dot((self.w).T, X.T)\n\n ez = np.exp(z).T\n\n denominator = np.tile(sum(ez.T), (self.w.shape[1], 1))\n\n y = ez / denominator.T\n print(y.shape)\n \"\"\"\n denominator = np.tile(np.sum(np.exp(z).transpose()).transpose(), (self.w.shape[1],1))\n #y = np.exp(z).transpose() / (np.tile(sum(np.exp(z).transpose()), (self.w.shape[1],1))).transpose()\n \"\"\"\n\n return y\n\n\n\n def backward(self, X: np.ndarray, outputs: np.ndarray, targets: np.ndarray) -> None:\n \"\"\"\n Args:\n X: images of shape [batch size, 785]\n outputs: outputs of model of shape: [batch size, num_outputs]\n targets: labels/targets of each image of shape: [batch size, num_classes]\n \"\"\"\n assert targets.shape == outputs.shape,\\\n f\"Output shape: {outputs.shape}, targets: {targets.shape}\"\n self.grad = np.zeros_like(self.w)\n assert self.grad.shape == self.w.shape,\\\n f\"Grad shape: {self.grad.shape}, w: {self.w.shape}\"\n\n self.grad = -(1/(targets.shape[0]*targets.shape[1]))*np.dot((targets - outputs).T, X).T\n\n def update_weights(self, learning_rate: float, batch_size: int) -> None:\n \"\"\"\n Funnction that updates the weights of the network with a given batch size \n and learning rate ( not really needed!)\n \"\"\"\n self.w = np.add(np.multiply((-learning_rate), self.grad), self.w)\n\n def zero_grad(self) -> None:\n self.grad = None\n\n\ndef one_hot_encode(Y: np.ndarray, num_classes: int):\n \"\"\"\n Args:\n Y: shape [Num examples, 1]\n num_classes: Number of classes to use for one-hot encoding\n Returns:\n Y: shape [Num examples, num classes]\n \"\"\"\n encode = np.array(Y).reshape(-1)\n\n return np.eye(num_classes)[encode]\n\n\ndef gradient_approximation_test(model: SoftmaxModel, X: np.ndarray, Y: np.ndarray):\n \"\"\"\n Numerical approximation for gradients. Should not be edited. \n Details about this test is given in the appendix in the assignment.\n \"\"\"\n epsilon = 1e-2\n for i in range(model.w.shape[0]):\n for j in range(model.w.shape[1]): \n orig = model.w[i, j].copy()\n model.w[i, j] = orig + epsilon\n logits = model.forward(X)\n cost1 = cross_entropy_loss(Y, logits)\n model.w[i, j] = orig - epsilon\n logits = model.forward(X)\n cost2 = cross_entropy_loss(Y, logits)\n gradient_approximation = (cost1 - cost2) / (2 * epsilon)\n model.w[i, j] = orig\n # Actual gradient\n logits = model.forward(X)\n model.backward(X, logits, Y)\n difference = gradient_approximation - model.grad[i, j]\n assert abs(difference) <= epsilon**2,\\\n f\"Calculated gradient is incorrect. 
\" \\\n f\"Approximation: {gradient_approximation}, actual gradient: {model.grad[i, j]}\\n\" \\\n f\"If this test fails there could be errors in your cross entropy loss function, \" \\\n f\"forward function or backward function\"\n\n\nif __name__ == \"__main__\":\n # Simple test on one-hot encoding\n Y = np.zeros((1, 1), dtype=int)\n Y[0, 0] = 3\n Y = one_hot_encode(Y, 10)\n assert Y[0, 3] == 1 and Y.sum() == 1, \\\n f\"Expected the vector to be [0,0,0,1,0,0,0,0,0,0], but got {Y}\"\n\n X_train, Y_train, *_ = utils.load_full_mnist(0.1)\n X_train = pre_process_images(X_train)\n Y_train = one_hot_encode(Y_train, 10)\n assert X_train.shape[1] == 785,\\\n f\"Expected X_train to have 785 elements per image. Shape was: {X_train.shape}\"\n \n # Simple test for forward pass. Note that this does not cover all errors!\n model = SoftmaxModel(0.0)\n logits = model.forward(X_train)\n np.testing.assert_almost_equal(\n logits.mean(), 1/10,\n err_msg=\"Since the weights are all 0's, the softmax activation should be 1/10\")\n\n # Gradient approximation check for 100 images\n X_train = X_train[:100]\n Y_train = Y_train[:100]\n for i in range(2):\n gradient_approximation_test(model, X_train, Y_train)\n model.w = np.random.randn(*model.w.shape)\n","repo_name":"larsmmo/TDT4265-Computer-Vision-and-Deep-Learning","sub_path":"assignment1/task4a.py","file_name":"task4a.py","file_ext":"py","file_size_in_byte":5393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"38805088862","text":"import cv2\nfrom face_detection import face\nfrom keras.models import load_model\nimport numpy as np\nfrom embedding import emb\n#from retreive_pymongo_data import database\n\n\nlabel=None\na={0:0,1:0}\npeople={0:\"satinder\",1:\"mont\"}\nabhi=None\n#data=database()\ne=emb()\nfd=face()\n\nprint('attendance till now is ')\n#data.view()\n\nmodel=load_model('face_reco2.MODEL')\n\n\ndef test():\n test_run=cv2.imread('1.jpg',1)\n test_run=cv2.resize(test_run,(160,160))\n #test_run=np.rollaxis(test_run,2,0)\n test_run=test_run.astype('float')/255.0\n test_run=np.expand_dims(test_run,axis=0)\n test_run=e.calculate(test_run)\n test_run=np.expand_dims(test_run,axis=0)\n test_run=model.predict(test_run)[0]\n\n\ncap=cv2.VideoCapture(1)\nret=True\ntest()\nwhile ret:\n ret,frame=cap.read()\n frame=cv2.flip(frame,1)\n det,coor=fd.detectFace(frame)\n\n if(det is not None):\n for i in range(len(det)):\n detected=det[i]\n k=coor[i]\n f=detected\n detected=cv2.resize(detected,(160,160))\n #detected=np.rollaxis(detected,2,0)\n detected=detected.astype('float')/255.0\n detected=np.expand_dims(detected,axis=0)\n feed=e.calculate(detected)\n feed=np.expand_dims(feed,axis=0)\n prediction=model.predict(feed)[0]\n\n result=int(np.argmax(prediction))\n if(np.max(prediction)>.70):\n for i in people:\n if(result==i):\n label=people[i]\n if(a[i]==0):\n print(\"a\")\n a[i]=1\n abhi=i\n else:\n label='unknown'\n #data.update(label)\n\n\n cv2.putText(frame,label,(k[0],k[1]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)\n #if(abhi is not None):\n #if(a[abhi]==1):\n #cv2.putText(frame,\"your attendance is complete\",(x,y-30),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)\n cv2.rectangle(frame,(k[0],k[1]),(k[0]+k[2],k[1]+k[3]),(252,160,39),3)\n cv2.imshow('onlyFace',f)\n cv2.imshow('frame',frame)\n if(cv2.waitKey(1) & 0XFF==ord('q')):\n 
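# stop the capture loop when the user presses 'q'\n        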
break\ncap.release()\ncv2.destroyAllWindows()\n#data.export_csv()\n","repo_name":"satinder147/Attendance-using-Face","sub_path":"recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"61"} +{"seq_id":"29206985875","text":"# Dependencies\nfrom bs4 import BeautifulSoup as bs\nimport requests\nfrom splinter import Browser\nimport pandas as pd\n\n\ndef init_browser():\n    # @NOTE: Replace the path with your actual path to the chromedriver\n    executable_path = {'executable_path': 'chromedriver.exe'}\n    return Browser('chrome', **executable_path, headless=False)\n\ndef scrape():\n\n    browser = init_browser()\n\n    # NASA MARS NEWS\n    # -------------------------------------------------------------------------------------------------------------------------------------\n    url = \"https://mars.nasa.gov/news/\"\n    browser.visit(url)\n\n    for x in range(5):\n        try:\n            browser.click_link_by_partial_text('MORE')\n        except:\n            print(\"Scraping Complete\")\n\n    html = browser.html\n    soup = bs(html,\"html.parser\") \n    results = soup.find_all('li',class_='slide')\n    list_mars_news =[]\n    for result in results:\n        # Error handling\n        try:\n            news_title = result.find('div',class_='content_title').text.lstrip().rstrip()\n            news_p = result.find('div',class_='article_teaser_body').text.lstrip().rstrip()\n            # print(news_title)\n            # print(news_p)\n            dict_result ={\n                \"news_title\" : news_title,\n                \"news_p\" : news_p\n            }\n            list_mars_news.append(dict_result)\n        except AttributeError as e:\n            print(e)\n\n\n    # JPL MARS SPACE IMAGES - FEATURED IMAGE\n    # -------------------------------------------------------------------------------------------------------------------------------------\n    url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n    browser.visit(url)\n\n    html = browser.html\n    soup = bs(html,\"html.parser\")\n    results = \"https://www.jpl.nasa.gov\" + soup.find('div', class_='carousel_items').article.footer.a[\"data-fancybox-href\"]\n\n    list_mars_images = []\n    list_mars_images.append(results)\n\n    # for x in range(5):\n    #     try:\n    #         browser.click_link_by_partial_text('MORE')\n    #     except:\n    #         print(\"Scraping Complete\")\n\n    # html = browser.html\n    # soup = bs(html,\"html.parser\")\n    # article = soup.find('ul', class_='articles')\n    # results = article.find_all('li', class_='slide')\n    # list_mars_images =[]\n    # for result in results:\n    #     try:\n    #         featured_image_url = result.a[\"data-fancybox-href\"]\n    #         if (featured_image_url):\n    #             featured_image_url = \"https://www.jpl.nasa.gov\" + featured_image_url\n    #             # print(featured_image_url)\n    #             list_mars_images.append(featured_image_url)\n    #     except AttributeError as e:\n    #         print('Error:',e)\n\n\n    # MARS WEATHER\n    # -------------------------------------------------------------------------------------------------------------------------------------\n    url = \"https://twitter.com/marswxreport?lang=en\"\n    response = requests.get(url)\n    soup = bs(response.text,\"html.parser\")\n    print(soup.prettify())\n\n    results = soup.find_all('div', class_='js-tweet-text-container')\n    # print(\" \".join(results[0].p.text.split()[:-1]) + \" hPa\")\n    mars_weather = \" \".join(results[0].p.text.split()[:-1]) + \" hPa\"\n\n\n    # MARS FACTS\n    # -------------------------------------------------------------------------------------------------------------------------------------\n\n    url = \"https://space-facts.com/mars/\"\n    tables = pd.read_html(url)\n    html_table = []\n    for item, row in tables[0].iterrows():\n        
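# collect each Mars-facts row as an {element, value} dict\n        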
dict_xy = {\n            \"element\": row.iloc[0],\n            \"value\": row.iloc[1]\n        }\n        html_table.append(dict_xy)\n\n    # html_table={\n    #     \"elements\": list_elements,\n    #     \"values\": list_values\n    # }\n\n\n    # df = tables[0]\n    # df.columns = ['Elements', 'Values']\n    # df.set_index('Elements', inplace=True)\n\n    # html_table = df.to_html()\n\n    # html_table = html_table.replace('\\n', '')\n\n    # df.to_html('table.html')\n\n    \n\n\n    # MARS HEMISPHERES\n    # ------------------------------------------------------------------------------------------------------------------------------------- \n    url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n    browser.visit(url)\n\n    html = browser.html\n    soup = bs(html,\"html.parser\")\n    results = soup.find_all('div', class_='item')\n    hemisphere_image_urls = []\n    for result in results:\n        try:\n            x = result.find('div', class_='description')\n            hemisphere = x.a.text\n\n            browser.click_link_by_partial_text(hemisphere) \n            html_x = browser.html\n\n            soup_x = bs(html_x,\"html.parser\")\n            link = soup_x.find('div', class_='downloads').find('li').a[\"href\"]\n            title = soup_x.find('h2', class_='title').text\n            \n            hemisphere_image_urls_dict = {\n                \"title\": title, \n                \"img_url\":link\n            }\n            hemisphere_image_urls.append(hemisphere_image_urls_dict) \n            \n            # print(title)\n            # print(link)\n            # print(\"-----------------------------------------------------------------------------------------------------------\") \n            \n            browser.visit(url)\n        \n        except AttributeError as e:\n            print('Error:',e)\n\n    browser.quit()\n\n    # CREATE A DICTIONARY WITH ALL THE DATA ACQUIRED\n    # ------------------------------------------------------------------------------------------------------------------------------------- \n\n    mars_dict = {\n        \"mars_news\" : list_mars_news,\n        \"mars_images\" : list_mars_images,\n        \"mars_weather\" : mars_weather,\n        \"html_table\" : html_table,\n        \"hemisphere_image_urls\" : hemisphere_image_urls\n    }\n\n    return mars_dict","repo_name":"jwaliaga/Web-Scraping-and-Document-Databases","sub_path":"scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43548543337","text":"import graphics\nfrom graphics import update\n\nwindow1 = graphics.GraphWin(\"rectangles\", 600, 600)\nscroll = []\n\nfor i in range(1):\n\n    rectangle1 = graphics.Rectangle(graphics.Point(0, 0), graphics.Point(600, 30))\n    rectangle1.setFill('purple')\n    rectangle1.draw(window1)\n    scroll.append(rectangle1)\n\n    rectangle2 = graphics.Rectangle(graphics.Point(0, -100), graphics.Point(600, -50))\n    rectangle2.setFill('green')\n    rectangle2.draw(window1)\n    scroll.append(rectangle2)\n\nwhile True:\n\n    for r in scroll:\n\n        r.move(0,1)\n        update(100)\n\n        if r.getP2().getY() > 599:\n            r.move(0,-500)\n\n\n\n\n\n","repo_name":"ksindy/ScrollBox","sub_path":"scroll_box_graphics.py","file_name":"scroll_box_graphics.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"742920411","text":"# Even/odd exercise\nnum = int(input('Enter a number: '))\n\n# Check whether the number modulo two equals 0\nif num % 2 == 0:\n    # The number is even.\n    print(f'The number {num} is even.')\n    \nelse:\n    # If it is not equal to 0, the number is odd.\n    print(f'The number {num} is 
odd.')\n","repo_name":"Er1ck20/Python---Ejercicios","sub_path":"Par_Impar.py","file_name":"Par_Impar.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72303849154","text":"import sys,os\nsys.path.append(os.path.abspath('../realtime_actions'))\nimport struct\nimport numpy as np \nimport cv2\nimport utils.dirs\n\nclass Header(object):\n    def __init__(self,n_frames,width,height):\n        self.n_frames=n_frames\n        self.width=width\n        self.height=height\n        self.frame_size=self.width*self.height\n\n    def size(self):\n        return self.n_frames*self.width*self.height\n    \n    def __str__(self):\n        f=str(self.n_frames)\n        w=str(self.width)\n        h=str(self.height)\n        return f +\",\"+w+\",\"+h +\"\\n\"\n\nclass RawAction(object):\n    def __init__(self,frames):\n        self.frames=frames\n\n    def normalize(self):\n        act_i=np.array(self.frames,dtype=float)\n        min_value=np.min(act_i[act_i!=0])\n        act_i[act_i!=0]-=min_value+1\n        max_value=np.max(act_i)\n        act_i/=max_value\n        act_i*=128.00\n        act_i[act_i!=0]=128-act_i[act_i!=0]\n        self.frames=[ act_i[i] for i in range(act_i.shape[0])]\n        #print(type(self.frames[0]))\n\n@utils.dirs.apply_to_files\ndef from_binary(action_path,out_path):\n    raw_action=read_binary(str(action_path))\n    raw_action.normalize()\n    action_name=action_path.get_name()\n    action_name=action_name.split(\".\")[0]\n    utils.dirs.make_dir(out_path)\n    for i,frame_i in enumerate(raw_action.frames): \n        #frame_i=standarize(frame_i)\n        name=action_name+str(i)+\".jpg\"\n        full_path=out_path.create(name)\n        cv2.imwrite(str(full_path),frame_i)\n\ndef standarize(img):\n    img_nonzero=np.nonzero(img)\n    z_max=np.min(img[img_nonzero])-1\n    img[img_nonzero]-=z_max\n    return img\n\ndef read_binary(action_path):\n    with open(action_path, mode='rb') as f:\n        int_action=np.fromfile(f, dtype=np.uint32)\n    header=read_header(int_action)\n    #print(header)\n    assert (len(int_action)-header.size())==3\n    frames=read_frames(header,int_action)\n    return RawAction(frames)\n\ndef read_header(int_action):\n    n_frames=int_action[0]\n    width=int_action[1]\n    height=int_action[2]\n    return Header(n_frames,width,height)\n\ndef read_frames(hd,int_action):\n    indexes=range(hd.n_frames)\n    return [read_frame(i,int_action,hd) for i in indexes]\n\ndef read_frame(i,int_action,hd):\n    start=3+i*hd.frame_size\n    end=start+hd.frame_size\n    frame=int_action[start:end]\n    frame=np.array(frame)\n    frame=frame.astype(float,copy=False)\n    frame=np.reshape(frame,(hd.height,hd.width))\n    return frame\n\nif __name__ == \"__main__\":\n    from_binary(\"../dataset3/raw\",\"../dataset3/depth\")","repo_name":"tjacek/realtime_actions","sub_path":"preproc/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7616537529","text":"\"\"\"website URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom rest_framework.schemas import get_schema_view\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom base import views\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nfrom rest_framework import routers\n\nfrom django.views.generic.base import TemplateView\n\n# router = routers.DefaultRouter()\n# router.register(r'users', views.UserDetails)\n# router.register(r'groups', views.GroupDetails)\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', views.showHome, name = 'home'),\n    path('signup/',views.showSignUp, name = 'signup'),\n    path('signin/', views.showSignIn, name = 'signin'),\n    path('build/', views.showBuildPage, name = 'build'),\n    path('recommend/', views.showRecommendedPage, name = 'recommend'),\n    path('cpu/', views.get_cpu, name = 'cpu'),\n    path('gpu/', views.get_gpu, name = 'gpu'),\n    path('motherboard/', views.get_motherboard, name='motherboard'),\n    path('psu/', views.get_psu, name='psu'),\n    path('memory/', views.get_ram, name='memory'),\n    path('storage/', views.get_storage, name='storage'),\n    path('storage2/', views.get_storage2, name='storage2'),\n    path('case/', views.get_case, name='case'),\n    path('liquidCooling/', views.get_liquidCool, name='liquidCooling'),\n    path('airCooling/', views.get_airCool, name='airCooling'),\n    path('recommendBuild/', views.renderRecommendedBuild, name='recommendBuild'),\n    path('recovery/', views.showRecoveryPage, name ='recovery'),\n    path('logout/', views.logoutView, name = 'logout'),\n    path('newpassword/', views.newPasswordView, name = 'newpassword'),\n    path('openapi', get_schema_view(\n        title=\"MonkeBuilderAPI\",\n        description=\"API for PC builder site\",\n        version=\"1.0.0\"\n    ), name='openapi-schema'),\n    \n\n    # path('', views.home_view),\n    # path('', TemplateView.as_view(template_name='home.html'), name=\"home\"),\n    # path('', include(router.urls)),\n    \n    # path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n    \n]\n\nurlpatterns += staticfiles_urlpatterns()\n\n\n","repo_name":"matthew-mcc/CPSC471_Project","sub_path":"website/website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39553382361","text":"from __future__ import print_function\nimport tensorflow as tf\nimport threading as th\nimport numpy as np\nfrom glob import glob\nfrom math import ceil\n\nfrom keras.layers import Dense, GRU, Embedding\nfrom keras.metrics import binary_accuracy as accuracy\nfrom keras.objectives import binary_crossentropy as crossentropy\nfrom keras.models import Sequential\nfrom keras import backend as K\n\nclass RNNModel:\n\n    def __init__(self, qsize=256, vocab_size=10000, vector_size=0, load=None):\n        # Create one model for each bucket\n        self._model = self._buildModel(qsize, vocab_size, vector_size)\n        self._saver = tf.train.Saver()\n        self._sess = tf.Session()\n        if load is None:\n            self._sess.run(tf.global_variables_initializer())\n        else:\n            self._saver.restore(self._sess, load)\n\n    def _buildModel(self, qsize, vsize, vector_size):\n        t_shape, t_dtype = (), tf.float32\n        if not vector_size:\n            x_shape = (None,)\n            x_dtype = tf.int32\n        else:\n            x_shape = (None, vector_size)\n            x_dtype = tf.float32\n        x_in = tf.placeholder(x_dtype, shape=x_shape)\n        t_in = tf.placeholder(t_dtype, shape=t_shape)\n        
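# batch size placeholder: defaults to 64 and is overridden per step via feed_dict\n        batch_size = 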
tf.placeholder_with_default(tf.constant(64), shape=())\n\n # Input queue\n q = tf.PaddingFIFOQueue(qsize, (x_dtype, t_dtype),\n shapes=(x_shape, t_shape))\n enqueue_op = q.enqueue((x_in, t_in))\n\n # Fetched variables\n x, t = q.dequeue_many(batch_size)\n\n # Model definition\n model = Sequential()\n if not vector_size:\n model.add(Embedding(output_dim=512,\n input_dim=vsize+2, mask_zero=True))\n model.add(GRU(32))\n else:\n model.add(GRU(32, input_dim=vector_size))\n model.add(Dense(64, activation='relu'))\n model.add(Dense(64, activation='relu'))\n model.add(Dense(64, activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n y = K.squeeze(model(x), 1) # shape = (batch_size,) - same as t\n\n # Metrics\n loss = K.mean(K.binary_crossentropy(y, t))\n acc = accuracy(t, y)\n\n # Trainer\n train_op = tf.train.AdamOptimizer().minimize(loss)\n\n return {\n 'x': x_in,\n 't': t_in,\n 'y': y,\n 'q': q,\n 'acc': acc,\n 'loss': loss,\n 'train_op': train_op,\n 'enqueue_op': enqueue_op,\n 'batch_size': batch_size\n }\n\n def fit(self, data, epochs=10, batch_size=64):\n\n coord = tf.train.Coordinator()\n stats = {'losses': [], 'accuracies': [], 'count': 0}\n valid_stats = {'accuracies': [], 'losses': [], 'count': 0}\n m = self._model\n sess = self._sess\n\n # Trainer code\n def train():\n remaining = data.train_size\n # print(\"Starting trainer ...\")\n\n for i in range(epochs):\n print(\"Epoch {}\".format(i+1))\n\n while remaining:\n if coord.should_stop(): return\n\n batch_size_ = min(batch_size, remaining)\n try:\n # Run a single train pass (eval and backprop)\n r = sess.run([m['train_op'], m['loss'], m['acc']],\n feed_dict={m['batch_size']: batch_size_})\n except tf.errors.OutOfRangeError:\n # Queue is closed, and nothing remains\n return\n except Exception as e:\n # Some unexpected error occurred, halt everything\n coord.request_stop(e)\n return\n\n stats['count'] += batch_size_\n stats['losses'].append(r[1])\n stats['accuracies'].append(r[2])\n remaining -= batch_size_\n\n print(\"@{}/{} - loss: {:.5f}, acc: {:.2%}\".format(\n stats['count'], data.train_size,\n np.mean(stats['losses']),\n np.mean(stats['accuracies'])\n ), end='\\r')\n\n print(''.join([' '] * 80), end='\\r') # clear last line\n\n valid_stats['count'] = 0\n valid_stats['losses'] = []\n valid_stats['accuracies'] = []\n remaining = data.valid_size\n while remaining:\n if coord.should_stop(): return\n\n batch_size_ = min(batch_size, remaining)\n try:\n # Run a single train pass (eval and backprop)\n r = sess.run([m['loss'], m['acc']],\n feed_dict={m['batch_size']: batch_size_})\n except tf.errors.OutOfRangeError:\n # Queue is closed, and nothing remains\n return\n except Exception as e:\n # Some unexpected error occurred, halt everything\n coord.request_stop(e)\n return\n valid_stats['count'] += batch_size_\n valid_stats['losses'].append(r[0])\n valid_stats['accuracies'].append(r[1])\n remaining -= batch_size_\n\n # Print epoch statistics\n if valid_stats['count']:\n print(\"Stats: loss {:.5f}, acc {:.2%}, count {} \"\n \"- val_loss {:.5f}, val_acc {:.2%}, val_count {}\"\n .format(np.mean(stats['losses']),\n np.mean(stats['accuracies']),\n stats['count'],\n np.mean(valid_stats['losses']),\n np.mean(valid_stats['accuracies']),\n valid_stats['count']))\n else: # if there's no validation set\n print(\"Stats: loss {:.5f}, acc {:.2%}, count {} \"\n .format(np.mean(stats['losses']),\n np.mean(stats['accuracies']),\n stats['count']))\n\n # Reset stats each epoch\n stats['count'] = 0\n stats['losses'] = []\n stats['accuracies'] = []\n 
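# start the next epoch with the full training set again\n                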
remaining = data.train_size\n\n coord.request_stop()\n # print(\"Ending trainer...\")\n\n # Create the trainer thread\n trainer = th.Thread(target=train)\n trainer.start()\n\n try:\n # Start sending out data\n for _ in range(epochs):\n # Send training set\n for x, t in zip(data.train, data.train_labels):\n if coord.should_stop(): break\n sess.run(m['enqueue_op'], feed_dict={m['x']: x, m['t']: t})\n # Send validation set\n for x, t in zip(data.valid, data.valid_labels):\n if coord.should_stop(): break\n sess.run(m['enqueue_op'], feed_dict={m['x']: x, m['t']: t})\n data.shuffle() # shuffle at epoch end\n except KeyboardInterrupt:\n print(''.join([' '] * 80), end='\\r') # clear last line\n print(\"User cancelled.\")\n coord.request_stop()\n except tf.errors.CancelledError:\n pass # trainer closed up the queue, no problem\n except Exception as e: # Unexpected exception\n coord.request_stop(e)\n\n coord.join([trainer])\n\n def test(self, data, batch_size=64):\n coord = tf.train.Coordinator()\n results = {'accuracies': [], 'count': 0}\n m = self._model\n sess = self._sess\n\n # Evaluator code\n def evaluator():\n remaining = data.test_size\n # print(\"Starting evaluator...\")\n while remaining:\n if coord.should_stop(): break\n batch_size_ = min(batch_size, remaining)\n try:\n # Run a single train pass (eval and backprop)\n acc = sess.run(m['acc'],\n feed_dict={m['batch_size']: batch_size_})\n except tf.errors.OutOfRangeError:\n # Queue is closed, but nothing remains\n break\n except Exception as e:\n # Some unexpected error occurred, halt everything\n coord.request_stop(e)\n break\n results['accuracies'].append(acc)\n results['count'] += batch_size_\n remaining -= batch_size_\n coord.request_stop()\n # print(\"Ending evaluator...\")\n\n # Create the trainer threads\n evaluator = th.Thread(target=evaluator)\n evaluator.start()\n\n try:\n for x, t in zip(data.test, data.test_labels):\n if coord.should_stop(): break\n sess.run(m['enqueue_op'], feed_dict={m['x']: x, m['t']: t})\n except KeyboardInterrupt:\n print(\"User cancelled.\")\n coord.request_stop()\n except Exception as e: # Unexpected exception\n coord.request_stop(e)\n\n coord.join([evaluator])\n\n print(\"Accuracy: {} (over {} test samples)\"\n .format(np.mean(results['accuracies']), results['count']))\n\n def save(self, path):\n self._saver.save(self._sess, path)\n\n def load(self, path):\n self._saver.restore(self._sess, path)\n","repo_name":"myann/deeplearning-chf","sub_path":"models/rnn_model.py","file_name":"rnn_model.py","file_ext":"py","file_size_in_byte":9688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29684006334","text":"import signal\nimport socket\nfrom modules.structures import ClientRequest, ServerResponse\nfrom modules.classes import Card, CardRoomPair, CardTtlPair\nfrom modules.response_codes import ResponseCodes\nimport redis\nimport os\nimport threading\nfrom redis import Redis\nimport posix_ipc\n\n\nPID_FILE = '.pid'\nSEMAPHORE_NAME = 'server_mutex'\nSERVER_HOST = 'localhost'\nSERVER_PORT = 9090\nREDIS_PORT = 6379\n\n\ndef exit_program():\n \"\"\"\n Корректный выход из программы\n :return:\n \"\"\"\n for i in active_sockets:\n i.close() # закрываем сокеты\n semaphore.release() # освобождаем семафор\n semaphore.close()\n os.kill(pid, signal.SIGTERM) # убиваем процесс\n\n\ndef remove_pid_file(*args, **kwargs):\n \"\"\"\n Удаление pid файла (ведет к выходу из программы)\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n if args:\n # если есть ошибки, то 
выводим их\n print(args)\n\n try:\n os.remove(PID_FILE) # удаляем .pid файл\n except OSError:\n pass\n\n\ndef pid_file_listener():\n \"\"\"\n Прослушиватель .pid файла, выходит из программы при его удалении\n :return:\n \"\"\"\n while os.path.isfile(PID_FILE):\n pass # ожидаем удаления файла\n print('.pid файл удален, выключение...')\n exit_program()\n\n\ndef stop_signals_handler(signum, frame):\n \"\"\"\n Обработчик сигналов выключения (при выходе черех CTRL + C)\n :param signum:\n :param frame:\n :return:\n \"\"\"\n remove_pid_file()\n\n\ndef client_listener(client: socket, addr: tuple[str, int], redis_connection: Redis):\n \"\"\"\n Обслуживание запросов клиента\n :param client: сокет клиента\n :param addr: адрес клиента\n :param redis_connection: подключение к редис\n :return:\n \"\"\"\n while True:\n # обслуживаем клиента, пока он не захочет первать соединение\n data = client.recv(1024)\n request_code, encoded_body = ClientRequest.decode(data) # декодим запрос клиента\n print(f'Получен запрос {request_code}')\n\n if request_code == 0:\n # команда проверки доступа\n pair = CardRoomPair.decode(encoded_body)\n if redis_connection.sismember(pair.card_number, pair.room):\n response = ServerResponse(success=True, code=ResponseCodes.OK)\n else:\n response = ServerResponse(success=False, code=ResponseCodes.ACCESS_DENIED)\n\n elif request_code == 1:\n # команда активации карты\n card = CardTtlPair.decode(encoded_body)\n if redis_connection.exists(card.card_number):\n response = ServerResponse(success=False, code=ResponseCodes.ALREADY_ACTIVE)\n else:\n # добавляем элемент с пустым значением (т.к. сет не может быть пустым)\n redis_connection.sadd(card.card_number, '')\n seconds_in_day = 86400 # редис хранит TTL в секундах, а приходят дни, так что преобразуем к секундам\n time_to_live = int(card.time_to_live) * seconds_in_day\n redis_connection.expire(card.card_number, time_to_live)\n response = ServerResponse(success=True, code=ResponseCodes.OK)\n\n elif request_code == 2:\n # команда деактивации карты\n card = Card.decode(encoded_body)\n redis_connection.delete(card.card_number) # удаляем ключ\n response = ServerResponse(success=True, code=ResponseCodes.OK)\n\n elif request_code == 3:\n # команда привязки карты к номеру\n pair = CardRoomPair.decode(encoded_body)\n if not redis_connection.exists(pair.card_number):\n response = ServerResponse(success=False, code=ResponseCodes.INACTIVE)\n else:\n redis_connection.sadd(pair.card_number, pair.room) # добавляем элемент к множеству\n response = ServerResponse(success=True, code=ResponseCodes.OK)\n\n elif request_code == 4:\n # команда отвязки карты от номера\n pair = CardRoomPair.decode(encoded_body)\n redis_connection.srem(pair.card_number, pair.room) # удаляем элемент из множества\n msg = f'Карта {pair.card_number} отвязана от номера {pair.room}'\n response = ServerResponse(success=True, code=ResponseCodes.OK)\n\n elif request_code == 5:\n # команда выключения сервера\n print('Получена команда отключения')\n remove_pid_file() # удаление pid файла заставляет программу выключатсья\n return\n elif request_code == 6:\n # команда отключения клиента\n print(f'Клиент {addr} отключился')\n client.close()\n active_sockets.remove(client) # удаляем из активных сокетов\n return\n else:\n response = ServerResponse(success=False, code=ResponseCodes.UNKNOWN_COMMAND)\n\n client.send(response.encode()) # посылаем ответ клиенту\n\n\ndef run_server():\n \"\"\"\n Запуск сервера\n :return:\n \"\"\"\n redis_connection = redis.StrictRedis(\n host='localhost',\n 
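# REDIS_PORT (6379, the Redis default) is defined at the top of this module\n        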
port=REDIS_PORT,\n decode_responses=True,\n charset='utf-8',\n )\n\n # биндим сокет и начинаем прослушивание\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # помечаем как переиспользуемый\n active_sockets.append(server)\n server.bind((SERVER_HOST, SERVER_PORT))\n server.listen(5)\n print('Ожидание подключения')\n\n while True:\n try:\n client, addr = server.accept()\n except socket.error:\n break\n active_sockets.append(client)\n print(f'Клиент {addr} подключился')\n # запускаем взаимодейтсвие в новом потоке\n threading.Thread(target=client_listener, args=(client, addr, redis_connection)).start()\n\n\nif __name__ == '__main__':\n try:\n # создаем семафор, если его нету\n semaphore = posix_ipc.Semaphore(SEMAPHORE_NAME, posix_ipc.O_CREX, initial_value=1)\n except posix_ipc.ExistentialError:\n # иначе активируем уже имеющийся\n semaphore = posix_ipc.Semaphore(SEMAPHORE_NAME)\n semaphore.acquire() # ставим блокировку\n\n pid = os.getpid()\n with open(PID_FILE, 'w') as pid_file:\n pid_file.write(str(pid)) # записываем id процесса в файл\n\n # вешаем обработчики при выходе через CTRL + C\n signal.signal(signal.SIGINT, stop_signals_handler)\n\n threading.excepthook = remove_pid_file # отлавливаем исключения в потоках для корректного выхода из программы\n threading.Thread(target=pid_file_listener).start() # просушивание .pid файла\n active_sockets = [] # список активный сокетов (будут отключены при выходе из программы)\n run_server() # запускаем сервер\n","repo_name":"SergeiGD/sockets-binary","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8046,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35886420028","text":"#!/usr/bin/env python\n\n# commaCode script takes list and returns readable string\n\n# Initialize the list\nyourList = ['spam', 'eggs', 'apple', 'banana']\n\n# Initialize the final string to be returned\nfinalString = ''\n\nif not yourList: # If the list is empty, let the user know \n print(\"List is empty\")\nelse:\n for n in range(0,len(yourList)): # Iterate through the list and append item and comma to finalString\n if n == len(yourList)-1: # At the end of the list, add 'and' before the last item\n finalString += \"and \" + yourList[n]\n else:\n finalString += yourList[n] + \", \"\nprint(finalString) # Prints \"spam, eggs, apple, and banana\"\n","repo_name":"joonnoh/Automatetheboringstuff","sub_path":"commaCode.py","file_name":"commaCode.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10620264224","text":"class Solution:\n def dailyTemperatures(self, T: List[int]) -> List[int]:\n if not T:\n return []\n size = len(T)\n s = [0]\n ans = [0] * size\n for i in range(1, size):\n while s and T[i] > T[s[-1]]:\n j = s.pop()\n ans[j] = i - j\n s.append(i)\n return ans","repo_name":"Dawinia/LeetCode","sub_path":"Stack/739. 每日温度.py","file_name":"739. 
每日温度.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16208480858","text":"\"\"\"\nQuestion:\n Move Zeroes\n\n Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.\n\n For example, given nums = [0, 1, 0, 3, 12], after calling your function, nums should be [1, 3, 12, 0, 0].\n\n Note:\n You must do this in-place without making a copy of the array.\n Minimize the total number of operations.\n\n Credits:\n Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.\n\nPerformance:\n 1. Total Accepted: 15730 Total Submissions: 38045 Difficulty: Easy\n 2. Sorry. We do not have enough accepted submissions.\n\"\"\"\n\nclass Solution(object):\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n reached_zero_count = 0\n\n for idx, num in enumerate(nums):\n if num == 0:\n reached_zero_count += 1\n if num != 0:\n if reached_zero_count > 0: # make sure has reached at least a zero.\n nums[idx - reached_zero_count] = num\n nums[idx] = 0\n\n\ndef test_func(nums, result):\n Solution().moveZeroes(nums)\n assert nums == result, [nums, result]\n\ntest_func([], [])\ntest_func([0], [0])\ntest_func([1], [1])\ntest_func([0, 0], [0, 0])\ntest_func([0, 1], [1, 0])\ntest_func([1, 1], [1, 1])\ntest_func([0, 1, 0, 3, 12], [1, 3, 12, 0, 0])\ntest_func([0, 1, 0, 3, 12, 0], [1, 3, 12, 0, 0, 0])\ntest_func([0, 1, 0, 0, 0, 3, 12, 0], [1, 3, 12, 0, 0, 0, 0, 0])\n","repo_name":"dchentech/leetcode","sub_path":"283-move-zeroes.py","file_name":"283-move-zeroes.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11881933409","text":"import numpy as np\n\n\ndef blockIndex(i, j, rows, cols, order='C'):\n \"\"\"For a matrix block, we return the index of row and columns.\n\n For a matrix we choose a block using the upper left corner positioned\n at (i, j) and size (row, col). Each element of the block has row and\n col index, they are returned in two arrays. 
The order parameter controls whether\n row-major or column-major ordering is used.\n\n For example, blockIndex(1, 3, 2, 3, 'C') returns\n (array([1, 1, 1, 2, 2, 2]), array([3, 4, 5, 3, 4, 5]))\n :param i: int, the row of the upper left corner\n :param j: int, the column of the upper left corner\n :param rows: int, number of rows of the block\n :param cols: int, number of columns of the block\n :param order: char, ('C'/'F') whether to return row- or column-major order\n \"\"\"\n if order == 'C':\n row = i + (np.arange(rows)[:, np.newaxis] + np.zeros(cols)).flatten()\n col = j + (np.zeros(rows)[:, np.newaxis] + np.arange(cols)).flatten()\n elif order == 'F':\n row = i + (np.zeros(cols)[:, np.newaxis] + np.arange(rows)).flatten()\n col = j + (np.arange(cols)[:, np.newaxis] + np.zeros(rows)).flatten()\n else:\n raise Exception(\"Unsupported order\")\n return row, col\n\n\ndef finiteDiff(fun, x, h, *args, **kwargs):\n \"\"\"Perform finite difference for gradient / Jacobian estimation.\n\n Now I only support forward difference since it is useful enough.\n The function should be of y = fun(x, *args, **kwargs)\n\n :param fun: callable, the function to be used.\n :param x: ndarray, the point to evaluate upon\n :param h: float, step size\n :return: ndarray, 1d or 2d or higher, depends on shape of function return.\n \"\"\"\n y0 = fun(x, *args, **kwargs)\n rst = []\n z = x.copy()\n for i in range(x.size):\n z[:] = x\n z[np.unravel_index(i, x.shape)] += h\n yi = fun(z, *args, **kwargs)\n rst.append((yi - y0) / h)\n return np.array(rst).T\n\n\nif __name__ == '__main__':\n print(blockIndex(1, 2, 2, 3, 'C'))\n print(blockIndex(1, 2, 2, 3, 'F'))\n","repo_name":"paperstiger/gaolib","sub_path":"gaolib/math/extnp.py","file_name":"extnp.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14215513783","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass Encoder(nn.Module):\r\n def __init__(self, args):\r\n super(Encoder, self).__init__()\r\n self.bn0 = nn.BatchNorm1d(args.num_features)\r\n\r\n self.conv1 = nn.Conv1d(args.num_features, 64, 3, stride=1, padding=1)\r\n self.bn1 = nn.BatchNorm1d(64)\r\n \r\n self.conv2 = nn.Conv1d(64, 64, 3, stride=1, padding=1)\r\n self.bn2 = nn.BatchNorm1d(64)\r\n \r\n self.conv3 = nn.Conv1d(64, 128, 3, stride=1, padding=1)\r\n self.bn3 = nn.BatchNorm1d(128)\r\n \r\n self.conv4 = nn.Conv1d(128, 128, 3, stride=1, padding=1)\r\n self.bn4 = nn.BatchNorm1d(128)\r\n \r\n self.lstm1 = nn.LSTM(128, args.lstm1_size)\r\n self.bn5 = nn.BatchNorm1d(args.lstm1_size)\r\n self.drop = nn.Dropout()\r\n \r\n self.lstm2 = nn.LSTM(args.lstm1_size, args.lstm2_size)\r\n \r\n def forward(self, x):\r\n # (batch, seq_len, in_size)\r\n x = x.permute(0,2,1) # (batch, channels, length)\r\n x = self.bn0(x) # (batch, channels, length)\r\n \r\n x = self.conv1(x) # (batch, channels, length)\r\n x = self.bn1(x) # same shape\r\n x = F.relu(x) # same shape\r\n \r\n x = self.conv2(x)# (batch, channels, length)\r\n x = self.bn2(x) # same shape\r\n x = F.relu(x)# same shape\r\n \r\n x = F.max_pool1d(x,2) # (batch, channels, length/2)\r\n \r\n x = self.conv3(x) # (batch, channels, length/2)\r\n x = self.bn3(x) # same shape\r\n x = F.relu(x) # same shape\r\n \r\n x = self.conv4(x) # (batch, channels, length/2)\r\n x = self.bn4(x) # same shape\r\n x = F.relu(x) # same shape\r\n \r\n x = F.max_pool1d(x,2) # (batch, channels, length/4)\r\n \r\n # change of notation: (batch, in_features, seq_len)\r\n x = 
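# A quick self-contained check of the forward-difference idea used by
# finiteDiff above: for f(x) = sum(x**2) the exact gradient is 2*x, so the
# estimate should agree to roughly O(h). The helper below is a simplified
# re-statement, not the repo's implementation.
import numpy as np

def forward_diff(fun, x, h=1e-6):
    y0 = fun(x)
    grad = np.empty_like(x)
    for i in range(x.size):
        z = x.copy()
        z.flat[i] += h  # perturb one coordinate at a time
        grad.flat[i] = (fun(z) - y0) / h
    return grad

x = np.array([1.0, -2.0, 0.5])
assert np.allclose(forward_diff(lambda v: np.sum(v**2), x), 2 * x, atol=1e-4)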
x.permute(2,0,1) # (seq_len, batch, in_features)\r\n x, _ = self.lstm1(x) # (seq_len, batch, lstm1_size)\r\n x = x.permute(1,2,0) # (batch, lstm1_size, seq_len)\r\n x = self.bn5(x) # same shape\r\n x = self.drop(x) # same shape\r\n x = F.relu(x) # same shape\r\n \r\n x = x.permute(2,0,1) # (seq_len, batch, lstm1_size)\r\n hs_bar, (hS,_) = self.lstm2(x) \r\n # x (seq_len, batch, lstm2_size), hS (1, batch, lstm2_size)\r\n return hs_bar, hS\r\n\r\n\r\nclass GlobalAttn(nn.Module):\r\n def __init__(self, args):\r\n super(GlobalAttn, self).__init__()\r\n # lstm_size is the size of top layer of encoder\r\n self.W = nn.Linear(args.lstm_size, args.lstm_size)\r\n \r\n def score(self, hs_bar, ht):\r\n '''\r\n The score function is the general score. \r\n \r\n Returns:\r\n score: of shape (batch, seq_len, 1)\r\n '''\r\n # (batch, lstm_size)\r\n ht = torch.unsqueeze(ht, dim=1) # (batch, 1, lstm_size)\r\n ht = torch.transpose(ht,1,2) # (batch, lstm_size, 1)\r\n\r\n # (seq_len, batch, lstm_size)\r\n hs_bar = torch.transpose(hs_bar,0,1) # (batch, seq_len, lstm_size)\r\n\r\n #self.W(hs_bar) (batch, seq_len, lstm_size), ht (batch, lstm_size, 1)\r\n score = torch.bmm(self.W(hs_bar), ht) # (batch, seq_len, 1)\r\n return score\r\n \r\n def forward(self, hs_bar, ht):\r\n '''\r\n Args: \r\n hs_bar: all hidden states of the encoder\r\n of shape (seq_len, batch_size, lstm_size)\r\n ht: hidden state of decoder at current time t\r\n of shape (batch_size, lstm_size)\r\n Returns:\r\n context_vector: shape (batch_size, lstm_size)\r\n '''\r\n score = self.score(hs_bar, ht) # (batch, seq_len, 1)\r\n \r\n attn_w = F.softmax(score, dim=1) # (batch, seq_len, 1)\r\n hs_bar = torch.transpose(hs_bar,0,1) # (batch, seq_len, lstm_size)\r\n \r\n context_vector = attn_w * hs_bar # (batch, seq_len, lstm_size)\r\n context_vector = torch.sum(context_vector, dim=1) # (batch, lstm_size)\r\n return context_vector\r\n\r\n\r\nclass Decoder(nn.Module):\r\n def __init__(self, args):\r\n super(Decoder, self).__init__() \r\n self.unroll_len = 30 # For how many steps unroll the lstm\r\n\r\n self.lstm = nn.LSTMCell(args.lstm_size, args.lstm_size)\r\n self.bn = nn.BatchNorm1d(self.unroll_len * args.lstm_size)\r\n self.drop = nn.Dropout()\r\n self.fc = nn.Linear(self.unroll_len * args.lstm_size, args.target_size)\r\n\r\n def forward(self, attention, enc_h, hS):\r\n '''\r\n Arguments:\r\n attn: attention model\r\n enc_h: all hidden states of the encoder\r\n of shape (seq_len, batch_size, lstm_size)\r\n hS: hidden state of the encoder at last time step S\r\n of shape (1, batch_size, lstm_size) \r\n Returns:\r\n x: predictions by the decoder\r\n of shape (batch_size, out_size)\r\n '''\r\n ht = torch.squeeze(hS, dim=0) # (batch_size, lstm_size)\r\n ct = torch.zeros_like(ht) # (batch_size, lstm_size)\r\n dec_h = [] \r\n\r\n for _ in range(self.unroll_len):\r\n context_vector = attention(enc_h, ht)\r\n (ht, ct) = self.lstm(context_vector, (ht, ct))\r\n # ht (batch_size, lstm_size), ct (batch_size, lstm_size)\r\n dec_h.append(ht)\r\n \r\n x = torch.stack(dec_h, dim=1) # (batch_size, unroll_len, lstm_size)\r\n # (batch_size, unroll_len * lstm_size)\r\n x = x.reshape(x.shape[0], x.shape[1] * x.shape[2]) \r\n x = self.bn(x) # same shape\r\n x = self.drop(x) # same shape\r\n x = F.relu(x) # same shape\r\n x = self.fc(x) # (batch_size, target_size)\r\n return 
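# A shape walkthrough of the "general" (bilinear) attention score computed
# by GlobalAttn above; the sizes are made-up toy values, not the repo's.
import torch
import torch.nn as nn
import torch.nn.functional as F

seq_len, batch, d = 4, 2, 8
hs_bar = torch.randn(seq_len, batch, d)    # all encoder hidden states
ht = torch.randn(batch, d)                 # decoder hidden state at time t
W = nn.Linear(d, d)

hs = hs_bar.transpose(0, 1)                # (batch, seq_len, d)
score = torch.bmm(W(hs), ht.unsqueeze(2))  # (batch, seq_len, 1)
attn = F.softmax(score, dim=1)             # weights sum to 1 over seq_len
context = (attn * hs).sum(dim=1)           # (batch, d) context vector
assert context.shape == (batch, d)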
x","repo_name":"harshitsinghrao/lstm-attention-weather-prediction","sub_path":"neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"33337486385","text":"class logicGate:\n \"\"\"门电路的父类,仅实现标签\"\"\"\n\n def __init__(self, n):\n self.lable = n\n self.output = None\n\n def getLable(self):\n return self.lable\n\n def getOutput(self):\n self.output = self.performGateLogic() #先不实现performGateLogic(),\n #子类中决定如何实现\n return self.output\n\n\nclass BinaryGate(logicGate):\n \"\"\"两个输入端的门电路,作为与门和或门的父类\"\"\"\n\n def __init__(self, n):\n logicGate.__init__(self, n)\n\n self.pinA = None\n self.pinB = None\n\n def getPinA(self):\n if self.pinA == None:\n return int(input(\"Enter Pin A input for gate\" + self.getLable()+\n \"-->\"))\n else:\n return self.pinA.getFrom().getOutput()\n\n def getPinB(self):\n if self.pinB == None:\n return int(input(\"Enter Pin B input for gate\" + self.getLable()+\n \"-->\"))\n else:\n return self.pinB.getFrom().getOutput()\n\n def setNextPin(self, source):\n '''选择一条pin与source连接\n\n source -- a gate, the end of\n '''\n if self.pinA == None:\n self.pinA = source\n else:\n if self.pinB == None:\n self.pinB = source\n else:\n raise RuntimeError(\"Error: NO EMPTY PINS\")\n\n\nclass UnaryGate(logicGate):\n \"\"\"单个输入的门电路,作为非门的父类\"\"\"\n\n def __init__(self, n):\n logicGate.__init__(self, n)\n\n self.pin = None\n\n def getPin(self):\n if self.pin == None:\n return int(input(\"Enter Pin input for gate\" + self.getLable()+\n \"-->\"))\n else:\n return self.pin.getFrom().getOutput()\n\n def setNextPin(self, source):\n '''选择一条pin与source连接\n\n source -- a gate, the end of\n '''\n if self.pin == None:\n self.pin = source\n else:\n raise RuntimeError(\"Error: NO EMPTY PINS\")\n\n\nclass AndGate(BinaryGate):\n \"\"\"与门,做与运算,return 0/1.\"\"\"\n\n def __init__(self, n):\n BinaryGate.__init__(self, n)\n\n def performGateLogic(self):\n a = self.getPinA()\n b = self.getPinB()\n if a==1 and b==1:\n return 1\n else:\n return 0\n\n\nclass OrGate(BinaryGate):\n \"\"\"或门,做或运算,return 0/1.\"\"\"\n\n def __init__(self, n):\n BinaryGate.__init__(self, n)\n\n def performGateLogic(self):\n a = self.getPinA()\n b = self.getPinB()\n if a==1 or b==1:\n return 1\n else:\n return 0\n\n\nclass NoGate(UnaryGate):\n \"\"\"非门,非运算,return 0/1.\"\"\"\n\n def __init__(self, n):\n UnaryGate.__init__(self, n)\n\n def performGateLogic(self):\n a = self.getPin()\n if a==1:\n return 0\n else:\n return 1\n\n\nclass Connector:\n \"\"\"门电路的连接器,将前门的输出端连接到后门的输入端\"\"\"\n\n def __init__(self, fgate, tgate):\n self.fromgate = fgate\n self.togate = tgate\n\n tgate.setNextPin(self) #门类实现\n\n def getFrom(self):\n return self.fromgate\n\n def getTo(self):\n return self.togate\n\n\n\n\nif __name__ == \"__main__\":\n g1 = AndGate(\"G1\")\n g2 = AndGate(\"G2\")\n g3 = OrGate(\"G3\")\n g4 = NoGate(\"G4\")\n c1 = Connector(g1, g3)\n c2 = Connector(g2, g3)\n c3 = Connector(g3, g4)\n print(g4.getOutput())\n","repo_name":"gorkr/learn-code","sub_path":"python-data-structure/Gate.py","file_name":"Gate.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22623383100","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter\nfrom opentelemetry.sdk.resources import 
SERVICE_NAME, Resource\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (BatchSpanProcessor)\n\nimport random\nimport time\n\n\nreply = '{\"meta\":{\"count\":1,\"limit\":1,\"offset\":0},' \\\n '\"links\":{\"first\":\"/api/rbac/v1/access/?application=policies&limit=1&offset=0\",' \\\n '\"next\":null,\"previous\":null,' \\\n '\"last\":\"/api/rbac/v1/access/?application=policies&limit=1&offset=0\"},' \\\n '\"data\":[{\"resourceDefinitions\":[],\"permission\":\"policies:*:*\"}]}'\n\n\nclass MyRequestHandler(BaseHTTPRequestHandler):\n \"\"\"HttpServer request handler for illustration purposes\"\"\"\n\n def do_GET(self):\n with tracer.start_as_current_span(\"do-get-span\") as span:\n self.send_response(200)\n self.send_header(\"Content-Type\", \"application/json\")\n span.set_attribute(\"type\", \"json\")\n # Simulate random slowness\n if random.randint(0, 10) > 7:\n time.sleep(1.5)\n span.set_attribute(\"delayed\", \"true\")\n self.end_headers()\n self.wfile.write(bytes(reply, \"utf-8\"))\n\n\nif __name__ == \"__main__\":\n\n # Set up exporting\n resource = Resource(attributes={\n SERVICE_NAME: \"fake-rbac\"\n })\n # Configure the provider with the service name\n provider = TracerProvider(resource=resource)\n # For the default grpc endpoint on port 4317, setting endpoint explicitly is not needed.\n processor = BatchSpanProcessor(OTLPSpanExporter(endpoint=\"http://localhost:4317\"))\n provider.add_span_processor(processor)\n trace.set_tracer_provider(provider)\n\n tracer = trace.get_tracer(__name__)\n\n server = HTTPServer((\"localhost\", 8787), MyRequestHandler)\n print(\"Server started on port 8787\")\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n pass\n\n server.server_close()\n print(\"Server stopped.\")\n","repo_name":"pilhuhn/fake-rbac","sub_path":"server-minimal.py","file_name":"server-minimal.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35679550894","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 29 13:33:04 2019\n\n@author: Daniel\n\"\"\"\nfrom cutplan._coordinates import GetLogCoords\nfrom cutplan._logplotter import LogPlotter\n\nfrom traits.api import HasTraits, Instance, Button, on_trait_change\nfrom traitsui.api import View, Item, Group\nfrom mayavi.core.ui.api import MlabSceneModel\nfrom tvtk.pyface.api import Scene\nfrom tvtk.pyface.scene_editor import SceneEditor\n\n\nclass LogPlotGUI(HasTraits):\n\n scene = Instance(MlabSceneModel, ())\n\n button1 = Button('Reset')\n\n def __init__(self, cpSchedule, logPath, id=0):\n HasTraits.__init__(self)\n self.id = id\n\n self.logPath = logPath\n self.CPSched = cpSchedule\n self.AveR = cpSchedule.AveR\n\n self.showLog = True\n self.showBoards = True\n self.showFL = False\n self.show3m = False\n self.showPerc = False\n self.showFront = False\n self.showOpenFace = False\n\n if self.CPSched.Cutplans.shape[0] > 0:\n c = self.CPSched.Cutplans.iloc[self.id]\n log = self.CPSched.AverageLog(self.id)\n self.scene.mlab.clf()\n self.coords = GetLogCoords(log, c)\n self.myOff = self.coords.Offset\n self.plotter = LogPlotter(self.scene, c, self.coords)\n # self.plotter.PlotLog()\n else:\n c = None\n self.plotter = None\n\n @on_trait_change('scene.activated')\n def scene_load(self):\n self.plotter.FrontView()\n\n def drawFunction(self):\n # Notice how each mlab call points explicitly to the figure it\n # applies to.\n\n # CURRENTLY CAN ONLY OPEN FIRST CUTPLAN IN SCHEDULE\n c = 
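# The same tracer wiring as server-minimal.py above, but exporting spans to
# stdout instead of an OTLP collector -- convenient when nothing is
# listening on port 4317. ConsoleSpanExporter and SimpleSpanProcessor are
# standard opentelemetry-sdk classes; the service name is a placeholder.
from opentelemetry import trace
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

provider = TracerProvider(resource=Resource(attributes={SERVICE_NAME: "demo"}))
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("demo-span") as span:
    span.set_attribute("type", "example")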
self.CPSched.Cutplans.iloc[self.id]\n log = self.CPSched.AverageLog(self.id)\n self.plotter.FrontView()\n self.coords = GetLogCoords(log, c, False)\n self.myOff = self.coords.Offset\n self.plotter = LogPlotter(\n self.scene, c, self.coords,\n view=self.showFront, unit=self.showPerc)\n self.plotter.PlotLog(0.9*self.showLog)\n if self.showBoards and self.CPSched.completed[self.id]:\n self.plotter.ShowBoards(self.AveR[self.id])\n # uC1 = (self.CPSched.MinW[id][0], self.CPSched.MinH[id][0])\n if self.showFL and self.CPSched.completed[self.id]:\n uC1 = self.CPSched.GetMinWLog(self.id, 0)\n self.plotter.ShowOval(uC1, self.coords)\n # uC2 = (self.CPSched.MinW[id][1], self.CPSched.MinH[id][1])\n if self.show3m and self.CPSched.completed[self.id]:\n uC2 = self.CPSched.GetMinWLog(self.id, 1)\n self.plotter.ShowOval(\n uC2, self.coords, cbL=3, colour=(0.75, 0.1, 0.1))\n if self.showOpenFace:\n self.plotter.ShowOpenFace(self.coords)\n\n def redraw_scene(self):\n self.scene.mlab.clf()\n self.drawFunction()\n self.plotter.FrontView()\n # self.plotter.FrontView()\n\n def ChangeView(self):\n self.view = self.scene.mlab.view()\n self.plotter.FrontView()\n\n def update_scene(self):\n v = self.scene.mlab.view()\n roll = self.scene.mlab.roll()\n zoom = self.scene.camera.parallel_scale\n\n self.scene.mlab.clf()\n self.drawFunction()\n self.plotter.FrontView()\n\n self.scene.mlab.view(*v)\n self.scene.mlab.roll(roll)\n self.scene.camera.parallel_scale = zoom\n\n # The layout of the dialog created\n view = View(Group(\n Item(\n 'scene', editor=SceneEditor(scene_class=Scene),\n resizable=False), show_labels=False))\n","repo_name":"DanKulasingham/PublicCAGUI","sub_path":"cutplan/_gui.py","file_name":"_gui.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38232081199","text":"from click import Option, UsageError\nfrom click.core import Command\n\n\nclass MutuallyExclusiveOption(Option):\n def __init__(self, *args, **kwargs):\n self.mutually_exclusive = set(kwargs.pop(\"mutually_exclusive\", []))\n help = kwargs.get(\"help\", \"\")\n if self.mutually_exclusive:\n ex_str = \", \".join(self.mutually_exclusive)\n kwargs[\"help\"] = help + (\n \" NOTE: This argument is mutually exclusive with \"\n \" arguments: [\" + ex_str + \"].\"\n )\n super(MutuallyExclusiveOption, self).__init__(*args, **kwargs)\n\n def handle_parse_result(self, ctx, opts, args):\n if self.mutually_exclusive.intersection(opts) and self.name in opts:\n raise UsageError(\n \"Illegal usage: `{}` is mutually exclusive with \"\n \"arguments `{}`.\".format(self.name, \", \".join(self.mutually_exclusive))\n )\n\n return super(MutuallyExclusiveOption, self).handle_parse_result(ctx, opts, args)\n\n\nclass DefaultCommand(Command):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.params.insert(\n 0,\n Option(\n (\"--verbosity\", \"-v\"),\n help=\"Accumulative verbosity flags; -v: INFO, -vv: DEBUG, default: CRITICAL\",\n count=True,\n default=0,\n ),\n )\n","repo_name":"gis-ops/osrm-tester","sub_path":"osrm_tester/utils/click.py","file_name":"click.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34876706946","text":"import argparse\nimport json\nimport logging\nimport os\nimport sys\n\nimport sumstats_service.resources.payload as pl\nimport sumstats_service.resources.study_service as st\nfrom sumstats_service import 
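# How MutuallyExclusiveOption above is intended to be used: pass it as
# `cls` and list the conflicting parameter names. The command and option
# names here are invented for illustration.
import click

@click.command()
@click.option("--fast", is_flag=True, cls=MutuallyExclusiveOption,
              mutually_exclusive=["thorough"])
@click.option("--thorough", is_flag=True, cls=MutuallyExclusiveOption,
              mutually_exclusive=["fast"])
def scan(fast, thorough):
    click.echo(f"fast={fast} thorough={thorough}")

# Invoking `scan --fast --thorough` aborts with the UsageError raised in
# handle_parse_result above.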
config\n\nlogging.basicConfig(level=logging.DEBUG, format=\"(%(levelname)s): %(message)s\")\nlogger = logging.getLogger(__name__)\n\n\ndef parse_payload(content, studyid, callback_id):\n payload = pl.Payload(callback_id=callback_id, payload=content)\n payload.create_study_obj_list()\n payload.set_callback_id_for_studies()\n study_meta = [s for s in payload.study_obj_list if s.study_id == studyid]\n if len(study_meta) != 1:\n print(\"could not find only one matching study id in payload\")\n return False\n return (\n study_meta[0].file_path,\n study_meta[0].md5,\n study_meta[0].assembly,\n study_meta[0].readme,\n study_meta[0].entryUUID,\n )\n\n\ndef validate_study(\n callback_id,\n study_id,\n filepath,\n md5,\n assembly,\n readme,\n entryUUID,\n out=None,\n minrows=None,\n forcevalid=False,\n zero_p_values=False,\n):\n logger.info(\"validating study data\")\n study = st.Study(\n callback_id=callback_id,\n study_id=study_id,\n file_path=filepath,\n md5=md5,\n assembly=assembly,\n readme=readme,\n entryUUID=entryUUID,\n )\n study.validate_study(\n minrows=minrows, forcevalid=forcevalid, zero_p_values=zero_p_values\n )\n write_result(study, out)\n if study.data_valid != 1:\n sys.exit(1)\n else:\n sys.exit(0)\n\n\ndef copy_file_for_validation(\n callback_id, study_id, filepath, entryUUID, md5, assembly, out=None\n):\n study = st.Study(\n callback_id=callback_id,\n study_id=study_id,\n file_path=filepath,\n entryUUID=entryUUID,\n md5=md5,\n assembly=assembly,\n )\n study.retrieve_study_file()\n if study.retrieved != 1:\n write_result(study, out)\n sys.exit(1)\n else:\n sys.exit(0)\n\n\ndef write_result(study, out):\n result = {\n \"id\": study.study_id,\n \"retrieved\": study.retrieved,\n \"dataValid\": study.data_valid,\n \"errorCode\": study.error_code,\n }\n logger.info(\"result obj: {}\".format(json.dumps(result)))\n with open(out, \"w\") as f:\n f.write(json.dumps(result))\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")\n\n\ndef is_path(string):\n try:\n path_object = os.path.isfile(string)\n return path_object\n except TypeError as e:\n return False\n\n\ndef main():\n argparser = argparse.ArgumentParser()\n argparser.add_argument(\"-cid\", help=\"The callback ID\", required=True)\n argparser.add_argument(\"-id\", help=\"The ID of the study\", required=True)\n argparser.add_argument(\"-payload\", help=\"JSON payload (input)\", required=True)\n argparser.add_argument(\n \"-out\",\n help=\"JSON output file (e.g. SOME_ID.json)\",\n required=False,\n default=\"validation.json\",\n )\n argparser.add_argument(\n \"-storepath\",\n help=\"The storage path you want the data written to e.g. /path/to/data\",\n required=False,\n default=config.STORAGE_PATH,\n )\n argparser.add_argument(\n \"-validated_path\",\n help=\"The path you want the validated files written to e.g. /path/to/data\",\n required=False,\n default=config.VALIDATED_PATH,\n )\n argparser.add_argument(\n \"-depo_path\",\n help=\"The path you want the submitted files written to e.g. 
/path/to/data\",\n required=False,\n default=config.DEPO_PATH,\n )\n argparser.add_argument(\n \"-ftpserver\",\n help=\"The FTP server name where your files are\",\n required=False,\n default=config.FTP_SERVER,\n )\n argparser.add_argument(\n \"-ftpuser\", help=\"The FTP username\", required=False, default=config.FTP_USERNAME\n )\n argparser.add_argument(\n \"-ftppass\", help=\"The FTP password\", required=False, default=config.FTP_PASSWORD\n )\n argparser.add_argument(\n \"-minrows\",\n help=\"The minimum required rows in a sumsats file for validation to pass\",\n required=False,\n default=None,\n )\n argparser.add_argument(\n \"-zero_p\",\n help=\"Setting True will allow p_values to be zero\",\n type=str2bool,\n nargs=\"?\",\n const=True,\n required=False,\n default=False,\n )\n argparser.add_argument(\n \"-forcevalid\",\n help=\"Setting to True will force the validation to be true\",\n type=str2bool,\n nargs=\"?\",\n const=True,\n required=False,\n default=False,\n )\n argparser.add_argument(\n \"--copy_only\",\n help=\"Setting to True will only copy the file to the validation path\",\n type=str2bool,\n nargs=\"?\",\n const=True,\n required=False,\n default=False,\n )\n\n args = argparser.parse_args()\n if args.storepath:\n config.STORAGE_PATH = args.storepath\n if args.validated_path:\n config.VALIDATED_PATH = args.validated_path\n if args.depo_path:\n config.DEPO_PATH = args.depo_path\n if args.ftpserver:\n config.FTP_SERVER = args.ftpserver\n if args.ftpuser:\n config.FTP_USERNAME = args.ftpuser\n if args.ftppass:\n config.FTP_PASSWORD = args.ftppass\n\n if is_path(args.payload):\n with open(args.payload, \"r\") as f:\n content = json.load(f)\n else:\n # if content is given as json string\n content = json.loads(args.payload)\n\n filepath, md5, assembly, readme, entryUUID = parse_payload(\n content, args.id, args.cid\n )\n out = os.path.join(args.validated_path, args.cid, args.out)\n logger.info(f\"validation out json: {out}\")\n if args.copy_only:\n copy_file_for_validation(\n callback_id=args.cid,\n study_id=args.id,\n filepath=filepath,\n entryUUID=entryUUID,\n md5=md5,\n assembly=assembly,\n out=out,\n )\n else:\n minrows = (\n None if len(args.minrows) == 0 or args.minrows == \"None\" else args.minrows\n )\n validate_study(\n args.cid,\n args.id,\n filepath,\n md5,\n assembly,\n readme,\n entryUUID,\n out,\n minrows,\n args.forcevalid,\n args.zero_p,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"EBISPOT/gwas-sumstats-service","sub_path":"sumstats_service/resources/validate_study.py","file_name":"validate_study.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"13690154092","text":"\nfrom south.db import db\nfrom django.db import models\nfrom photos.models import *\n\nclass Migration:\n \n def forwards(self, orm):\n \n # Adding model 'Photo'\n db.create_table('photos_photo', (\n ('id', orm['photos.Photo:id']),\n ('photo', orm['photos.Photo:photo']),\n ('caption', orm['photos.Photo:caption']),\n ('order', orm['photos.Photo:order']),\n ))\n db.send_create_signal('photos', ['Photo'])\n \n \n \n def backwards(self, orm):\n \n # Deleting model 'Photo'\n db.delete_table('photos_photo')\n \n \n \n models = {\n 'photos.photo': {\n 'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'blank': 'True'}),\n 
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})\n }\n }\n \n complete_apps = ['photos']\n","repo_name":"philippbosch/aotp-w32","sub_path":"photos/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72702454914","text":"# UPLOAD & Manage Objects in IBM CLOUD STORAGE via http\nimport base64\nimport re\nfrom isac_simo.settings import IBM_BUCKET\nfrom isac_simo import settings\nimport requests\n\nimport ibm_boto3\nfrom ibm_botocore.client import Config, ClientError\n\n# Constants for IBM COS values\nCOS_ENDPOINT = settings.IBM_BUCKET_ENDPOINT\nCOS_API_KEY_ID = settings.IBM_BUCKET_TOKEN\nCOS_INSTANCE_CRN = settings.IBM_BUCKET_CRN\n\n# Create resource\ncos = ibm_boto3.resource(\"s3\",\n ibm_api_key_id=COS_API_KEY_ID,\n ibm_service_instance_id=COS_INSTANCE_CRN,\n config=Config(signature_version=\"oauth\"),\n endpoint_url=COS_ENDPOINT\n)\n\n# client = cos.meta.client\n\n# UPLOAD\ndef upload_object(object_key, path, opened=False):\n try:\n # set 5 MB chunks\n part_size = 1024 * 1024 * 5\n file_threshold = 1024 * 1024 * 15\n transfer_config = ibm_boto3.s3.transfer.TransferConfig(\n multipart_threshold=file_threshold,\n multipart_chunksize=part_size\n )\n\n if opened:\n cos.Object(settings.IBM_BUCKET, object_key).upload_fileobj(\n Fileobj=path,\n Config=transfer_config\n )\n path.close()\n else:\n with open(path, \"rb\") as file_data:\n cos.Object(settings.IBM_BUCKET, object_key).upload_fileobj(\n Fileobj=file_data,\n Config=transfer_config\n )\n return True\n except Exception as e:\n print(e)\n return False\n\n# UPLOAD RAW OBJECT\ndef upload_raw_object(object_key, raw_data):\n try:\n cos.Object(settings.IBM_BUCKET, object_key).put(\n Body=raw_data\n )\n return True\n except Exception as e:\n print(e)\n return False\n\n# DELETE\ndef delete_object(object_key):\n try:\n cos.Object(settings.IBM_BUCKET, object_key).delete()\n return True\n except Exception as e:\n print(e)\n return False\n\n# MOVE\ndef move_object(new_object_key, old_object_key):\n try:\n cos.Object(settings.IBM_BUCKET, new_object_key).copy_from(CopySource=(settings.IBM_BUCKET+'/'+old_object_key))\n cos.Object(settings.IBM_BUCKET, old_object_key).delete()\n return True\n except Exception as e:\n print(e)\n return False\n\n# GET OBJECT BINARY\ndef get_object(object_key):\n try:\n file = cos.Object(settings.IBM_BUCKET, object_key).get()\n # file[\"Body\"].read() # ContentType, ETag, ContentLength, LastModified\n return file\n except Exception as e:\n print(e)\n return False\n\n# Generate list of all files/images in a chosen COS bucket folder (directory named as object_type)\ndef get_object_list(object_type, limit=5000):\n try:\n i = 0\n image_list = [];\n if settings.IBM_BUCKET_PUBLIC_ENDPOINT:\n bucket = cos.Bucket(settings.IBM_BUCKET)\n for idx,obj in enumerate(bucket.objects.filter(Prefix = str(object_type)+\"/\")):\n url = (settings.IBM_BUCKET_PUBLIC_ENDPOINT + obj.key) if settings.IBM_BUCKET_PUBLIC_ENDPOINT.endswith(\"/\") else (settings.IBM_BUCKET_PUBLIC_ENDPOINT + \"/\" + obj.key)\n image_list.append({\n \"key\": re.sub(r\".*?/([a-z0-9]+)\\.[a-z0-9]+\", r\"\\1\", obj.key), # Convert folder link key to unique file name only\n \"url\": url\n })\n\n # LIMIT MAX IMAGE LIST\n if idx + 1 >= limit:\n break\n\n # url = client.generate_presigned_url('get_object', ExpiresIn=0, Params={'Bucket': settings.IBM_BUCKET, 'Key': obj.key})\n\n return image_list\n 
except Exception as e:\n print(e)\n return False","repo_name":"ISAC-SIMO/ISAC-SIMO-Django-Backend","sub_path":"crowdsource/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"41786146858","text":"import ila\nimport os\nfrom math import log\nfrom fixpoint import fixpointTemp, fixpoint, fpconst, fpconvert\n\n# Parameters & Defines\n\nLEARN_RATE = 0.5\nFP01_D = fixpointTemp(64,1)\nFP05_D = fixpointTemp(32,3)\nFPsum3 = fixpointTemp(64,64)\nFPpow = fixpointTemp(32,32)\nFPsum = fixpointTemp(16,15,True)\nFPedge = fixpointTemp(8,7,True)\nFPconst= fixpointTemp(16,4,True)\nFPu16 = fixpointTemp(16,16)\n\n\ndef buildILA():\n #---------------------------\n # define universal constant\n #---------------------------\n K = 5\n NUM_MOVIE_MAX = 100\n NUM_HIDDEN_MAX = 100\n NUM_VISIBLE_MAX= NUM_MOVIE_MAX * K\n DATAMEM_ADDR_WIDTH = int(log(NUM_VISIBLE_MAX+1)/log(2)) + 1 # 9 # it is definitely not dividable, but need to check\n HIDDEN_UNIT_WIDTH = int( log(NUM_HIDDEN_MAX+1)/log(2)) + 1 # 7 # it is definitely not dividable, but need to check\n VISIBLE_UNIT_WIDTH = int(log(NUM_VISIBLE_MAX+1)/log(2)) + 1 # 9\n EDGEMEM_ADDR_WIDTH = int(log( (NUM_VISIBLE_MAX+1)*(NUM_HIDDEN_MAX+1) ) / log(2)) + 1 # 16\n POS_ADDR_WIDTH = EDGEMEM_ADDR_WIDTH\n NEG_ADDR_WIDTH = EDGEMEM_ADDR_WIDTH\n PREDICT_RESULT_WIDTH = int(log(NUM_MOVIE_MAX)/log(2))+1 # 7\n KWIDTH = int( log(K)/log(2) ) + 1 # 3\n\n\n #---------------------------\n # Model\n #---------------------------\n\n rbm = ila.Abstraction('RBM')\n\n conf_done = rbm.inp('conf_done' , 1)\n conf_num_hidden = rbm.inp('conf_num_hidden' , 32)\n conf_num_visible = rbm.inp('conf_num_visible' , 32)\n conf_num_users = rbm.inp('conf_num_users' , 32)\n conf_num_loops = rbm.inp('conf_num_loops' , 32)\n conf_num_testusers = rbm.inp('conf_num_testusers' , 32)\n conf_num_movies = rbm.inp('conf_num_movies' , 32)\n\n rst = rbm.inp('rst', 1)\n\n init_done = rbm.reg('init_done' , 1)\n done = rbm.reg('done' , 1)\n num_hidden = rbm.reg('num_hidden' , 16)\n num_visible = rbm.reg('num_visible' , 16)\n num_users = rbm.reg('num_users' , 16)\n num_loops = rbm.reg('num_loops' , 16)\n num_testusers = rbm.reg('num_testusers' , 16)\n num_movies = rbm.reg('num_movies' , 16)\n\n # DMA output\n rd_index = rbm.reg('rd_index', 32)\n rd_length = rbm.reg('rd_length', 32)\n rd_request= rbm.reg('rd_request', 1)\n rd_grant = rbm.inp('rd_grant', 1)\n data_in = rbm.inp('data_in', 32)\n # rd_cnt = rbm.reg('rd_cnt', 16) # i ureg #585\n\n # DMA input\n wr_grant = rbm.inp('wr_grant', 1)\n wr_request = rbm.reg('wr_request', 1)\n wr_index = rbm.reg('wr_index', 32)\n wr_length = rbm.reg('wr_length', 32)\n data_out = rbm.reg('data_out', 32)\n # wr_cnt = rbm.reg('wr_cnt', 16) : u reg\n\n data = rbm.mem('data', DATAMEM_ADDR_WIDTH , 8 )\n rbm.mem('predict_result', PREDICT_RESULT_WIDTH, 8 )\n\n\n #-------------------------------------\n # Decoding Expressions\n #-------------------------------------\n rstInst = rst == 1\n confDoneInst = (rst == 0) & (init_done == 0) & (conf_done == 1)\n rdGrantInst = (rd_request == 1) & (rd_grant == 1)\n wrGrantInst = (wr_request == 1) & (wr_grant == 1)\n decodeExpr = [rstInst, confDoneInst, rdGrantInst, wrGrantInst]\n rbm.decode_exprs = decodeExpr\n\n #-------------------------------------\n # AUX Functions\n #-------------------------------------\n def const(v,w):\n return rbm.const(v,w)\n b0 = const(0,1)\n b1 = const(1,1)\n h0_8 = const(0, 8)\n h1_8 = const(1, 
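# A usage sketch for the COS helpers defined above (upload_object,
# move_object, get_object, delete_object); the object keys and local path
# are hypothetical, and settings.IBM_BUCKET must name a real bucket for
# this to run.
if upload_object("checks/wall-01.jpg", "/tmp/wall-01.jpg"):
    move_object("verified/wall-01.jpg", "checks/wall-01.jpg")
    obj = get_object("verified/wall-01.jpg")
    if obj:
        print(obj["ContentType"], obj["ContentLength"])
    delete_object("verified/wall-01.jpg")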
8)\n h0_4 = const(0, 4)\n h1_4 = const(1, 4)\n h2_4 = const(2, 4)\n h3_4 = const(3, 4)\n h4_4 = const(4, 4)\n h0_16 = const(0,16)\n h1_16 = const(1,16)\n h0_32 = const(0,32)\n h0_64 = const(0,64)\n\n #-------------------------------------\n # Init conditions\n #-------------------------------------\n\n rbm.set_init('init_done' , b0 )\n rbm.set_init('done' , b0 )\n rbm.set_init('num_hidden' , h0_16)\n rbm.set_init('num_visible' , h0_16)\n rbm.set_init('num_users' , h0_16)\n rbm.set_init('num_loops' , h0_16)\n rbm.set_init('num_testusers', h0_16)\n rbm.set_init('num_movies' , h0_16)\n\n #-------------------------------------\n # Config\n #-------------------------------------\n\n # this means, once configured, unless reset, it cannot be reconfigured\n init_done_nxt = ila.ite(rstInst, b0, ila.ite(confDoneInst, b1 , init_done) )\n num_hidden_nxt = ila.ite(rstInst, h0_16, ila.ite(confDoneInst, conf_num_hidden [15:0] , num_hidden ) )\n num_visible_nxt = ila.ite(rstInst, h0_16, ila.ite(confDoneInst, conf_num_visible [15:0] , num_visible ) )\n num_users_nxt = ila.ite(rstInst, h0_16, ila.ite(confDoneInst, conf_num_users [15:0] , num_users ) )\n num_loops_nxt = ila.ite(rstInst, h0_16, ila.ite(confDoneInst, conf_num_loops [15:0] , num_loops ) )\n num_testusers_nxt = ila.ite(rstInst, h0_16, ila.ite(confDoneInst, conf_num_testusers[15:0] , num_testusers ) )\n num_movies_nxt = ila.ite(rstInst, h0_16, ila.ite(confDoneInst, conf_num_movies [15:0] , num_movies ) )\n\n rbm.set_next('init_done', init_done_nxt)\n rbm.set_next('num_hidden', num_hidden_nxt)\n rbm.set_next('num_visible', num_visible_nxt)\n rbm.set_next('num_users', num_users_nxt)\n rbm.set_next('num_loops', num_loops_nxt)\n rbm.set_next('num_testusers', num_testusers_nxt)\n rbm.set_next('num_movies', num_movies_nxt)\n\n # INST-level w/r complete\n rbm_rd_complete = rbm.reg('rd_complete',1)\n rbm_wr_complete = rbm.reg('wr_complete',1)\n rbm.set_init('rd_complete', b0); rbm.set_init('wr_complete', b0)\n\n #------------------------------------\n # Compute UABS\n #------------------------------------\n\n uabs = rbm.add_microabstraction('compute', (init_done == 1) & (done == 0) )\n index = uabs.reg('index' , 16)\n loop_count = uabs.reg('loop_count', 16)\n pc = uabs.reg('upc', 4)\n edges_mem = uabs.mem('edges', EDGEMEM_ADDR_WIDTH ,8)\n\n nlp = uabs.getreg('num_loops')\n nm = ila.zero_extend(uabs.getreg('num_movies'), 32)\n nu = uabs.getreg('num_users')\n ntu = uabs.getreg('num_testusers')\n out_rd_request = uabs.getreg('rd_request')\n out_rd_complete = uabs.getreg('rd_complete')\n out_rd_length = uabs.getreg('rd_length')\n out_rd_index = uabs.getreg('rd_index')\n\n train_input_done = uabs.reg('train_input_done', 1)\n predict_input_done = uabs.reg('predict_input_done', 1)\n\n uabs.set_init('upc', const(0,4) )\n uabs.set_init('index', h0_16 )\n uabs.set_init('loop_count', h0_16 )\n uabs.set_init('train_input_done', b0)\n uabs.set_init('predict_input_done', b0)\n uabs.set_init('rd_complete', b0)\n\n ### computation micro_instructions\n\n StartRead = (pc == 0) \n WaitReadComplete = (pc == 1) & (out_rd_complete == 0)\n DecideTrainOrPredict = (pc == 1) & (out_rd_complete == 1)\n StartTrain = (pc == 2) & (train_input_done == 1) \n StartPredict = (pc == 2) & (predict_input_done == 1)\n Finish = (pc == 3)\n\n StartReadState = const(0,4); WaitReadCompleteState = const(1,4); StartTrainOrPredict = const(2,4); FinishState = const(3,4)\n \n\n decodeExpr = [StartRead, WaitReadComplete, DecideTrainOrPredict, StartTrain, StartPredict, Finish]\n uabs.decode_exprs = 
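# The nested ila.ite(...) chains above are priority multiplexers: the first
# true condition wins. A plain-Python analogue of the init_done next-state
# logic (a sketch of the semantics, not the ILA library itself):
def ite(cond, then_val, else_val):
    return then_val if cond else else_val

def init_done_next(rst, conf_done, init_done):
    # mirrors: ite(rstInst, 0, ite(confDoneInst, 1, init_done))
    return ite(rst == 1, 0,
               ite(rst == 0 and init_done == 0 and conf_done == 1, 1, init_done))

assert init_done_next(rst=1, conf_done=1, init_done=1) == 0  # reset dominates
assert init_done_next(rst=0, conf_done=1, init_done=0) == 1  # configure once
assert init_done_next(rst=0, conf_done=1, init_done=1) == 1  # stays latched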
decodeExpr\n\n out_rd_request_nxt = ila.ite(StartRead, b1, out_rd_request )\n out_rd_length_nxt = ila.ite(StartRead, 5*nm, out_rd_length)\n out_rd_index_nxt = ila.ite(StartRead, ila.zero_extend(index, 32), out_rd_index )\n out_rd_complete_nxt = ila.ite(StartRead, b0, ila.ite(DecideTrainOrPredict, b0, out_rd_complete))\n\n train_input_done_nxt = ila.ite(DecideTrainOrPredict, ila.ite(loop_count < nlp, b1, b0) , train_input_done )\n predict_input_done_nxt = ila.ite(DecideTrainOrPredict, ila.ite(loop_count== nlp, b1, b0) , predict_input_done )\n\n pc_nxt = ila.ite(StartRead, WaitReadCompleteState, \n ila.ite(WaitReadComplete, pc,\n ila.ite(DecideTrainOrPredict, StartTrainOrPredict,\n ila.ite(StartTrain, StartTrainOrPredict, # StartReadState, # actually should be updated by u2inst \n ila.ite(StartPredict, StartTrainOrPredict, # StartReadState, # actually should be updated by u2inst \n ila.ite(Finish, FinishState ,\n pc # should never happen!\n ))))))\n\n # should be updated by u2inst\n index_nxt_dummy = ila.ite(StartTrain | StartPredict, \n ila.ite( (index == nu - 1) & (loop_count != nlp), h0_16,\n ila.ite( (index == ntu - 1) & (loop_count == nlp), index, # And it is not correct\n index + 1 ) ), \n index )\n # not in use\n loop_count_nxt_dummy = ila.ite(StartTrain | StartPredict,\n ila.ite( (index == nu - 1) & (loop_count != nlp) , loop_count + 1, loop_count), loop_count )\n\n uabs.set_next('rd_request', out_rd_request_nxt)\n uabs.set_next('rd_length' , out_rd_length_nxt )\n uabs.set_next('rd_index' , out_rd_index_nxt )\n uabs.set_next('rd_complete', out_rd_complete_nxt )\n uabs.set_next('train_input_done' , train_input_done_nxt )\n uabs.set_next('predict_input_done', predict_input_done_nxt)\n uabs.set_next('upc', pc_nxt)\n uabs.set_next('index', index) ; uabs.set_next('loop_count', loop_count)\n # this has to be updated by micro_inst\n # read_request is turned off by loaduabs\n # predict_input_done, train_input_done is turned off by uabs_train/predict\n\n #------------------------------------\n # Load UABS\n #------------------------------------\n # RBM interface\n # high-level interface\n rd_granted = rbm.reg('rd_granted', 1) # this is only used for maintaining the validity of load UABS, no other should use\n data_nxt = ila.ite( rdGrantInst , ila.store( data, const(0, DATAMEM_ADDR_WIDTH) , data_in[7:0] ) , data ) # data # \n rd_granted_nxt = ila.ite( rdGrantInst , b1, rd_granted )\n rbm.set_next('rd_granted', rd_granted_nxt)\n rbm.set_next('data', data_nxt)\n\n # one change is to move these into lower abstraction\n DMAload = rbm.add_microabstraction('DMAload', (rd_granted == 1) ) # this is sub-instruction\n w_cnt = DMAload.reg('i', 16)\n\n DMAload.decode_exprs = [ (rd_granted == 1) ] # XXX BY: is this the decode?\n\n dma_rd_request = DMAload.getreg('rd_request')\n dma_rd_length = DMAload.getreg('rd_length')\n dma_rd_index = DMAload.getreg('rd_index')\n\n\n state_update_data = DMAload.getmem('data')\n state_update_rd_request = dma_rd_request\n self_update_rd_granted = DMAload.getreg('rd_granted')\n\n more_read_in = w_cnt < dma_rd_length[15:0]\n last_cycle = w_cnt== dma_rd_length[15:0]\n DMAload.set_init('i', h1_16 ) # h0_16 )\n DMAload.set_next('i', ila.ite(more_read_in, w_cnt + 1, w_cnt) )\n DMAload.set_next('rd_request', b0 ) # reset to 0 immediately\n DMAload.set_next('rd_granted', ila.ite(more_read_in, self_update_rd_granted , b0) )\n DMAload.set_next('rd_complete', ila.ite(more_read_in, b0, b1))\n DMAload.set_next('data', ila.ite(more_read_in, ila.store(state_update_data, 
w_cnt[DATAMEM_ADDR_WIDTH-1:0] , data_in[7:0] ), \n ila.ite(last_cycle, ila.store(state_update_data, dma_rd_length[DATAMEM_ADDR_WIDTH-1:0], h1_8 ),\n state_update_data ) ) )\n\n #------------------------------------\n # Train UUABS\n #------------------------------------\n\n TrainUabs = uabs.add_microabstraction('train', train_input_done == 1)\n\n sigmoid_func = TrainUabs.fun('sigmoid', 64, [16]) # DATA_sum_, 01_D\n rand_func = TrainUabs.fun('rand', 64, []) # generate random number\n to_int_exp = TrainUabs.fun('to_int_exp', 32 , [16] ) # \n divide_func = TrainUabs.fun('divide', 64 , [32, 64] ) # dp:32_32 / sum_of_pow2 64_64 = 64_1\n\n hidden_unit = TrainUabs.mem('hidden_unit', HIDDEN_UNIT_WIDTH, 1)\n visible_unit = TrainUabs.mem('visible_unit', VISIBLE_UNIT_WIDTH, 1)\n visibleEnergy= TrainUabs.mem('visibleEnergies', KWIDTH , 16)\n pow2 = TrainUabs.mem('pow2', KWIDTH ,32)\n pos = TrainUabs.mem('pos', POS_ADDR_WIDTH, 1 )\n #neg = TrainUabs.mem('neg', NEG_ADDR_WIDTH, 1 ) # not needed\n\n train_sum = TrainUabs.reg('train_sum', 16)\n train_max = TrainUabs.reg('train_max', 16)\n sumOfpow2 = TrainUabs.reg('sumOfpow2', 64)\n\n jstate = TrainUabs.reg('jstate', 16)\n inner_loop_pc= TrainUabs.reg('per_v_pc', 4)\n\n\n train_pc = TrainUabs.reg('train_upc', 4) # Re-evaluate\n v_cnt = TrainUabs.reg('train_v_cnt', 16)\n h_cnt = TrainUabs.reg('train_h_cnt', 16)\n\n train_input = TrainUabs.getmem('data')\n edges_input = TrainUabs.getmem('edges')\n nv = TrainUabs.getreg('num_visible')\n nh = TrainUabs.getreg('num_hidden')\n nu = TrainUabs.getreg('num_users')\n ntu = TrainUabs.getreg('num_testusers')\n nlp = TrainUabs.getreg('num_loops')\n\n\n SumEdge = train_pc == 0; SumEdgeState = const(0,4)\n SumHidden = train_pc == 1; SumHiddenState = const(1,4)\n StorePos = train_pc == 3; StorePosState = const(3,4)\n EdgeUpdate = train_pc == 2; EdgeUpdateState = const(2,4)\n\n\n TrainUabs.decode_exprs = [SumEdge, SumHidden, EdgeUpdate ] \n\n #Begin\n v_cnt_init = const(0, 16)\n h_cnt_init = const(0, 16)\n pc_init = const(0, 4)\n\n #SumEdge: s0\n edge_load_addr = (NUM_HIDDEN_MAX+1) * v_cnt + h_cnt \n train_sum_s0_nxt = ila.ite(v_cnt == 0, const(0,16), train_sum ) + ila.ite( ila.load(train_input, v_cnt[DATAMEM_ADDR_WIDTH-1:0]) == 1 , fpconvert(ila.load(edges_input,edge_load_addr ), FPedge, FPsum) , const(0,16) )\n v_cnt_s0_nxt = ila.ite(v_cnt == nv , h0_16 , v_cnt + 1 )\n h_cnt_s0_nxt = ila.ite( ( v_cnt == nv ) , ila.ite(h_cnt == nh - 1 , h0_16 , h_cnt + 1 ) , h_cnt )\n # Here ^^^ is for transiting to next state\n hidden_update_s0_0 = ila.ite( ila.appfun(rand_func) < ila.appfun( sigmoid_func , train_sum_s0_nxt ) , b1 , b0)\n hidden_update_s0_1 = ila.ite( v_cnt == nv , ila.store(hidden_unit, h_cnt[HIDDEN_UNIT_WIDTH-1:0] , hidden_update_s0_0 ) , hidden_unit )\n hidden_update_s0_2 = ila.ite( ( v_cnt == nv ) & (h_cnt == nh - 1 ) , ila.store(hidden_update_s0_1, nh[HIDDEN_UNIT_WIDTH-1:0], b1 ) , hidden_update_s0_1 )\n train_pc_s0_nxt = ila.ite( ( v_cnt == nv ) & (h_cnt == nh - 1 ) , SumHiddenState , SumEdgeState )\n # Just like init\n jstate_s0_nxt = h0_16\n inner_loop_pc_s0_nxt = h0_4\n\n # add prefix : \n # train_sum_nxt = ila.ite(SumEdge, train_sum_s0_nxt, ila.ite(SumHidden, ... 
) )\n\n # SumHiddenK0-K4 : s1-s5\n\n # pc:1 per_v_pc : 0 1 2 3 \n\n LastH = h_cnt == nh\n LastJ = jstate == K-1\n LastV = (v_cnt + K == nv) | (v_cnt + K >= NUM_VISIBLE_MAX)\n SumHiddenL0 = SumHidden & (inner_loop_pc == 0)\n SumHiddenL1 = SumHidden & (inner_loop_pc == 1)\n SumHiddenL2 = SumHidden & (inner_loop_pc == 2)\n SumHiddenL3 = SumHidden & (inner_loop_pc == 3)\n\n h_cnt_s1_s5_L0_nxt = ila.ite( LastH , h0_16, h_cnt + 1)\n jstate_s1_s5_L0_nxt = ila.ite( LastH, ila.ite( LastJ, h0_16, jstate + 1 ) , jstate )\n inner_loop_pc_s1_s5_L0_nxt = ila.ite( LastJ & LastH , h1_4 , inner_loop_pc )\n\n jstate_s1_s5_L1_nxt = ila.ite( LastJ, h0_16, jstate + 1 )\n inner_loop_pc_s1_s5_L1_nxt = ila.ite( LastJ, h2_4, inner_loop_pc )\n\n jstate_s1_s5_L2_nxt = jstate_s1_s5_L1_nxt\n inner_loop_pc_s1_s5_L2_nxt = ila.ite( LastJ, h3_4, inner_loop_pc )\n\n jstate_s1_s5_L3_nxt = jstate_s1_s5_L2_nxt\n inner_loop_pc_s1_s5_L3_nxt = ila.ite( LastJ, \n ila.ite( LastV , h0_4, h0_4 ), # will choose to go back or not\n inner_loop_pc )\n\n def nextCondition(l0,l1,l2,l3,default):\n return ila.ite(SumHiddenL0, l0, ila.ite(SumHiddenL1, l1, ila.ite(SumHiddenL2, l2, ila.ite(SumHiddenL3, l3,default ))))\n\n h_cnt_s1_s5_nxt = nextCondition(h_cnt_s1_s5_L0_nxt,h_cnt,h_cnt,h_cnt, h_cnt ) \n v_cnt_s1_s5_nxt = ila.ite(SumHiddenL3 & LastJ, ila.ite( LastV, h0_16 , v_cnt + K ), v_cnt )\n jstate_s1_s5_nxt = nextCondition(jstate_s1_s5_L0_nxt,jstate_s1_s5_L1_nxt,jstate_s1_s5_L2_nxt,jstate_s1_s5_L3_nxt, jstate ) \n inner_loop_pc_s1_s5_nxt = nextCondition(inner_loop_pc_s1_s5_L0_nxt,inner_loop_pc_s1_s5_L1_nxt,inner_loop_pc_s1_s5_L2_nxt,inner_loop_pc_s1_s5_L3_nxt, inner_loop_pc ) \n train_pc_s1_s5_nxt = ila.ite(SumHiddenL3 & LastJ & LastV, StorePosState, SumHiddenState )\n \n\n # L0\n train_sum_s1_s5_L0_nxt = ila.ite( h_cnt == 0 , h0_16 , train_sum ) +ila.ite( ila.load(hidden_unit, h_cnt[HIDDEN_UNIT_WIDTH-1:0]) == 1 , fpconvert( ila.load(edges_input, edge_load_addr ), FPedge, FPsum ), h0_16 )\n _train_max_origin_L0 = ila.ite( jstate == 0, fpconst(-500, FPsum).ast , train_max ) # make sure the first time we are comparing with init sum\n train_max_s1_s5_L0_nxt = ila.ite( LastH , ila.ite( ila.sgt( train_sum_s1_s5_L0_nxt , _train_max_origin_L0 ) , train_sum_s1_s5_L0_nxt , _train_max_origin_L0) , train_max )\n visibleEnergy_s1_s5_L0_nxt = ila.ite( LastH ,ila.store( visibleEnergy, jstate[KWIDTH-1:0], train_sum_s1_s5_L0_nxt ) , visibleEnergy )\n # L1\n # sum3: 64_64 -> dp: 32_32\n _31_sum = fpconst(31, FPsum).ast\n train_max_s1_s5_L1_nxt = ila.ite(jstate == 0, train_max - _31_sum, train_max)\n _st_val_L1 = ila.load(visibleEnergy, jstate[KWIDTH-1:0]) - train_max_s1_s5_L1_nxt\n visibleEnergy_s1_s5_L1_nxt = ila.store( visibleEnergy, jstate[KWIDTH-1:0], _st_val_L1 )\n # L2\n _pow2_new_val = ila.appfun(to_int_exp, ila.load(visibleEnergy, jstate[KWIDTH-1:0]) )\n _pow2_new_convert = fpconvert( _pow2_new_val, FPpow, FPsum3 )\n sumOfpow2_s1_s5_L2_nxt = ila.ite( jstate == 0, h0_64, sumOfpow2 ) + _pow2_new_convert\n pow2_s1_s5_L2_nxt = ila.store( pow2, jstate[KWIDTH-1:0], _pow2_new_val )\n # L3\n _probs = ila.appfun( divide_func, [ila.load(pow2, jstate[KWIDTH-1:0]), sumOfpow2] )\n _RAND = ila.appfun( rand_func )\n _visible_unit_new_val = ila.ite( _probs > _RAND, b1, b0 )\n _vu_idx = v_cnt + jstate \n _visible_unit_s1_s5_L3_1 = ila.store( visible_unit, _vu_idx[VISIBLE_UNIT_WIDTH-1:0], _visible_unit_new_val )\n visible_unit_s1_s5_L3_nxt = ila.ite( LastJ & LastV, ila.store(_visible_unit_s1_s5_L3_1, nv[VISIBLE_UNIT_WIDTH-1:0], b1 ), _visible_unit_s1_s5_L3_1 ) \n # 
when exit visible unit should be made to store 1 at nv\n\n train_sum_s1_s5_nxt = nextCondition(train_sum_s1_s5_L0_nxt, train_sum, train_sum, train_sum, train_sum )\n train_max_s1_s5_nxt = nextCondition(train_max_s1_s5_L0_nxt, train_max_s1_s5_L1_nxt, train_max, train_max, train_max )\n visible_unit_s1_s5_nxt = nextCondition(visible_unit, visible_unit, visible_unit, visible_unit_s1_s5_L3_nxt, visible_unit )\n visibleEnergy_s1_s5_nxt = nextCondition(visibleEnergy_s1_s5_L0_nxt, visibleEnergy_s1_s5_L1_nxt, visibleEnergy, visibleEnergy, visibleEnergy)\n sumOfpow2_s1_s5_nxt = nextCondition(sumOfpow2, sumOfpow2, sumOfpow2_s1_s5_L2_nxt, sumOfpow2, sumOfpow2 )\n pow2_s1_s5_nxt = nextCondition(pow2, pow2, pow2_s1_s5_L2_nxt, pow2, pow2)\n\n # before s6: store pos\n\n h_cnt_sp_nxt = ila.ite( h_cnt == nh , h0_16 , h_cnt + 1 )\n v_cnt_sp_nxt = ila.ite( h_cnt == nh , ila.ite( v_cnt == nv , h0_16 , v_cnt + 1 ) , v_cnt )\n _data_load = ila.load(train_input, v_cnt[VISIBLE_UNIT_WIDTH-1:0])\n _pos_sp_cond = ( _data_load != 2 ) \n _pos_sp_val = ila.ite( _data_load != 0, b1, b0 ) & ila.load(hidden_unit, h_cnt[HIDDEN_UNIT_WIDTH-1:0])\n _pos_st_addr = (NUM_HIDDEN_MAX+1) * v_cnt + h_cnt \n pos_sp_nxt = ila.store(pos, _pos_st_addr, _pos_sp_val )\n train_pc_sp_nxt = ila.ite( (h_cnt == nh) & (v_cnt == nv), EdgeUpdateState , StorePosState) \n\n # update edge : s6\n\n h_cnt_s6_nxt = ila.ite( h_cnt == nh , h0_16 , h_cnt + 1 )\n v_cnt_s6_nxt = ila.ite( h_cnt == nh , ila.ite( v_cnt == nv , v_cnt , v_cnt + 1 ) , v_cnt )\n\n _pos_ld_addr = (NUM_HIDDEN_MAX+1) * v_cnt + h_cnt \n train_pos = ila.load(pos,_pos_ld_addr) != 0\n train_neg = ( ila.load(hidden_unit, h_cnt[HIDDEN_UNIT_WIDTH-1:0]) != 0 ) & ( ila.load(visible_unit, v_cnt[VISIBLE_UNIT_WIDTH-1:0]) != 0 )\n edge_original = ila.load(edges_mem, (NUM_HIDDEN_MAX+1) * v_cnt + h_cnt )\n edge_new = ila.ite( (train_pos)&(~train_neg), edge_original + fpconst(LEARN_RATE, FPedge).ast , \n ila.ite( (~train_pos)&(train_neg), edge_original - fpconst(LEARN_RATE, FPedge).ast ,\n edge_original ) )\n edge_s6_nxt = ila.store( edges_mem, (NUM_HIDDEN_MAX+1) * v_cnt + h_cnt , edge_new )\n train_pc_s6_nxt = ila.ite( (h_cnt == nh) & (v_cnt == nv), EdgeUpdateState , EdgeUpdateState) \n # no need to jump back itself, because the flag: train_input_done is turned back to zero\n # don't forget to set back signals in Uabs ()\n\n train_done = TrainUabs.getreg('train_input_done')\n train_uabs_index = TrainUabs.getreg('index')\n train_uabs_loop_count = TrainUabs.getreg('loop_count')\n train_uabs_upc = TrainUabs.getreg('upc')\n\n # add prefix s6 !!!\n s6_complete = (h_cnt == nh) & (v_cnt == nv )\n index_nxt_s6_nxt = ila.ite( s6_complete, \n ila.ite( (train_uabs_index == nu - 1) & (train_uabs_loop_count != nlp), \n h0_16,\n train_uabs_index + 1 ) , \n train_uabs_index )\n\n # assert (train_uabs_index == ntu - 1) & (train_uabs_loop_count == nlp) should never happen\n\n loop_count_s6_nxt = ila.ite( s6_complete & (train_uabs_index == nu - 1) & (train_uabs_loop_count != nlp) , train_uabs_loop_count + 1, train_uabs_loop_count )\n upc_s6_nxt = ila.ite( s6_complete, StartReadState, train_uabs_upc )\n train_input_done_s6_nxt_nxt = ila.ite( s6_complete , b0 , train_done )\n\n # data -> hidden_unit -> visible_unit -> edge\n # data -> edge\n\n # add \n def TrainNext(e1,e2,e3, default):\n return ila.ite(SumEdge, e1, ila.ite(SumHidden, e2, ila.ite(EdgeUpdate, e3, default)))\n def TrainNextSP(e1,e2,e3,e4,default):\n return ila.ite(SumEdge, e1, ila.ite(SumHidden, e2, ila.ite(StorePos, e3, ila.ite(EdgeUpdate, e4, 
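# A plain-numpy reference for what the SumEdge / SumHidden micro-steps above
# compute in fixed point: sample hidden units with rand < sigmoid(v.W), then
# turn the K per-rating energies into probabilities using the usual
# max-subtraction trick for numerical stability (a behavioral sketch, not
# the hardware's exact fixed-point arithmetic).
import numpy as np

def sample_hidden(v, W, rng):
    act = v @ W                             # SumEdge: energy per hidden unit
    p = 1.0 / (1.0 + np.exp(-act))          # sigmoid_func
    return (rng.random(p.shape) < p).astype(np.uint8)  # rand_func comparison

def visible_probs(energies):
    e = energies - energies.max()           # subtract train_max (L1 step)
    p = np.exp(e)                           # exponentiate, like pow2 (L2 step)
    return p / p.sum()                      # divide_func (L3 step)

rng = np.random.default_rng(0)
print(sample_hidden(np.array([1, 0, 1]), rng.normal(size=(3, 4)), rng))
print(visible_probs(np.array([-2.0, 0.5, 1.0, 0.3, -1.0])))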
default))))\n def TrainChoice5(name, e1,e2, e3, default):\n return ila.choice(name, e1,e2,e3, default)\n def TrainChoice4(name, e1,e2, default):\n return ila.choice(name, e1,e2, default)\n def TrainChoice3(name, e1, default):\n return ila.choice(name, e1, default)\n\n TrainUabs.set_init('train_upc', pc_init)\n TrainUabs.set_init('train_v_cnt', v_cnt_init)\n TrainUabs.set_init('train_h_cnt', h_cnt_init)\n\n TrainUabs.set_next('jstate', TrainNext(jstate_s0_nxt, jstate_s1_s5_nxt, jstate, jstate))\n TrainUabs.set_next('train_sum', TrainNext(train_sum_s0_nxt, train_sum_s1_s5_nxt, train_sum, train_sum) )\n TrainUabs.set_next('train_v_cnt', TrainNextSP(v_cnt_s0_nxt, v_cnt_s1_s5_nxt, v_cnt_sp_nxt, v_cnt_s6_nxt, v_cnt) )\n TrainUabs.set_next('train_h_cnt', TrainNextSP(h_cnt_s0_nxt, h_cnt_s1_s5_nxt, h_cnt_sp_nxt, h_cnt_s6_nxt, h_cnt) )\n TrainUabs.set_next('train_upc', TrainNextSP(train_pc_s0_nxt, train_pc_s1_s5_nxt, train_pc_sp_nxt, train_pc_s6_nxt, train_pc) )\n\n TrainUabs.set_next('train_max', TrainNext(train_max, train_max_s1_s5_nxt, train_max, train_max) )\n TrainUabs.set_next('hidden_unit', TrainNext(hidden_update_s0_2, hidden_unit, hidden_unit, hidden_unit) ) \n TrainUabs.set_next('visible_unit', TrainNext(visible_unit, visible_unit_s1_s5_nxt, visible_unit, visible_unit))\n TrainUabs.set_next('edges', TrainNext(edges_mem, edges_mem, edge_s6_nxt, edges_mem))\n TrainUabs.set_next('index', TrainNext(train_uabs_index,train_uabs_index, index_nxt_s6_nxt,train_uabs_index) )\n TrainUabs.set_next('loop_count', TrainNext(train_uabs_loop_count,train_uabs_loop_count, loop_count_s6_nxt,train_uabs_loop_count) )\n TrainUabs.set_next('upc', TrainNext(train_uabs_upc,train_uabs_upc, upc_s6_nxt,train_uabs_upc) )\n TrainUabs.set_next('train_input_done', TrainNext(train_done,train_done, train_input_done_s6_nxt_nxt, train_done) )\n # newly added\n TrainUabs.set_next('visibleEnergies', TrainNext(visibleEnergy, visibleEnergy_s1_s5_nxt, visibleEnergy, visibleEnergy) )\n TrainUabs.set_next('sumOfpow2', TrainNext(sumOfpow2, sumOfpow2_s1_s5_nxt, sumOfpow2, sumOfpow2) )\n TrainUabs.set_next('pow2', TrainNext(pow2,pow2_s1_s5_nxt, pow2, pow2) )\n TrainUabs.set_next('pos', ila.ite(StorePos , pos_sp_nxt ,pos) )\n TrainUabs.set_next('per_v_pc', TrainNext( inner_loop_pc_s0_nxt , inner_loop_pc_s1_s5_nxt, inner_loop_pc, inner_loop_pc) )\n\n\n #------------------------------------\n # Predict UUABS\n #------------------------------------\n # data -> predict_result\n\n\n PredictUabs = uabs.add_microabstraction('predict', predict_input_done == 1)\n \n sigmoid_func = PredictUabs.fun('sigmoid', 64, [16]) # DATA_sum_, 01_D\n rand_func = PredictUabs.fun('rand', 64, []) # generate random number\n to_int_exp = PredictUabs.fun('to_int_exp', 32 , [16] ) # \n round_func = PredictUabs.fun('round', 8, [32]) # 05_D -> u8\n divide_func = PredictUabs.fun('divide', 64 , [32, 64] ) # dp:32_32 / sum_of_pow2 64_64 = 64_1\n\n hidden_unit = PredictUabs.mem('hidden_unit', HIDDEN_UNIT_WIDTH, 1)\n visibleEnergy = PredictUabs.mem('visibleEnergies', KWIDTH , 16)\n predict_result = PredictUabs.getmem('predict_result')\n predict_sum = PredictUabs.reg('predict_sum', 16)\n predict_max = PredictUabs.reg('predict_max', 16)\n sumOfpow2 = PredictUabs.reg('sumOfpow2', 64)\n pow2 = PredictUabs.mem('pow2', KWIDTH ,32)\n\n predict_vector = PredictUabs.mem('predict_vector', VISIBLE_UNIT_WIDTH, 1)\n inner_loop_pc= PredictUabs.reg('per_v_pc', 4)\n\n count = PredictUabs.reg('count', 8)\n jstate = PredictUabs.reg('jstate', 16)\n expectation = 
PredictUabs.reg('expectation', 32)\n prediction = PredictUabs.reg('prediction', 8)\n\n\n predict_pc = PredictUabs.reg('predict_upc', 4) # Re-evaluate\n v_cnt = PredictUabs.reg('predict_v_cnt', 16)\n h_cnt = PredictUabs.reg('predict_h_cnt', 16)\n\n predict_input = PredictUabs.getmem('data')\n edges_input = PredictUabs.getmem('edges')\n nv = PredictUabs.getreg('num_visible')\n nh = PredictUabs.getreg('num_hidden')\n nu = PredictUabs.getreg('num_users')\n ntu = PredictUabs.getreg('num_testusers')\n nlp = PredictUabs.getreg('num_loops')\n\n\n SumEdge = predict_pc == 0; SumEdgeState = const(0,4)\n SumHidden = predict_pc == 1; SumHiddenState = const(1,4)\n GenResult = predict_pc == 3; GenResultState = const(3,4)\n WaitForWrite = predict_pc == 2; WaitForWriteState = const(2,4)\n\n PredictUabs.decode_exprs = [SumEdge, SumHidden, WaitForWrite ] \n\n #Begin\n v_cnt_init = const(0, 16)\n h_cnt_init = const(0, 16)\n pc_init = const(0, 4)\n\n #SumEdge: s0\n edge_load_addr = (NUM_HIDDEN_MAX+1) * v_cnt + h_cnt \n predict_sum_s0_nxt = ila.ite(v_cnt == 0, const(0,16), predict_sum ) + ila.ite( ila.load(predict_input, v_cnt[DATAMEM_ADDR_WIDTH-1:0]) == 1, fpconvert(ila.load(edges_input,edge_load_addr ), FPedge, FPsum) , const(0,16) )\n v_cnt_s0_nxt = ila.ite(v_cnt == nv , h0_16 , v_cnt + 1 )\n h_cnt_s0_nxt = ila.ite( ( v_cnt == nv ) , ila.ite(h_cnt == nh - 1 , h0_16 , h_cnt + 1 ) , h_cnt )\n # Here ^^^ is for transiting to next state\n\n hidden_update_s0_0 = ila.ite( fpconst(0.5, FP01_D).ast < ila.appfun( sigmoid_func , predict_sum_s0_nxt ) , b1 , b0)\n hidden_update_s0_1 = ila.ite( v_cnt == nv , ila.store(hidden_unit, h_cnt[HIDDEN_UNIT_WIDTH-1:0] , hidden_update_s0_0 ), hidden_unit )\n hidden_update_s0_2 = ila.ite( ( v_cnt == nv ) & (h_cnt == nh - 1 ) , ila.store(hidden_update_s0_1, nh[HIDDEN_UNIT_WIDTH-1:0], b1 ) , hidden_update_s0_1 )\n hidden_update_s0_next = hidden_update_s0_2\n predict_pc_s0_nxt = ila.ite( ( v_cnt == nv ) & (h_cnt == nh - 1 ) , SumHiddenState , SumEdgeState )\n\n jstate_s0_nxt = h0_16\n count_s0_nxt = ila.const(0,8)\n inner_loop_pc_s0_nxt = h0_4\n # add prefix : \n # predict_sum_nxt = ila.ite(SumEdge, predict_sum_s0_nxt, ila.ite(SumHidden, ... 
) )\n\n #-----------------------------\n # SumHiddensK0-K4 : s1-s5\n # \n #-----------------------------\n\n LastH = h_cnt == nh\n LastJ = jstate == K-1\n LastV = (v_cnt + K == nv) | (v_cnt + K >= NUM_VISIBLE_MAX)\n SumHiddenL0 = SumHidden & (inner_loop_pc == 0)\n SumHiddenL1 = SumHidden & (inner_loop_pc == 1)\n SumHiddenL2 = SumHidden & (inner_loop_pc == 2)\n SumHiddenL3 = SumHidden & (inner_loop_pc == 3)\n SumHiddenL4 = SumHidden & (inner_loop_pc == 4)\n\n h_cnt_s1_s5_L0_nxt = ila.ite( LastH , h0_16, h_cnt + 1)\n jstate_s1_s5_L0_nxt = ila.ite( LastH , ila.ite( LastJ, h0_16, jstate + 1 ) , jstate )\n inner_loop_pc_s1_s5_L0_nxt = ila.ite( LastJ & LastH , h1_4 , inner_loop_pc )\n\n jstate_s1_s5_L1_nxt = ila.ite( LastJ, h0_16, jstate + 1 )\n inner_loop_pc_s1_s5_L1_nxt = ila.ite( LastJ, h2_4, inner_loop_pc )\n\n jstate_s1_s5_L2_nxt = jstate_s1_s5_L1_nxt\n inner_loop_pc_s1_s5_L2_nxt = ila.ite( LastJ, h3_4, inner_loop_pc )\n\n jstate_s1_s5_L3_nxt = jstate_s1_s5_L2_nxt\n inner_loop_pc_s1_s5_L3_nxt = ila.ite( LastJ, h4_4, inner_loop_pc )\n\n jstate_s1_s5_L4_nxt = jstate_s1_s5_L3_nxt\n inner_loop_pc_s1_s5_L4_nxt = ila.ite( LastJ, \n ila.ite( LastV , h0_4, h0_4 ), # will choose to go back or not\n inner_loop_pc )\n def nextCondition(l0,l1,l2,l3,l4,default):\n return ila.ite(SumHiddenL0, l0, ila.ite(SumHiddenL1, l1, ila.ite(SumHiddenL2, l2, ila.ite(SumHiddenL3, l3,ila.ite(SumHiddenL4, l4, default )))))\n \n h_cnt_s1_s5_nxt = nextCondition(h_cnt_s1_s5_L0_nxt,h_cnt,h_cnt,h_cnt, h_cnt, h_cnt ) \n v_cnt_s1_s5_nxt = ila.ite(SumHiddenL4 & LastJ, ila.ite( LastV, h0_16 , v_cnt + K ), v_cnt )\n jstate_s1_s5_nxt = nextCondition( jstate_s1_s5_L0_nxt,\n jstate_s1_s5_L1_nxt,\n jstate_s1_s5_L2_nxt,\n jstate_s1_s5_L3_nxt, \n jstate_s1_s5_L4_nxt, \n jstate ) \n\n inner_loop_pc_s1_s5_nxt = nextCondition(inner_loop_pc_s1_s5_L0_nxt,\n inner_loop_pc_s1_s5_L1_nxt, inner_loop_pc_s1_s5_L2_nxt,\n inner_loop_pc_s1_s5_L3_nxt, inner_loop_pc_s1_s5_L4_nxt, inner_loop_pc ) \n\n predict_pc_s1_s5_nxt = ila.ite(SumHiddenL4 & LastJ & LastV, GenResultState, SumHiddenState )\n\n # L0\n predict_sum_s1_s5_L0_nxt = ila.ite( h_cnt == 0 , h0_16 , predict_sum ) +ila.ite( ila.load(hidden_unit, h_cnt[HIDDEN_UNIT_WIDTH-1:0]) == 1 , fpconvert( ila.load(edges_input, edge_load_addr ), FPedge, FPsum ), h0_16 )\n _predict_max_origin_L0 = ila.ite( jstate == 0, fpconst(-500, FPsum).ast , predict_max ) # make sure the first time we are comparing with init sum\n predict_max_s1_s5_L0_nxt = ila.ite( LastH , ila.ite( ila.sgt( predict_sum_s1_s5_L0_nxt , _predict_max_origin_L0 ) , predict_sum_s1_s5_L0_nxt , _predict_max_origin_L0) , predict_max )\n visibleEnergy_s1_s5_L0_nxt= ila.ite( LastH , ila.store( visibleEnergy, jstate[KWIDTH-1:0], predict_sum_s1_s5_L0_nxt ) , visibleEnergy )\n # L1\n # sum3: 64_64 -> dp: 32_32\n _31_sum = fpconst(31, FPsum).ast\n predict_max_s1_s5_L1_nxt = ila.ite(jstate == 0, predict_max - _31_sum, predict_max)\n _st_val_L1 = ila.load(visibleEnergy, jstate[KWIDTH-1:0]) - predict_max_s1_s5_L1_nxt\n visibleEnergy_s1_s5_L1_nxt = ila.store( visibleEnergy, jstate[KWIDTH-1:0], _st_val_L1 )\n # L2\n _pow2_new_val = ila.appfun(to_int_exp, ila.load(visibleEnergy, jstate[KWIDTH-1:0]) )\n _pow2_new_convert = fpconvert( _pow2_new_val, FPpow, FPsum3 )\n sumOfpow2_s1_s5_L2_nxt = ila.ite( jstate == 0, h0_64, sumOfpow2 ) + _pow2_new_convert\n pow2_s1_s5_L2_nxt = ila.store( pow2, jstate[KWIDTH-1:0], _pow2_new_val )\n # L3\n _probs = ila.appfun( divide_func, [ila.load(pow2, jstate[KWIDTH-1:0]), sumOfpow2] )\n _mul = fixpoint(_probs, FP01_D) * 
fixpoint(jstate, FPu16)\n expectation_s1_s5_L3_nxt = ila.ite( jstate == 0, h0_32, expectation ) + _mul.toFormat(FP05_D)\n # L4\n _prediction = ila.zero_extend(ila.appfun(round_func, [expectation] ), 16)\n _pv_val = ila.ite( jstate == _prediction , b1, b0 )\n _pv_idx = v_cnt + jstate\n _first_store = ila.store(predict_vector, _pv_idx[VISIBLE_UNIT_WIDTH-1:0], _pv_val)\n predict_vector_s1_s5_L4_nxt = ila.ite( SumHiddenL4 & LastV & LastJ, ila.store(_first_store, nv[VISIBLE_UNIT_WIDTH-1:0], b1), _first_store) \n\n\n predict_sum_s1_s5_nxt = nextCondition(predict_sum_s1_s5_L0_nxt, predict_sum, predict_sum, predict_sum, predict_sum, predict_sum )\n predict_max_s1_s5_nxt = nextCondition(predict_max_s1_s5_L0_nxt, predict_max_s1_s5_L1_nxt, predict_max, predict_max, predict_max, predict_max )\n visibleEnergy_s1_s5_nxt = nextCondition(visibleEnergy_s1_s5_L0_nxt, visibleEnergy_s1_s5_L1_nxt, visibleEnergy, visibleEnergy,visibleEnergy, visibleEnergy)\n sumOfpow2_s1_s5_nxt = nextCondition(sumOfpow2, sumOfpow2, sumOfpow2_s1_s5_L2_nxt, sumOfpow2, sumOfpow2, sumOfpow2 )\n pow2_s1_s5_nxt = nextCondition(pow2, pow2, pow2_s1_s5_L2_nxt, pow2, pow2, pow2)\n expectation_s1_s5_nxt = ila.ite(SumHiddenL3, expectation_s1_s5_L3_nxt, expectation)\n predict_vector_s1_s5_nxt= ila.ite(SumHiddenL4, predict_vector_s1_s5_L4_nxt, predict_vector)\n count_s1_s5_nxt = ila.ite(SumHiddenL4 & LastV & LastJ , h0_8, count )\n\n # before s6: store pos\n LastV = (v_cnt + K == nv) | (v_cnt + K >= NUM_VISIBLE_MAX)\n LastJ = jstate == K-1\n v_cnt_sp_nxt = ila.ite(LastV, v_cnt + K, v_cnt + K)\n jstate_sp_nxt = ila.ite( LastJ, h0_16, jstate + 1 ) \n\n _prediction_old = ila.ite(jstate == 0, h0_8, prediction)\n _pv_idx = v_cnt + jstate\n _predict_result_sp_val = ila.load(predict_vector, _pv_idx[VISIBLE_UNIT_WIDTH-1:0] )\n\n prediction_sp_nxt = ila.ite( _predict_result_sp_val == 1, (jstate + 1)[7:0], _prediction_old )\n count_sp_nxt = ila.ite(LastJ, count + 1 , count)\n predict_result_sp_nxt = ila.ite(LastJ, ila.store(predict_result, count[PREDICT_RESULT_WIDTH-1:0], prediction), predict_result)\n predict_pc_sp_nxt = ila.ite(LastV & LastJ, WaitForWriteState, GenResultState)\n\n wr_complete = PredictUabs.getreg('wr_complete')\n wr_req = PredictUabs.getreg('wr_request')\n wr_len = PredictUabs.getreg('wr_length')\n wr_idx = PredictUabs.getreg('wr_index')\n cur_idx = PredictUabs.getreg('index') # 32\n\n exitLoop = LastV & LastJ\n wr_request_sp_nxt = ila.ite( exitLoop , b1, wr_req) \n wr_index_sp_nxt = ila.ite( exitLoop , ila.zero_extend(nm, 32) * ila.zero_extend(cur_idx,32), wr_idx )\n wr_length_sp_nxt = ila.ite( exitLoop , ila.zero_extend(nm, 32), wr_len) \n wr_complete_sp_nxt= ila.ite( exitLoop , b0, wr_complete)\n # s6: \n\n #---------------------\n # update edge : s6\n #---------------------\n\n FinishOneRound = ( wr_req == 0 ) & ( wr_complete == 1 )\n\n predict_pc_s6_nxt = ila.ite( FinishOneRound , WaitForWriteState , WaitForWriteState )\n # its value does not matter because it will be terminated by predict_input_done\n # don't forget to set back signals in Uabs ()\n\n predict_done = PredictUabs.getreg('predict_input_done')\n predict_uabs_index = PredictUabs.getreg('index')\n predict_uabs_loop_count = PredictUabs.getreg('loop_count')\n predict_uabs_upc = PredictUabs.getreg('upc')\n all_done = PredictUabs.getreg('done')\n\n # add prefix s6 !!!\n index_nxt_s6_nxt = ila.ite( FinishOneRound , \n ila.ite( (predict_uabs_index == ntu - 1) & (predict_uabs_loop_count == nlp), \n predict_uabs_index,\n predict_uabs_index + 1 ),\n predict_uabs_index ) \n\n 
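# A list-based sketch, with assumed plain-Python inputs, of the GenResult scan
# above ("before s6: store pos"): walk the one-hot predict_vector K entries at
# a time and record jstate + 1 wherever a 1 was stored.
def gen_results(predict_vector, K):
    results = []
    for base in range(0, len(predict_vector), K):
        window = predict_vector[base:base + K]
        rating = next((j + 1 for j, bit in enumerate(window) if bit == 1), 0)
        results.append(rating)
    return results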
wr_complete_s6_nxt = ila.ite( FinishOneRound , b0 , wr_complete )\n # assert (predict_uabs_index == nu - 1) & (predict_uabs_loop_count != nlp) should never happen\n\n #loop_count_s6_nxt = ila.ite( (predict_uabs_index == nu - 1) & (predict_uabs_loop_count != nlp) , predict_uabs_loop_count + 1, predict_uabs_loop_count )\n\n upc_s6_nxt = ila.ite( FinishOneRound, ila.ite( (predict_uabs_index == ntu - 1) & (predict_uabs_loop_count == nlp) , FinishState ,StartReadState) , predict_uabs_upc )\n predict_input_done_s6_nxt_nxt = ila.ite( FinishOneRound , b0 , predict_done )\n\n all_done_s6_nxt = ila.ite( FinishOneRound & (predict_uabs_index == ntu - 1) & (predict_uabs_loop_count == nlp), b1, b0 )\n # data -> hidden_unit -> visible_unit -> edge\n # data -> edge\n\n # add\n\n # add \n def predictNext(e1,e2,e3, default):\n return ila.ite(SumEdge, e1, ila.ite(SumHidden, e2, ila.ite(WaitForWrite, e3, default)))\n def predictNextSp(e1,e2,e3,e4,default):\n return ila.ite(SumEdge, e1, ila.ite(SumHidden, e2, ila.ite( GenResult, e3 ,ila.ite(WaitForWrite, e4, default))))\n def ite(inst,e,default):\n return ila.ite(inst,e,default)\n\n PredictUabs.set_init('predict_upc', pc_init)\n PredictUabs.set_init('predict_v_cnt', v_cnt_init)\n PredictUabs.set_init('predict_h_cnt', h_cnt_init)\n\n PredictUabs.set_next('jstate', predictNextSp(jstate_s0_nxt, jstate_s1_s5_nxt, jstate_sp_nxt ,jstate, jstate))\n PredictUabs.set_next('predict_sum', predictNext(predict_sum_s0_nxt, predict_sum_s1_s5_nxt, predict_sum, predict_sum) )\n PredictUabs.set_next('predict_v_cnt', predictNextSp(v_cnt_s0_nxt, v_cnt_s1_s5_nxt, v_cnt_sp_nxt ,v_cnt, v_cnt) )\n PredictUabs.set_next('predict_h_cnt', predictNext(h_cnt_s0_nxt, h_cnt_s1_s5_nxt, h_cnt, h_cnt) )\n PredictUabs.set_next('predict_upc', predictNextSp(predict_pc_s0_nxt, predict_pc_s1_s5_nxt, predict_pc_sp_nxt ,predict_pc_s6_nxt, predict_pc) )\n PredictUabs.set_next('predict_max', predictNext(predict_max, predict_max_s1_s5_nxt, predict_max, predict_max) )\n PredictUabs.set_next('hidden_unit', predictNext(hidden_update_s0_2, hidden_unit, hidden_unit, hidden_unit) ) \n PredictUabs.set_next('count', predictNextSp(count_s0_nxt, count_s1_s5_nxt, count_sp_nxt, count, count) )\n PredictUabs.set_next('per_v_pc', predictNext(inner_loop_pc_s0_nxt, inner_loop_pc_s1_s5_nxt, inner_loop_pc, inner_loop_pc ) )\n \n PredictUabs.set_next('index', predictNext(predict_uabs_index,predict_uabs_index, index_nxt_s6_nxt,predict_uabs_index) )\n PredictUabs.set_next('upc', predictNext(predict_uabs_upc,predict_uabs_upc, upc_s6_nxt,predict_uabs_upc) )\n PredictUabs.set_next('predict_input_done', predictNext(predict_done,predict_done, predict_input_done_s6_nxt_nxt, predict_done) )\n PredictUabs.set_next('done', predictNext(all_done, all_done, all_done_s6_nxt, all_done) )\n\n PredictUabs.set_next('wr_request', predictNextSp(wr_req, wr_req ,wr_request_sp_nxt , wr_req, wr_req) )\n PredictUabs.set_next('wr_length', predictNextSp(wr_len, wr_len ,wr_length_sp_nxt, wr_len, wr_len ) )\n PredictUabs.set_next('wr_index', predictNextSp(wr_idx, wr_idx ,wr_index_sp_nxt, wr_idx, wr_idx ) )\n PredictUabs.set_next('wr_complete', predictNextSp(wr_complete, wr_complete, wr_complete_sp_nxt, wr_complete_s6_nxt, wr_complete) )\n # newly added\n PredictUabs.set_next('visibleEnergies', predictNext(visibleEnergy, visibleEnergy_s1_s5_nxt, visibleEnergy, visibleEnergy) )\n PredictUabs.set_next('sumOfpow2', predictNext(sumOfpow2, sumOfpow2_s1_s5_nxt, sumOfpow2, sumOfpow2) )\n PredictUabs.set_next('pow2', predictNext(pow2,pow2_s1_s5_nxt, pow2, pow2) 
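# predictNext / predictNextSp above are one-hot state multiplexers: each
# register's next-state expression is a nested chain of ila.ite selects keyed
# on the decoded predict_upc. The same shape in ordinary Python, with states
# as plain labels (an illustration, not part of the template):
def mux(state, cases, default):
    """cases maps a state label to its next value, mirroring the ila.ite chain."""
    return cases.get(state, default)

# e.g. mux(upc, {"SumEdge": j_s0, "SumHidden": j_s1_s5, "GenResult": j_sp}, jstate)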
)\n    PredictUabs.set_next('expectation', predictNext(expectation,expectation_s1_s5_nxt, expectation, expectation) )\n    PredictUabs.set_next('predict_vector', predictNext(predict_vector,predict_vector_s1_s5_nxt,predict_vector,predict_vector) )\n    PredictUabs.set_next('prediction', ite(GenResult,prediction_sp_nxt,prediction) )\n    PredictUabs.set_next('predict_result', ite(GenResult,predict_result_sp_nxt,predict_result) )\n\n\n\n    #------------------------------------\n    # Store UABS\n    #------------------------------------\n    # store is triggered by inst as uabs?\n\n    # wr_grant == 1 is an instruction\n    wr_granted = rbm.reg('wr_granted', 1)\n    rbm.set_next('wr_granted', ila.ite((wr_request & wr_grant) == 1, b1, wr_granted ) )\n    data_out_1st_set = ila.zero_extend(ila.load(predict_result, const(0,PREDICT_RESULT_WIDTH) ) , 32)\n    rbm.set_next('data_out', ila.ite((wr_request & wr_grant) == 1, data_out_1st_set, data_out))\n    # This is a hard decision, \n    # as we set_next, the reaction we defined will appear in the next cycle\n\n    StoreUabs = rbm.add_microabstraction('store', wr_granted == 1)\n\n    StoreUabs.decode_exprs = [ (wr_granted == 1) ] # XXX BY: is this the decode?\n\n    store_idx = StoreUabs.reg('i', 16)\n    nm = StoreUabs.getreg('num_movies')\n    wr_granted = StoreUabs.getreg('wr_granted')\n    wr_request = StoreUabs.getreg('wr_request')\n    wr_complete = StoreUabs.getreg('wr_complete')\n    predict_result = StoreUabs.getmem('predict_result')\n\n    StoreUabs.set_init('i', h1_16)\n    StoreUabs.set_next('i', ila.ite(store_idx < nm , store_idx + 1 , store_idx ) )\n    StoreUabs.set_next('wr_granted', ila.ite( store_idx < nm , wr_granted , b0 ) )\n    StoreUabs.set_next('wr_request', ila.ite( store_idx == 0 , b0 , wr_request) )\n    StoreUabs.set_next('wr_complete', ila.ite( store_idx < nm , wr_complete , b1) )\n    data_out = StoreUabs.getreg('data_out')\n    # possibly one cycle earlier\n    StoreUabs.set_next('data_out', ila.zero_extend(ila.load(predict_result, store_idx[PREDICT_RESULT_WIDTH-1:0]) , 32) )\n\n\n    #---------------------------\n    # Add no next \n    #\n    def keepNC(Abs,name):\n        Abs.set_next(name, Abs.getreg(name))\n    def keepMemNC(Abs,name):\n        Abs.set_next(name, Abs.getmem(name))\n    keepNC(rbm,'done')\n    keepNC(rbm,'wr_request')\n    keepNC(rbm,'wr_index')\n    keepNC(rbm,'wr_length')\n    keepNC(rbm,'rd_index')\n    keepNC(rbm,'rd_length')\n    keepNC(rbm,'rd_request')\n\n    keepMemNC(uabs,'edges')\n\n    keepNC(rbm,'rd_complete')\n    keepNC(rbm,'wr_complete')\n\n    archive_dir = './archive'\n    if not os.path.exists(archive_dir):\n        os.makedirs(archive_dir)\n\n    rbm.exportAll(archive_dir + '/rbm.ila')\n    uabs.exportAll(archive_dir + '/compute.ila')\n    DMAload.exportAll(archive_dir + '/DMAload.ila')\n    TrainUabs.exportAll(archive_dir + '/train.ila')\n    PredictUabs.exportAll(archive_dir + '/predict.ila')\n    StoreUabs.exportAll(archive_dir + '/store.ila')\n\n    return rbm\n\nif __name__ == '__main__':\n    rbm = buildILA()\n","repo_name":"yuex1994/iw_imdb","sub_path":"accls/RBM/ILA/rbm_template.py","file_name":"rbm_template.py","file_ext":"py","file_size_in_byte":42975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"10145344727","text":"#FIND THE INSTRUCTIONS AND PRINT THEM\r\nimport shutil\r\nimport os\r\nimport re\r\nfrom datetime import datetime\r\nimport time\r\nfrom pathlib import Path\r\nimport math\r\n\r\ninicio=time.time()\r\n\r\nruta = os.getcwd()#the path where we are currently located\r\n\r\n#FIND THE COMPRESSED FILE'S PATH; an extra command that ended up unused.\r\ndef 
Buscar_ruta_archivo_comprimido():\r\n    target = \"Proyecto+Dia+9.zip\"#the file extension must be included\r\n    #initial_dir = 'C:\\\\'\r\n    initial_dir = 'C:\\\\Users\\\\uri\\\\Desktop\\\\Curso_Python'#the double backslashes are required\r\n    path = ''\r\n    for root, _, files in os.walk(initial_dir):\r\n        if target in files:\r\n            path = os.path.join(root, target)\r\n            print(path)\r\n            break\r\n\r\n#UNPACK THE FILE\r\n'''\r\nimport shutil\r\nshutil.unpack_archive(\"Proyecto+Dia+9.zip\",\"Instrucciones_9no_Proyecto\",\"zip\")\r\nprint(\"Your file was unpacked\")\r\n'''\r\n\r\n#FIND THE FOLDER\r\ndef encontrar_Carpeta():\r\n    for carpeta,subcarpeta,archivo in os.walk(ruta):\r\n        for subcarp in subcarpeta:\r\n            if subcarp.startswith(\"Instrucciones\"):\r\n                subcarp_ruta = os.path.join(carpeta, subcarp)\r\n                print(f\"\\n{subcarp_ruta}\")#print the path of the folder holding the DIRECTORIES\r\n                return subcarp_ruta\r\n\r\n#FIND THE TEXT FILE AND PRINT ITS PATH\r\ndef Buscar_archivo(subcarp_ruta):\r\n    N=0\r\n    for carpeta,subcarpeta,archivo in os.walk(subcarp_ruta):#walks every subfolder and file of the curso_python folder\r\n        for arch in archivo:\r\n            #if arch.startswith(\"archivo30\"):#finds files that start with the letters modul without having to give the file extension\r\n\r\n            arch_ruta = os.path.join(carpeta,arch)#LOCATE the file\r\n            #print(f\"\\t{arch}\") # print the file name\r\n            #print(f\"\\n{arch_ruta}\")#print the file path\r\n            #name_archivo=Path(arch_ruta) #the file path\r\n\r\n            mi_archivo = open(arch_ruta)#open the file\r\n            texto=mi_archivo.read()#READ the file\r\n            #print(texto)\r\n\r\n            patron = r'[N]\\w{3}-\\d{5}'\r\n            verificar = re.search(patron, texto)\r\n            if verificar:\r\n                print(f\"\\t{arch}.....\\t{verificar.group()}\")#print the file and the info matching the pattern\r\n                #print(arch_ruta)#print the file path\r\n                N = N + 1\r\n            #else:\r\n            #    print(\"no match was found\")\r\n            mi_archivo.close()\r\n    return N\r\n\r\n#FUNCTION TO GET TODAY'S DATE\r\ndef dia():\r\n    fecha_hora=datetime.now()\r\n    return fecha_hora\r\n\r\n\r\n#CALLING THE FUNCTIONS\r\nBuscar_ruta_archivo_comprimido()\r\n\r\nfecha_hora=dia()\r\nprint(f\"Today's date is: {fecha_hora}\")\r\nprint(f\"\\tFile.....\\t\\tSERIAL NO.\")\r\nsubcarp_ruta=encontrar_Carpeta()\r\nN=Buscar_archivo(subcarp_ruta)\r\nprint(f\"Numbers found: {N}\")\r\n\r\nfin=time.time()\r\nduracion= fin-inicio\r\nprint(f\"The program run time is: {duracion} sec.\")\r\nprint(f\"The program run time is: {math.ceil(duracion)} sec. rounded up\")
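# A compact sketch of the same scan Buscar_archivo performs above, using a
# with-statement so files are closed even if read() raises. Note the character
# class [N] in the original pattern matches exactly the same thing as a bare N.
# find_serials and its parameters are illustrative names, not from the script.
def find_serials(root, pattern=r'N\w{3}-\d{5}'):
    hits = 0
    for folder, _dirs, files in os.walk(root):
        for name in files:
            with open(os.path.join(folder, name), errors='ignore') as fh:
                match = re.search(pattern, fh.read())
            if match:
                print(f"\t{name}.....\t{match.group()}")
                hits += 1
    return hits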
\r\n\r\n\r\n\r\n\r\n","repo_name":"SantiagoUGC07/Curso_Python","sub_path":"PRACT_9/8_1_NOVENO_proyecto.py","file_name":"8_1_NOVENO_proyecto.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20378128871","text":"from dataclasses import dataclass\nfrom meya.csp.integration import CspIntegration\nfrom meya.csp.integration.integration import CspIntegrationFilter\nfrom meya.db.view.thread import ThreadMode\nfrom meya.element import Element\nfrom meya.element import Ref\nfrom meya.element.field import element_field\nfrom meya.gridql.parser import GridQL\nfrom meya.gridql.parser import QueryException\nfrom meya.integration.element.element import FilterElementSpecUnion\nfrom meya.zendesk.base.integration import ZendeskBaseIntegration\nfrom meya.zendesk.support.payload.ticket import ZendeskSupportTicketGet\nfrom meya.zendesk.support.payload.ticket import ZendeskSupportTicketStatus\nfrom meya.zendesk.support.payload.user import ZendeskSupportUserGet\nfrom numbers import Real\nfrom typing import ClassVar\nfrom typing import List\nfrom typing import Optional\nfrom typing import Type\nfrom typing import Union\n\n\n@dataclass\nclass ZendeskSupportIntegrationFilter(CspIntegrationFilter):\n    rx_unhandled_ticket: FilterElementSpecUnion = element_field(default=False)\n\n    def is_valid(self) -> (bool, Optional[str]):\n        \"\"\"\n        :return: True, None if valid, False, \"error message\" otherwise\n        \"\"\"\n        tests = ((\"rx_unhandled_ticket\", self.rx_unhandled_ticket),)\n        for name, value in tests:\n            if not isinstance(value, bool):\n                try:\n                    # try parse and match to validate the input\n                    gridql = GridQL.create(value)\n                    gridql.match(\n                        dict(ticket={}, current_user={}, requester={})\n                    )\n                except QueryException as e:\n                    return (\n                        False,\n                        f\"Invalid GridQL for `{name}` field with value `{value}`: {str(e)}\",\n                    )\n        return super().is_valid()\n\n    @staticmethod\n    def does_match_unhandled_ticket(\n        ticket: ZendeskSupportTicketGet,\n        current_user: ZendeskSupportUserGet,\n        requester: ZendeskSupportUserGet,\n        field: FilterElementSpecUnion,\n    ) -> bool:\n        \"\"\"\n        :return: true|false whether or not to continue processing\n        \"\"\"\n        if isinstance(field, bool):\n            return field\n        else:\n            # Lucene style match\n            return GridQL.create(field).match(\n                dict(\n                    ticket=ticket.to_dict(),\n                    current_user=current_user.to_dict(),\n                    requester=requester.to_dict(),\n                )\n            )\n\n\n@dataclass\nclass ZendeskSupportIntegration(ZendeskBaseIntegration, CspIntegration):\n    \"\"\"\n    This integration element handles all incoming and outgoing events to and\n    from Zendesk Support. It contains all the configuration properties needed\n    to connect to Zendesk Support. Follow the instructions in the\n    Zendesk Support integration [setup guide](https://docs.meya.ai/docs/how-to-set-up-a-zendesk-support-integration)\n    to configure this integration.\n    \"\"\"\n\n    NAME: ClassVar[str] = \"zendesk_support\"\n\n    target_password: str = element_field(\n        help=(\n            \"The target password you generated. This is used by the \"\n            \"integration to authenticate incoming Zendesk Support webhooks.\"\n        )\n    )\n    auto_reopen_ticket: Union[bool, ThreadMode] = element_field(\n        default=ThreadMode.AGENT,\n        help=(\n            \"This automatically reopens the linked ticket, if the ticket is \"\n            \"in either the 'pending', 'hold' or 'solved' state, and the \"\n            \"user sends a new event e.g. a say event or an image event. 
\"\n \"This setting can either be 'True/False' or it can be a specific \"\n \"thread mode, so when the thread enters this mode and the user \"\n \"sends a new event, then the linked ticket will be reopened.\"\n ),\n )\n unlink_ticket_status: List[ZendeskSupportTicketStatus] = element_field(\n default_factory=lambda: [ZendeskSupportTicketStatus.CLOSED],\n help=(\n \"The set of Zendesk Support ticket statuses that will unlink the \"\n \"Meya thread from the ticket. When the Meya thread is unlinked, \"\n \"then the integration will no longer send events to Zendesk \"\n \"Support.\"\n ),\n )\n extract_html_links: bool = element_field(\n default=True,\n help=(\n \"This will cause the integration to parse out any hyperlinks in \"\n \"an incoming ticket comment, and convert it to a markdown link \"\n \"that can be rendered by messaging integrations such as Orb or \"\n \"Zendesk Sunshine Conversations.\"\n ),\n )\n include_text_with_media: bool = element_field(\n default=False,\n help=(\n \"When set to 'True' the integration will create a Meya media \"\n \"event and use the ticket's comment as the media event's text \"\n \"when the comment has an attachment. When set to 'False' the \"\n \"comment text will appear as a separate Meya say event. This \"\n \"setting is only applicable for ticket comments that have an \"\n \"attachment e.g. an image or a file.\"\n ),\n )\n upload_attachments: bool = element_field(\n default=True,\n help=(\n \"When set to 'True' the integration will upload all Meya media \"\n \"event files to your Zendesk Support instance instead of keeping \"\n \"it on the Meya CDN.\"\n ),\n )\n filter: ZendeskSupportIntegrationFilter = element_field(\n default_factory=ZendeskSupportIntegrationFilter,\n help=(\n \"This allows you to specify any valid GridQL query to filter \"\n \"incoming requests/events and outgoing requests/events.\"\n ),\n )\n\n api_timeout: Real = element_field(\n default=5,\n help=(\n \"The time, in seconds, to wait for a response from the Zendesk \"\n \"Support API.\"\n ),\n )\n\n def validate(self):\n super().validate()\n if ZendeskSupportTicketStatus.CLOSED not in self.unlink_ticket_status:\n raise self.validation_error(\n \"Tickets must be unlinked once closed.\"\n )\n\n\nclass ZendeskSupportIntegrationRef(Ref):\n element_type: ClassVar[Type[Element]] = ZendeskSupportIntegration\n","repo_name":"meya-customers/meya-sdk","sub_path":"meya/zendesk/support/integration/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":6325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23539352371","text":"\r\nimport sys\r\nimport time\r\nimport operator\r\nimport math\r\nimport re\r\n\r\ntimeit = 1\r\ndebugv = 0\r\nstartTime = 0\r\n\r\noutFile = open('output.txt', 'w')\r\ninFile = sys.stdin\r\ninFile = open('A-test.in', 'r')\r\ninFile = open('C:/Users/quentin/Downloads/A-small-attempt0.in', 'r')\r\ninFile = open('C:/Users/quentin/Downloads/A-large.in', 'r')\r\n\r\ndef main():\r\n\tT = int(inFile.readline())\r\n\tstartTime = time.clock()\r\n\tfor case in range(1,T+1):\r\n\t\tout(\"Case #{}: \".format(case))\r\n\t\tdoCase(case)\r\n\t\tout(\"\\n\")\r\n\r\n\r\n\r\n\r\n\r\ndef out(m):\r\n\toutFile.write(m)\r\n\tsys.stdout.write(m)\r\n\r\ndef cobin(k,n):\r\n\t#debug(str(k)+\" parmi \"+str(n)+\"\\n\")\r\n\treturn math.factorial(n)//(math.factorial(n-k)*math.factorial(k))\r\n\r\ndef shift(S, k, i):\r\n\tfor j in range(i, i+k):\r\n\t\ts = '-' if S[j]=='+' else '+'\r\n\t\tS = 
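# The greedy strategy in doCase below (scan left to right, flip a k-wide
# window whenever the current pancake shows '-') is optimal for this problem.
# shift() costs O(k) per flip; the difference-array variant sketched here
# tracks flip parity in O(1) per position. min_flips is an assumed helper,
# not part of the submitted solution.
def min_flips(S, k):
    n, flips, active = len(S), 0, 0
    ends = [0] * (n + 1)             # parity deltas where a flip stops applying
    for i in range(n):
        active ^= ends[i]
        if (S[i] == '-') ^ active:   # effective pancake at i is blank side up
            if i + k > n:
                return None          # IMPOSSIBLE
            flips += 1
            active ^= 1
            ends[i + k] ^= 1
    return flips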
S[:j]+s+S[j+1:]\r\n\treturn S\r\ndef doCase(case):\r\n\tS, k = inFile.readline().split()\r\n\tk = int(k)\r\n\tcount=0\r\n\tfor i in range(len(S)-k+1):\r\n\t\tif S[i]=='-':\r\n\t\t\tS = shift(S,k,i)\r\n\t\t\tcount+=1\r\n\tif '-' in S:\r\n\t\tout('IMPOSSIBLE')\r\n\telse:\r\n\t\tout(str(count))\r\n\r\n\r\n\r\n\r\n\r\ndef debug(m):\r\n\tif debugv:\r\n\t\tsys.stdout.write(m)\r\n\r\nif __name__ == '__main__':\r\n\tif len(sys.argv) > 1:\r\n\t\tif re.search('d', sys.argv[1]):\r\n\t\t\tdebugv = 1\r\n\t\tif re.search('i', sys.argv[1]):\r\n\t\t\tinFile = sys.stdin\r\n\r\n\tmain()\r\n\tif timeit:\r\n\t\tsys.stdout.flush()\r\n\t\tsys.stderr.write(\"Completed in {} seconds.\\n\".format(time.clock() - startTime))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/107.py","file_name":"107.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72027519873","text":"from fastapi import FastAPI\n\nimport geocoder\n\napp = FastAPI()\n\n\n@app.get('/')\nasync def root():\n    return {\"message\": \"Hello World\"}\n\n\n\n@app.get('/location')\ndef forward(address:str):\n    try:\n        geolocator = geocoder.osm(address)\n        address = geolocator.json['raw']['display_name']\n        lat = geolocator.json['raw']['lat']\n        lon = geolocator.json['raw']['lon']\n        data = {\n            \"source\": \"OSM\",\n            \"longitude\": lon,\n            \"latitude\": lat,\n            \"address\": address\n        }\n        return data\n    except Exception:\n        return {\"message\": \"Internal Server Error.\"} ","repo_name":"fathimakmurshida/geocoder-fastapi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11244295809","text":"import io\nimport ipywidgets\nfrom IPython.display import display\nimport pandas as pd\n\n_display_fields = (\n    (\"Name\", \"name\"),\n    (\"AC Bonus\", \"ac_bonus\"),\n    (\"col0\", \"col0\"),\n    (\"weight\", \"weight\"),\n    (\"Container Flags\", \"container_flags\"),\n    (\"col4\", \"col4\"),\n    (\"col5\", \"col5\"),\n    (\"Action 1 DMG\", \"act1_dmg\"),\n    (\"Action 1 Flags\", \"act1_flags\"),\n    (\"Action 2 DMG\", \"act2_dmg\"),\n    (\"Action 2 Flags\", \"act2_flags\"),\n    (\"Action 3 DMG\", \"act3_dmg\"),\n    (\"Action 3 Flags\", \"act3_flags\"),\n    (\"Charges\", \"charges\"),\n    (\"col11\", \"col11\"),\n    (\"Sub\", \"col12\"),\n    (\"Type\", \"obj_type\"),\n    (\"col14\", \"col14\"),\n)\n\n\ndef _fill_template(record):\n    html = \"<table>\"\n    for title, field in _display_fields:\n        html += f\"<tr><th>
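# A quick way to exercise the /location endpoint above, assuming FastAPI's
# TestClient (which needs the httpx extra) is installed; the lookup still
# performs a live OSM geocoding call, so it needs network access.
from fastapi.testclient import TestClient

client = TestClient(app)
resp = client.get("/location", params={"address": "Berlin"})
print(resp.json())  # {"source": "OSM", "longitude": ..., "latitude": ..., "address": ...}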
{title}{getattr(record, field)}
\"\n\n\nclass GameObject:\n def __init__(self, game, record):\n self.game = game # Enables us to look things up more easily\n self.record = record\n offsets = game.assets[\"INIT\"].sprite_offsets\n self.images = {}\n for img_type in (\"small\", \"worn\", \"tiny\"):\n self.images[img_type] = game.resources.sprites[\n getattr(offsets, img_type + \"_object\") + record.image_id\n ]\n self.name = game.assets[\"TEXT\"].text[record.text_record].value.decode(\"ascii\")\n\n def _ipython_display_(self):\n outputs = {}\n for t, i in self.images.items():\n outputs[t] = ipywidgets.Output()\n with outputs[t]:\n display(i.frames[0])\n vb = ipywidgets.VBox(\n [ipywidgets.HBox([outputs[\"small\"], outputs[\"tiny\"]]), outputs[\"worn\"]]\n )\n display(vb)\n\n\nclass ObjectDatabase(dict):\n def __init__(self, game):\n super(dict, self).__init__()\n self.game = game\n for i, rec in enumerate(game.assets[\"OBJECTS\"].object):\n obj_inst = GameObject(game, rec)\n self[i] = self[obj_inst.name] = obj_inst\n\n def to_df(self):\n obj = self.objects_by_id[0]\n fields = [\n _\n for _ in dir(obj)\n if not _.startswith(\"_\")\n and _ not in (\"close\", \"from_file\", \"from_io\", \"from_bytes\")\n ]\n fields.sort()\n records = []\n for n, orec in sorted(self.objects_by_id.items()):\n rec = {_: getattr(orec, _) for _ in fields}\n rec[\"obj_type\"] = orec.obj_type.name\n records.append(rec)\n df = pd.DataFrame(records)\n return df\n\n def _ipython_display_(self):\n n = len(self.objects_by_id)\n png_images = []\n for i in range(n):\n png_images.append([])\n for fr in self[i][1][1].frames:\n with io.BytesIO() as output:\n fr.save(output, format=\"PNG\")\n output.seek(0)\n png_images[-1].append(output.read())\n sprite_slider = ipywidgets.IntSlider(min=0, max=n, step=1)\n frame_slider = ipywidgets.IntSlider(min=0, step=1)\n frame_label = ipywidgets.Label(value=\"\")\n im = ipywidgets.Image(value=b\"\", format=\"png\", height=200)\n span = ipywidgets.HTML()\n\n def update_record(change):\n new_object_record = self.objects_by_id[sprite_slider.value]\n span.value = _fill_template(new_object_record)\n\n def update_image(change):\n im.value = png_images[sprite_slider.value][frame_slider.value]\n\n def update_sprite(change):\n nf = len(png_images[sprite_slider.value])\n frame_label.value = f\"({nf})\"\n frame_slider.max = nf\n\n im.add_class(\"dispersing-pixelated\")\n im.layout.height = \"200px\"\n im.layout.object_fit = \"contain\"\n sprite_slider.observe(update_record, \"value\")\n sprite_slider.observe(update_sprite, \"value\")\n sprite_slider.observe(update_image, \"value\")\n frame_slider.observe(update_image, \"value\")\n # There's gotta be a better way to do this, but.\n sprite_slider.value = 1\n sprite_slider.value = 0\n display(\n ipywidgets.HBox(\n [\n ipywidgets.VBox(\n [\n sprite_slider,\n ipywidgets.HBox([frame_slider, frame_label]),\n span,\n ]\n ),\n im,\n ]\n )\n )\n","repo_name":"matthewturk/dispersing","sub_path":"dispersing/object_db.py","file_name":"object_db.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34423049207","text":"from gensim import models\n\n#Fonction qui répète la fonction \"models.phrases\" de Gensim jusqu'au point fixe.\n#S'est révélée inutile à l'usage. 
\n\ndef collocs(texts):\n\tprev=texts\n\tbigram = models.Phrases(texts)\n\ttexts=[bigram[x] for x in texts]  # materialise a list so the fixed-point comparison below works in Python 3\n\t\n\tif prev == texts:\n\t\treturn texts\n\telse:\n\t\treturn collocs(texts)\n\n\n\ndef col1(texts):\n\tprev=texts\n\tbigram = models.Phrases(texts)\n\ttexts=[bigram[x] for x in texts]\n\t\n\treturn texts\n\nif __name__==\"__main__\":\n\t\"\"\"\n\t\toutput : corpus collocs\n\t\t\n\t\tto be integrated with the search system\n\t\t\n\t\"\"\"","repo_name":"ArthurLapraye/LSA","sub_path":"collocs.py","file_name":"collocs.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37340962146","text":"import sys\nassert sys.version_info >= (3, 0) # Bomb out if not running Python3\n\nimport unittest\nimport json\nfrom threading import Timer\nfrom asl_workflow_engine.logger import init_logging\nfrom asl_workflow_engine.state_engine import StateEngine\n\nASL = \"\"\"{\n    \"Comment\": \"Test Step Function\",\n    \"StartAt\": \"StartState\",\n    \"States\": {\n    \"StartState\": {\n        \"Type\": \"Pass\",\n        \"Next\": \"ChoiceState\"\n    },\n    \"ChoiceState\": {\n        \"Type\": \"Choice\",\n        \"Choices\": [\n        {\n            \"Variable\": \"$.lambda\",\n            \"StringEquals\": \"InternalErrorNotHandled\",\n            \"Next\": \"InternalErrorNotHandledLambda\"\n        },\n        {\n            \"Variable\": \"$.lambda\",\n            \"StringEquals\": \"InternalErrorHandled\",\n            \"Next\": \"InternalErrorHandledLambda\"\n        },\n        {\n            \"Variable\": \"$.lambda\",\n            \"StringEquals\": \"Success\",\n            \"Next\": \"SuccessLambda\"\n        },\n        {\n            \"Variable\": \"$.lambda\",\n            \"StringEquals\": \"Timeout\",\n            \"Next\": \"TimeoutLambda\"\n        }\n        ],\n        \"Default\": \"FailState\"\n    },\n    \"FailState\": {\n        \"Type\": \"Fail\",\n        \"Error\": \"NoLambdaError\",\n        \"Cause\": \"No Matches!\"\n    },\n    \"SuccessLambda\": {\n        \"Type\": \"Task\",\n        \"Resource\": \"arn:aws:rpcmessage:local::function:SuccessLambda\",\n        \"Next\": \"WaitState\"\n    },\n    \"InternalErrorNotHandledLambda\": {\n        \"Type\": \"Task\",\n        \"Resource\": \"arn:aws:rpcmessage:local::function:InternalErrorNotHandledLambda\",\n        \"Next\": \"EndState\"\n    },\n    \"InternalErrorHandledLambda\": {\n        \"Type\": \"Task\",\n        \"Resource\": \"arn:aws:rpcmessage:local::function:InternalErrorHandledLambda\",\n        \"Next\": \"EndState\"\n    },\n    \"TimeoutLambda\": {\n        \"Type\": \"Task\",\n        \"Resource\": \"arn:aws:rpcmessage:local::function:TimeoutLambda\",\n        \"Next\": \"EndState\"\n    },\n    \"EndState\": {\n        \"Type\": \"Pass\",\n        \"End\": true\n    },\n    \"WaitState\": {\n        \"Type\": \"Wait\",\n        \"Seconds\":10,\n        \"Next\": \"EndState\"\n    }\n    }\n}\"\"\"\n\n\n\"\"\"\nThe application context is described in the AWS documentation:\nhttps://docs.aws.amazon.com/step-functions/latest/dg/input-output-contextobject.html \n\n{\n    \"Execution\": {\n        \"Id\": <String>,\n        \"Input\": <Object>,\n        \"StartTime\": <Timestamp>\n    },\n    \"State\": {\n        \"EnteredTime\": <Timestamp>,\n        \"Name\": <String>,\n        \"RetryCount\": <Number>\n    },\n    \"StateMachine\": {\n        \"Id\": <String>,\n        \"Definition\": <Object representing ASL state machine>\n    },\n    \"Task\": {\n        \"Token\": <String>\n    }\n}\n\nThe most important paths for state traversal are:\n$$.State.Name = the current state\n$$.StateMachine.Definition = (optional) contains the complete ASL state machine\n$$.StateMachine.Id = a unique reference to an ASL state machine\n\"\"\"\ncontext = '{\"StateMachine\": {\"Id\": \"arn:aws:states:local:0123456789:stateMachine:simple_state_machine\", \"Definition\": ' + ASL + '}}'\n\nitems = ['{\"data\": {\"lambda\":\"Success\"}, \"context\": ' + context + '}',\n         '{\"data\": {\"lambda\":\"InternalErrorNotHandled\"}, 
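# The French header above collocs says: a function that repeats gensim's
# models.Phrases until a fixed point, and that it proved useless in practice.
# An iterative sketch of the same idea that avoids unbounded recursion and
# caps the number of passes (the list materialisation matters: two map()
# objects never compare equal in Python 3):
def collocs_iterative(texts, max_passes=5):
    texts = [list(t) for t in texts]
    for _ in range(max_passes):
        bigram = models.Phrases(texts)
        merged = [bigram[t] for t in texts]
        if merged == texts:
            break
        texts = merged
    return texts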
\"context\": ' + context + '}',\n '{\"data\": {\"lambda\":\"InternalErrorHandled\"}, \"context\": ' + context + '}',\n '{\"data\": {\"lambda\":\"Timeout\"}, \"context\": ' + context + '}']\n\n#items = ['{\"data\": {\"lambda\":\"Success\"}, \"context\": ' + context + '}']\n#items = ['{\"data\": {\"lambda\":\"InternalErrorNotHandled\"}, \"context\": ' + context + '}']\n#items = ['{\"data\": {\"lambda\":\"InternalErrorHandled\"}, \"context\": ' + context + '}']\n#items = ['{\"data\": {\"lambda\":\"Timeout\"}, \"context\": ' + context + '}']\n\n\n\"\"\"\nCreate a simple EventDispatcher stub so that we can test the State Engine\nwithout requiring the messaging fabric. Rather than publishing each new state\nto a queue the stub simply calls notify on the StateEngine with the new state\ninformation. This simplistic approach should be OK for most tests, but we should\nbe careful of issues due to recursion so we may need to revisit this IDC.\n\"\"\"\nclass EventDispatcherStub(object):\n\n def __init__(self, state_engine, config):\n\n \"\"\"\n Create an association with the state engine and give that a reference\n back to this event dispatcher so that it can publish events and make\n use of the set_timeout time scheduler.\n \"\"\"\n self.state_engine = state_engine\n self.state_engine.event_dispatcher = self\n self.message_count = -1\n\n \"\"\"\n This simple threaded timeout should work OK, the real timeout is actually\n implemented using Pika's connection.call_later() which is single threaded\n and handled within Pika's event loop. That approach plays much better with\n Pika's event loop.\n \"\"\"\n def set_timeout(self, callback, delay):\n t = Timer(delay/1000, callback)\n t.start()\n return t\n\n def dispatch(self, message):\n \"\"\"\n Start at -1 and increment before call to notify as this stub, unlike\n the real EventDispatcher will recursively call dispatch as the State\n Engine calls publish, this means code after the call to notify won't\n be reached when one might expect it to.\n \"\"\"\n self.message_count += 1\n # The notify method expects a JSON object not a string.\n self.state_engine.notify(json.loads(message), self.message_count)\n\n def acknowledge(self, id):\n pass\n\n def publish(self, item):\n # Convert event back to JSON string for dispatching.\n self.dispatch(json.dumps(item))\n \n def broadcast(self, subject, message, carrier_properties=None):\n pass\n\n\"\"\"\nThis stubs out the real TaskDispatcher execute_task method which requires\nmessaging infrastructure to run whereas this test is just a state machine test.\n\"\"\"\ndef execute_task_stub(resource_arn, parameters, callback, timeout, context, id):\n name = resource_arn.split(\":\")[-1]\n result = {\"reply\": name + \" reply\"}\n callback(result)\n\n\nclass TestSimpleStateMachine(unittest.TestCase):\n\n def setUp(self):\n # Initialise logger\n logger = init_logging(log_name=\"test_simple_state_machine\")\n config = {\n \"state_engine\": {\n \"store_url\": \"ASL_store.json\",\n \"execution_ttl\": 500\n }\n }\n\n state_engine = StateEngine(config)\n # Stub out the real TaskDispatcher execute_task\n state_engine.task_dispatcher.execute_task = execute_task_stub\n self.event_dispatcher = EventDispatcherStub(state_engine, config)\n\n def test_state_machine(self):\n for item in items:\n self.event_dispatcher.dispatch(item)\n\nif __name__ == '__main__':\n 
unittest.main()\n\n","repo_name":"fadams/local-step-functions","sub_path":"asl-workflow-engine/py/test/test_simple_state_machine.py","file_name":"test_simple_state_machine.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"74496418433","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\nfrom views.view import View\n\n\ndef extract_definition(text):\n \"\"\" Allow to skip characters before ') ' \"\"\"\n\n for i, letter in enumerate(text):\n if letter == ')':\n return text[(i + 1):]\n\n\ndef parse_one_letter(letter, delimiter):\n\n # Scrap\n vgm_url = f'https://www.mso.anu.edu.au/~ralph/OPTED/v003/wb1913_{letter}.html'\n html_text = requests.get(vgm_url).text\n soup = BeautifulSoup(html_text, 'html.parser')\n\n # Extract\n words = []\n for line in soup.find_all('p'):\n word = line.find('b').text\n type = line.find('i').text\n definition = extract_definition(line.text)\n\n if len(word) > 1:\n # Regroup same words in one line\n if len(words) < 2 or not (words[-1][0] == word and words[-1][1] == type):\n words.append([word, type, definition])\n else:\n words[-1].append(definition)\n\n # Create csv file\n with open(f'./files/{letter}.csv', 'w', newline='') as f:\n writer = csv.writer(f, delimiter=delimiter)\n writer.writerows(words)\n\n\n# −−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−\nbodies = list()\nbodies.append('https://www.mso.anu.edu.au/~ralph/OPTED/')\nbodies.append(\"One file per letter, csv file with '_' as delimiter.\")\nbodies.append(\"One line per words with the type (multiple definitions are regrouped on the same line \\n\"\n \"and separated by the delimiter).\")\nmy_view = View(header='Scraping', bodies=bodies)\nmy_view.start_loading()\n\n# --\nletters = 'abcdefghijklmnopqrstuvwxyz'\nfor i, letter in enumerate(letters):\n my_view.update_loading_text(f'In progress : {letter.upper()}')\n my_view.update_loading(i * 100 / len(letters))\n parse_one_letter(letter, '_')\n\n# --\nmy_view.stop_loading()\nbodies.append('Done.')\nmy_view.show()\n","repo_name":"flinguenheld/csv_dictionary","sub_path":"scrap_dictionary.py","file_name":"scrap_dictionary.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8111258060","text":"from tkinter import *\nfrom tkinter import ttk\nfrom subprocess import Popen\nimport chromedriver_autoinstaller\nfrom selenium.webdriver.chrome.service import Service as ChromeService\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.keys import Keys\nfrom msedge.selenium_tools import Edge, EdgeOptions\nfrom subprocess import CREATE_NO_WINDOW\nfrom selenium import webdriver\nimport time\nimport importlib\nimport os\nimport zipfile\nimport urllib.request\nimport requests\nfrom io import BytesIO\nimport pandas as pd\nimport numpy as np\nfrom openpyxl import load_workbook\n\n\n\nclass App:\n\n def __init__(self):\n self.root = Tk()\n self.colorTeam = '#E11419'\n self.root.geometry('350x150')\n self.root.config(bg='#fff')\n self.link = 'https://poliedrodist.comcel.com.co/'\n self.linktemp = 'https://www.google.com/'\n self.link2 = 'https://poliedrodist.comcel.com.co/activaciones/http/REINGENIERIA/pagDispatcherEntradaModernizacion.asp?Site=1'\n 
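# extract_definition above returns None when no ')' is present; str.partition
# expresses the same "text after the first ')'" in one call (a sketch):
def extract_definition_short(text):
    _before, sep, after = text.partition(')')
    return after if sep else None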
self.interfas()\n\n def interfas(self):\n titulo = Label(self.root, text= \"Activaciones Team\")\n titulo.pack(anchor = CENTER)\n titulo.config(fg=self.colorTeam, bg='white', font=(\"Verdana\",18))\n\n titulo = Label(self.root, text= \"Comunicaciones\")\n titulo.pack(anchor = CENTER)\n titulo.config(fg=self.colorTeam, bg='white', font=(\"Verdana\",18))\n \n botonExcel = Button(self.root, text=\"ABRIR LISTA\", command=self.abrirArchivo, bg=self.colorTeam, fg=\"white\")\n botonExcel.place(relx=0.00, rely= 0.5, relwidth = 0.33, relheight= 0.5)\n\n botonActivar = Button(self.root, text=\"ABRIR PAGINA\", command=self.abrirPagina, bg=self.colorTeam, fg=\"white\")\n botonActivar.place(relx=0.333, rely= 0.5, relwidth = 0.33, relheight= 0.5)\n\n botonActivar = Button(self.root, text=\"ACTIVAR LINEAS\", command=self.activarLineas, bg=self.colorTeam, fg=\"white\")\n botonActivar.place(relx=0.666, rely= 0.5, relwidth = 0.33, relheight= 0.5)\n\n def abrirArchivo(self):\n p = Popen(\"openExcel.bat\")\n stdout, stderr = p.communicate()\n \n def excel(self):\n file = \"lineas.xlsx\"\n fileExcel = pd.read_excel(file)\n # numbers = np.asarray(fileExcel)\n numbers = fileExcel\n return numbers\n \n def saveExcel(self,posicion):\n self.lineas['Min'][posicion]= self.min\n self.lineas['Mensaje'][posicion]= self.mensaje\n self.lineas['ICC_ID_Identificacion_Tarjeta_de_Circuito_Integrada'][posicion]= self.icc\n self.lineas['IMEI_Identificacion_Internacional_del_Equipo_Movil'][posicion]= self.imei\n self.lineas['Validacion_Tecnologia'][posicion]= self.vTecnologia\n self.lineas['Validacion_Kit_Prepago'][posicion]= self.vKit\n self.lineas['Validacion_Region_ICCID_Distribuidor'][posicion]= self.vRegion\n self.lineas['Validacion_Equipo'][posicion]= self.vEquipo\n self.lineas['Validacion_Lista'][posicion]= self.vLista\n self.lineas.to_excel('lineas.xlsx', index=False)\n \n def quitarFormatoCientifico(self, cantidad):\n for i in range (0,cantidad):\n self.lineas['Iccid'][i]= \" \"+str(self.lineas['Iccid'][i]).strip()\n print(self.lineas['Iccid'][i])\n\n def activarLineas(self):\n self.elegirOpcion()\n time.sleep(2)\n self.lineas = self.excel()\n cantidad = len(self.lineas['Iccid'])\n self.quitarFormatoCientifico(cantidad)\n for i in range (0,cantidad):\n dato1= str(self.lineas['Imei'][i])\n dato2= str(self.lineas['Iccid'][i]).strip()\n dato2= dato2[5:]\n dato3= str(self.lineas['Cedula vendedor'][i])\n print(dato1, dato2, dato3)\n self.llenarInfo(dato1, dato2, dato3)\n time.sleep(2)\n self.saveExcel(i)\n \n\n\n \n\n\n\n def abrirPagina(self):\n self.installEdge()\n self.openEdge()\n \n\n def continuar(self,by,str):\n validate = True\n while(validate):\n try:\n if by == \"xpath\": find = self.browser.find_element_by_xpath(str)\n elif by == \"id\": find = self.browser.find_element_by_id(str)\n elif by == \"name\": find = self.browser.find_element_by_name(str)\n validate = FALSE\n except:\n print('Cargando')\n time.sleep(1)\n\n def errorControl(self,func,by,str):\n validate = True\n while(validate):\n try:\n func(by,str)\n validate = FALSE\n except:\n print('Cargando')\n time.sleep(1)\n\n \n\n def elegirOpcion(self):\n\n \n \n self.browser.get(self.link2)\n time.sleep(0.5)\n self.click('name', 'shortcutProduct')\n\n def llenarInfo(self,equipo,sim,vendedor):\n self.continuar('id', 'DetailProduct_SellerId')\n self.insert('id','DetailProduct_SellerId', vendedor)\n self.insert('id','DetailProduct_Imei', equipo)\n self.insert('id','DetailProduct_Iccid', sim)\n self.continuar('id', 'btnNext')\n time.sleep(2)\n self.click('id', 
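# saveExcel above writes cells through chained indexing (df['col'][row] = ...),
# which pandas reports as SettingWithCopyWarning and may silently apply to a
# copy. Writing through .loc is the reliable equivalent; save_cell is an
# illustrative helper, not part of the app:
def save_cell(df, row, col, value):
    df.loc[row, col] = value  # e.g. save_cell(self.lineas, posicion, 'Min', self.min)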
'btnNext')\n time.sleep(5)\n self.identificarPaso()\n\n def identificarPaso(self):\n op = 0\n try:\n intento1 = self.leerData('/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div[2]/div[4]/ul/li')\n if intento1 == \"none\":\n valor = False\n elif \"no pertenece\" in intento1:\n valor = True\n op = 1\n self.mensaje = intento1\n else:\n valor = True\n op = 1\n self.mensaje = intento1\n except Exception as e:\n valor = False\n\n if (valor == False):\n try:\n intento2 = self.leerData('/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div/div[6]/div/span')\n if intento2 == \"none\":\n valor = False\n elif \"Correcta\" in intento2:\n valor = True\n op = 2\n self.mensaje = intento2\n except Exception as e:\n valor = False\n if (valor == False):\n try:\n op = 3\n self.mensaje = 'No deja preactivar por seriales en uso o principal'\n except Exception as e:\n print('ninguna coincide')\n \n if op==1:\n print('error1')\n time.sleep(1)\n self.error1()\n if op==2:\n print('valida')\n time.sleep(1)\n self.validado()\n if op==3:\n print('error2')\n time.sleep(1)\n try:\n self.error2()\n except:\n pass\n \n \n\n def validado(self):\n self.icc = \"\"\n self.imei = \"\"\n self.vTecnologia = \"\"\n self.vKit = \"\"\n self.vLista = \"\"\n self.vEquipo = \"\"\n self.vRegion = \"\"\n self.continuar('id','btnNext')\n self.click('id', 'btnNext')\n time.sleep(1)\n self.continuar('id','btnNext')\n self.click('id', 'btnNext')\n self.continuar('xpath', '/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div/div[1]/div/div[2]/div/div[1]/div[3]/div/span/span[1]/span/span[1]')\n self.click('xpath', '/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div/div[1]/div/div[2]/div/div[1]/div[3]/div/span/span[1]/span/span[1]')\n self.continuar('xpath','/html/body/span/span/span[2]/ul/li[2]')\n self.click('xpath','/html/body/span/span/span[2]/ul/li[2]') \n time.sleep(2)\n self.continuar('id','btnNext')\n self.errorControl(self.click,'id','btnNext')\n time.sleep(1)\n self.continuar('id','btnNext')\n self.click('id', 'btnNext')\n self.continuar('xpath', '/html/body/div/div[2]/section/div/div[2]/div[2]/main/div/div/div/strong/strong/div/div/div/p/strong[2]')\n self.min = self.leerData('/html/body/div/div[2]/section/div/div[2]/div[2]/main/div/div/div/strong/strong/div/div/div/p/strong[2]')\n print(self.min)\n self.continuar('id','btnPrev')\n self.click('id','btnPrev')\n self.continuar('name', 'shortcutProduct')\n self.click('name', 'shortcutProduct')\n \n\n\n # ECM5430C\n # Marzo23*$%\n \n\n def error1(self):\n self.icc = \"\"\n self.imei = \"\"\n self.vTecnologia = \"\"\n self.vKit = \"\"\n self.vLista = \"\"\n self.vEquipo = \"\"\n self.vRegion = \"\"\n self.min = \"\"\n self.borrar('id','DetailProduct_Imei')\n time.sleep(0.5)\n self.borrar('id','DetailProduct_Iccid')\n time.sleep(0.5)\n self.borrar('id','DetailProduct_SellerId')\n time.sleep(0.5)\n self.borrarLetras('id','DetailProduct_SellerId',8)\n time.sleep(0.5)\n\n\n def error2(self):\n self.icc = self.leerData('/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div/div[3]/div[2]/div[1]/div/div/div')\n self.imei = self.leerData('/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div/div[3]/div[2]/div[2]/div/div/div')\n self.vTecnologia = self.leerData('/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div/div[3]/div[2]/div[3]/div/div/div')\n self.vKit = self.leerData('/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div/div[3]/div[2]/div[4]/div/div/div')\n self.vLista = 
self.leerData('/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div/div[3]/div[2]/div[5]/div/div/div')\n self.vEquipo = self.leerData('/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div/div[3]/div[2]/div[6]/div/div/div')\n self.vRegion = self.leerData('/html/body/div/div[2]/section/div/div[2]/div[2]/main/form/div/div[3]/div[2]/div[7]/div/div/div')\n self.min= \"\"\n time.sleep(0.5)\n self.click('id', 'btnPrev')\n time.sleep(0.5)\n self.borrar('id','DetailProduct_Imei')\n time.sleep(0.5)\n self.borrar('id','DetailProduct_Iccid')\n\n \n def leerData(self, path):\n data = self.browser.find_element_by_xpath(path)\n if data is not None:\n # print(data.text)\n return data.text\n else: return \"none\"\n \n\n\n def click(self, by, str):\n if by == \"xpath\": find = self.browser.find_element_by_xpath(str)\n elif by == \"id\": find = self.browser.find_element_by_id(str)\n elif by == \"name\": find = self.browser.find_element_by_name(str)\n else: find =None\n if find is not None:\n find.click()\n \n def insert(self, by, str, text):\n if by == \"xpath\": find = self.browser.find_element_by_xpath(str)\n elif by == \"id\": find = self.browser.find_element_by_id(str)\n elif by == \"name\": find = self.browser.find_element_by_name(str)\n else: find =None\n if find is not None:\n find.send_keys(text)\n \n def borrar(self, by, str):\n if by == \"xpath\": find = self.browser.find_element_by_xpath(str)\n elif by == \"id\": find = self.browser.find_element_by_id(str)\n elif by == \"name\": find = self.browser.find_element_by_name(str)\n else: find =None\n if find is not None:\n find.clear()\n \n def borrarLetras(self, by, str, cantidad):\n \n if by == \"xpath\": find = self.browser.find_element_by_xpath(str)\n elif by == \"id\": find = self.browser.find_element_by_id(str)\n elif by == \"name\": find = self.browser.find_element_by_name(str)\n else: find =None\n if find is not None:\n for i in range(0,cantidad):\n find.send_keys(Keys.BACKSPACE)\n \n def select(self, by, str, text):\n \n if by == \"xpath\": find = self.browser.find_element_by_xpath(str)\n elif by == \"id\": find = self.browser.find_element_by_id(str)\n elif by == \"name\": find = self.browser.find_element_by_name(str)\n else: find =None\n if find is not None:\n select = Select(find)\n select.select_by_visible_text(text)\n \n\n \n def openEdge(self):\n options = EdgeOptions()\n options.use_chromium = True\n options.add_argument(\"start-maximized\")\n self.browser = Edge(executable_path='msedgedriver.exe', options=options)\n self.browser.get(self.link)\n\n\n def installEdge(self):\n # Obtener la última versión del controlador de Microsoft Edge WebDriver\n response = requests.get('https://msedgewebdriverstorage.blob.core.windows.net/edgewebdriver/LATEST_STABLE')\n latest_version = response.text.strip()\n print(latest_version)\n\n # URL de descarga del controlador\n url = f'https://msedgedriver.azureedge.net/{latest_version}/edgedriver_win64.zip'\n\n # Descargar y extraer el archivo zip del controlador\n response = urllib.request.urlopen(url)\n zipfile.ZipFile(BytesIO(response.read())).extractall(os.getcwd())\n\n # Agregar el controlador al PATH del sistema\n os.environ['PATH'] += os.pathsep + os.getcwd()\n\n\nroot = App ()\nroot.root.mainloop()\n\n# root.abrirPagina()\n# root.activarLineas()\n\n# EC0717A Clave 
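# The continuar()/errorControl() polling loops above re-implement what the
# already imported WebDriverWait provides. An equivalent explicit wait,
# with wait_for as an assumed helper name:
def wait_for(browser, by, locator, timeout=30):
    return WebDriverWait(browser, timeout).until(
        EC.presence_of_element_located((by, locator))
    )
# e.g. wait_for(self.browser, By.ID, 'btnNext') instead of self.continuar('id', 'btnNext')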
Febrer*+17\n","repo_name":"sebastian7584/activaciones-equipos-claro","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":13460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6535956393","text":"import slimmer\nfrom sqlalchemy import Table, or_, text, and_\nfrom turbogears import expose, validate, exception_handler, view\nfrom turbogears.database import metadata, mapper, session\nfrom ttl.tg.controllers import EmbeddedBaseController\nfrom ttl.tg.validators import std_state_factory, RestSchema\nfrom ttl.tg.errorhandlers import pr_std_exception_handler\nfrom prcommon.lib.common import add_config_details\nfrom prcommon.model.contact import Contact\nfrom prcommon.model.crm import ContactHistory\nfrom prcommon.model.employee import Employee\nfrom prcommon.model.touch.utils.utils import Utilities\nfrom ttl.model import PPRApplication\nfrom prcommon import Constants\nfrom ttl.postgres import DBConnect, DBCompress\nfrom prmaxtouch.sitecontrollers.search import SearchController\nfrom prcommon.model import ApiSearch\n\n\nPRMAXTOUCH = PPRApplication(\"Prmaxtouch\", True)\n\nclass SearchContactController(EmbeddedBaseController):\n\t\"\"\" Search controller \"\"\"\n\n\t@expose('text/html')\n\t@exception_handler(pr_std_exception_handler)\n\t@validate(validators=RestSchema(), state_factory=std_state_factory)\n\tdef search(self, *args, **params):\n\t\t\"\"\" return the search criteria page\"\"\"\n\n\t\tdata = add_config_details(params)\n\t\thtml = view.render(data, 'prmaxtouch.templates.contacts.search')\n\n\t\treturn slimmer.xhtml_slimmer(html)\n\n\t@expose('text/html')\n\t@exception_handler(pr_std_exception_handler)\n\t@validate(validators=RestSchema(), state_factory=std_state_factory)\n\tdef results(self, *args, **params):\n\t\t\"\"\" return the search results page \"\"\"\n\t\tlistpage = 1\n\t\tfamilyname = params['familyname'] if 'familyname' in params else ''\n\t\tfirstname = params['firstname'] if 'firstname' in params else ''\n\t\toutletname = params['outletname'] if 'outletname' in params else ''\n\t\t\n\t\tparams['search_type'] = 'quick'\n\t\tparams['mode'] = 1\n\t\tparams['search_partial'] = 2\n\t\tparams['quick_contact_ext'] = u'{\"data\":{\"surname\":\"%s\",\"firstname\":\"%s\"},\"logic\":2}' %(familyname, firstname)\n\t\tparams['quick_searchname'] = outletname\n\t\tparams['searchtypeid'] = 7\n\n\t\tdata = ApiSearch.do_search(params)\n\n\t\tif \"listpage\" in params:\n\t\t\tlistpage = int(params[\"listpage\"])\n\n\t\tdata = add_config_details({\"familyname\":familyname, \"firstname\":firstname, \"outletname\":params['quick_searchname'], \"listpage\":listpage, \"total\": data['results']['total']} )\n\t\thtml = view.render(data, 'prmaxtouch.templates.contacts.results')\n\n\t\treturn slimmer.xhtml_slimmer(html)\n\n\t@expose('text/html')\n\t@exception_handler(pr_std_exception_handler)\n\t@validate(validators=RestSchema(), state_factory=std_state_factory)\n\tdef list(self, *args, **params):\n\t\t\"\"\" return the search results page \"\"\"\n\t\tlistpage = 1\n\t\tfamilyname = params['familyname'] if 'familyname' in params else ''\n\t\tfirstname = params['firstname'] if 'firstname' in params else ''\n\t\toutletname = params['outletname'] if 'outletname' in params else ''\n\t\t\n\t\tparams['search_type'] = 'quick'\n\t\tparams['mode'] = 1\n\t\tparams['search_partial'] = 2\n\t\tparams['quick_contact_ext'] = u'{\"data\":{\"surname\":\"%s\",\"firstname\":\"%s\"},\"logic\":2}' %(familyname, 
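# Interpolating raw user input into a JSON string, as quick_contact_ext is
# built in the results/list handlers here, breaks as soon as a name contains
# a quote. json.dumps builds the same payload safely; this is a sketch reusing
# the handler's own variables:
import json
params['quick_contact_ext'] = json.dumps(
    {"data": {"surname": familyname, "firstname": firstname}, "logic": 2}
)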
firstname)\n\t\tparams['quick_searchname'] = outletname\n\t\tparams['searchtypeid'] = 7\n\t\n\t\tif \"listpage\" in params:\n\t\t\tlistpage = int(params[\"listpage\"])\t\t\t\n\n\t\tparams['offset'] = listpage * 6 - 6\n\t\tdata2 = ApiSearch.results_view(params)\n\n\n\t\titems = {\"listpage\": listpage}\n\t\trow = 1\n\t\tfor num in range(listpage*6-6, len(data2['items'])):\n\t\t\tiemployeeid = data2['items'][num]['employeeid']\n\t\t\tioutletid = data2['items'][num]['outletid']\n\t\t\tijobtitle = data2['items'][num]['job_title']\n\t\t\tioutletname = data2['items'][num]['outletname']\n\t\t\ticolour = \"icon-blue\"\n\t\t\tiicon = \"\"\n\t\t\tifamilyname = data2['items'][num]['contactname']\n\t\t\tifirstname = ''\n\t\t\tirowclass = \"\"\n\t\t\tilocation = \"javascript:window.location = '/enquiries/add/add?&employeeid=%s&outletid=%s'\" % (iemployeeid, ioutletid)\n\n\t\t\tnewitem2 = {\n\t\t\t \"employeeid%s\" %row: iemployeeid,\n\t\t\t \"outletid%s\" %row: ioutletid,\n\t\t\t \"jobtitle%s\" %row: ijobtitle if ijobtitle else \"\",\n\t\t \"familyname%s\" %row:ifamilyname if ifamilyname else \"\",\n\t\t \"firstname%s\" %row: ifirstname if ifirstname else \"\",\n\t\t\t \"outletname%s\" %row: ioutletname if ioutletname else \"\",\n\t\t \"colour%s\" %row: \"icon-blue\", \n\t\t \"rowclass%s\" %row: \"item-row item-row-click\",\n\t\t\t \"icon%s\" %row: \"\",\n\t\t\t \"location%s\" %row: ilocation\n\t\t\t \n\t\t }\n\t\t\t\n\t\t\titems.update(newitem2)\n\t\t\trow += 1\n\t\t\n\t\tdata = add_config_details(items, True, PRMAXTOUCH)\n\t\thtml = view.render(data, 'prmaxtouch.templates.contacts.list')\n\n\t\treturn slimmer.xhtml_slimmer(html)\n\t\n\ndef _fix_db_characters(value):\n\treturn value.replace('\\xe2\\x80\\x99', \"'\").\\\n\t replace('\\xe2\\x80\\x9c', '\"').\\\n\t replace('\\xe2\\x80\\x9d', '\"').\\\n\t replace('\\xe2\\x80\\x9e', '\"').\\\n\t replace('\\xe2\\x80\\x9f', '\"').\\\n\t replace('\\xc3\\xa7', 'c').\\\n\t replace(\"\\xc2\\xa3\", 'poundsymbol')\n\t\t","repo_name":"meanang123/prmax","sub_path":"prmaxtouch/prmaxtouch/sitecontrollers/contacts/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25480617709","text":"import random\n\nfrom model.Position import Position\nfrom model.Var import *\n\nclass GameMap():\n\n def __init__(self, map):\n self.map = map\n\n def on_grid_random(self, snake):\n while True:\n x = random.randint( 0, len( self.map ) ) - 1\n y = random.randint( 0, len( self.map ) ) - 1\n if self.map[x][y] == WALL:\n continue\n for i in range( snake.get_size() ):\n x_snake, y_snake = snake.get_position( i ).get_coordenates()\n if x == x_snake and y == y_snake:\n continue\n break\n return Position( x, y )\n \n def get_map( self, snake, apple ):\n new_map = []\n for i in range( len( self.map ) ):\n new_map.append( [] )\n for j in range( len( self.map[i] ) ):\n new_map[i].append( self.map[i][j] )\n \n x, y = apple.get_position().get_coordenates()\n new_map[x][y] = APPLE\n for i in range( snake.get_size() ):\n x, y = snake.get_position( i ).get_coordenates()\n new_map[x][y] = SNAKE\n return new_map\n \n def its_off_the_map( self, head ):\n return head.x < 0 or head.x >= len( self.map ) or head.y< 0 or head.y >= len( self.map )\n \n def collide_on_wall( self, head ):\n x, y = head.get_coordenates()\n return self.map[ x ][ y ] == WALL\n\n def get_map_size(self):\n return len( self.map 
)","repo_name":"tiagofunk/Smart-Snake","sub_path":"controller/GameMap.py","file_name":"GameMap.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32271452766","text":"from collections import namedtuple\n\nfrom aquilon.config import Config\n\n\nShareInfo = namedtuple('ShareInfo', ['server', 'mount'])\n\n\n# Utility functions for service / resource based disk mounts\n# This should come from some external API...?\ndef cache_storage_data(only=None):\n \"\"\"\n Scan a storeng-style data file, checking each line as we go\n\n Storeng-style data files are blocks of data. Each block starts\n with a comment describing the fields for all subsequent lines. A\n block can start at any time. Fields are separated by '|'.\n This function will invoke the function after parsing every data\n line. The function will be called with a dict of the fields. If the\n function returns True, then we stop scanning the file, else we continue\n on until there is nothing left to parse.\n\n dbshare can be a Share\n \"\"\"\n\n config = Config()\n sharedata = {}\n found_header = False\n header_idx = {}\n with open(config.get(\"broker\", \"sharedata\")) as datafile:\n for line in datafile:\n if line[0] == '#':\n # A header line\n found_header = True\n hdr = line[1:].rstrip().split('|')\n\n header_idx = {}\n for idx, name in enumerate(hdr):\n header_idx[name] = idx\n\n # Silently discard lines that don't have all the required info\n for k in [\"objtype\", \"pshare\", \"server\", \"dg\"]:\n if k not in header_idx:\n found_header = False\n elif not found_header:\n # We haven't found the right header line\n continue\n else:\n fields = line.rstrip().split('|')\n if len(fields) != len(header_idx): # Silently ignore invalid lines\n continue\n if fields[header_idx[\"objtype\"]] != \"pshare\":\n continue\n\n sharedata[fields[header_idx[\"pshare\"]]] = ShareInfo(\n server=fields[header_idx[\"server\"]],\n mount=\"/vol/%s/%s\" % (fields[header_idx[\"dg\"]],\n fields[header_idx[\"pshare\"]])\n )\n\n # Take a shortcut if we need just a single entry\n if only and only == fields[header_idx[\"pshare\"]]:\n break\n\n return sharedata\n\n\ndef find_storage_data(dbshare, cache=None):\n if not cache:\n cache = cache_storage_data(only=dbshare.name)\n if dbshare.name in cache:\n return cache[dbshare.name]\n else:\n return ShareInfo(server=None, mount=None)\n","repo_name":"gombasg/aquilon","sub_path":"lib/python2.6/aquilon/aqdb/data_sync/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"6026500119","text":"#!/usr/bin/python3\n'''\nMain GUI file\n\nVersion 2.3.1\n'''\n\nimport sys\nimport worker\nimport mainWindow\nimport sheetReporter\n\nfrom dependencies import miscFunc\nfrom JSONReader import JSONReader\nfrom PyQt5.QtCore import QThread, Qt\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QDesktopWidget, QRadioButton, QMessageBox\n\n\nclass Form(QMainWindow, mainWindow.Ui_MainWindow):\n '''\n Main Gui Form Class\n '''\n\n def __init__(self):\n if not miscFunc.testRoot():\n print(\"Please rerun this script with root.\")\n sys.exit()\n if not miscFunc.testInternet():\n print(\"Please Verify this machine is connected to the internet.\")\n sys.exit()\n QMainWindow.__init__(self)\n self.setupUi(self)\n self.postSetup()\n sheetReporter.Reporter() # init Google API\n\n #\n # worker thread code from 
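# GameMap.on_grid_random above has two subtle issues: random.randint's upper
# bound is inclusive, so randint(0, len(map)) - 1 can yield -1 (which wraps to
# the last row via negative indexing), and the inner continue only skips one
# snake segment instead of re-rolling the position. A corrected sketch of the
# same idea:
def on_grid_random_fixed(game_map, snake):
    while True:
        x = random.randint(0, len(game_map) - 1)
        y = random.randint(0, len(game_map) - 1)
        if game_map[x][y] == WALL:
            continue
        occupied = any(
            snake.get_position(i).get_coordenates() == (x, y)
            for i in range(snake.get_size())
        )
        if not occupied:
            return Position(x, y)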
http://stackoverflow.com/a/33453124\n #\n self.workerobj = worker.Worker(self) # instatiate worker object\n self.wThread = QThread() # instatiate worker thread\n\n self.workerobj.moveToThread(self.wThread) # move the worker object into the worker thread\n\n #emitters enable inter-thread function calls\n self.workerobj.updateStatus.connect(self.updateStatus) # connect the updateStatus emitter to the updateStatus function\n self.workerobj.Alert.connect(self.Alert) # do the same for alert\n\n self.wThread.started.connect(self.workerobj.USBworker) # on thread started: run the USBworker function\n self.wThread.start() # start the worker thread\n\n #attach button/keyboard key listeners\n self.lineEdit.returnPressed.connect(self.pushButton.click)\n self.pushButton.clicked.connect(self.buttonPushed)\n\n #show the window\n self.show()\n\n\n def postSetup(self):\n '''\n Function to be executed after window creation\n '''\n\n #Comment out this line to give the window a frame (and thus, a close button)\n self.setWindowFlags(Qt.FramelessWindowHint)\n #^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n frameGeo = self.frameGeometry()\n frameGeo.moveCenter(QDesktopWidget().availableGeometry().center())\n self.move(frameGeo.topLeft())\n # attempt to center the window on screen\n\n i = 0\n for radio in self.findChildren(QRadioButton):\n #dynamically populate club radio-button list\n try:\n elem = JSONReader().getClubNameLong(JSONReader().getClubList()[i])\n radio.setText(elem)\n except IndexError:\n radio.hide()\n i += 1\n\n def W_onFinished(self):\n '''\n Worker thread cleanup function\n '''\n self.wThread.quit()\n self.obj.USBworkerfinish()\n\n def buttonPushed(self):\n '''\n Function to execute when the submit button or return key is pushed\n '''\n\n IDnum = self.lineEdit.text()\n if IDnum == \"\":\n self.updateStatus(\"Please Enter an ASU ID\", 3)\n else:\n self.updateStatus(\"Logging...\", 0)\n self.lineEdit.setText(\"\")\n if len(IDnum) == 10 and IDnum.isdigit():\n clubName = self.getSelectedRadio()\n self.setSelectedRadio(clubName)\n clubID = None\n for club in JSONReader().getClubList():\n if JSONReader().getClubNameLong(club) == clubName:\n clubID = club\n self.updateStatus(sheetReporter.Reporter().log(IDnum, clubID), 3)\n else:\n self.updateStatus(\"Please Enter an ASU ID\", 3)\n\n def getSelectedRadio(self):\n '''get Selected Radio Button'''\n for radio in self.findChildren(QRadioButton):\n if radio.isChecked():\n return radio.text()\n\n def setSelectedRadio(self, name):\n '''set Selected Radio Button'''\n for radio in self.findChildren(QRadioButton):\n if name == radio.text():\n radio.setChecked(True)\n\n def Alert(self, mb_type, mb_message):\n '''Opens messageBox\n\n type is 'info', 'warn', or 'crit'\n '''\n if mb_type == 'info':\n # Information\n QMessageBox.information(self, 'Information', mb_message)\n elif mb_type == 'warn':\n # Warning\n QMessageBox.warning(self, 'Warning', mb_message)\n elif mb_type == 'crit':\n # Critical\n QMessageBox.critical(self, 'Severe Error', mb_message)\n else:\n # Error throwing Error. 
Oh God...\n QMessageBox.critical(self, 'Error', 'Error displaying Error.\\nNow is the time to panic.').exec()\n\n def updateStatus(self, message, time):\n '''\n Update the status bar in the window\n\n time is in seconds\n '''\n self.statusBar.showMessage(message, time*1000)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n Form()\n app.exec_()\n sys.exit()\n","repo_name":"RossumRumblers/AccessLog","sub_path":"Main-GUI.py","file_name":"Main-GUI.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18581625889","text":"import socket\nfrom tkinter import *\n\nport = 5504\n\n\n\n\n\n \n\nclass popup(Tk):\n def __init__(self, master = None):\n super().__init__()\n fr =Frame(self)\n lab = Label(fr, text=\"Enter Host IP:\")\n box = Entry(fr)\n ok = Button(fr, text=\"Ok\", command = lambda: self.getip(box.get()))\n fr.pack()\n lab.grid(row = 0, column = 0)\n box.grid(row = 0, column = 1)\n ok.grid(row = 1, column = 1)\n\n def getip(self, entry):\n global host\n host = entry\n global mySocket\n global Message\n message = \"print('Error')\"\n mySocket = socket.socket()\n mySocket.connect((host,port))\n self.destroy()\n\nclass Window(Frame):\n\n def __init__(self, master = None):\n Frame.__init__(self, master)\n\n self.master = master\n\n self.init_window()\n \n \n def init_window(self):\n self.master.title(\"Client\")\n self.pack(fill = BOTH, expand = 1)\n\n sendButton = Button(self, text=\"Send to Board\", width=\"15\", height=\"2\", command=self.sendCommand)\n\n sendButton.place(x=0, y=0)\n \n def sendCommand(self):\n message = open('turtleCode.py', 'r').read()\n mySocket.send(message.encode())\n \n\ndef disable_event():\n pass\n \ndef Main():\n root = Tk()\n #root.resizable(False, False)\n \n root.resizable(False, False)\n root.geometry(\"115x40\")\n app = popup()\n sender = Window(root)\n #Disabled During Testing as its annoying\n #root.protocol(\"WM_DELETE_WINDOW\", disable_event)\n \n root.lift()\n root.call('wm', 'attributes', '.', '-topmost', True)\n #root.after_idle(self.root.call, 'wm', 'attributes', '.', '-topmost', False)\n \n try:\n while True:\n app.update()\n root.update()\n except:\n root.mainloop()\n \nif __name__ == '__main__':\n Main()\n \n\n","repo_name":"vingard/liveturtle-socket","sub_path":"client/client.pyw","file_name":"client.pyw","file_ext":"pyw","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73304750913","text":"import random\r\n\r\n# Shared prime number and base (you can choose different values)\r\nprime = 23\r\nbase = 9\r\n\r\n# Alice and Bob's private keys (randomly chosen)\r\nprivate_key_alice = random.randint(1, prime - 1)\r\nprivate_key_bob = random.randint(1, prime - 1)\r\n\r\n# Function to calculate modular exponentiation (a^b % m)\r\ndef mod_exp(a, b, m):\r\n result = 1\r\n a = a % m\r\n while b > 0:\r\n if b % 2 == 1:\r\n result = (result * a) % m\r\n a = (a * a) % m\r\n b = b // 2\r\n return result\r\n\r\n# Calculate public keys for Alice and Bob\r\npublic_key_alice = mod_exp(base, private_key_alice, prime)\r\npublic_key_bob = mod_exp(base, private_key_bob, prime)\r\n\r\n# Exchange public keys (simulated over a network)\r\n# In a real implementation, these values would be sent to each other securely\r\nshared_secret_alice = mod_exp(public_key_bob, private_key_alice, prime)\r\nshared_secret_bob = mod_exp(public_key_alice, private_key_bob, prime)\r\n\r\n# Both 
Alice and Bob now have the same shared secret\r\nprint(\"Shared Secret (Alice):\", shared_secret_alice)\r\nprint(\"Shared Secret (Bob):\", shared_secret_bob)\r\n","repo_name":"pappu18/c","sub_path":"diffie.py","file_name":"diffie.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34305595803","text":"# key point : set\n# 1 ~ 9\n# 10 ~ 99\n# 100 : 1 0 0 (x)\n# 101 : 1 0 1 (x)\n# 123 : 1 2 3 (O)\n\n\ndef check_arithmetic(N = int):\n str_N = str(N)\n arithmetic_list = [0]*(len(str_N)-1)\n\n for i in range(len(str_N)-1):\n arithmetic_list[i] = int(str_N[i+1]) - int(str_N[i])\n\n set_arithmeetic_list = set(arithmetic_list)\n\n if len(set_arithmeetic_list) <= 1:\n check_cnt = 1\n elif len(set_arithmeetic_list) >= 2:\n check_cnt = 0\n\n return check_cnt\n\n\ndef main():\n N = int(input())\n total = 0\n\n for i in range(N):\n total += check_arithmetic(i+1)\n\n print(total)\n return\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"BillEvans0320/Baekjoon_Algorithms_python3","sub_path":"1065.py","file_name":"1065.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31834475786","text":"from queue import PriorityQueue\nimport turtle\nimport math\n\nturtle.register_shape(\"muh.gif\")\nclass Avatar(turtle.Turtle):\n def __init__(self, target: tuple, speed):\n super().__init__()\n self.shape(\"muh.gif\")\n self._speed = speed\n self.speed(speed)\n self.finish = False\n self.target = target\n self.path = []\n self.now_step = 0\n\n def heuristic(self, coord):\n end_x, end_y = self.target\n x, y = coord\n distances = math.sqrt((end_x - x) ** 2 + (end_y - y) ** 2)\n return distances\n\n def astar(self, start, target):\n list = PriorityQueue()\n path = []\n visited = set()\n list.put((0, start, path))\n\n while not list.empty():\n f_cost, current_pos, path = list.get()\n\n if current_pos == target:\n return path\n\n if current_pos in visited:\n continue\n\n visited.add(current_pos)\n\n for neighbor in self.neighbors(current_pos):\n if neighbor in visited:\n continue\n\n g_cost = self.get_pay(current_pos, neighbor)\n new_path = path + [neighbor]\n h_cost = self.heuristic(neighbor)\n f_cost = g_cost + h_cost\n list.put((f_cost, neighbor, new_path))\n\n return None\n\n def init_path(self):\n self.path = self.astar((0, 0), self.target)[::self._speed]\n\n def neighbors(self, GPS):\n (x, y) = GPS\n res_point = [\n (x + 1, y),\n (x - 1, y),\n (x, y + 1),\n (x, y - 1)]\n return res_point\n\n def get_pay (self, stream, neighbor):\n return 1\n\n def do_step(self):\n if self.now_step == len(self.path):\n self.finish = True\n return\n self.goto(self.path[self.now_step][0], self.path[self.now_step][1])\n self.now_step += 1\n\n return self.heading()","repo_name":"kit8nino/2023-python","sub_path":"ИС34/Монцева Екатерина/[4]/Avatar.py","file_name":"Avatar.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"13908861794","text":"\"\"\"The Polytech vibrometer instrument.\n\nThis module allows interfacing with Polytec vibrometer OFV-5000 controller and\nOFV-505 sensor head. Functions based on Polytec \"RS-232 Interface Commands:\nOFV-5000 User Manual\"\n\n**NOTE** For each polytec controller, different decoders may be installed.\nThese values should be stored in the your PLACE config file. 
(~/.place.cfg)\n\nExample:\nDD-300 is 'DisplDec,0'\nDD-900 is 'DisplDec,1'\nVD-08 is 'VeloDec,0'\nVD-09 is 'VeloDec,1'\n\"\"\"\nimport ast\nimport re\nfrom time import sleep\n\nimport numpy as np\nimport serial\nfrom serial import Serial\n\nfrom place.config import PlaceConfig\nfrom place.plugins.instrument import Instrument\n\n_NUMBER = r'[-+]?\\d*\\.\\d+|\\d+'\n\n\nclass Polytec(Instrument):\n \"\"\"The polytec class\n\n The Polytec module requires the following configuration data (accessible as\n self._config['*key*']):\n\n ========================= ============== ================================================\n Key Type Meaning\n ========================= ============== ================================================\n dd_300 bool flag indicating use of the DD-300\n dd_300_range string the range of DD-300\n dd_900 bool flag indicating use of the DD-900\n dd_900_range string the range of DD-900\n vd_08 bool flag indicating use of the VD-08\n vd_08_range string the range of VD-08\n vd_09 bool flag indicating use of the VD-09\n vd_09_range string the range of VD-09\n autofocus string the type of autofocus span\n area_min int the minimum autofocus range\n area_max int the maximum autofocus range\n autofocus_everytime bool flag indicating if autofocus should be\n performed at every update\n timeout float number of seconds to wait for autofocus\n plot bool turns live plotting on or off\n ========================= ============== ================================================\n\n The Polytec module will produce the following experimental metadata:\n\n ========================= ============== ================================================\n Key Type Meaning\n ========================= ============== ================================================\n actual_area_min int the actual minimum autofocus range used\n actual_area_max int the actual maximum autofocus range used\n vd_08_time_delay float the decoder time delay (if used)\n vd_08_maximum_frequency float the decoder maximum frequency (if used)\n vd_09_time_delay float the decoder time delay (if used)\n vd_09_maximum_frequency float the decoder maximum frequency (if used)\n dd_300_calibration float the decoder calibration (if used)\n dd_300_calibration_units string the decoder units (if used)\n dd_900_calibration float the decoder calibration (if used)\n dd_900_calibration_units string the decoder units (if used)\n vd_08_calibration float the decoder calibration (if used)\n vd_08_calibration_units string the decoder units (if used)\n vd_09_calibration float the decoder calibration (if used)\n vd_09_calibration_units string the decoder units (if used)\n ========================= ============== ================================================\n\n The Polytec will produce the following experimental data:\n\n +---------------+-------------------------+---------------------------+\n | Heading | Type | Meaning |\n +===============+=========================+===========================+\n | signal | uint64 | the signal level recorded |\n | | | from the vibrometer |\n +---------------+-------------------------+---------------------------+\n\n .. note::\n\n PLACE will usually add the instrument class name to the heading. For\n example, ``signal`` will be recorded as ``Polytec-signal`` when using\n the Polytec vibrometer. 
The reason for this is because NumPy will not\n check for duplicate heading names automatically, so prepending the\n class name greatly reduces the likelihood of duplication.\n\n \"\"\"\n\n def __init__(self, config, plotter):\n \"\"\"Constructor\"\"\"\n Instrument.__init__(self, config, plotter)\n self._serial = None\n self._signal = None\n self.min_used = None\n self.max_used = None\n\n def config(self, metadata, total_updates):\n \"\"\"Configure the vibrometer.\n\n :param metadata: experiment metadata\n :type metadata: dict\n\n :param total_updates: number of updates for the experiment\n :type total_updates: int\n \"\"\"\n name = self.__class__.__name__\n self._serial = Serial(\n port=PlaceConfig().get_config_value(name, \"port\"),\n baudrate=PlaceConfig().get_config_value(name, \"baudrate\"),\n timeout=10,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS)\n\n self.controller_name = self._get_controller_name()\n metadata['polytec_controller'] = self.controller_name \n\n if self.controller_name == \"OFV-5000 Vibrometer Controller\":\n self.prefixd = 'dd'\n self.prefixv = 'vd'\n elif self.controller_name == \"OFV-5000Xtra Vibrometer Controller\":\n self.prefixd = 'dx'\n self.prefixv = 'vx'\n\n if self._config['{}_300'.format(self.prefixd)]:\n self._setup_decoder(metadata, '{}_300'.format(self.prefixd))\n\n if self._config['{}_900'.format(self.prefixd)]:\n self._setup_decoder(metadata, '{}_900'.format(self.prefixd))\n\n try:\n if self._config['{}_08'.format(self.prefixv)]:\n self._setup_decoder(metadata, '{}_08'.format(self.prefixv))\n except:\n pass\n\n if self._config['{}_09'.format(self.prefixv)]:\n self._setup_decoder(metadata, '{}_09'.format(self.prefixv))\n\n if self._config['autofocus'] == 'custom':\n curr_set = self._write_and_readline(\n 'GetDevInfo,SensorHead,0,Focus\\n')\n curr_min, curr_max = ast.literal_eval(curr_set)\n self.min_used = max(curr_min, self._config['area_min'])\n self.max_used = min(curr_max, self._config['area_max'])\n metadata['actual_area_min'] = self.min_used\n metadata['actual_area_max'] = self.max_used\n\n def update(self, update_number, progress):\n \"\"\"Update the vibrometer.\n\n :param update_number: the count of the current update (0-indexed)\n :type update_number: int\n\n :param progress: a dictionary of values passed back to your Elm app\n :type progress: dict\n\n :returns: an array containing the signal level\n :rtype: numpy.array dtype='uint64'\n \"\"\"\n if self._config['autofocus'] != 'none':\n if update_number == 0 or self._config['autofocus_everytime'] is True:\n self._autofocus_vibrometer(\n span=self._config['autofocus'],\n timeout=self._config['timeout'])\n signal_level = self._get_signal_level()\n field = '{}-signal'.format(self.__class__.__name__)\n data = np.array([(signal_level,)], dtype=[(field, 'uint64')])\n if self._config['plot']:\n self._draw_plot(signal_level, update_number, progress)\n return data\n\n def cleanup(self, abort=False):\n \"\"\"Free resources and cleanup.\n\n Display the final plot, unless aborted or plotting is disabled.\n\n :param abort: indicates that the experiment is being aborted and is unfinished\n :type abort: bool\n \"\"\"\n if abort is False:\n self._serial.close()\n\n\n# PRIVATE METHODS\n\n def _write(self, message):\n \"\"\"Send a message\n\n :param message: message to be sent to the Polytec receiver\n :type message: str\n \"\"\"\n self._serial.write(message.encode())\n\n def _write_and_readline(self, message):\n \"\"\"Send a message and get a response.\n\n :param 
message: message to be sent to the Polytec receiver\n :type message: str\n\n :returns: the decoded response\n :rtype: str\n \"\"\"\n self._write(message)\n return self._serial.readline().decode('ascii', 'replace')\n\n def _setup_decoder(self, metadata, name):\n \"\"\"Set the range for the decoder and obtain metadata\n\n :param metadata: experiment metadata\n :type metadata: dict\n\n :param name: the name to use for the decoder\n :type name: str\n \"\"\"\n id_ = PlaceConfig().get_config_value(self.__class__.__name__, name)\n self._set_range(id_, self._config[name + '_range'])\n if name == '{}_08'.format(self.prefixv) or name == '{}_09'.format(self.prefixv):\n metadata[name + '_time_delay'] = self._get_delay(id_)\n metadata[name +\n '_maximum_frequency'] = self._get_maximum_frequency(id_)\n calibration, calibration_units = self._get_range(name, id_)\n metadata[name + '_calibration'] = calibration\n metadata[name + '_calibration_units'] = calibration_units\n\n def _autofocus_vibrometer(self, span='Full', timeout=30):\n \"\"\"Autofocus the vibrometer.\n\n :param span: the range in which the vibrometer should look for focus\n :type span: str\n\n :param timeout: the number of seconds to wait for focus before failing\n :type timeout: int\n\n :raises RuntimeError: if focus is not found before timeout\n \"\"\"\n\n if self._config['autofocus'] == 'custom':\n self._write('Set,SensorHead,0,AutoFocusArea,{},{}\\n'.format(\n self.min_used, self.max_used))\n else:\n self._write('Set,SensorHead,0,AutoFocusSpan,'+span+'\\n')\n\n self._write('Set,SensorHead,0,AutoFocus,Search\\n')\n countdown = timeout\n tick = 1\n while countdown > 0:\n sleep(tick)\n countdown -= tick\n if self._write_and_readline('Get,SensorHead,0,AutoFocusResult\\n') == 'Found\\n':\n break\n else:\n print(\"Polytec: Autofocus failed. Continuing.\")\n #raise RuntimeError('autofocus failed')\n\n def _get_delay(self, id_):\n \"\"\"Get time delay.\n\n :param id_: the identification string for the decoder\n :type id_: str\n\n :returns: the delay time\n :rtype: float\n \"\"\"\n delay_string = self._write_and_readline(\n 'Get,' + id_ + ',SignalDelay\\n')\n return float(re.findall(_NUMBER, delay_string)[0])\n\n def _get_controller_name(self):\n \"\"\"Get the name of the controller.\n\n :returns: the name of the controller\n :rtype: string\n \"\"\"\n controller_string = self._write_and_readline(\n 'GetDevInfo,Controller,0,Name\\n')\n name = controller_string[:-1]\n if name != '':\n return name\n else:\n raise RuntimeError('Polytec controller name could not be found. 
This might be a connection problem.')\n\n def _get_maximum_frequency(self, id_):\n \"\"\"Get the maximum frequency.\n\n :param id_: the identification string for the decoder\n :type id_: str\n\n :returns: the frequency value of the selected decoder\n :rtype: float\n\n :raises ValueError: if maximum frequency is not available\n \"\"\"\n frequency_string = self._write_and_readline(\n 'Get,' + id_ + ',MaxFreq\\n')\n if frequency_string == 'Not Available':\n raise ValueError(\n 'maximum frequency for {} not available'.format(id_))\n return _parse_frequency(frequency_string)\n\n def _get_range(self, name, id_):\n \"\"\"Get the current range.\n\n :param name: the name for the decoder\n :type name: str\n\n :param id_: the identification string for the decoder\n :type id_: str\n\n :returns: the range value and units returned from the instrument\n :rtype: float, string\n\n :raises ValueError: if decoder name is not recognized\n \"\"\"\n decoder_range = self._write_and_readline('Get,' + id_ + ',Range\\n')\n if name == '{}_300'.format(self.prefixd):\n range_num = re.findall(_NUMBER, self._config['{}_300_range'.format(self.prefixd)])\n elif name == '{}_900'.format(self.prefixd):\n raw_num = re.findall(_NUMBER, self._config['{}_900_range'.format(self.prefixd)])\n range_num = [string.replace('um', 'µm') for string in raw_num]\n elif name == '{}_08'.format(self.prefixv):\n range_num = re.findall(_NUMBER, self._config['{}_08_range'.format(self.prefixv)])\n elif name == '{}_09'.format(self.prefixv):\n range_num = re.findall(_NUMBER, self._config['{}_09_range'.format(self.prefixv)])\n else:\n raise ValueError('unknown decoder: ' + name)\n del_num_r = len(range_num)+1\n calib = float(range_num[0])\n calib_unit = decoder_range[del_num_r:].lstrip()\n return calib, calib_unit\n\n def _set_range(self, id_, range_):\n \"\"\"Set the range.\n\n :param id_: the identification string for the decoder\n :type id_: str\n\n :param range_: the desired decoder range\n :type range_: str\n \"\"\"\n self._write('Set,' + id_ + ',Range,' + range_ + '\\n')\n\n def _get_signal_level(self):\n return int(self._write_and_readline('Get,SignalLevel,0,Value\\n'))\n\n def _draw_plot(self, signal_level, update_number, progress):\n if update_number == 0:\n self._signal = [signal_level]\n else:\n self._signal.append(signal_level)\n title = 'Signal level at each PLACE update'\n self.plotter.view(\n title,\n [\n self.plotter.line(\n self._signal,\n color='purple',\n shape='cross',\n label='signal'\n )\n ]\n )\n # TODO: add axis labels when PLACE supports it\n # plt.xlabel('trace')\n # plt.ylabel('signal level')\n\nclass OFV5000(Polytec):\n \"\"\"Subclass for OFV5000\"\"\"\n pass\n\n\nclass OFV5000X(Polytec):\n \"\"\"Subclass for OFV5000X\"\"\"\n pass\n\n\ndef _parse_frequency(frequency_string):\n \"\"\"Calculate a frequency from a string.\n\n Takes a frequency string and parses it to a float value.\n\n .. 
doctest::\n\n >>> _parse_frequency('20MHz')\n 20000000.0\n >>> _parse_frequency('20 MHz')\n 20000000.0\n >>> _parse_frequency('5kHz')\n 5000.0\n >>> _parse_frequency('16.6mhz')\n 16600000.000000002\n >>> _parse_frequency('16.6 mhz')\n 16600000.000000002\n\n :param frequency_string: string to be parsed\n :type frequency_string: str\n\n :returns: the frequency value\n :rtype: float\n\n :raises ValueError: if frequency units are not recognized\n \"\"\"\n re_match = re.match(\n r'([-+]?\\d*\\.\\d+|\\d+)\\s?([kmg]?Hz)',\n frequency_string,\n flags=re.IGNORECASE # pylint: disable=no-member\n )\n if re_match is None:\n raise ValueError(\n 'could not parse frequency string: ' + frequency_string)\n else:\n num_str, unit_str = re_match.groups()\n if unit_str.lower() == 'hz':\n return float(num_str)\n elif unit_str.lower() == 'khz':\n return float(num_str) * 10**3\n elif unit_str.lower() == 'mhz':\n return float(num_str) * 10**6\n elif unit_str.lower() == 'ghz':\n return float(num_str) * 10**9\n else:\n raise ValueError('could not match units of frequency: ' + unit_str)\n","repo_name":"PALab/place","sub_path":"place/plugins/polytec/polytec.py","file_name":"polytec.py","file_ext":"py","file_size_in_byte":16254,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"61"} +{"seq_id":"44354889667","text":"import asyncio\nimport logging\nimport os\nimport shutil\nfrom typing import Any, Callable, Literal, Optional\nfrom urllib.parse import urlencode, urlparse, urlunparse\n\nimport aiohttp\n\n__logger__ = logging.getLogger()\n\nfrom clio import Log\n\n\nclass HttpException(Exception):\n def __init__(self, message: str, cause: Optional[Exception] = None):\n super().__init__(message)\n self.cause = cause\n\n def __str__(self):\n if self.cause is None:\n return super().__str__()\n return f\"{super().__str__()}, caused by {self.cause}\"\n\n\nclass RawResponse:\n def __init__(self, headers, body):\n self.headers = headers\n self.body = body\n\n\ndef default_valid_status(status_code: int) -> bool:\n return 200 == status_code\n\n\nasync def http_invoke(\n url: str,\n method: Literal[\"GET\", \"POST\", \"DELETE\", \"PUT\", \"HEAD\", \"OPTIONS\"] = \"GET\",\n headers: Optional[dict[str, str]] = None,\n query: Optional[dict[str, Any]] = None,\n json: Any = None,\n data: Any = None,\n files: Optional[dict[str, Any]] = None,\n response_type: Literal[\"json\", \"text\", \"bytes\"] = \"json\",\n timeout: int = 10,\n proxy: str = \"\",\n validate_status: Optional[Callable[[int], bool]] = default_valid_status,\n verbose: bool = True,\n) -> RawResponse:\n try:\n async with aiohttp.ClientSession() as session:\n request_url_str = url\n try:\n request_url = urlparse(url)\n except Exception as e:\n raise HttpException(f\"url解析出错:{url}\", e)\n\n if query is not None:\n try:\n query_string = urlencode(query)\n new_url_parts = request_url._replace(query=query_string)\n request_url_str = urlunparse(new_url_parts)\n except Exception as e:\n raise HttpException(f\"query参数错误: {query}\", e)\n\n if verbose:\n request_log = [f\"方法: {method}\", f\"URL: {request_url_str}\"]\n if data is not None:\n request_log.append(f\"data: {data}\")\n if json is not None:\n request_log.append(f\"json: {json}\")\n if files is not None and len(files) > 0:\n request_log.append(f\"文件: {files.keys()}\")\n if headers is not None:\n request_log.append(f\"请求头: {headers}\")\n __logger__.debug(f\"http 调用, {', '.join(request_log)}\")\n\n if data is not None and json is not None:\n raise ValueError(\n \"data and json 
parameters can not be used at the same time\"\n )\n try:\n async with session.request(\n method,\n request_url_str,\n json=json,\n data=data,\n headers=headers,\n timeout=timeout,\n proxy=proxy,\n ) as response:\n response_headers = response.headers\n status_code = response.status\n valid_status = validate_status or default_valid_status\n if not valid_status(status_code):\n raise HttpException(f\"响应状态码为 {response.status}\")\n\n if response_type == \"json\":\n resp = await response.json()\n elif response_type == \"text\":\n resp = await response.text()\n elif response_type == \"bytes\":\n resp = await response.read()\n else:\n raise HttpException(f\"不支持的响应类型{response_type}\")\n return RawResponse(response_headers, resp)\n\n except Exception as e:\n raise HttpException(f\"请求 URL[{url}] 错误\", e)\n except Exception as e:\n if verbose:\n __logger__.error(f\"http 调用 {request_url_str} 失败, {e}\")\n raise e\n\n\nasync def download_file(\n url: str, save_path, delete_if_exists: bool = False, verbose: bool = True\n):\n if not url:\n raise HttpException(\"download url is empty\")\n\n if not save_path:\n raise HttpException(\"save path is empty\")\n\n exists = os.path.exists(save_path)\n is_file = os.path.isfile(save_path)\n if not delete_if_exists and exists and is_file:\n if verbose:\n Log.info(f\"download url[{url}] to path[{save_path}], file exists, skip\")\n return\n\n if exists:\n try:\n if is_file:\n os.remove(save_path)\n else:\n shutil.rmtree(save_path)\n except Exception as e:\n raise HttpException(f\"delete file[{save_path}] error\", e)\n\n await asyncio.sleep(1)\n\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as resp:\n with open(save_path, \"wb\") as fd:\n while True:\n chunk = await resp.content.read(1024)\n if not chunk:\n break\n fd.write(chunk)\n fd.flush()\n except Exception as e:\n raise HttpException(f\"download url[{url}] to path[{save_path}] error\", e)\n","repo_name":"ytongshang/clio","sub_path":"clio/web/http/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19491313881","text":"import numpy as np\nimport tensorflow as tf\nimport argparse\n\nfrom tensorflow.python.framework import graph_util\nfrom helper import *\n\n\nmodel_name = '3dense'\nimage_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nFLAGS = None\n\ndef forward_propagation(X):\n\n dense1 = tf.layers.dense(inputs=X, units=500, kernel_initializer=tf.random_normal_initializer,\n bias_initializer=tf.zeros_initializer(),\n activation=tf.nn.relu, name='dense1')\n\n dense2 = tf.layers.dense(inputs=dense1, units=784, kernel_initializer=tf.random_normal_initializer,\n bias_initializer=tf.zeros_initializer(),\n activation=tf.nn.relu,\n name='dense2')\n\n dense3 = tf.layers.dense(inputs=dense2, units=10, kernel_initializer=tf.random_normal_initializer,\n bias_initializer=tf.zeros_initializer(),\n #activation=tf.nn.relu,\n name='dense3')\n\n '''\n dense4 = tf.layers.dense(inputs=dense3, units=10, kernel_initializer=tf.random_normal_initializer,\n bias_initializer=tf.zeros_initializer(),\n name='dense4')\n '''\n return dense3\n\ndef compute_cost(scores, y):\n logits = scores\n labels = y\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,\n labels=labels))\n return cost\n\ndef get_minibatch(X, y, mb_size):\n '''\n perm = np.random.permutation(X.shape[0])\n return X[perm][:mb_size], one_hot(y[perm][:mb_size], 10, 1)\n 
'''\n\n m = X.shape[0]\n mini_batches = []\n\n # Shuffling the data\n permutation = list(np.random.permutation(m))\n shuf_x = X[permutation, :]\n shuf_y = y[permutation,]\n\n # Partition\n total_mb = np.floor(m / mb_size).astype(np.int32)\n\n for k in range(total_mb):\n mb_x = shuf_x[k * mb_size : (k+1) * mb_size, :]\n mb_y = shuf_y[k * mb_size : (k+1) * mb_size,]\n mini_batches.append((mb_x, mb_y))\n\n if m % mb_size != 0:\n mb_x = shuf_x[(total_mb * mb_size) :, :]\n mb_y = shuf_y[(total_mb * mb_size) :,]\n mini_batches.append((mb_x, mb_y))\n\n return mini_batches\n\ndef save_graph_to_file(sess, graph, graph_file_name=FLAGS.output_graph):\n output_graph_def = graph_util.convert_variables_to_constants(sess,\n graph.as_graph_def(), [FLAGS.final_tensor_name])\n with gFile.FastGFile(graph_file_name, 'wb'):\n f.write(output_graph_def.SerializeToString())\n return\n\ndef model(X_train, y_train, X_val, y_val, learning_rate=0.001, epochs=1500,\n mb_size=200):\n\n y_train_oh = one_hot(y_train, 10, 1)\n y_val_oh = one_hot(y_val, 10, 1)\n X_val = reshape_data(X_val)\n\n m, n_x = X_train.shape\n n_y = y_train_oh.shape[1]\n\n X, y = create_placeholder_variables(n_x, n_y)\n\n scores = forward_propagation(X)\n\n cost = compute_cost(scores, y)\n sum_cost = tf.summary.scalar('cost', cost)\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)\n\n init = tf.global_variables_initializer()\n\n prediction = tf.argmax(scores, 1, name='prediction')\n\n acc_vec = tf.cast(tf.equal(tf.cast(y_val, tf.int64), prediction), dtype=tf.int32,\n name='acc_vec1')\n acc = tf.divide(tf.reduce_sum(acc_vec), tf.shape(X)[0], name='accuracy1')\n accuracy = tf.summary.scalar('accuracy', acc)\n\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter('./graphs/' + model_name + '/')\n writer.add_graph(sess.graph)\n\n sess.run(init)\n\n step = 1\n for i in range(epochs):\n mini_batches = get_minibatch(X_train, y_train, mb_size)\n\n j = 1\n\n for mb in mini_batches:\n mb_x, mb_y = mb\n\n mb_y = one_hot(mb_y, 10, 1)\n\n feed_dict = {X : mb_x,\n y : mb_y}\n\n _, n_cost = sess.run([optimizer, cost], feed_dict=feed_dict)\n\n print (n_cost, j)\n j += 1\n step += 1\n\n if j % 10 == 0:\n s = sess.run(merged, feed_dict={X:X_val, y:y_val_oh})\n writer.add_summary(s, step)\n\n\n '''\n save_path = saver.save(sess, './var_dir/model.ckpt')\n print ('Model saved in path: %s' % save_path)\n '''\n writer.close()\n\n return\n\ndef run_model_with_data():\n path = './mnist.npz'\n X_train, y_train, X_test, y_test = load_data(path)\n\n # Just first 5000 examples for faster training\n '''\n X_train = X_train[:2000]\n y_train = y_train[:2000]\n X_test = X_test[:2000]\n y_test = y_test[:2000]\n X_val = X_test[:100]\n y_val = X_test[100:200]\n '''\n X_val = X_test[:5000]\n y_val = y_test[:5000]\n\n X_test = X_test[5000:]\n y_test = y_test[5000:]\n\n X_train = reshape_data(X_train)\n model(X_train, y_train, X_val, y_val, mb_size=256, epochs=10)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_name',\n type=str,\n default='mnist_neural_net',\n help='Name of the model you are building. 
Note: This name will be used\\\n to create directory when visualizing model summary with Tensorboard.')\n\n parser.add_argument('--output_graph',\n type=str,\n default='./output_graph.pb',\n help='Where to save the model')\n\n parser.add_argument('--output_labels',\n type=str,\n default='output_labels.txt',\n help='Where to save the labels for the graph')\n\n parser.add_argument('--tb_dir',\n type=str,\n default='./tf_summaries/',\n help='Where to save the summaries for TensorBoard')\n\n parser.add_argument('--epochs',\n type=int,\n default=500,\n help='How many epochs to run')\n\n parser.add_argument('--learning_rate',\n type=float,\n default=0.01,\n help='The learning rate to use while training the model')\n\n parser.add_argument('--data_dir',\n type=str,\n default='./mnist/',\n help='The folder where the MNIST data is located. Do note the program \\\n assumes that there is a file \"mnist.npz\" in the directory mentioned with\\\n this argument.\\n The required dataset can be downloaded from here: \\\n https://www.kaggle.com/vikramtiwari/mnist-numpy')\n\n FLAGS, unparsed = parser.parse_known_args()\n run_model_with_data()\n","repo_name":"iArunava/Handwritten-Digits-Recognizer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70010450434","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\nimport pandas as pd\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom urllib.parse import quote_plus\n\ndef crawler(company_code, maxpage):\n \n page = 1 \n df_result = []\n while page <= int(maxpage): \n \n url = 'https://finance.naver.com/item/news_news.nhn?code=' + str(company_code) + '&page=' + str(page) \n source_code = requests.get(url).text\n html = BeautifulSoup(source_code, \"lxml\")\n \n # 뉴스 제목 \n titles = html.select('.title')\n title_result=[]\n for title in titles: \n title = title.get_text() \n title = re.sub('\\n','',title)\n title_result.append(title)\n \n \n # 뉴스 링크\n links = html.select('.title') \n \n link_result =[]\n news = []\n for link in links: \n add = 'https://finance.naver.com' + link.find('a')['href']\n link_result.append(add)\n news.append(add)\n \n # 컨텐츠\n content_result = []\n for i in news:\n try:\n res = requests.get(i)\n content = res.text\n soup = BeautifulSoup(content, 'html.parser')\n body = soup.find(id='news_read')\n contents = body.text.strip()\n content_result.append(contents)\n except AttributeError as e:\n print('내용없음')\n continue\n # 뉴스 날짜 \n dates = html.select('.date') \n date_result = [date.get_text() for date in dates] \n \n \n # 뉴스 매체 \n sources = html.select('.info')\n source_result = [source.get_text() for source in sources] \n \n \n # 변수들 합쳐서 해당 디렉토리에 csv파일로 저장하기 \n \n result= {\"날짜\" : date_result, \"언론사\" : source_result, \"기사제목\" : title_result, \"기사내용\" : content_result, \"링크\" : link_result} \n df_result = pd.DataFrame(result)\n print(df_result)\n print(\"다운 받고 있습니다------\")\n df_result.to_csv('page' + str(page) + '.csv', mode='w', encoding='utf-8-sig')\n page += 1\n \n \n \n \n\ndef main():\n info_main = input(\"=\"*50+\"\\n\"+\"실시간 뉴스기사 다운받기.\"+\"\\n\"+\" 시작하시려면 Enter를 눌러주세요.\"+\"\\n\"+\"=\"*50)\n \n company_code = 
input(\"종목이나 이름이나 코드 입력 : \")\n\n url = 'https://finance.naver.com/item/news_news.nhn?code='+str(company_code)\n driver = webdriver.Chrome('./chromedriver')\n driver.get(url)\n mydata = driver.find_element_by_class_name('type5')\n mydata = driver.page_source\n\n html = BeautifulSoup(mydata, 'lxml')\n navi = html.find(\"table\", class_=\"Nnavi\")\n navi_last = navi.find(\"td\", class_=\"pgRR\")\n pag = navi_last.a.get('href').rsplit('&')[1]\n pg_last = pag.split('=')[1]\n pg_last = int(pg_last)\n print(\"총 \" + str(pg_last) + \" 개 확인\")\n \n maxpage = input(\"총 페이지 수 : \")\n crawler(company_code, maxpage)\n\nmain() ","repo_name":"kimmjen/bit_project","sub_path":"Kimmjen/final_project/final_crawl.py","file_name":"final_crawl.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12551460887","text":"import pymysql\n \n# 打开数据库连接\ndb = pymysql.connect(\"localhost\",\"root\",\"123456\",\"TESTDB\" )\n\n# 使用 cursor() 方法创建一个游标对象 cursor\ncursor = db.cursor()\n \n# 使用 execute() 方法执行 SQL 查询 \n# cursor.execute(\"SELECT VERSION()\")\n# cursor.execute(\"DROP TABLE IF EXISTS EMPLOYEE\")\n# sql=\"\"\"CREATE TABLE EMPLOYEE(\n# FIRST_NAME CHAR(20) NOT NULL,\n# LAST_NAME CHAR(20),\n# AGE INT,\n# SEX CHAR(1),\n# INCOME FLOAT\n# )\n# # \"\"\"\n# sql=\"\"\"CREATE TABLE DEPART(\n# ID INT NOT NULL,\n# DEP_NAME CHAR(20),\n# MEMBER INT\n# )\n# # \"\"\"\n# SQL 插入语句\n# sql = \"INSERT INTO EMPLOYEE(FIRST_NAME,LAST_NAME,AGE,SEX,INCOME) \\\n# VALUES('C','Chou',20,'M',12000)\"\nsql = \"INSERT INTO DEPART(DEP_NAME,MEMBER) \\\n VALUES('Sale',20)\"\n# 用了\"\"\"可以不用加\\ 来做换行\n# sql = \"\"\"INSERT INTO EMPLOYEE(FIRST_NAME,\n# LAST_NAME, AGE, SEX, INCOME)\n# VALUES ('Mac', 'Mohan', 20, 'M', 2000)\"\"\"\n # SQL 插入语句 方便替换变量\n# sql = \"INSERT INTO EMPLOYEE(FIRST_NAME, \\\n# LAST_NAME, AGE, SEX, INCOME) \\\n# VALUES ('%s', '%s', '%d', '%c', '%d' )\" % \\\n# ('Mac', 'Mohan', 20, 'M', 2000)\n# SQL 更新语句\n# sql = \"UPDATE EMPLOYEE SET AGE = AGE +1 WHERE SEX = '%s'\" %('M')\n# SQL 删除语句\n# sql=\"DELETE FROM DEPART WHERE MEMBER = '%d'\" %(20)\ntry:\n # 执行sql语句\n cursor.execute(sql)\n # 提交到数据库执行\n db.commit()\n# 使用 fetchone() 方法获取单条数据.\n# data = cursor.fetchone()\nexcept:\n # 如果发生错误则回滚\n # db.rollback()\n print(\"error\")\n# print (\"Database version : %s \" % data)\n \n# 关闭数据库连接\ndb.close()\n","repo_name":"chouchou723/About-Python","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6224952172","text":"from PyQt5.Qt import *\n\n\n# 信号\nclass MyASB(QAbstractSpinBox):\n def __init__(self, parent=None, num =\"0\", *args, **kwargs):\n super().__init__(parent, *args, **kwargs)\n self.lineEdit().setText(num)\n\n def stepEnabled(self):\n # 0 -- 9\n # current_num = int(self.text())\n # if current_num == 0:\n # return QAbstractSpinBox.StepUpEnabled\n # elif current_num == 9:\n # return QAbstractSpinBox.StepDownEnabled\n # elif current_num < 0 or current_num > 9:\n # return QAbstractSpinBox.StepNone\n # else:\n return QAbstractSpinBox.StepUpEnabled | QAbstractSpinBox.StepDownEnabled\n\n\n def stepBy(self, p_int):\n current_num = int(self.text()) + p_int\n self.lineEdit().setText(str(current_num))\n\n def validate(self, p_str, p_int):\n # 18 - 180\n num = int(p_str)\n if num < 18:\n return (QValidator.Intermediate, p_str, p_int)\n elif num <= 180:\n return (QValidator.Acceptable, p_str, p_int)\n else:\n return 
(QValidator.Invalid, p_str, p_int)\n\n\n\n\n def fixup(self, p_str):\n print(p_str)\n return \"18\"\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"QAbstractSpinBox的学习\")\n self.resize(500, 500)\n self.setup_ui()\n\n def setup_ui(self):\n asb = MyASB(self, \"6\")\n self.asb = asb\n asb.resize(100, 30)\n asb.move(100, 100)\n\n self.asb.editingFinished.connect(lambda :print(\"结束编辑\"))\n\n test_btn = QPushButton(self)\n test_btn.move(200, 200)\n test_btn.setText(\"测试按钮\")\n test_btn.clicked.connect(self.btn_test)\n\n def btn_test(self):\n self.asb.setButtonSymbols(QAbstractSpinBox.NoButtons)\n\nif __name__ == '__main__':\n import sys\n app = QApplication(sys.argv)\n\n window = Window()\n window.show()\n\n\n sys.exit(app.exec_())","repo_name":"ywkangkai/PythonGUI","sub_path":"GUI/步长调节器/案例/案例5.py","file_name":"案例5.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29418705015","text":"import io\nimport itertools\nimport tkinter as tk\nfrom typing import List\n\nimport cairosvg\nfrom PIL import Image, ImageTk\n\nfrom bigtwo.bigtwo import BigTwo, BigTwoHand\nfrom gamerunner.ppo_bot import PPOAction\nfrom gamerunner.ppo_runner import SinglePlayerWrapper, build_bot\nfrom playingcards.card import Card, Rank, Suit\n\nsuit_image_mapping = {\n Suit.spades: \"S\",\n Suit.hearts: \"H\",\n Suit.clubs: \"C\",\n Suit.diamond: \"D\",\n}\n\n\nclass CardImages:\n CARD_BACK = \"BACK\"\n\n def __init__(self):\n image_width = 70\n self.card_images = {}\n for r, s in itertools.product(Rank, Suit):\n suit_str = suit_image_mapping[s]\n key = r.value + suit_str\n path = f\"cards/{key}.svg\"\n data = cairosvg.svg2png(url=path, output_width=image_width)\n img = Image.open(io.BytesIO(data))\n self.card_images[key] = ImageTk.PhotoImage(img)\n\n image_data = cairosvg.svg2png(\n url=\"cards/BLUE_BACK.svg\", output_width=image_width\n )\n image = Image.open(io.BytesIO(image_data))\n\n self.card_images[self.CARD_BACK] = ImageTk.PhotoImage(image)\n\n def get_card_image(self, card: Card) -> ImageTk.PhotoImage:\n suit_str = suit_image_mapping[card.suit]\n key = card.rank.value + suit_str\n return self.card_images[key]\n\n def get_card_back(self) -> ImageTk.PhotoImage:\n return self.card_images[self.CARD_BACK]\n\n\nclass OpponentCardFrame:\n def __init__(\n self, parent, x: int, y: int, card_images: CardImages, hand: BigTwoHand\n ):\n self.card_frame = tk.Frame(master=parent, relief=tk.RAISED, borderwidth=1)\n self.card_frame.grid(row=x, column=y)\n\n self.card_images = card_images\n\n self.update_cards(hand)\n\n def update_cards(self, hand: BigTwoHand, clear=False, display_card=False):\n if clear:\n for widget in self.card_frame.winfo_children():\n widget.destroy()\n\n num_of_cards = len(hand)\n\n self.card_frame.columnconfigure(num_of_cards, weight=1)\n self.card_frame.rowconfigure([0, 1], weight=1)\n\n last_played_label = tk.Label(self.card_frame, text=f\"{num_of_cards}\")\n last_played_label.grid(row=1, column=num_of_cards // 2)\n\n columnspan = 2 if num_of_cards > 8 else 1\n for i, card in enumerate(hand):\n card_img = (\n self.card_images.get_card_image(card)\n if display_card\n else self.card_images.get_card_back()\n )\n card_label = tk.Label(self.card_frame, image=card_img)\n card_label.grid(row=0, column=i, columnspan=columnspan)\n\n\nclass PlayedCardFrame:\n def __init__(\n self,\n parent,\n x: int,\n y: int,\n last_player_played: int,\n last_cards_played: List[Card],\n 
card_images: CardImages,\n ):\n self.num_cards = 5\n self.card_frame = tk.Frame(master=parent, relief=tk.RAISED, borderwidth=1)\n self.card_frame.grid(row=x, column=y)\n\n self.card_frame.columnconfigure(self.num_cards, weight=1)\n self.card_frame.rowconfigure([0, 1], weight=1)\n\n self.card_images = card_images\n self.last_played_mapping = {0: \"YOU\", 1: \"LEFT\", 2: \"TOP\", 3: \"RIGHT\"}\n\n self.update_cards(last_player_played, last_cards_played)\n\n def update_cards(\n self, last_player_played: int, last_cards_played: List[Card], clear=False\n ):\n if clear:\n for widget in self.card_frame.winfo_children():\n widget.destroy()\n\n label_txt = self.last_played_mapping.get(last_player_played, \"\")\n\n last_played_label = tk.Label(self.card_frame, text=label_txt)\n last_played_label.grid(row=1, column=2)\n\n for i, card in enumerate(last_cards_played):\n cards = tk.Label(\n self.card_frame, image=self.card_images.get_card_image(card)\n )\n cards.grid(row=0, column=i)\n\n for i in range(len(last_cards_played), self.num_cards):\n cards = tk.Label(self.card_frame, image=self.card_images.get_card_back())\n cards.grid(row=0, column=i)\n\n\ndef build_opponent_bots(paths: List[str]) -> List[any]:\n \"\"\"\n expects each dir_path to be in the format of:\n ../gamerunner/experiments/2021_07_13_21_31_32/bot_save/2021_07_13_21_31_32_19999\n\n :param paths:\n :return:\n \"\"\"\n return [build_bot(dir_path) for dir_path in paths]\n\n\nclass BigTwoClient:\n CARD_BACK = \"BACK\"\n\n def __init__(self):\n game = BigTwo()\n\n self.single_player_number = 0\n\n # 3 bots\n opponent_bots = build_opponent_bots(\n [\n \"../gamerunner/experiments/2021_07_13_21_31_32/bot_save/2021_07_13_21_31_32_19999\"\n for _ in range(3)\n ]\n )\n\n self.wrapped_env = SinglePlayerWrapper(\n env=game,\n opponent_bots=opponent_bots,\n single_player_number=self.single_player_number,\n )\n\n self.wrapped_env.reset_and_start()\n\n self.main = tk.Tk()\n self.main.columnconfigure([0, 1, 2], weight=1, minsize=250)\n self.main.rowconfigure([0, 1, 2], weight=1, minsize=250)\n\n self.display_opponent_cards = tk.BooleanVar(value=False)\n\n self.card_images = CardImages()\n\n self.left_player_frame = OpponentCardFrame(\n self.main,\n 1,\n 0,\n self.card_images,\n self.wrapped_env.get_left_opponent_hand(),\n )\n self.top_player_frame = OpponentCardFrame(\n self.main,\n 0,\n 1,\n self.card_images,\n self.wrapped_env.get_top_opponent_hand(),\n )\n self.right_player_frame = OpponentCardFrame(\n self.main,\n 1,\n 2,\n self.card_images,\n self.wrapped_env.get_right_opponent_hand(),\n )\n\n self.selected_idx: List[tk.IntVar] = []\n\n self.card_played_frame = PlayedCardFrame(\n self.main,\n 1,\n 1,\n self.wrapped_env.env.get_player_last_played(),\n self.wrapped_env.env.get_last_cards_played(),\n self.card_images,\n )\n\n self.player_card_frame = tk.Frame(\n master=self.main, relief=tk.RAISED, borderwidth=1\n )\n self.player_card_frame.grid(row=2, column=1)\n\n self._update_play_hand(\n self.player_card_frame,\n self.wrapped_env.env.player_hands[self.single_player_number],\n )\n\n self._add_control_frame(2, 2)\n\n self.main.geometry(\"1920x1080\")\n self.main.resizable(width=0, height=0)\n\n def _update_game(self):\n self._update_opponent_cards()\n\n self.card_played_frame.update_cards(\n self.wrapped_env.env.get_player_last_played(),\n self.wrapped_env.env.get_last_cards_played(),\n clear=True,\n )\n\n self._update_play_hand(\n self.player_card_frame, self.wrapped_env.env.player_hands[0], clear=True\n )\n\n def _reset_game(self):\n 
self.wrapped_env.reset_and_start()\n\n self._update_game()\n\n def _play_selected_idx(self):\n raw_action = [1 if v.get() >= 0 else 0 for v in self.selected_idx]\n\n if len(raw_action) < 13:\n raw_action += (13 - len(raw_action)) * [0]\n\n action = PPOAction(\n transformed_obs=None,\n raw_action=raw_action,\n action_cat=None,\n action_mask=None,\n logp=None,\n cards=None,\n )\n\n _, rewards, done = self.wrapped_env.step(action)\n\n self._update_game()\n\n def _add_control_frame(self, x: int, y: int):\n control_frame = tk.Frame(master=self.main, relief=tk.RAISED, borderwidth=1)\n control_frame.grid(row=x, column=y)\n control_frame.columnconfigure([0, 1], weight=1)\n control_frame.rowconfigure([0, 1, 2], weight=1)\n\n play_button = tk.Button(\n master=control_frame,\n command=self._play_selected_idx,\n text=\"Play Cards\",\n width=10,\n height=5,\n )\n play_button.grid(row=0, column=0)\n\n reset_button = tk.Button(\n master=control_frame,\n command=self._reset_game,\n text=\"Reset\",\n width=10,\n height=5,\n )\n reset_button.grid(row=0, column=1)\n\n sort_by_rank_button = tk.Button(\n master=control_frame,\n text=\"Sort By Rank\",\n width=10,\n height=5,\n )\n sort_by_rank_button.grid(row=1, column=0)\n\n sort_by_suit_button = tk.Button(\n master=control_frame,\n text=\"Sort By Suit\",\n width=10,\n height=5,\n )\n sort_by_suit_button.grid(row=1, column=1)\n\n display_opponents_card = tk.Checkbutton(\n master=control_frame,\n command=self._update_opponent_cards,\n variable=self.display_opponent_cards,\n text=\"Display opponent cards\",\n onvalue=True,\n offvalue=False,\n )\n display_opponents_card.grid(row=2, column=0)\n\n def _update_opponent_cards(self):\n self.left_player_frame.update_cards(\n self.wrapped_env.get_left_opponent_hand(),\n clear=True,\n display_card=self.display_opponent_cards.get(),\n )\n self.top_player_frame.update_cards(\n self.wrapped_env.get_top_opponent_hand(),\n clear=True,\n display_card=self.display_opponent_cards.get(),\n )\n self.right_player_frame.update_cards(\n self.wrapped_env.get_right_opponent_hand(),\n clear=True,\n display_card=self.display_opponent_cards.get(),\n )\n\n def _update_play_hand(self, frame, hand: BigTwoHand, clear=False):\n if clear:\n for widget in frame.winfo_children():\n widget.destroy()\n\n frame.columnconfigure(len(hand), weight=1)\n frame.rowconfigure([0, 1], weight=1)\n self.selected_idx = []\n for i, card in enumerate(hand):\n cards = tk.Label(frame, image=self.card_images.get_card_image(card))\n cards.grid(row=0, column=i)\n\n idx = tk.IntVar(value=-1)\n check = tk.Checkbutton(frame, variable=idx, onvalue=i, offvalue=-1)\n check.grid(row=1, column=i)\n\n self.selected_idx.append(idx)\n\n def run(self):\n self.main.mainloop()\n\n\ndef main():\n client = BigTwoClient()\n client.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Wal8800/card-games","sub_path":"bigtwo_client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":10635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27616232986","text":"# 680. 
Valid Palindrome II\n# Easy\n# Two Pointers, String, Greedy \n# https://leetcode.com/problems/valid-palindrome-ii\n#\n# Return true if the s can be palindrome after deleting at most one character from it.\n\nclass Solution:\n # Time: O(n) | Space: O(n)\n def validPalindrome(self, s: str) -> bool:\n left, right = 0, len(s) - 1\n \n while left < right:\n if s[left] == s[right]:\n left += 1\n right -= 1\n else:\n return (\n s[left:right] == s[left:right][::-1]\n or s[left+1:right+1] == s[left+1:right+1][::-1]\n ) \n return True","repo_name":"daviscvance/Practice","sub_path":"Leetcode/Python/strings/easy/680-valid-palindrome-ii.py","file_name":"680-valid-palindrome-ii.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4234586073","text":"import re\n\nfrom scraper.base_scrapper import (\n SitemapSpider,\n SiteMapScrapper\n)\n\n\nclass RSTForumsSpider(SitemapSpider):\n name = \"rstforums_spider\"\n\n # Url stuffs\n base_url = \"https://rstforums.com/forum/\"\n\n # Xpath stuffs\n forum_xpath = \"//h4[contains(@class,'ipsDataItem_title')]/a/@href|\" \\\n \"//ul[contains(@class,'ipsDataItem_subList')]/li[@class]/a/@href\"\n pagination_xpath = \"//li[@class='ipsPagination_next']/a/@href\"\n\n thread_xpath = \"//li[contains(@class,'ipsDataItem') and @data-rowid]\"\n thread_first_page_xpath = \".//span[contains(@class,'ipsType_break')]/a/@href\"\n thread_last_page_xpath = \".//span[contains(@class,'ipsPagination_mini')]/span[last()]/a/@href\"\n thread_date_xpath = \".//li[@class='ipsType_light']/a/time/@datetime\"\n thread_pagination_xpath = \"//li[@class='ipsPagination_prev']/a/@href\"\n thread_page_xpath = \"//li[contains(@class,'ipsPagination_active')]/a/text()\"\n post_date_xpath = \"//div[contains(@class,'ipsType_reset')]/a/time/@datetime\"\n post_datetime_format = \"%Y-%m-%dT%H:%M:%SZ\"\n avatar_xpath = \"//li[@class='cAuthorPane_photo']/a/img/@src\"\n\n # Regex stuffs\n avatar_name_pattern = re.compile(\n r\".*/(\\S+\\.\\w+)\",\n re.IGNORECASE\n )\n\n topic_pattern = re.compile(\n r\"forum/topic/(\\d+)-\",\n re.IGNORECASE\n )\n\n use_proxy = \"On\"\n\n def parse_thread(self, response):\n\n # Parse generic thread\n yield from super().parse_thread(response)\n\n # Parse generic avatar\n yield from super().parse_avatars(response)\n\n\nclass RSTForumsScrapper(SiteMapScrapper):\n spider_class = RSTForumsSpider\n site_type = 'forum'\n","repo_name":"ken2190/Enterprise-Forum-Scraper","sub_path":"scraper/rstforums.py","file_name":"rstforums.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24550813769","text":"import os\nimport random\nimport syft as sy\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom torch import ( # pylint:disable=no-name-in-module\n manual_seed,\n stack,\n cat,\n std_mean,\n save,\n is_tensor,\n from_numpy,\n randperm,\n default_generator,\n)\nfrom torch._utils import _accumulate\nimport albumentations as a\nfrom copy import deepcopy\nfrom torch.utils import data as torchdata\nfrom torchvision.datasets import MNIST\nfrom torchvision import transforms\nfrom torchvision.datasets.folder import default_loader\n\nfrom os.path import splitext\nfrom typing import Dict, Union, Set, Callable\n\nfrom pathlib import Path\nfrom .dicomtools import DicomLoader\n\n\nclass AlbumentationsTorchTransform:\n def __init__(self, transform, **kwargs):\n # 
print(\"init albu transform wrapper\")\n self.transform = transform\n self.kwargs = kwargs\n\n def __call__(self, img):\n # print(\"call albu transform wrapper\")\n if Image.isImageType(img):\n img = np.array(img)\n elif is_tensor(img):\n img = img.cpu().numpy()\n img = self.transform(image=img, **self.kwargs)[\"image\"]\n # if img.max() > 1:\n # img = a.augmentations.functional.to_float(img, max_value=255)\n img = from_numpy(img)\n if img.shape[-1] < img.shape[0]:\n img = img.permute(2, 0, 1)\n return img\n\n\nclass CombinedLoader:\n \"\"\"Class that combines several data loaders and their extensions.\n\n Args: \n mapping (Dict): Dictionary that maps loader names to tuples \n consisting of (corresponding extensions, loader method)\n \"\"\"\n\n def __init__(\n self,\n mapping: Dict[str, Dict[str, Union[Set[str], Callable]]] = {\n \"default\": {\n \"extensions\": {\n \".jpg\",\n \".jpeg\",\n \".png\",\n \".ppm\",\n \".bmp\",\n \".pgm\",\n \".tif\",\n \".tiff\",\n \".webp\",\n },\n \"loader\": default_loader,\n },\n \"dicom\": {\"extensions\": {\".dcm\", \".dicom\"}, \"loader\": DicomLoader(3)},\n },\n ):\n self.extensions = set()\n self.mapping = mapping\n self.ext_to_loader_name = dict()\n for loader_name, defining_dict in mapping.items():\n self.extensions |= defining_dict[\"extensions\"]\n for ext in defining_dict[\"extensions\"]:\n if ext in self.ext_to_loader_name:\n raise RuntimeError(\n \"Extension {:s} was passed for multiple loaders\".format(ext)\n )\n self.ext_to_loader_name[ext] = loader_name\n\n def __call__(self, path: Path, **kwargs):\n \"\"\"Apply loader to path\n\n Args:\n path (Path): path to file.\n kwargs: kwargs passed to load methods\n\n Returns:\n Image: a PIL image of the given path\n\n Raises:\n RuntimeError: If loader for path extension not specified.\n \"\"\"\n file_ending = splitext(path)[1].lower()\n if file_ending in self.extensions:\n return self.mapping[self.ext_to_loader_name[file_ending]][\"loader\"](\n path, **kwargs\n )\n else:\n raise RuntimeError(\n \"file extension does not match specified supported extensions. \"\n \"Please provide the matching loader for the {:s} extension.\".format(\n file_ending\n )\n )\n\n def change_channels(self, num_channels: int):\n \"\"\"Change the number of channels that are loaded (Default: 3)\n\n Args:\n num_channels (int): Number of channels. 
Currently only 1 and 3 supported\n\n Raises:\n RuntimeError: if num_channels is not 1 or 3\n \"\"\"\n if num_channels not in [1, 3]:\n raise RuntimeError(\"Only 1 or 3 channels supported yet.\")\n self.mapping[\"default\"][\"loader\"] = (\n single_channel_loader if num_channels == 1 else default_loader\n )\n self.mapping[\"dicom\"][\"loader\"] = DicomLoader(num_channels)\n\n\ndef create_albu_transform(args, mean, std):\n train_tf = transforms.RandomAffine(\n degrees=args.rotation,\n translate=(args.translate, args.translate),\n scale=(1.0 - args.scale, 1.0 + args.scale),\n shear=args.shear,\n # fillcolor=0,\n )\n start_transformations = [\n a.Resize(args.inference_resolution, args.inference_resolution),\n a.RandomCrop(args.train_resolution, args.train_resolution),\n ]\n if args.clahe:\n start_transformations.extend(\n [\n a.FromFloat(dtype=\"uint8\", max_value=1.0),\n a.CLAHE(always_apply=True, clip_limit=(1, 1)),\n ]\n )\n train_tf_albu = [\n a.VerticalFlip(p=args.individual_albu_probs),\n ]\n if args.randomgamma:\n train_tf_albu.append(a.RandomGamma(p=args.individual_albu_probs))\n if args.randombrightness:\n train_tf_albu.append(a.RandomBrightness(p=args.individual_albu_probs))\n if args.blur:\n train_tf_albu.append(a.Blur(p=args.individual_albu_probs))\n if args.elastic:\n train_tf_albu.append(a.ElasticTransform(p=args.individual_albu_probs))\n if args.optical_distortion:\n train_tf_albu.append(a.OpticalDistortion(p=args.individual_albu_probs))\n if args.grid_distortion:\n train_tf_albu.append(a.GridDistortion(p=args.individual_albu_probs))\n if args.grid_shuffle:\n train_tf_albu.append(a.RandomGridShuffle(p=args.individual_albu_probs))\n if args.hsv:\n train_tf_albu.append(a.HueSaturationValue(p=args.individual_albu_probs))\n if args.invert:\n train_tf_albu.append(a.InvertImg(p=args.individual_albu_probs))\n if args.cutout:\n train_tf_albu.append(\n a.Cutout(\n num_holes=5, max_h_size=80, max_w_size=80, p=args.individual_albu_probs\n )\n )\n if args.shadow:\n assert args.pretrained, \"RandomShadows needs 3 channels\"\n train_tf_albu.append(a.RandomShadow(p=args.individual_albu_probs))\n if args.fog:\n assert args.pretrained, \"RandomFog needs 3 channels\"\n train_tf_albu.append(a.RandomFog(p=args.individual_albu_probs))\n if args.sun_flare:\n assert args.pretrained, \"RandomSunFlare needs 3 channels\"\n train_tf_albu.append(a.RandomSunFlare(p=args.individual_albu_probs))\n if args.solarize:\n train_tf_albu.append(a.Solarize(p=args.individual_albu_probs))\n if args.equalize:\n train_tf_albu.append(a.Equalize(p=args.individual_albu_probs))\n if args.grid_dropout:\n train_tf_albu.append(a.GridDropout(p=args.individual_albu_probs))\n train_tf_albu.append(a.GaussNoise(var_limit=args.noise_std ** 2, p=args.noise_prob))\n end_transformations = [\n a.ToFloat(max_value=255.0),\n a.Normalize(mean, std, max_pixel_value=1.0),\n ]\n if not args.pretrained:\n end_transformations.append(\n a.Lambda(image=lambda x, **kwargs: x[:, :, np.newaxis])\n )\n train_tf_albu = AlbumentationsTorchTransform(\n a.Compose(\n [\n a.Compose(start_transformations),\n a.Compose(train_tf_albu, p=args.albu_prob),\n a.Compose(end_transformations),\n ]\n )\n )\n return transforms.Compose([train_tf, train_tf_albu,])\n\n\ndef calc_mean_std(dataset, save_folder=None):\n \"\"\"\n Calculates the mean and standard deviation of `dataset` and\n saves them to `save_folder`.\n\n Needs a dataset where all images have the same size\n \"\"\"\n accumulated_data = []\n for d in tqdm(\n dataset, total=len(dataset), leave=False, 
desc=\"accumulate data in dataset\"\n    ):\n        if type(d) is tuple or type(d) is list:\n            d = d[0]\n        accumulated_data.append(d)\n    if isinstance(dataset, torchdata.Dataset):\n        accumulated_data = stack(accumulated_data)\n    elif isinstance(dataset, torchdata.DataLoader):\n        accumulated_data = cat(accumulated_data)\n    else:\n        raise NotImplementedError(\"don't know how to process this data input class\")\n    if accumulated_data.shape[1] in [1, 3]:  # ugly hack\n        dims = (0, *range(2, len(accumulated_data.shape)))\n    else:\n        dims = (*range(len(accumulated_data.shape)),)\n    std, mean = std_mean(accumulated_data, dim=dims)\n    if save_folder:\n        save(stack([mean, std]), os.path.join(save_folder, \"mean_std.pt\"))\n    return mean, std\n\n\ndef single_channel_loader(filename):\n    \"\"\"Converts `filename` to a grayscale PIL Image\n    \"\"\"\n    with open(filename, \"rb\") as f:\n        img = Image.open(f).convert(\"L\")\n        return img.copy()\n\n\nclass LabelMNIST(MNIST):\n    def __init__(self, labels, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        indices = np.isin(self.targets, labels).astype(\"bool\")\n        self.data = self.data[indices]\n        self.targets = self.targets[indices]\n\n\nclass PathDataset(torchdata.Dataset):\n    def __init__(\n        self,\n        root,\n        transform=None,\n        loader=CombinedLoader(),\n        extensions=[\n            \".jpg\",\n            \".jpeg\",\n            \".png\",\n            \".ppm\",\n            \".bmp\",\n            \".pgm\",\n            \".tif\",\n            \".tiff\",\n            \".webp\",\n            \".dcm\",\n            \".dicom\",\n        ],\n    ):\n        super(PathDataset, self).__init__()\n        self.root = root\n        self.transform = transform\n        self.loader = loader\n        self.imgs = [\n            f\n            for f in os.listdir(root)\n            if os.path.splitext(f)[1].lower() in extensions\n            and not os.path.split(f)[1].lower().startswith(\"._\")\n        ]\n\n    def __len__(self):\n        return len(self.imgs)\n\n    def __getitem__(self, idx):\n        img_path = self.imgs[idx]\n        img = self.loader(os.path.join(self.root, img_path))\n        if self.transform:\n            img = self.transform(img)\n        return img\n\n\nclass RemoteTensorDataset(torchdata.Dataset):\n    def __init__(self, tensor):\n        self.tensor = tensor\n\n    def __len__(self):\n        return self.tensor.shape[0]\n\n    def __getitem__(self, idx):\n        return self.tensor[idx].copy()\n\n\nclass ImageFolderFromCSV(torchdata.Dataset):\n    def __init__(\n        self, csv_path, img_folder_path, transform=None, target_transform=None\n    ):\n        super().__init__()\n        self.transform = transform\n        self.target_transform = target_transform\n        self.img_folder_path = img_folder_path\n        self.img_files = [\n            i for i in os.listdir(img_folder_path) if not i.startswith(\".\")\n        ]\n\n        metastats = pd.read_csv(csv_path)\n\n        metastats[\"class_label\"] = metastats.apply(\n            ImageFolderFromCSV.__meta_to_class__, axis=1\n        )\n        self.categorize_dict = dict(\n            zip(metastats.X_ray_image_name, metastats.class_label)\n        )\n        for img in list(self.img_files):  # iterate over a copy: removing from the list while looping over it would skip entries\n            assert (\n                img in self.categorize_dict.keys()\n            ), \"img label not known {:s}\".format(str(img))\n            if self.categorize_dict[img] == -1:\n                self.img_files.remove(img)\n                print(\"Ignore image {:s} because category is uncertain\".format(img))\n\n    @staticmethod\n    def __meta_to_class__(row):\n        if row[\"Label\"] == \"Normal\":\n            return 0\n        if row[\"Label\"] == \"Pnemonia\":  # i know this is a typo but was in original csv\n            if row[\"Label_1_Virus_category\"] == \"bacteria\":\n                return 1\n            if row[\"Label_1_Virus_category\"] == \"Virus\":\n                return 2\n        return -1\n\n    def __getitem__(self, i):\n        img_path = self.img_files[i]\n        label = self.categorize_dict[img_path]\n        img = single_channel_loader(os.path.join(self.img_folder_path, img_path))\n        if self.transform:\n            img = 
self.transform(img)\n if self.target_transform:\n label = self.target_transform(label)\n return img, label\n\n def __len__(self):\n return len(self.img_files)\n\n\nclass PPPP(torchdata.Dataset):\n def __init__(\n self, label_path=\"data/Labels.csv\", train=False, transform=None, seed=1,\n ):\n super().__init__()\n random.seed(seed)\n manual_seed(seed)\n self.train = train\n self.labels = pd.read_csv(label_path)\n self.labels = self.labels[\n self.labels[\"Dataset_type\"] == (\"TRAIN\" if train else \"TEST\")\n ]\n self.transform = transform\n \"\"\"\n Split into train and validation set\n if self.train:\n indices = [\n i\n for i in range(len(self.labels))\n if ((i % self.val_split) != 0 and self.val)\n or (not self.val and (i % self.val_split) == 0)\n ]\n self.labels = self.labels.drop(index=indices)\n \"\"\"\n\n def __len__(self):\n return len(self.labels)\n\n def __getitem__(self, index):\n row = self.labels.iloc[index]\n label = row[\"Numeric_Label\"]\n path = \"train\" if self.train else \"test\"\n path = os.path.join(\"data\", path, row[\"X_ray_image_name\"])\n img = single_channel_loader(path)\n if self.transform:\n img = self.transform(img)\n return img, label\n\n # def get_class_name(self, numeric_label):\n # return self.class_names[numeric_label]\n\n \"\"\"\n Works only if not torch.utils.torchdata.random_split is applied\n \"\"\"\n\n def get_class_occurances(self):\n return dict(self.labels[\"Numeric_Label\"].value_counts())\n\n def __compute_mean_std__(self):\n\n calc_mean_std(\n self, save_folder=\"data\",\n )\n\n\n##This is from torch.data.utils and adapted for our purposes\nclass Subset(torchdata.Dataset):\n def __init__(self, dataset, indices):\n self.dataset = deepcopy(dataset)\n self.indices = indices\n\n def __getitem__(self, idx):\n return self.dataset[self.indices[idx]]\n\n def __len__(self):\n return len(self.indices)\n\n\ndef random_split(dataset, lengths, generator=default_generator):\n if sum(lengths) != len(dataset):\n raise ValueError(\n \"Sum of input lengths does not equal the length of the input dataset!\"\n )\n\n indices = randperm(sum(lengths), generator=generator).tolist()\n return [\n Subset(dataset, indices[offset - length : offset])\n for offset, length in zip(_accumulate(lengths), lengths)\n ]\n\n\nif __name__ == \"__main__\":\n # import matplotlib.pyplot as plt\n import sys\n from tqdm import tqdm\n import numpy as np\n\n sys.path.append(\n os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))\n )\n from torchlib.utils import AddGaussianNoise\n\n ds = PPPP(train=True, transform=transforms.ToTensor())\n print(\"Class distribution\")\n print(ds.get_class_occurances())\n\n sizes = []\n\n for data, _ in tqdm(ds, total=len(ds), leave=False):\n sizes.append(data.size()[1:])\n sizes = np.array(sizes)\n print(\n \"data resolution stats: \\n\\tmin: {:s}\\n\\tmax: {:s}\\n\\tmean: {:s}\\n\\tmedian: {:s}\".format(\n str(np.min(sizes, axis=0)),\n str(np.max(sizes, axis=0)),\n str(np.mean(sizes, axis=0)),\n str(np.median(sizes, axis=0)),\n )\n )\n\n ds = PPPP(train=False)\n\n L = len(ds)\n print(\"length test set: {:d}\".format(L))\n img, label = ds[1]\n img.show()\n\n tf = transforms.Compose(\n [transforms.Resize(224), transforms.CenterCrop(224), transforms.ToTensor(),]\n )\n ds = PPPP(train=True, transform=tf)\n\n ds.__compute_mean_std__()\n L = len(ds)\n print(\"length train set: {:d}\".format(L))\n\n from matplotlib import pyplot as plt\n\n ds = PPPP()\n hist = ds.labels.hist(bins=3, column=\"Numeric_Label\")\n 
plt.show()\n","repo_name":"gkaissis/PriMIA","sub_path":"torchlib/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":15988,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"61"} +{"seq_id":"2467578880","text":"from os.path import join as pjoin\n\nimport numpy as np\n\nimport joblib\n\ndef feature_builder_item_gnn_feat(keys, pretrain_feature_info):\n feat_fname = pjoin(pretrain_feature_info[\"dir_name\"], \"item_gnn_feat\")\n idmap, feat = joblib.load(feat_fname)\n\n \n feat_list = []\n mean_vector = feat.mean(axis=0)\n\n for key in keys:\n feat_list.append(feat[idmap[key]])\n\n feat_list.extend([mean_vector, mean_vector]) # for pad, clz token\n return np.stack(feat_list)","repo_name":"kakao/kakao-recoteam-recsys-2022-challenge","sub_path":"BERT4Rec/bert4rec/data/custom_dataset/recsys2022.py","file_name":"recsys2022.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"73842232834","text":"import pdb\nimport re\n\ndef add(numbers):\n '''Add the numbers in comma delimited string numbers\n\n Params:\n numbers: comma delimited string\n\n Returns sum of all numbers\n '''\n # delimiter declaration pattern\n delimiter_declaration = re.compile(r'//\\[?(?P.*?)\\]?\\n(?P.*)')\n # match object for delimiter declaration\n match_declaration = re.match(delimiter_declaration, numbers)\n # if it is a match declaration get the numbers string and delimiters\n if match_declaration:\n numbers = match_declaration.groupdict()['numbers_string']\n delimiters = match_declaration.groupdict()['delimiter']\n # multiple delimiters pattern\n multiple_delimiters_pattern = re.compile(r'\\]\\[')\n delimiters = re.split(multiple_delimiters_pattern, delimiters)\n else:\n # set of delimiters\n delimiters = set(['\\n', ','])\n # compiled regex to split using multiple delimiters\n delimiters_regex = re.compile('|'.join(map(re.escape, delimiters)))\n numbers = re.split(delimiters_regex, numbers)\n # convert to int after checking non-negative\n def non_neg_to_int(number):\n # handle empty string\n if len(number)==0:\n return 0\n number = int(number)\n if number < 0:\n raise Exception('negatives not allowed')\n else:\n return number\n # filter numbers greater than 1000\n def filter_numbers_gt_1000(numbers):\n numbers_int = map(non_neg_to_int, numbers)\n numbers_int = filter(lambda x: x<1000, numbers_int)\n return numbers_int\n numbers_int = filter_numbers_gt_1000(numbers)\n return sum(numbers_int)\n","repo_name":"starkhv/tdd_kata","sub_path":"day1/string_calculator.py","file_name":"string_calculator.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35181486640","text":"import json\nimport os\nimport threading\n\nfrom CTRegisterMicroserviceFlask.errors import NotFound\nfrom flask import jsonify\nfrom requests import post, Session, Request\n\nAUTOREGISTER_MODE = 'AUTOREGISTER_MODE'\nNORMAL_MODE = 'NORMAL_MODE'\n\nCT_URL = os.getenv('CT_URL')\nCT_TOKEN = os.getenv('CT_TOKEN')\nAPI_VERSION = os.getenv('API_VERSION')\n\n\ndef ct_register(name, ct_url, url, active):\n \"\"\"Autoregister method\"\"\"\n payload = {'name': name, 'url': url, 'active': active}\n\n try:\n r = post(ct_url + '/api/v1/microservice', json=payload, timeout=10)\n except Exception as error:\n os._exit(1)\n\n if r.status_code >= 400:\n os._exit(1)\n\n\ndef register(app, name, info, swagger, 
mode, ct_url=False, url=False, active=True, delay=5.0):\n \"\"\"Register method\"\"\"\n if mode == AUTOREGISTER_MODE:\n t = threading.Timer(delay, ct_register, [name, ct_url, url, active])\n t.start()\n\n @app.route('/info')\n def get_info():\n info['swagger'] = swagger\n return jsonify(info)\n\n @app.route('/ping')\n def get_ping():\n return 'pong'\n\n\ndef request_to_microservice(config):\n \"\"\"Request to microservice method\"\"\"\n try:\n session = Session()\n request = Request(\n method=config.get('method'),\n url=CT_URL + config.get('uri') if config.get(\n 'ignore_version') or not API_VERSION else CT_URL + '/' + API_VERSION + config.get('uri'),\n headers={\n 'content-type': 'application/json',\n 'Authorization': 'Bearer ' + CT_TOKEN,\n 'APP_KEY': config.get('application', 'rw')\n },\n data=json.dumps(config.get('body'))\n )\n prepped = session.prepare_request(request)\n\n response = session.send(prepped)\n except Exception as error:\n raise error\n\n try:\n return response.json()\n except Exception:\n raise NotFound(response.text)\n","repo_name":"Skydipper/ct-register-microservice-python-flask","sub_path":"CTRegisterMicroserviceFlask/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6993980010","text":"import psutil\nimport fcntl\n\n# Get the process of code-server\ncode_server = None\nfor proc in psutil.process_iter():\n if proc.name() == \"code-server\":\n code_server = proc\n break\n\n# Check if code-server is running\nif code_server is None:\n print(\"Error: code-server is not running.\")\n exit(1)\n\n# Get a list of the filepaths of the files open in code-server\nopen_files = code_server.open_files()\nfilepaths = []\nfor file_desc in open_files:\n filepath = fcntl.fcntl(file_desc.fd, fcntl.F_GETPATH)\n filepaths.append(filepath)\n\n# Save the filepaths to a file\nwith open(\"open_files.txt\", \"w\") as f:\n for filepath in filepaths:\n f.write(filepath + \"\\n\")\n\nprint(\"Filepaths of open files in code-server saved to open_files.txt.\")\n\n","repo_name":"enoki-inc/enoki-scripts","sub_path":"save_codeserver.py","file_name":"save_codeserver.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23371881041","text":"def google (engines, queries):\n switch = 0\n while 1:\n max = 0 \n for item in engines:\n try:\n indi = queries.index(item)\n except ValueError:\n return switch\n if indi > max:\n max = indi\n switch = switch + 1\n del queries[0: max]\n \n\nf = open(\"abc.in\")\nw = open(\"output.ot\", 'w')\nN = int (f.readline().strip())\nfor i in range(N):\n S = int (f.readline().strip())\n engines = []\n queries = []\n for j in range(S):\n engine = f.readline().strip()\n engines.append(engine)\n Q = int (f.readline().strip())\n for j in range(Q):\n query = f.readline().strip()\n queries.append(query)\n \n result = \"Case #%d: %d\" % ((i +1), google (engines, queries)) + \"\\n\"\n w.write(result)\nf.close()\nw.close()\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_1/331.py","file_name":"331.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30612389828","text":"import GeoIP\nfrom unidecode import unidecode\nfrom Annotations import Annotate\nimport potiron\n\n\nclass AnnotateGeo(Annotate):\n\n def 
__init__(self):\n self.mfields = [\"ipsrc\", \"ipdst\", \"packet_id\", \"timestamp\",\n \"sensorname\", \"filename\"]\n # Open the geoip database\n self.database = \"/usr/share/GeoIP/GeoIPCity.dat\"\n\n self.help = \\\n\"\"\"potiron-json-geo.py [-h] [-r filename] [-d directory] [-k]\n [-c config] [-i index]\n\n -h Shows this screen\n -d directory Specify the directory where the files should be stored\n -k Sent log data also to console and not only to syslog\n -c Filename of the configuration file\n -i Put annotation data directly in the index instead of writing json\n files\n\nINPUT FILES\n\nThis program reads json documents as input. The source IP addresses and\ndestination IP addresses are annotated with a Geo lookup of each IP address.\n\nThe following fields are required in the input json files\n\nKEY VALUE\n\nipsrc Source IP address in dotted decimal notation\nipdst Destination IP address in dotted decimal notation\n\n\nOUTPUT\n\nThe following fields are added to the json document\n\nKEY VALUE\n\nsipcountry Country of the source IP\nsipcity City of the source IP\ndipcountry Country of the Destination IP address\ndipcity City of the Destination IP address\n\"\"\"\n try:\n self.gi = GeoIP.open(self.database, GeoIP.GEOIP_STANDARD)\n except Exception as e:\n potiron.errormsg(\"Failed to initialize GeoIP module. Cause={}\".format(e))\n self.gi = None\n\n # Function to annoate the data\n def annoate_doc(self, doc):\n if self.gi is None:\n return doc\n try:\n g = self.gi.record_by_addr(doc[\"ipdst\"])\n if g is not None:\n if g[\"city\"] is not None and type(g[\"city\"]) is str:\n doc[\"dipcity\"] = unidecode(g[\"city\"])\n if g[\"country_name\"] is not None and type(g[\"country_name\"]) is str:\n doc[\"dipcountry\"] = unidecode(g[\"country_name\"])\n\n g = self.gi.record_by_addr(doc[\"ipsrc\"])\n if g is not None:\n if g[\"city\"] is not None and type(g[\"city\"]) is str:\n doc[\"sipcity\"] = unidecode(g[\"city\"])\n if g[\"country_name\"] is not None and type(g[\"country_name\"]) is str:\n doc[\"sipcountry\"] = unidecode(g[\"country_name\"])\n doc['state'] = doc['state'] | potiron.STATE_GEO_AN\n except Exception as e:\n potiron.errormsg(\"Geoip annotation failed. Cause={}\".format(e))\n return doc\n\nif __name__ == \"__main__\":\n obj = AnnotateGeo()\n obj.handle_cli()\n","repo_name":"CIRCL/potiron","sub_path":"potiron/PotironAnGeo.py","file_name":"PotironAnGeo.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"61"} +{"seq_id":"6302004468","text":"# first welcome user for playing this game\nprint(\"********** Welcome to the Tip Calculator ***********\")\n# idea is to\n# 1. take total amount to pay as bill , input from user : total_bill\ntotal_bill = float(input(\"What was the total bill : Rs. \"))\n\n# 2. how many people are there with you to whom you can split the bill : total_people\ntotal_people = int(input(\"How many people to split the bill? \"))\n\n# 3. what percentage of tip do you want to give to better. ( 10 / 12 or 15 % )\npercentage_tip = float(input(\"What percentage of tip would you like to give? \"))\n\n# now total tip will be\ntip = (total_bill * percentage_tip) / 100\n\n# total amount to pay\ntotal_amount = total_bill + tip;\n\n# each person amount to pay would be\neach_person_bill = total_amount/total_people;\n\nprint(f\"Each person should pay: Rs. 
{each_person_bill}\")\n","repo_name":"TechnicalAmanjeet/100DaysOfPython","sub_path":"Day02/tip-calculator.py","file_name":"tip-calculator.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4900288511","text":"import numpy as np\r\nfrom data_process import Dataset\r\n\r\ndata = Dataset(\"pos_tags.txt\", \"pos_sentences.txt\", train_test_split=0.8, seed=0)\r\n\r\ntrain_data = data.words\r\n#obs_dict = {i: j for i, j in zip(data.train_data, range(len(data.train_data)))}\r\nnp_array = np.array(data.train_data)\r\nprint(data.tags)\r\nprint(np_array)","repo_name":"XinjieSun/Machine-Learning","sub_path":"HMM/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22557935339","text":"class Character:\n def __init__(self):\n (\n self.PY_MAN,\n self.PELLET,\n self.POWER_PELLET,\n self.GHOST,\n self.BLINKY,\n self.PINKY,\n self.INKY,\n self.CLYDE,\n self.FRUIT,\n ) = (0, 1, 2, 3, 4, 5, 6, 7, 8)\n","repo_name":"MichaelJohnson144/pygame-projects","sub_path":"py_man/utils/enums/display/character/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"28587403682","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\ns = input().rstrip()\r\nt = input().rstrip()\r\n\r\nwhile len(s) < len(t) :\r\n if t[-1] == 'A' and len(t) > len(s):\r\n t = t[:-1]\r\n\r\n if t[-1] == 'B' and len(t) > len(s):\r\n t = t[:-1]\r\n t = t[::-1]\r\n\r\nif s == t :\r\n print(1)\r\nelse :\r\n print(0)","repo_name":"rmffpaps98/CodingTest","sub_path":"백준/Gold/12904. 
A와 B/A와 B.py","file_name":"A와 B.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18754773115","text":"# !/usr/bin/python\n# -*- coding: UTF-8 -*-\n# -*- coding: utf-8 -*-\n\n\n####################################################################################################\n# we declare 3 arrays to store the leading digits of the numbers on each network                 # #\n####################################################################################################\n\norange = ['07', 59, 57, 47, 78, 49, 48, 67, 68, 97, \"08\", \"09\", 97, 77, 78, 89, 58, 7, 87, 88, 69, 79, 98]\nmoov = ['02', '01', 51, 50, 71, 72, 41, 72, 73, \"03\", 52, 53, 43, 42, 40, 70]\nmtn = [55, 56, '05', 65, '04', 45, 46, 44, 84, '06', 85, 74, 75, 76, 64, 65, 66, 54, 86, 94, 95, 96]\n\n\n\nrec = \"\"\n\ndef open_file():\n\n    chaine = []  # keep the return value defined even when reading fails\n    try:\n        with open(\"contacts.vcf\", \"r\") as source_file:\n\n            \"\"\"\n            Open the main file for reading.\n            We collect the numbers into an array.\n            Note: the file must be located in the same directory as the script\n            \"\"\"\n\n            rec = source_file.read()\n            chaine = rec.split(\"\\\\n\")\n\n\n\n\n    except FileNotFoundError:\n        print(\"WARNING: YOUR FILE MUST BE IN THE SAME DIRECTORY AS THE PROGRAM AND MUST HAVE THE (.vcf) EXTENSION\")\n\n    return chaine\n\ndef controle():\n    filtre = \"\"\n    aigain5 = \"\"\n    chaine = open_file()\n    for i in chaine:\n        if 'TEL' in i:\n            conser = i.replace(\"TEL;CELL:\", \"\")\n            brob = conser.replace(\"TEL;HOME:\", \"\")\n            new = brob.replace(\"TEL;TYPE=CELL:\", \"\")\n            recnew = new.replace(\"TEL;TYPE=HOME:\", \"\")\n            recnews = recnew.replace(\"item1.TEL:\", \"\")\n            aigain = recnews.replace(\"TEL;CELL;PREF:\", \"\")\n            aigain2 = aigain.replace(\"TEL;TYPE=WORK:\", \"\")\n            aigain3 = aigain2.replace(\"TEL;X-Portable:\", \"\")\n            aigain4 = aigain3.replace(\"TEL:\", \"\")\n            aigain5 = aigain4.replace(\"+225\", \"\")\n            filtre = aigain5.strip(\" \")\n            a = filtre[:2]\n\n            for x in orange:\n                if a == str(x):\n                    filtre = \"07\"+filtre\n                    break  # a prefix must be added at most once\n\n            for x in moov:\n                if a == str(x):\n                    filtre = \"01\"+filtre\n                    break\n\n            for x in mtn:\n                if a == str(x):\n                    filtre = \"05\"+filtre\n                    break\n        if i:\n            d = i.replace(aigain5, filtre)\n            print(d)\n\ncontrole()\n\n###########################################################################################\n# the file output can be captured from the console with e.g.  > NomFic.csv              # #\n###########################################################################################\n\n","repo_name":"sneezy-5/convertisor","sub_path":"conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32981957025","text":"import serial\n\n# ser = serial.Serial('/dev/tty.usbserial-142410', 256000, timeout=1)\nser = serial.Serial('/dev/tty.usbmodem1424203', 256000, timeout=1)\n# tty.usbmodem1424203\n\ndef checksum(data):\n    return ~(sum(data)) & 0xff\n    \ndef Rx():\n    Rx_buf = []\n    if ser.is_open:\n        Rx_buf = ser.read(11)\n        print(Rx_buf)\n    if Rx_buf[0] == 0xFF:\n        instruction = Rx_buf[-1]\n        if instruction == 0xA1:\n            return 1\n        if instruction == 0xA2:\n            # CRC Calculation error\n            return Rx_buf\n        if instruction == 0xA3:\n            # Acknowledge\n            return 3\n        if instruction == 0xA4:\n            # Done\n            return 4\n\ndef tx_sethome():\n    tx_buff = [0xFF,0x02,0x10]\n    
tx_buff.append(checksum(tx_buff[1:]))\n    print(tx_buff)\n    if ser.is_open:\n        ser.write(tx_buff)\n\n\"\"\"\n\nrobot jog\n    type = parameter which determines the type of jogging: 'c' cartesian jog, 'j' joint jog\n    cartesian jog \n        axis = parameter which determines the robot moving direction on the cartesian axes {input x y z rz}\n        step = parameter which determines the robot moving step {input 1 5 10 mm }\n\n    joint jog\n        axis = parameter which determines the robot moving direction in the robot joints {input j1(deg) j2(deg) j3(mm) j4(deg)}\n        step = parameter which determines the robot moving step {input 1 5 10}\n\n\"\"\"\ndef tx_jog(axis, step, type = 'c'):\n\n    if(type == 'c'): # cartesian jog\n        if(axis == 'x'):\n            move_axis = 0b00001000 #8\n        elif(axis == 'y'):\n            move_axis = 0b00000100 #4\n        elif(axis == 'z'):\n            move_axis = 0b00000010 #2\n        elif(axis == 'rz'):\n            move_axis = 0b00000001 #1\n\n        if(step < 0):\n            move_step = step & 0xFF\n        else:\n            move_step = step\n\n        tx_buff = [0xFF,0x04,0x20,move_axis,move_step]\n\n    elif(type == 'j'): # joint jog\n        \n        if(axis == 'j1'):\n            move_axis = 0b00001000\n        elif(axis == 'j2'):\n            move_axis = 0b00000100\n        elif(axis == 'j3'):\n            move_axis = 0b00000010\n        elif(axis == 'j4'):\n            move_axis = 0b00000001\n\n        if(step < 0):\n            move_step = step & 0xFF\n        else:\n            move_step = step\n\n        tx_buff = [0xFF,0x04,0x21,move_axis,move_step]\n\n    tx_buff.append(checksum(tx_buff[1:]))\n    print(tx_buff)\n    if ser.is_open:\n        ser.write(tx_buff)\n\n\"\"\"\nmove the robot\n    the robot can move in 2 styles: 1. move to target. the target is with respect to the home position\n                              2. move relative to. the input position is added to the current position, so the target position = now_position + input position\n    ref = reference target {home ,current}\n    type = type of input {c(cartesian) , j(joint)}\n    position = list of position 1. input are cartesian position {x(mm), y(mm), z(mm), rz(mm)}\n                                2. 
input type are joint configuration {j1(deg), j2(deg), j3(mm), j4(deg)}\n\"\"\"\ndef tx_move(position=[0,0,0,0], ref='home',type='j'):\n if(type == 'c'):\n tx_buff = [0xff,0x00,0x30]\n elif(type == 'j'):\n tx_buff = [0xff,0x00,0x31]\n \n style = 0b00000000\n \n if(ref == 'current'):\n style |= 1\n\n tx_buff.append(style)\n \n for i in range(len(position)):\n if(position[i] < 0):\n position[i] = position[i] & 0xFFFF\n tx_buff.append((position[i] >> 8) & 0xFF)\n tx_buff.append(position[i] & 0xFF)\n \n tx_buff[1] = len(tx_buff) - 1\n \n tx_buff.append(checksum(tx_buff[1:]))\n\n print(tx_buff)\n \n if ser.is_open:\n ser.write(tx_buff)\n\ndef require_manipulator_position():\n tx_buff = [0xff,0x02,0x71]\n tx_buff.append(checksum(tx_buff[1:]))\n\n print(tx_buff)\n \n if ser.is_open:\n ser.write(tx_buff)\n\ndef require_chessboard_position():\n tx_buff = [0xff,0x02,0x70]\n tx_buff.append(checksum(tx_buff[1:]))\n\n print(tx_buff)\n \n if ser.is_open:\n ser.write(tx_buff)\n\ndef task_move(pointfrom,pointto,action):\n tx_buff = [0xff,0,0x40,pointfrom,pointto,action]\n tx_buff[1] = len(tx_buff) - 1\n tx_buff.append(checksum(tx_buff[1:]))\n print(tx_buff)\n \n if ser.is_open:\n ser.write(tx_buff)\nif __name__ == \"__main__\":\n # tx_sethome()\n tx_jog(axis='j3', step=10 , type='j')\n # tx_jog(axis='x', step=-10, type='c')\n # tx_move(ref='current',type='j',position=[0,0,115,0])\n # tx_move(ref='home',type='j',position=[0,0,0,0])\n # tx_move(ref='current',type='c',position=[-10,0,0,0])\n \n # task_move(25,41,1)\n\n # require_manipulator_position()\n # require_chessboard_position()\n # print(Rx())\n\n\n\n\n\n","repo_name":"maytusudomlerd/Manipulator_lowlevel","sub_path":"mhainw_protocol.py","file_name":"mhainw_protocol.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36672698894","text":"import scrapy\nimport re\nimport datetime\n\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.loader import ItemLoader\nfrom itemloaders.processors import Join\n\nfrom helpers.s3_helper import S3Helper\nfrom ingestion.scrapy.items import JobsCrawlerItem\n\n\nclass WttjSpider(scrapy.Spider):\n \"\"\"\n Spider to scrape jobs information on Welcome to the Jungle Website.\n The individual pages are not rendered with Javascript so it only uses Scrapy.\n \"\"\"\n\n name = \"wttj\"\n\n def start_requests(self):\n links = S3Helper().extract_s3_links()\n for link in links:\n yield scrapy.Request(link, self.yield_job_item)\n\n def yield_job_item(self, response):\n l = ItemLoader(item=JobsCrawlerItem(), response=response)\n\n match = re.search(r'.*(?=\\?q=)', response.url) # url with random ending\n if match:\n l.add_value(\"url\", match.group(0))\n else:\n l.add_value(\"url\", response.url)\n\n l.add_value(\n \"title\",\n response.xpath(\n '//a[@data-testid=\"job-header-organization-link-logo\"]/parent::div/h1/text()[2]'\n ).get(),\n )\n l.add_value(\n \"company\",\n response.xpath(\n '//a[@data-testid=\"job-header-organization-link-logo\"]/parent::div/a/span/text()'\n ).get(),\n )\n l.add_value(\n \"location\",\n response.xpath(\n '//*[@name=\"location\"]/parent::span/following-sibling::span//text()'\n ).get(),\n )\n l.add_value(\n \"contract\",\n response.xpath(\n '//i[@name=\"contract\"]/following-sibling::span/text()'\n ).get(),\n )\n l.add_value(\n \"industry\",\n response.xpath(\n '//*[@name=\"tag\"]/parent::span/following-sibling::span/text()'\n ).get(),\n )\n l.add_value('size', 
response.xpath('//*[@name=\"department\"]/parent::span/following-sibling::span/text()').get())\n l.add_value(\n \"text\",\n response.xpath(\"//h2/following-sibling::div//text()\").getall(),\n Join(),\n )\n l.add_value(\n \"remote\",\n response.xpath(\n '//i[@name=\"remote\"]/following-sibling::span/text()'\n ).get(),\n )\n l.add_value(\"created_at\", datetime.date.today())\n\n yield l.load_item()\n\n\nif __name__ == \"__main__\":\n process = CrawlerProcess(\n settings={\n \"ROBOTSTXT_OBEY\": False,\n \"ITEM_PIPELINES\": {\n \"ingestion.scrapy.pipelines.JobsCrawlerPipeline\": 300,\n },\n \"AUTOTHROTTLE_ENABLED\": True,\n \"AUTOTHROTTLE_TARGET_CONCURRENCY\": 1,\n \"AUTOTHROTTLE_START_DELAY\": 5,\n \"AUTOTHROTTLE_MAX_DELAY\": 60,\n }\n )\n process.crawl(WttjSpider)\n process.start()\n","repo_name":"FelitaD/job-radar-2.0","sub_path":"ingestion/scrapy/spiders/wttj.py","file_name":"wttj.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23567788361","text":"import sys\r\n \r\ndef c(N, K):\r\n if K == N:\r\n return 0, 0\r\n if K == 1:\r\n return (N-1)//2, N//2\r\n\r\n #return c(N//2, K//2)\r\n m1, M1 = c(N//2, K//2)\r\n if (K == 2):\r\n return m1, M1\r\n \r\n m2, M2 = c((N-1)//2, (K-1)//2)\r\n\r\n if m1 < m2:\r\n return m1, M1\r\n elif m1 == m2:\r\n if M1 < M2:\r\n return m1, M1\r\n else:\r\n return m2, M2\r\n else:\r\n return m2, M2\r\n\r\nn = int(input())\r\nfor i in range(n):\r\n line = input()\r\n li = line.split();\r\n \r\n N = int(li[0])\r\n K = int(li[1])\r\n\r\n m, M = c(N, K)\r\n \r\n print('Case #' + str(i + 1) + ': ', end='')\r\n print(str(M) + ' ' + str(m))\r\n\r\n \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1587.py","file_name":"1587.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37847513420","text":"# encoding=utf-8\nimport sys\n\nimport tensorflow as tf\n\n\n__all__ = ['entity_metric_collect']\n\n\ndef entity_metric_collect(real_tag_seqs, predict_tag_seqs, lengths, metrics):\n \"\"\"\"\"\"\n try:\n assert len(real_tag_seqs) == len(predict_tag_seqs) == len(lengths)\n except AssertionError:\n tf.logging.error(\"Error: predict tag seq num doesn't equal real tag seq!\")\n sys.exit(0)\n\n for real_tag_seq, predict_tag_seq, length in zip(real_tag_seqs, predict_tag_seqs, lengths):\n metrics = _entity_count(real_tag_seq, predict_tag_seq, length, metrics)\n return metrics\n\n\ndef _entity_count(real_tag_seq, predict_tag_seq, length, count):\n real_pos, real_type = _extract_entity_pos_and_type(real_tag_seq[0])\n predict_pos, predict_type = _extract_entity_pos_and_type(predict_tag_seq[0])\n\n last_real_entity_item_len = 1\n last_real_entity_type = real_type\n\n if predict_pos != \"I\":\n last_predict_entity_item_len = 1\n last_predict_entity_type = predict_type\n else:\n last_predict_entity_item_len = 0\n last_predict_entity_type = \"\"\n\n for real_tag, predict_tag in zip(real_tag_seq[1:length] + [\"O\"], predict_tag_seq[1:length] + [\"O\"]):\n real_pos, real_type = _extract_entity_pos_and_type(real_tag)\n predict_pos, predict_type = _extract_entity_pos_and_type(predict_tag)\n\n update_real = _update_entity_check(real_pos, real_type, last_real_entity_type)\n update_predict = _update_entity_check(predict_pos, predict_type, last_predict_entity_type)\n\n if update_real and update_predict:\n count = 
_entity_type_exist_check(last_real_entity_type, count)\n count = _entity_type_exist_check(last_predict_entity_type, count)\n if last_real_entity_item_len == last_predict_entity_item_len and last_real_entity_type == last_predict_entity_type:\n count[last_real_entity_type][\"real\"] += 1\n count[last_real_entity_type][\"predict\"] += 1\n count[last_real_entity_type][\"correct\"] += 1\n else:\n count[last_real_entity_type][\"real\"] += 1\n if last_predict_entity_item_len != 0:\n count[last_predict_entity_type][\"predict\"] += 1\n last_real_entity_item_len = 1\n last_real_entity_type = real_type\n\n if predict_pos == \"I\":\n last_predict_entity_item_len = 0\n last_predict_entity_type = \"\"\n else:\n last_predict_entity_item_len = 1\n last_predict_entity_type = predict_type\n\n elif update_real and not update_predict:\n count = _entity_type_exist_check(last_real_entity_type, count)\n count[last_real_entity_type][\"real\"] += 1\n\n last_real_entity_item_len = 1\n last_real_entity_type = real_type\n\n last_predict_entity_item_len += 1\n\n elif not update_real and update_predict:\n count = _entity_type_exist_check(last_predict_entity_type, count)\n if last_predict_entity_item_len != 0:\n count[last_predict_entity_type][\"predict\"] += 1\n\n last_real_entity_item_len += 1\n\n if predict_pos == \"I\":\n last_predict_entity_item_len = 0\n last_predict_entity_type = \"\"\n else:\n last_predict_entity_item_len += 1\n last_predict_entity_type = predict_type\n\n else:\n last_real_entity_item_len += 1\n last_predict_entity_item_len += 1\n return count\n\n\ndef _update_entity_check(cur_pos, cur_type, last_type):\n \"\"\"\n check if previous entity reaches the end\n when current tag\n - is O\n - begins with B\n - different entity type\n means previous tag is the end of entity\n \"\"\"\n if cur_pos == \"O\":\n return True\n elif cur_pos == \"B\":\n return True\n elif cur_type != last_type:\n return True\n else:\n return False\n\n\ndef _extract_entity_pos_and_type(tag):\n \"\"\"\n split tag in position(B, I, O etc.) 
and type(ORG, PER, O etc.)\n    \"\"\"\n    if tag == \"O\":\n        return \"O\", \"O\"\n    else:\n        return tag.split(\"-\")\n\n\ndef _entity_type_exist_check(tag, info):\n    if not tag:\n        return info\n\n    if tag not in info:\n        info[tag] = {\"real\": 0, \"predict\": 0, \"correct\": 0}\n    return info\n\nif __name__ == \"__main__\":\n    metrics_dict = {\"ORG\": {'real': 2, 'predict': 2, 'correct': 1}}\n    real_seqs = [[\"O\", \"O\", \"B-ORG\", \"I-ORG\", \"O\", \"B-PER\", \"I-PER\", \"B-LOC\", \"B-LOC\"],\n                 [\"B-ORG\", \"B-LOC\", \"I-LOC\", \"O\", \"O\", \"O\", \"O\", \"B-PER\"]]\n    predict_seqs = [[\"B-PER\", \"O\", \"B-ORG\", \"I-PER\", \"O\", \"B-PER\", \"I-PER\", \"B-LOC\", \"O\"],\n                    [\"B-ORG\", \"B-LOC\", \"I-LOC\", \"O\", \"O\", \"O\", \"O\", \"B-PER\"]]\n    print(entity_metric_collect(real_seqs, predict_seqs, metrics_dict))\n\n\n\n\n\n\n\n\n","repo_name":"ShangzhiH/ChineseNER","sub_path":"eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11582848319","text":"\n#1\n#Write a function that computes the area of a circle given its radius.\nimport math\ndef area(a):\n    return math.pi * a * a\nprint(area(10))\n\n#2 Write a function calculation() such that it can accept two variables and calculate the addition and subtraction of it.\n# And also it must return both addition and subtraction in a single return call\ndef calculation(x,y):\n    return x+y,x-y\nprint(calculation(10,5))\n\n#3 Write a recursive function that prints the product of all values from an array of integer numbers,\n# without using for or while loops. Can you make it tail-recursive? (Hint use Sum Example as inspiration)\nfrom functools import reduce\nnums = [1,2,3,4,5]\nprint(reduce(lambda x,y : x*y,nums))\n# look for tail recursive\n\n#4 Write a function that takes a milliseconds value and returns a string describing the value in days, hours, minutes, and seconds.\n# What’s the optimal type for the input value?\n# one possible answer; a plain int is a natural input type and divmod keeps the arithmetic readable:\ndef time_description(millis):\n    seconds, millis = divmod(int(millis), 1000)\n    minutes, seconds = divmod(seconds, 60)\n    hours, minutes = divmod(minutes, 60)\n    days, hours = divmod(hours, 24)\n    return f\"{days} days, {hours} hours, {minutes} minutes, {seconds} seconds\"\nprint(time_description(123456789))\n\n#5 Create a function showSalary() in such a way that it should accept employee name,\n# and its salary and display both, and if the salary is missing in the function call it should show it as 5000\nsalfunction= lambda name,sal : print(\"Name :\",name, \" has salary: \",sal) if sal > 0 else print(\"Name :\",name, \" has salary: \",5000)\nsalfunction(\"vinit\",100)\nsalfunction(\"vinit\",200)\nsalfunction(\"vinit\",0)\n\n\n#6 Write a function that calculates the difference between a pair of 2D points (x and y) and returns the result as a point.\n# (Hint: this would be a good use for tuples - Search Tuples Examples for inspiration).\n#Reduce\nfrom functools import reduce\ninputs = ((2,3),(4,5),(8,9),(3,8))\ncalculateDistance = lambda xy : xy[1]-xy[0]\nresult = map(calculateDistance,list(inputs))\nprint(list(result))\naddfunction = lambda x,y : x[0]+y[0]\n#reducedata = reduce( addfunction,result)\n#print(reducedata)\n\n#7 Write a function that takes a 3-sized tuple and returns a 6-sized tuple, with each original parameter followed by its String representation.\n# For example, invoking the function with (true, 22.25, \"yes\") should return (true, \"true\", 22.25, \"22.25\", \"yes\", \"yes\").\n# Can you ensure that tuples of all possible types are compatible with your function? 
When you invoke this function,\n# can you do so with explicit types not only in the function result but in the value that you use to store the result?\n\ndef convert(tuppleExample):\n newTupple = list()\n for i in tuppleExample:\n newTupple.append(i)\n newTupple.append(str(i))\n return tuple(newTupple)\n\ntupleEx = (True,22.25,\"yes\")\nprint(tupleEx)\nprint(convert(tupleEx))\n\n#8 Write a sort function for a list of 4-tuples. Below is a list of the nearest stars and some of their properties.\n# The list elements are 4-tuples containing the name of the star, the distance from the sun in light years, the apparent brightness, and the luminosity.\n# The apparent brightness is how bright the stars look in our sky compared to the brightness of Sirius A.\n# The luminosity, or the true brightness, is how bright the stars would look if all were at the same distance compared to the Sun\n\nstartList = list ();\n\nstar1 = (\"Alpha Centauri A\", 4.3, 0.26, 1.56)\nstar2 = (\"Alpha Centauri B\", 4.3, 0.077, 0.45)\nstar3 = (\"Alpha Centauri C\", 4.2, 0.00001, 0.00006)\nstar4 = (\"Barnard’s Star\", 6.0, 0.00004, 0.0005)\nstar5 = (\"Wolf 359\", 7.7, 0.000001, 0.00002)\nstar6 = (\"BD +36 degrees 2147\", 8.2, 0.0003, 0.006)\nstar7 = (\"Luyten 726-8 A\", 8.4, 0.000003, 0.00006)\nstar8 = (\"Luyten 726-8 B\", 8.4, 0.000002, 0.00004)\nstar9 = (\"Sirius A\", 8.6, 1.00, 23.6)\nstar10 = (\"Sirius B\", 8.6, 0.001, 0.003)\nstar11 = (\"Ross 154\", 9.4, 0.00002, 0.0005)\nstartList.append(star1)\nstartList.append(star2)\nstartList.append(star3)\nstartList.append(star4)\nstartList.append(star5)\nstartList.append(star6)\nstartList.append(star7)\nstartList.append(star8)\nstartList.append(star9)\nstartList.append(star10)\nstartList.append(star11)\nprint(startList)\n\ntotal = len(startList)\nstartList.sort(key = lambda x:x[1])\nprint(startList)\nstartList.sort(key = lambda x:x[2])\nprint(startList)\nstartList.sort(key = lambda x:x[3])\nprint(startList)","repo_name":"veenu43/pythonBasics","sub_path":"practice/Excercise.py","file_name":"Excercise.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21661170040","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef upsample(signal, factor):\n signal = signal.permute(0, 2, 1)\n signal = F.interpolate(\n torch.cat((signal, signal[:, :, -1:]), 2),\n size=signal.shape[-1] * factor + 1,\n mode=\"linear\",\n align_corners=True,\n )\n signal = signal[:, :, :-1]\n return signal.permute(0, 2, 1)\n\n\nclass VolumeExtractor:\n def __init__(self, hop_size=512, block_size=None, model_sampling_rate=None):\n self.block_size = block_size\n self.model_sampling_rate = model_sampling_rate\n self.hop_size = hop_size\n if (self.block_size is not None) or (self.model_sampling_rate is not None):\n assert (self.block_size is not None) and (\n self.model_sampling_rate is not None\n )\n self.hop_size_follow_input = True\n else:\n self.hop_size_follow_input = False\n\n def extract(self, audio, sr=None): # audio: 1d numpy array\n if sr is not None:\n assert self.hop_size_follow_input\n self.hop_size = self.block_size * sr / self.model_sampling_rate\n n_frames = int(len(audio) // self.hop_size) + 1\n audio2 = audio**2\n audio2 = np.pad(\n audio2,\n (int(self.hop_size // 2), int((self.hop_size + 1) // 2)),\n mode=\"reflect\",\n )\n volume = np.array(\n [\n np.mean(audio2[int(n * self.hop_size) : int((n + 1) * self.hop_size)])\n for n in range(n_frames)\n ]\n )\n volume = 
np.sqrt(volume)\n \"\"\"\n if isinstance(audio, torch.Tensor):\n n_frames = int(audio.size(-1) // self.hop_size) + 1\n audio2 = audio ** 2\n audio2 = torch.nn.functional.pad(audio2, (int(self.hop_size // 2), int((self.hop_size + 1) // 2)),\n mode='reflect')\n audio_frame = torch.nn.functional.unfold(audio2[:, None, None, :], (1, int(self.hop_size)),\n stride=int(self.hop_size))[:, :, :n_frames]\n volume = audio_frame.mean(dim=1)[0]\n volume = torch.sqrt(volume).squeeze().cpu().numpy()\n else:\n n_frames = int(len(audio) // self.hop_size) + 1\n audio2 = audio ** 2\n audio2 = np.pad(audio2, (int(self.hop_size // 2), int((self.hop_size + 1) // 2)), mode='reflect')\n volume = np.array(\n [np.mean(audio2[int(n * self.hop_size): int((n + 1) * self.hop_size)]) for n in range(n_frames)])\n volume = np.sqrt(volume)\n \"\"\"\n return volume\n\n def get_mask_from_volume(self, volume, threhold=-60.0, device=\"cpu\"):\n mask = (volume > 10 ** (float(threhold) / 20)).astype(\"float\")\n mask = np.pad(mask, (4, 4), constant_values=(mask[0], mask[-1]))\n mask = np.array([np.max(mask[n : n + 9]) for n in range(len(mask) - 8)])\n mask = torch.from_numpy(mask).float().to(device).unsqueeze(-1).unsqueeze(0)\n mask = upsample(mask, self.block_size).squeeze(-1)\n return mask\n\n\ndef extract_volume(\n audio: np.ndarray,\n sr: int = None,\n hop_size: int = 160,\n block_size=None,\n model_sampling_rate=None,\n):\n if (block_size is not None) or (model_sampling_rate is not None):\n assert (block_size is not None) and (model_sampling_rate is not None)\n hop_size_follow_input = True\n else:\n hop_size_follow_input = False\n # audio: 1d numpy array\n if sr is not None:\n assert hop_size_follow_input\n hop_size = block_size * sr / model_sampling_rate\n n_frames = int(len(audio) // hop_size) + 1\n audio = audio**2\n audio = np.pad(\n audio,\n (int(hop_size // 2), int((hop_size + 1) // 2)),\n mode=\"reflect\",\n )\n volume = np.array(\n [\n np.mean(audio[int(n * hop_size) : int((n + 1) * hop_size)])\n for n in range(n_frames)\n ]\n )\n volume = np.sqrt(volume)\n return volume\n\n\ndef get_mask_from_volume(volume, threhold=-60.0, block_size=512, device=\"cpu\"):\n mask = (volume > 10 ** (float(threhold) / 20)).astype(\"float\")\n mask = np.pad(mask, (4, 4), constant_values=(mask[0], mask[-1]))\n mask = np.array([np.max(mask[n : n + 9]) for n in range(len(mask) - 8)])\n mask = torch.from_numpy(mask).float().to(device).unsqueeze(-1).unsqueeze(0)\n mask = upsample(mask, block_size).squeeze(-1)\n return mask\n","repo_name":"ddPn08/Latopia","sub_path":"latopia/volume_extractor.py","file_name":"volume_extractor.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"61"} +{"seq_id":"7595369984","text":"import os\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom newspy import newsorg, rss\nfrom newspy.models import Article, Category, Channel, Country, Language, Source\n\ndefault_client_config = {}\n\nchannels = {\n Channel.NEWSORG: newsorg,\n Channel.RSS: rss,\n}\n\n\ndef configure(newsorg_api_key: str | None = None) -> None:\n global default_client_config\n\n if newsorg_api_key is None:\n newsorg_api_key = os.getenv(\"NEWSORG_API_KEY\")\n\n default_client_config = {\n \"newsorg_api_key\": newsorg_api_key,\n }\n\n\ndef get_sources(\n category: Category | None = None,\n country: Country | None = None,\n language: Language | None = None,\n) -> list[Source]:\n sources = []\n with ThreadPoolExecutor() as executor:\n futures = 
[\n executor.submit(\n channels[key].client.get_sources,\n category=category,\n country=country,\n language=language,\n )\n for key in channels\n ]\n\n for future in as_completed(futures):\n sources.extend([r.to_source() for r in future.result()])\n\n return sources\n\n\ndef get_articles(\n category: Category | None = None,\n country: Country | None = None,\n language: Language | None = None,\n) -> list[Article]:\n articles = []\n with ThreadPoolExecutor() as executor:\n futures = [\n executor.submit(\n channels[key].client.get_articles,\n category=category,\n country=country,\n language=language,\n )\n for key in channels\n ]\n\n for future in as_completed(futures):\n articles.extend([r.to_article() for r in future.result()])\n\n return articles\n\n\ndef get_categories() -> list[Category]:\n return [c for c in Category] # type: ignore\n","repo_name":"onemoola/newspy","sub_path":"newspy/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"71766652995","text":"# coding=utf-8\nimport copy\nimport time\n\nfrom mycodo.inputs.base_input import AbstractInput\nfrom mycodo.utils.constraints_pass import constraints_pass_positive_or_zero_value\n\n# Measurements\nmeasurements_dict = {\n 0: {\n 'measurement': 'length',\n 'unit': 'cm'\n }\n}\n\n# Input information\nINPUT_INFORMATION = {\n 'input_name_unique': 'HCSR04_CIRCUITPYTHON',\n 'input_manufacturer': 'Multiple Manufacturers',\n 'input_name': 'HC-SR04',\n 'input_library': 'Adafruit_CircuitPython_HCSR04',\n 'measurements_name': 'Ultrasonic Distance',\n 'measurements_dict': measurements_dict,\n 'url_manufacturer': 'https://www.cytron.io/p-5v-hc-sr04-ultrasonic-sensor',\n 'url_datasheet': 'http://web.eece.maine.edu/~zhu/book/lab/HC-SR04%20User%20Manual.pdf',\n 'url_product_purchase': 'https://www.adafruit.com/product/3942',\n 'url_additional': 'https://learn.adafruit.com/ultrasonic-sonar-distance-sensors/python-circuitpython',\n\n 'options_enabled': [\n 'period',\n 'pre_output'\n ],\n 'options_disabled': ['interface'],\n\n 'dependencies_module': [\n ('apt', 'libgpiod-dev', 'libgpiod-dev'),\n ('pip-pypi', 'usb.core', 'pyusb==1.1.1'),\n ('pip-pypi', 'adafruit_hcsr04', 'adafruit-circuitpython-hcsr04==0.4.6')\n ],\n\n 'interfaces': ['GPIO'],\n\n 'custom_options': [\n {\n 'id': 'pin_trigger',\n 'type': 'integer',\n 'default_value': None,\n 'required': False,\n 'constraints_pass': constraints_pass_positive_or_zero_value,\n 'name': 'Trigger Pin',\n 'phrase': 'Enter the GPIO Trigger Pin for your device (BCM numbering).'\n },\n {\n 'id': 'pin_echo',\n 'type': 'integer',\n 'default_value': None,\n 'required': False,\n 'constraints_pass': constraints_pass_positive_or_zero_value,\n 'name': 'Echo Pin',\n 'phrase': 'Enter the GPIO Echo Pin for your device (BCM numbering).'\n },\n ]\n}\n\n\nclass InputModule(AbstractInput):\n \"\"\"A sensor support class that measures the HCSR04's temperature\"\"\"\n def __init__(self, input_dev, testing=False):\n super().__init__(input_dev, testing=testing, name=__name__)\n\n self.sensor = None\n\n self.pin_trigger = None\n self.pin_echo = None\n\n if not testing:\n self.setup_custom_options(\n INPUT_INFORMATION['custom_options'], input_dev)\n self.try_initialize()\n\n def initialize(self):\n import board\n import adafruit_hcsr04\n\n bcm_to_board = [\n board.D1,\n board.D2,\n board.D3,\n board.D4,\n board.D5,\n board.D6,\n board.D7,\n board.D8,\n board.D9,\n board.D10,\n board.D11,\n board.D12,\n 
board.D13,\n            board.D14,\n            board.D15,\n            board.D16,\n            board.D17,\n            board.D18,\n            board.D19,\n            board.D20,\n            board.D21,\n            board.D22,\n            board.D23,\n            board.D24,\n            board.D25,\n            board.D26,\n            board.D27\n        ]\n\n        if self.pin_trigger and self.pin_echo:\n            self.sensor = adafruit_hcsr04.HCSR04(\n                trigger_pin=bcm_to_board[self.pin_trigger - 1],\n                echo_pin=bcm_to_board[self.pin_echo - 1])\n        else:\n            self.logger.error(\"Must set trigger and echo pins\")\n\n    def get_measurement(self):\n        \"\"\"Gets the measurement.\"\"\"\n        if not self.sensor:\n            self.logger.error(\"Error 101: Device not set up. See https://kizniche.github.io/Mycodo/Error-Codes#error-101 for more info.\")\n            return\n\n        self.return_dict = copy.deepcopy(measurements_dict)\n\n        # try 3 times\n        for _ in range(3):\n            try:\n                self.value_set(0, self.sensor.distance)\n                return self.return_dict\n            except Exception as err:\n                self.logger.debug(\"Error: {}\".format(err))\n                time.sleep(0.5)\n","repo_name":"kizniche/Mycodo","sub_path":"mycodo/inputs/hcsr04_circuitpython.py","file_name":"hcsr04_circuitpython.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","stars":2708,"dataset":"github-code","pt":"61"} +{"seq_id":"664020085","text":"import cv2 as c\r\nimport numpy as np\r\nimport time\r\n\r\ndef sorty(a, p):\r\n    img = np.zeros((512,512,4),np.uint8)\r\n    n = len(a)\r\n    for i in range(n):\r\n        min_id = i\r\n        for j in range(i+1, n):\r\n            if a[min_id] > a[j] :\r\n                min_id = j\r\n\r\n        # erase both bars at their old lengths before swapping\r\n        c.line(img, (20,155+p[i]), (200+a[i], 155+p[i]), (0,0,0),5)\r\n        c.line(img, (20,155+p[min_id]), (200+a[min_id], 155+p[min_id]), (0,0,0),5)\r\n\r\n        t = a[i]\r\n        a[i] = a[min_id]\r\n        a[min_id] = t\r\n\r\n        # redraw both bars at their new lengths\r\n        c.line(img, (20,155+p[i]), (200+a[i], 155+p[i]), (255,0,255),5)\r\n        c.line(img, (20,155+p[min_id]), (200+a[min_id], 155+p[min_id]), (255,0,255),5)\r\n\r\n        c.imshow('Animation Window', img)\r\n        k = c.waitKey(1000)\r\n        if k == 27:  # Esc stops the animation\r\n            break\r\n\r\n    c.destroyAllWindows()\r\n\r\ndef ask():\r\n    a = []\r\n    p = []\r\n    t= 5\r\n    count = int(input('How many lines do you want:-'))  # renamed so it does not shadow the cv2 alias\r\n    for i in range(0, count) :\r\n        x = int(input('Enter the Value:- '))\r\n        a.append(x)\r\n        p.append(t)\r\n        t += 20\r\n    return a, p\r\n\r\ndef main():\r\n    a, p = ask()\r\n    sorty(a, p)\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n    ## Copyrights Reserves to Aaris Kazi for the Visualising the 
Algorithm","repo_name":"Aaris-Kazi/Visualizing-Sorting-Algo-in-Python","sub_path":"select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"23480527451","text":"# read in the file\r\nwith open(\"A-large.in\") as f:\r\n\tlines = f.readlines()\r\n\tfor line in enumerate(lines): # skip T\r\n\t\tif line[0] == 0:\r\n\t\t\tcontinue\r\n\t\tdigits = [False for _ in range(10)]\r\n\t\tN = int( line[1][:-1] )\r\n\t\tif N == 0:\r\n\t\t\tprint(\"Case #%d: INSOMNIA\" % line[0])\r\n\t\t\tcontinue\r\n\t\ti = 0\r\n\t\twhile sum(digits) < 10:\r\n\t\t\ti += 1\r\n\t\t\tn = i * N\r\n\t\t\twhile n != 0:\r\n\t\t\t\tdigits[n % 10] = True\r\n\t\t\t\tn = int(n/10)\r\n\t\tprint(\"Case #%d: %d\" % (line[0], i * N))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_177/2555.py","file_name":"2555.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8612106592","text":"import gettext\n\nfrom gi.repository import Gtk, Pango\n\nimport tryton.common as common\nfrom tryton.common.widget_style import widget_class\nfrom tryton.config import CONFIG\nfrom tryton.gui import Main\n\nfrom .infobar import InfoBar\n\n_ = gettext.gettext\n\n\nclass ToolbarItem(object):\n def __init__(self, id, label,\n tooltip=None, icon_name=None, accel_path=None, toggle=False):\n self.id = id\n self.label = label\n self.tooltip = tooltip\n self.icon_name = icon_name\n self.accel_path = accel_path\n self.toggle = toggle\n\n @property\n def menu(self):\n return True\n\n @property\n def toolbar(self):\n return bool(self.tooltip)\n\n\nclass TabContent(InfoBar):\n\n def __init__(self, **attributes):\n super(TabContent, self).__init__()\n self.attributes = attributes.copy()\n\n @property\n def menu_def(self):\n return [\n ToolbarItem(\n id='switch',\n label=_(\"_Switch View\"),\n tooltip=_(\"Switch View\"),\n icon_name='tryton-switch',\n accel_path='/Form/Switch View'),\n ToolbarItem(\n id='previous',\n label=_(\"_Previous\"),\n tooltip=_(\"Previous Record\"),\n icon_name='tryton-back',\n accel_path='/Form/Previous'),\n ToolbarItem(\n id='next',\n label=_(\"_Next\"),\n tooltip=_(\"Next Record\"),\n icon_name='tryton-forward',\n accel_path='/Form/Next'),\n ToolbarItem(\n id='search',\n label=_(\"_Search\"),\n icon_name='tryton-search',\n accel_path='/Form/Search'),\n None,\n ToolbarItem(\n id='new',\n label=_(\"_New\"),\n tooltip=_(\"Create a new record\"),\n icon_name='tryton-create',\n accel_path='/Form/New'),\n ToolbarItem(\n id='save',\n label=_(\"_Save\"),\n tooltip=_(\"Save this record\"),\n icon_name='tryton-save',\n accel_path='/Form/Save'),\n ToolbarItem(\n id='reload',\n label=_(\"_Reload/Undo\"),\n tooltip=_(\"Reload/Undo\"),\n icon_name='tryton-refresh',\n accel_path='/Form/Reload'),\n ToolbarItem(\n id='copy',\n label=_(\"_Duplicate\"),\n icon_name='tryton-copy',\n accel_path='/Form/Duplicate'),\n ToolbarItem(\n id='remove',\n label=_(\"_Delete...\"),\n icon_name='tryton-delete',\n accel_path='/Form/Delete'),\n None,\n ToolbarItem(\n id='logs',\n label=_(\"View _Logs...\"),\n icon_name='tryton-log'),\n ToolbarItem(\n id='revision' if self.model in common.MODELHISTORY else None,\n label=_(\"Show revisions...\"),\n icon_name='tryton-history'),\n None,\n ToolbarItem(\n id='attach',\n label=_(\"A_ttachments...\"),\n tooltip=_(\"Add an attachment to the record\"),\n 
icon_name='tryton-attach',\n accel_path='/Form/Attachments',\n toggle=True),\n ToolbarItem(\n id='note',\n label=_(\"_Notes...\"),\n tooltip=_(\"Add a note to the record\"),\n icon_name='tryton-note',\n accel_path='/Form/Notes'),\n ToolbarItem(\n id='action',\n label=_(\"_Actions...\"),\n icon_name='tryton-launch',\n accel_path='/Form/Actions'),\n ToolbarItem(\n id='relate',\n label=_(\"_Relate...\"),\n icon_name='tryton-link',\n accel_path='/Form/Relate'),\n None,\n ToolbarItem(\n id='print_open',\n label=_(\"_Report...\"),\n icon_name='tryton-open',\n accel_path='/Form/Report'),\n ToolbarItem(\n id='print',\n label=_(\"_Print...\"),\n icon_name='tryton-print',\n accel_path='/Form/Print'),\n ToolbarItem(\n id='email',\n label=_(\"_E-Mail...\"),\n tooltip=_(\"Send an e-mail using the record\"),\n icon_name='tryton-email',\n accel_path='/Form/Email'),\n None,\n ToolbarItem(\n id='export',\n label=_(\"_Export Data...\"),\n icon_name='tryton-export',\n accel_path='/Form/Export Data'),\n ToolbarItem(\n id='import',\n label=_(\"_Import Data...\"),\n icon_name='tryton-import',\n accel_path='/Form/Import Data'),\n ToolbarItem(\n id='copy_url',\n label=_(\"Copy _URL...\"),\n icon_name='tryton-public',\n accel_path='/Form/Copy URL'),\n None,\n ToolbarItem(\n id='win_close',\n label=_(\"_Close Tab\"),\n icon_name='tryton-close',\n accel_path='/Form/Close'),\n ]\n\n def create_tabcontent(self):\n self.buttons = {}\n self.menu_buttons = {}\n self.tooltips = common.Tooltips()\n self.accel_group = Main().accel_group\n\n self.widget = Gtk.VBox(spacing=3)\n self.widget.show()\n\n title_box = self.make_title_bar()\n self.widget.pack_start(title_box, expand=False, fill=True, padding=3)\n\n self.toolbar = self.create_toolbar(self.get_toolbars())\n self.toolbar.show_all()\n self.widget.pack_start(\n self.toolbar, expand=False, fill=True, padding=0)\n\n self.main = Gtk.HPaned()\n self.main.show()\n self.widget.pack_start(\n self.main, expand=True, fill=True, padding=0)\n\n viewport = Gtk.Viewport()\n viewport.set_shadow_type(Gtk.ShadowType.NONE)\n viewport.add(self.widget_get())\n viewport.show()\n self.scrolledwindow = Gtk.ScrolledWindow()\n self.scrolledwindow.set_shadow_type(Gtk.ShadowType.NONE)\n self.scrolledwindow.set_policy(\n Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n self.scrolledwindow.add(viewport)\n self.scrolledwindow.show()\n self.main.pack1(self.scrolledwindow, resize=True, shrink=False)\n\n self.widget.pack_start(\n self.create_info_bar(), expand=False, fill=True, padding=0)\n\n def make_title_bar(self):\n tooltips = common.Tooltips()\n self.title = title = Gtk.Label(\n label=common.ellipsize(self.name, 80),\n halign=Gtk.Align.START, margin=5,\n ellipsize=Pango.EllipsizeMode.END)\n tooltips.set_tip(title, self.name)\n title.set_size_request(0, -1) # Allow overflow\n title.show()\n\n menu = Gtk.MenuButton.new()\n menu.set_relief(Gtk.ReliefStyle.NONE)\n menu.set_popup(self.set_menu_form())\n menu.show()\n\n self.status_label = Gtk.Label(\n margin=5, halign=Gtk.Align.END)\n widget_class(self.status_label, 'status', True)\n self.status_label.show()\n\n hbox = Gtk.HBox()\n hbox.pack_start(title, expand=True, fill=True, padding=0)\n hbox.pack_start(self.status_label, expand=False, fill=True, padding=0)\n hbox.show()\n\n frame = Gtk.Frame()\n frame.set_shadow_type(Gtk.ShadowType.ETCHED_IN)\n widget_class(frame, 'window-title', True)\n frame.add(hbox)\n frame.show()\n\n frame_menu = Gtk.Frame()\n frame_menu.set_shadow_type(Gtk.ShadowType.ETCHED_IN)\n frame_menu.add(menu)\n 
frame_menu.show()\n\n title_box = Gtk.HBox()\n title_box.pack_start(frame_menu, expand=False, fill=True, padding=0)\n title_box.pack_start(frame, expand=True, fill=True, padding=0)\n title_box.show()\n return title_box\n\n def create_base_toolbar(self, toolbar):\n previous = None\n for item in self.menu_def:\n if item and item.toolbar:\n callback = getattr(self, 'sig_%s' % item.id, None)\n if not callback:\n continue\n if item.toggle:\n toolitem = Gtk.ToggleToolButton()\n toolitem.connect('toggled', callback)\n else:\n toolitem = Gtk.ToolButton()\n toolitem.connect('clicked', callback)\n toolitem.set_icon_widget(\n common.IconFactory.get_image(\n item.icon_name, Gtk.IconSize.LARGE_TOOLBAR))\n toolitem.set_label(item.label)\n toolitem.set_use_underline(True)\n self.tooltips.set_tip(toolitem, item.tooltip)\n self.buttons[item.id] = toolitem\n elif not item and previous:\n toolitem = Gtk.SeparatorToolItem()\n else:\n continue\n previous = item\n toolbar.insert(toolitem, -1)\n\n def set_menu_form(self):\n menu_form = Gtk.Menu()\n menu_form.set_accel_group(self.accel_group)\n menu_form.set_accel_path('/Form')\n previous = None\n for item in self.menu_def:\n if item and item.menu:\n callback = getattr(self, 'sig_%s' % item.id, None)\n if not callback:\n continue\n menuitem = Gtk.MenuItem(\n label=item.label,\n use_underline=True)\n menuitem.connect('activate', callback)\n if item.accel_path:\n menuitem.set_accel_path(item.accel_path)\n self.menu_buttons[item.id] = menuitem\n elif not item and previous:\n menuitem = Gtk.SeparatorMenuItem()\n else:\n continue\n previous = item\n menu_form.add(menuitem)\n\n menu_form.show_all()\n return menu_form\n\n def create_toolbar(self, toolbars):\n gtktoolbar = Gtk.Toolbar()\n option = CONFIG['client.toolbar']\n if option == 'default':\n gtktoolbar.set_style(False)\n elif option == 'both':\n gtktoolbar.set_style(Gtk.ToolbarStyle.BOTH)\n elif option == 'text':\n gtktoolbar.set_style(Gtk.ToolbarStyle.TEXT)\n elif option == 'icons':\n gtktoolbar.set_style(Gtk.ToolbarStyle.ICONS)\n self.create_base_toolbar(gtktoolbar)\n return gtktoolbar\n\n def compare(self, model, attributes):\n return False\n","repo_name":"tryton/tryton-client","sub_path":"tryton/gui/window/tabcontent.py","file_name":"tabcontent.py","file_ext":"py","file_size_in_byte":11042,"program_lang":"python","lang":"en","doc_type":"code","stars":178,"dataset":"github-code","pt":"61"} +{"seq_id":"9354882718","text":"import os\nfrom pathlib import Path\nfrom dotenv import load_dotenv;\nimport yaml\n\ndef find_yml():\n config_path = os.getenv('CONFIG_PATH')\n if config_path is not None:\n p = Path(config_path) / '.Config.yml'\n if p.exists():\n return p\n dirs = ['.', '/app', '~']\n for e in dirs:\n p = Path(e) / '.Config.yml'\n if p.exists():\n return p\n return None\n\ndef Config_load():\n load_dotenv()\n fname = find_yml()\n if fname is None:\n return None, 'Config_load ERROR: Cant find .Config.yml'\n with open(str(fname), 'r') as stream:\n try:\n Config = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n return None, f'Config_load ERROR: .Config.yml parsing error (str({exc}))'\n return Config, None\n\ndef get_db_url(Config, with_db_name=False):\n if Config is None:\n return None\n if not 'db' in Config:\n return None\n if os.getenv('development') is None:\n if not 'host' in Config['db']:\n return None\n host = Config['db']['host']\n else:\n if not 'host_dev' in Config['db']:\n return None\n host = Config['db']['host_dev']\n if not 'port' in Config['db']:\n port = 3306\n else:\n port = 
Config['db']['port']\n if not 'name' in Config['db']:\n return None\n if not 'passwd' in Config['db']:\n return None\n passwd = Config['db']['passwd']\n url = f'mysql+pymysql://root:{passwd}@{host}:{port}'\n if with_db_name:\n name = Config['db']['name']\n url = f'{url}/{name}'\n return url\n\ndef get_rest_url(Config):\n if Config is None:\n return None\n if not 'rest' in Config:\n return None\n if os.getenv('development') is None:\n if not 'host' in Config['rest']:\n return None\n host = Config['rest']['host']\n else:\n if not 'host_dev' in Config['rest']:\n return None\n host = Config['rest']['host_dev']\n if not 'port' in Config['rest']:\n return None\n port = Config['rest']['port']\n url = f'{host}:{port}'\n return url","repo_name":"jn2050/ml","sub_path":"lib/utils/utils/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19902676815","text":"#!/usr/bin/env python3\n\n'''\nCPSC 415 -- Homework #2 template\nDylan Meyers, University of Mary Washington, fall 2021\n'''\n\nfrom math import inf\nfrom os import listxattr\nfrom atlas import Atlas\nimport numpy as np\nimport logging\nimport sys\nfrom collections import defaultdict\n\ndef find_best_path(atlas):\n '''Finds the best path from src to dest, based on costs from atlas.\n Returns a tuple of two elements. The first is a list of city numbers,\n starting with 0 and ending with atlas.num_cities-1, that gives the\n optimal path between those two cities. The second is the total cost\n of that path.'''\n\n # THIS IS WHERE YOUR AMAZING CODE GOES\n # print(atlas._adj_mat)\n \n num_cities = atlas.get_num_cities()\n # for i in range(num_cities):\n # print (i)\n # print(atlas.get_crow_flies_dist(i, num_cities-1))\n\n\n nodes = {}\n \n length = 0\n i = 0\n list_added = {}\n while(i != num_cities-1):\n # length = length + 1\n # if length > 10:\n # break\n \n dict_1 = {}\n\n \n for x in range(num_cities):\n next_item = {}\n if atlas.get_road_dist(i,x) != inf and i != x and \"{}\".format(x) not in nodes :\n\n \n if \"{}\".format(i) not in nodes:\n nodes[\"{}\".format(i)] = {\"{}\".format(x): []}\n if \"{}\".format(x) not in nodes[\"{}\".format(i)]:\n nodes[\"{}\".format(i)][\"{}\".format(x)] = []\n\n next_item[\"{}\".format(x)]=(atlas.get_road_dist(i,x))\n next_item[\"hero\"] = atlas.get_crow_flies_dist(x,num_cities-1)\n new_dict = {}\n num = 0\n for c in list_added:\n new_dict[c] = list_added[c]\n num = num + 1\n if num == len(list_added)-1:\n break\n both_list = {**new_dict.copy(), **next_item.copy()}\n\n nodes[\"{}\".format(i)][\"{}\".format(x)] = both_list.copy()\n if len(dict_1) >0:\n nodes[\"{}\".format(i)]= dict_1\n\n\n if x == num_cities-1:\n lowest_node = lowest_value(nodes)\n # print(lowest_node)\n \n if lowest_node == 0:\n print(\"THERE IS NO SOLUTION\")\n return None\n i = int(lowest_node[1])\n\n list_added = nodes[lowest_node[0]][lowest_node[1]].copy()\n\n if i == num_cities-1:\n final_list = list(nodes[lowest_node[0]][lowest_node[1]].keys())\n final_values = list(nodes[lowest_node[0]][lowest_node[1]].values())\n final_list.insert(0,0)\n final_list.remove(\"hero\")\n return (final_list, sum(final_values))\n del nodes[lowest_node[0]][lowest_node[1]]\n if len(dict_1) == 0 and x == 0:\n break \n\n\n\n\ndef lowest_value(dict):\n tuple_dict = {}\n for i in dict:\n if len(dict[i])>0:\n low = minimum(dict[i])\n tuple_dict[(i,low)] = dict[i][low]\n \n return minimum(tuple_dict)\n\n\ndef minimum(dict):\n num = 0\n least 
= 0\n for key in dict:\n if num == 0:\n least = key\n if sum(dict[key].values()) < sum(dict[least].values()):\n least = key\n num = num + 1\n\n return least\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) not in [2,3]:\n # print(\"Usage: gps.py numCities|atlasFile [debugLevel].\")\n sys.exit(1)\n\n if len(sys.argv) > 2:\n if sys.argv[2] not in ['DEBUG','INFO','WARNING','ERROR']:\n # print('Debug level must be one of: DEBUG, INFO, WARNING, ERROR.')\n sys.exit(2)\n logging.getLogger().setLevel(sys.argv[2])\n else:\n logging.getLogger().setLevel('INFO')\n\n try:\n num_cities = int(sys.argv[1])\n logging.info('Building random atlas with {} cities...'.format(\n num_cities))\n usa = Atlas(num_cities)\n logging.info('...built.')\n except:\n logging.info('Loading atlas from file {}...'.format(sys.argv[1]))\n usa = Atlas.from_filename(sys.argv[1])\n logging.info('...loaded.')\n\n path, cost = find_best_path(usa)\n print('Best path from {} to {} costs {}: {}.'.format(0,\n usa.get_num_cities()-1, cost, path))\n print('You expanded {} nodes: {}'.format(len(usa._nodes_expanded),\n usa._nodes_expanded))\n\n","repo_name":"DylanMey/cpsc415","sub_path":"gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"32367085025","text":"PROXY = \"ts-henri.maarseveen:Henr!m666@proxysv:80\"\r\nURL_STOCK = \"http://finance.google.com/finance/info?q={}\"\r\nURL_WEATHER = \"http://api.openweathermap.org/data/2.5/forecast?units=metric&q={}\"\r\nURL_WARNING = \"http://www.jma.go.jp/en/warn/318_table.html\"\r\nURL_TRAIN = \"http://traininfo.jreast.co.jp/train_info/e/{}.aspx\"\r\nSPLIT_FOR_URL_WARNING = ''\r\nSPLIT_FOR_ADVISORIES = 'class=\"advisories\">'\r\nWARNING_LIST_HTML_TABLE = \"WarnTableTableEn\"\r\nTRAIN_TABLE_TIME = ''\r\nTRAIN_TABLE_INFO_OK = ''\r\nTRAIN_TABLE_INFO_NOK = ['', '','','','' ]\r\nADRUINO_COM = \"COM5\"\r\nADRUINO_BAUDRATE = 9600","repo_name":"henrim666/APO","sub_path":"apo/conf/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13805382934","text":"#!usr/bin/python3.9\n\n# j6.py\n# Version 1.0.0\n# 5/2/2022\n\n# Western Jihadism Project; Brandeis University\n# Written By: Mason Ware\n\n\n''' '''\n\n\nimport os\nimport csv\nimport json\nfrom typing import Dict\nimport pandas as pd #type: ignore\n\nfrom utils.animate import Loader\n\n\nclass File:\n ''' Class to represent a file of csv data and all of its attributes. '''\n def __init__(self, file_path: str) -> None:\n self.rt_demoprof_infile_csv: str = file_path\n \n # person-dependent:\n self.rt_demoprof_outfile_csv: str = 'data/csv/out_rt_de_demographicprofile.csv'\n self.rt_demoprof_outfile_json: str = 'data/csv/out_rt_de_demographicprofile.json'\n self.rt_indicator_outfile_csv: str = 'data/csv/out_indicator.csv'\n self.rt_indicator_outfile_json: str = 'data/json/out_indicator.csv'\n \n # person-dependent: \n self.db_de_rt_demographic_profile_out: list(dict()) = list()\n self.db_de_rt_indicator_out: list(dict()) = list()\n \n \n def load_dataframe(self) -> None:\n ''' method to load the csv file into a dataframe. 
'''\n # collect cleaned json data from raw csv\n with open(self.rt_demoprof_infile_csv, 'r', encoding='UTF-8') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for i, rows in enumerate(csv_reader):\n # TODO\n # add info to indicator\n # ^ keep a dict for demoprof in the traj_id?\n pass\n \n \n \n def handle_endpoints(self) -> None:\n ''' '''\n # TODO\n # move the traj_id dict out and replace it with an id\n for entry in self.j6_var_sheet_data:\n # person-independent:\n self.db_city_hometown_out.append(entry['hometown_id'])\n #? entry['hometown_id'] = id?\n self.db_city_death_out.append(entry['city_death_id'])\n #? entry['city_death_id'] = id?\n self.c_rad_out.append(entry['radicalization_reason_id'])\n #? entry['radicalization_reason_id'] = id?\n self.db_ethnicity_out.append(entry['ethnicity_id'])\n #? entry['ethnicity_id'] = id?\n \n # person-dependent:\n self.db_de_arrest_out.append(entry['db_de_arrest'])\n del entry['db_de_arrest']\n self.db_de_residency_out.append(entry['db_de_residency'])\n del entry['db_de_residency']\n self.db_de_citizenship_out.append(entry['db_de_citizenship'])\n del entry['db_de_citizenship']\n self.db_de_personalias_out.append(entry['db_de_personalias'])\n del entry['db_de_personalias']\n \n def write_out(self) -> None:\n ''' ''' \n # main output:\n\n # j6 variable sheet --> db_domestic_extremist\n # json\n with open(self.db_domestic_extremist_outfile_json, 'w') as json_file:\n json_file.write(json.dumps(self.j6_var_sheet_data, indent=4)) \n # csv\n keys = self.j6_var_sheet_data[0].keys()\n with open(self.db_domestic_extremist_outfile_csv, 'w', newline=None) as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(self.j6_var_sheet_data)\n \n # person-independent outputs:\n \n # hometown --> db_city\n with open(self.db_city_outfile_json, 'w') as json_file:\n json_file.write(json.dumps(self.db_city_hometown_out, indent=4))\n keys = self.db_city_hometown_out[0].keys()\n with open(self.db_city_outfile_csv, 'w', newline=None) as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(self.db_city_hometown_out) \n \n # death-town --> db_city\n with open(self.db_city_outfile_json, 'w') as json_file:\n json_file.write(json.dumps(self.db_city_death_out, indent=4))\n with open(self.db_city_outfile_csv, 'a', newline=None) as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writerows(self.db_city_death_out)\n \n # radicalization --> c_radicalization\n with open(self.c_rad_outfile_json, 'w') as json_file:\n json_file.write(json.dumps(self.c_rad_out, indent=4))\n keys = self.c_rad_out[0].keys()\n with open(self.c_rad_outfile_csv, 'w', newline=None) as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(self.c_rad_out) \n \n # ethnicity --> db_ethnicity\n with open(self.db_ethnicity_outfile_json, 'w') as json_file:\n json_file.write(json.dumps(self.db_ethnicity_out, indent=4))\n keys = self.db_ethnicity_out[0].keys()\n with open(self.db_ethnicity_outfile_csv, 'w', newline=None) as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(self.db_ethnicity_out) \n \n # person-dependent outputs:\n \n # arrest --> db_de_arrest\n with open(self.db_de_arrest_outfile_json, 'w') as json_file:\n json_file.write(json.dumps(self.db_de_arrest_out, indent=4))\n keys = self.db_de_arrest_out[0].keys()\n with 
open(self.db_de_arrest_outfile_csv, 'w', newline=None) as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(self.db_de_arrest_out) \n \n # residency --> db_de_residency\n with open(self.db_de_residency_outfile_json, 'w') as json_file:\n json_file.write(json.dumps(self.db_de_residency_out, indent=4))\n keys = self.db_de_residency_out[0].keys()\n with open(self.db_de_residency_outfile_csv, 'w', newline=None) as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(self.db_de_residency_out) \n \n # citizenship --> db_de_citizenship\n with open(self.db_de_citizenship_outfile_json, 'w') as json_file:\n json_file.write(json.dumps(self.db_de_citizenship_out, indent=4))\n keys = self.db_de_citizenship_out[0].keys()\n with open(self.db_de_citizenship_outfile_csv, 'w', newline=None) as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(self.db_de_citizenship_out)\n \n # alias --> personalias (a copy is also sent to db_de under 'nickname')\n with open(self.db_de_personalias_outfile_json, 'w') as json_file:\n json_file.write(json.dumps(self.db_de_personalias_out, indent=4))\n keys = self.db_de_personalias_out[0].keys()\n with open(self.db_de_personalias_outfile_csv, 'w', newline=None) as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(self.db_de_personalias_out) \n \n \ndef main() -> None:\n # user_io_file = input(f\"\\nEnter a file path > \") \n user_io_file = \"data/csv/in_j6_var_sheet.csv\" \n user_file = File(file_path=user_io_file)\n user_file.load_dataframe()\n user_file.handle_endpoints()\n user_file.write_out()\n \nif __name__ == '__main__':\n main()","repo_name":"masonrware/klausen-lab-scripts","sub_path":"j6trajectories.py","file_name":"j6trajectories.py","file_ext":"py","file_size_in_byte":7621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14574112176","text":"#!/usr/bin/env python\n\n\"\"\"model_learning.py: Source code of the model predictive control to obtain optimum trajectories for 2D arm movement\n\nThis module demonstrates how to use a do-mpc module to generate optimum target trajectories\n\nExample:\n You can directly execute with python command and pass -r [Trial Name] to name the execution, see below for other arguments ::\n\n $ python model_learning.py --show_animation True --store_animation True --store_results True -r fixed_theta0\n\nIt saves the trajectories obtained from the optimum solution while adjusting the timestep for musculoskeletal control\n\nOptions:\n --show_animation Visualize the animation\n --store_animation Save the animation as a gif\n --store_results Save the mpc results along with joint angles to be used in musculoskeletal control\n -r --run Name the execution to use in output images and files to be recorded\n\"\"\"\n\n__author__ = \"Berat Denizdurduran\"\n__copyright__ = \"Copyright 2022, Berat Denizdurduran\"\n__license__ = \"public, published\"\n__version__ = \"1.0.0\"\n__email__ = \"berat.denizdurduran@alpineintuition.ch\"\n__status__ = \"After-publication\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom casadi import *\nfrom casadi.tools import *\nimport pdb\nimport sys\n#sys.path.append('../../')\nimport do_mpc\nfrom scipy import interpolate\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom 
matplotlib.patches import Rectangle, Circle\nfrom matplotlib import rcParams\nfrom matplotlib.animation import FuncAnimation, FFMpegWriter, ImageMagickWriter\n# Plot settings\nrcParams['text.usetex'] = False\nrcParams['axes.grid'] = True\nrcParams['lines.linewidth'] = 2.0\nrcParams['axes.labelsize'] = 'xx-large'\nrcParams['xtick.labelsize'] = 'xx-large'\nrcParams['ytick.labelsize'] = 'xx-large'\n\nimport time\nimport argparse\n\nfrom template_mpc import template_mpc\nfrom template_simulator import template_simulator\nfrom template_model import template_model\n\n\n\"\"\" User settings: \"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--show_animation\", help=\"Visualize the animation\", default=True)\nparser.add_argument(\"--store_animation\", help=\"Save the animation\", default=True)\nparser.add_argument(\"--store_results\", help=\"Save the mpc results\", default=True)\nparser.add_argument(\"-r\", \"--run\", required=True, help=\"Run name\")\nargs = parser.parse_args()\n\nshow_animation = True\nstore_animation = True\nstore_results = True\n\n# Define obstacles to avoid (cicles)\n# here there are two obtstacles at the close proximity of the arm to help mpc to find the best trajectories\nobstacles = [\n {'x': 0.25, 'y': -0.225, 'r': 0.1},\n {'x': -0.12, 'y': -0.225, 'r': 0.1},\n]\n\nscenario = 1 # 1 = down-down start, 2 = up-up start, both with setpoint change.\n\n\"\"\"\nGet configured do-mpc modules:\n\"\"\"\n\nmodel = template_model(obstacles)\nsimulator = template_simulator(model)\nmpc = template_mpc(model)\nestimator = do_mpc.estimator.StateFeedback(model)\n\n\"\"\"\nSet initial state\n\"\"\"\n\nif scenario == 1:\n simulator.x0['theta'] = 0.\nelif scenario == 2:\n simulator.x0['theta'] = np.pi\nelse:\n raise Exception('Scenario not defined.')\n\nx0 = simulator.x0.cat.full()\n\nmpc.x0 = x0\nestimator.x0 = x0\n\nmpc.set_initial_guess()\n\n\"\"\"\nSetup graphic:\n\"\"\"\n\n# Function to create lines:\nL1 = 0.34 #m, length of the first rod\nL2 = 0.29 #m, length of the second rod\ndef pendulum_bars(x):\n x = x.flatten()\n # Get the x,y coordinates of the two bars for the given state x.\n line_1_x = np.array([\n 0,\n L1*np.sin(x[0])\n ])\n\n line_1_y = np.array([\n 0,\n -L1*np.cos(x[0])\n ])\n\n line_2_x = np.array([\n line_1_x[1],\n line_1_x[1] + L2*np.sin(x[1])\n ])\n\n line_2_y = np.array([\n line_1_y[1],\n line_1_y[1] - L2*np.cos(x[1])\n ])\n\n line_1 = np.stack((line_1_x, line_1_y))\n line_2 = np.stack((line_2_x, line_2_y))\n\n return line_1, line_2\n\nmpc_graphics = do_mpc.graphics.Graphics(mpc.data)\n\nfig = plt.figure(figsize=(18,10))\nplt.ion()\n\nax1 = plt.subplot2grid((1, 2), (0, 0), rowspan=2)\nax2 = plt.subplot2grid((1, 2), (0, 1))\n\nax2.set_ylabel('Relative Angle (Radian)')\nax2.set_xlabel('Time (Second)')\n\nmpc_graphics.add_line(var_type='_x', var_name='theta', axis=ax2)\n\nax1.axhline(0,color='black')\n\n# Axis on the right.\nfor ax in [ax2]:\n ax.yaxis.set_label_position(\"right\")\n ax.yaxis.tick_right()\n\nbar1 = ax1.plot([],[], '-o', linewidth=5, markersize=10)\nbar2 = ax1.plot([],[], '-o', linewidth=5, markersize=10)\n\n\"\"\"\n# uncomment this to visualize the obstacles\nfor obs in obstacles:\n circle = Circle((obs['x'], obs['y']), obs['r'], edgecolor='red',facecolor='red')\n ax1.add_artist(circle)\n\"\"\"\n\nax1.set_xlim(-0.8,0.8)\nax1.set_ylim(-0.8,0.8)\nax1.set_axis_off()\n\nfig.align_ylabels()\nfig.tight_layout()\n\n\n\"\"\"\nRun MPC main loop:\n\"\"\"\ntime_list = []\n\nn_steps = 40\nfor k in range(n_steps):\n tic = time.time()\n u0 = 
mpc.make_step(x0)\n toc = time.time()\n y_next = simulator.make_step(u0)\n x0 = estimator.make_step(y_next)\n time_list.append(toc-tic)\n\n if args.show_animation:\n line1, line2 = pendulum_bars(x0)\n bar1[0].set_data(line1[0],line1[1])\n bar2[0].set_data(line2[0],line2[1])\n mpc_graphics.plot_results()\n mpc_graphics.plot_predictions()\n mpc_graphics.reset_axes()\n plt.show()\n plt.savefig(\"animation/{}_{}\".format(args.run, k))\n plt.pause(0.04)\n\ntime_arr = np.array(time_list)\nmean = np.round(np.mean(time_arr[1:])*1000)\nvar = np.round(np.std(time_arr[1:])*1000)\nprint('mean runtime:{}ms +- {}ms for MPC step'.format(mean, var))\n\n# The function describing the gif:\nif args.store_animation:\n x_arr = mpc.data['_x']\n def update(t_ind):\n line1, line2 = pendulum_bars(x_arr[t_ind])\n bar1[0].set_data(line1[0],line1[1])\n bar2[0].set_data(line2[0],line2[1])\n mpc_graphics.plot_results(t_ind)\n mpc_graphics.plot_predictions(t_ind)\n mpc_graphics.reset_axes()\n\n anim = FuncAnimation(fig, update, frames=n_steps, repeat=False)\n gif_writer = ImageMagickWriter(fps=20)\n anim.save('animation/{}.gif'.format(args.run), writer=gif_writer)\n\n# Store results:\nif args.store_results:\n do_mpc.data.save_results([mpc, simulator], args.run)\n # change of dt to adjust the trajectory for opensim-rl timesteps\n t_mpc = np.arange(0,1.6,0.04)\n f_elbow = interpolate.interp1d(t_mpc, mpc.data['_x'][0:,1])\n f_shoulder = interpolate.interp1d(t_mpc, mpc.data['_x'][0:,0])\n f_elbow_vel = interpolate.interp1d(t_mpc, mpc.data['_x'][0:,3])\n f_shoulder_vel = interpolate.interp1d(t_mpc, mpc.data['_x'][0:,2])\n f_elbow_acc = interpolate.interp1d(t_mpc, mpc.data['_z'][0:,1])\n f_shoulder_acc = interpolate.interp1d(t_mpc, mpc.data['_z'][0:,0])\n t_arm = np.arange(0, 0.75, 0.01)\n target_elbow = f_elbow(t_arm)\n target_shoulder = f_shoulder(t_arm)\n target_elbow_vel = f_elbow_vel(t_arm)\n target_shoulder_vel = f_shoulder_vel(t_arm)\n target_elbow_acc = f_elbow_acc(t_arm)\n target_shoulder_acc = f_shoulder_acc(t_arm)\n np.save(\"results/target_of_elbow_{}\".format(args.run), target_elbow)\n np.save(\"results/target_of_shoulder_{}\".format(args.run), target_shoulder)\n np.save(\"results/target_of_elbow_vel_{}\".format(args.run), target_elbow_vel)\n np.save(\"results/target_of_shoulder_vel_{}\".format(args.run), target_shoulder_vel)\n np.save(\"results/target_of_elbow_acc_{}\".format(args.run), target_elbow_acc)\n np.save(\"results/target_of_shoulder_acc_{}\".format(args.run), target_shoulder_acc)\n\ninput('Learning finished! 
Press any key to exit.')\n","repo_name":"BlueBrain/learning_musculoskeletal_arm_control","sub_path":"mpc/model_learning.py","file_name":"model_learning.py","file_ext":"py","file_size_in_byte":7614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15008654906","text":"try:\n x = []\n k = 1\n print('Введите три числа:')\n for i in range(1,4):\n x.append(float(input('1: ')))\n for i in range(3):\n k = x.count(x[i])\n if k >= 2:\n print('Количество одинаковых чисел:', k)\n else:\n print('Нет одинаковых чтсел.')\nexcept ValueError:\n print('Ошибка ввода.')","repo_name":"GAKiknadze/LW","sub_path":"Лабораторная работа №2/Задание №5.py","file_name":"Задание №5.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27759362505","text":"import numpy as np\nimport csv\nfrom datetime import datetime\nfrom ClaseContrato import contrato\nclass ManejadorContrato:\n __cantidad = 0\n __dimension = 0\n\n def __init__(self, dimension):\n self.__contratos = np.empty(dimension, contrato)\n self.__cantidad = 0\n self.__dimension = dimension\n\n def __str__(self):\n s = \"\"\n for co in self.__contratos:\n s += str(co) + \"\\n\"\n return s\n \n def agregarContrato(self,unContrato):\n if self.__cantidad == self.__dimension:\n self.__contratos.resize(self.__dimension)\n self.__contratos[self.__cantidad] = unContrato\n self.__cantidad += 1\n \n def crearContrato(self, jugador, equipo):\n fechaInicio = input(\"Ingrese fecha inicio de contrato (dd/mm/yyyy): \")\n fechaFin = input(\"Ingrese fecha fin (dd/mm/yyyy): \")\n pagoMensual = input(\"Ingrese pago mensual: \")\n print(\"\\n\")\n cont = contrato(fechaInicio, fechaFin, pagoMensual, jugador, equipo)\n self.agregarContrato(cont)\n\n def diff_month(self,d1, d2):\n return (d1.year - d2.year) * 12 + d1.month - d2.month\n\n def buscaJugador(self, j):\n jug = input(\"Ingrese DNI de jugador a buscar: \")\n print(\"\\n\")\n jugador = j.buscaJugador(jug)\n if jugador != None:\n for i, contrato in enumerate(self.__contratos):\n if contrato.getJugador() == jugador:\n print(\"El jugador posee un contrato en el equipo: {} y la fecha de finalizacion del mismo es: {}\".format(contrato.getEquipo(), contrato.getFechaFin()))\n else:\n print(\"Error al buscar jugador\")\n else:\n print(\"El DNI ingresado no corresponde a ningun jugador que tenga contrato\")\n\n def buscaEquipo(self, e):\n equipo = e.buscaEquipo()\n if equipo != None:\n for i, contrato in enumerate(self.__contratos):\n if contrato.getEquipo() == equipo:\n\n F_inicio = datetime.strptime(contrato.getFechaInicio(), \"%d/%m/%Y\")\n F_final = datetime.strptime(contrato.getFechaFin(), \"%d/%m/%Y\")\n mes = self.diff_month(F_inicio, F_final)\n\n if mes == 6:\n print(\"Jugadores: {} \\n\".format(contrato.getJugador()))\n else:\n print(\"No hay jugadores en este equipo cuyo contrato vence en 6 meses\")\n else:\n print(\"Equipo no encontrado\")\n\n def buscaImportes(self, e):\n importe=0\n equipo = e.buscaEquipo()\n if equipo != None:\n for i, contrato in enumerate(self.__contratos):\n if contrato.getEquipo() == equipo:\n importe += int(contrato.getPagoMensual())\n print(\"El importe total de los contratos que posee con los jugadores es: {} \".format(importe))\n\n def generaArchivo(self,j,e):\n with open('contratos.csv', 'w', newline='') as csvfile:\n fieldnames = ['DNI jugador', 'Nombre del equipo', 'Fecha Inicio','Fecha Fin','Pago mes']\n writer = 
csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for i, contrato in enumerate(self.__contratos):\n writer.writeheader()\n writer.writerow({'DNI jugador': j.buscaDNI(i), 'Nombre del equipo': e.buscaNombre(i), 'Fecha Inicio': contrato.getFechaInicio(), 'Fecha Fin': contrato.getFechaFin(), 'Pago mes': contrato.getPagoMensual()})\n","repo_name":"katherina-00/Unidad-3","sub_path":"ejercicio3/ClaseManejadorContrato.py","file_name":"ClaseManejadorContrato.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32507970959","text":"import os\nimport sys\n\n# os.chdir(sys.path[0])\nimport os.path as osp\nimport numpy as np\nfrom glob import glob\nfrom PyQt5.Qt import QWidget\nfrom PyQt5.QtWidgets import (\n QApplication,\n QHBoxLayout,\n QVBoxLayout,\n QPushButton,\n QSplitter,\n QLineEdit,\n QLabel,\n QFrame,\n QMessageBox,\n)\nfrom PyQt5.QtGui import QIntValidator, QImage, QPixmap, QPainter, QPen\nfrom PyQt5 import QtCore\nfrom pyqtgraph.widgets.RawImageWidget import RawImageWidget\n\n\nclass PainterLabel(QLabel):\n ready = QtCore.pyqtSignal(list)\n\n def __init__(self, Parent=None):\n super().__init__(Parent)\n self.x1, self.y1 = 0, 0\n self.x2, self.y2 = 0, 0\n\n self.__pose_pts = []\n self.__is_drawing = False\n\n def mousePressEvent(self, event):\n self.__pose_pts = [(event.x(), event.y())]\n self.__is_drawing = True\n\n def mouseMoveEvent(self, event):\n if self.__is_drawing:\n self.x1, self.y1 = self.__pose_pts[0]\n self.x2, self.y2 = event.x(), event.y()\n self.update()\n\n def mouseReleaseEvent(self, event):\n self.__pose_pts.append((event.x(), event.y()))\n self.__is_drawing = False\n self.draw_pose(self.__pose_pts[0], self.__pose_pts[1])\n self.ready.emit(self.__pose_pts)\n\n def draw_pose(self, pt1, pt2):\n self.x1, self.y1 = pt1\n self.x2, self.y2 = pt2\n self.update()\n\n def paintEvent(self, event):\n super().paintEvent(event)\n\n if self.x1 != 0 or self.y1 != 0:\n painter = QPainter(self)\n\n painter.setPen(QPen(QtCore.Qt.red, 10))\n painter.drawPoint(self.x1, self.y1)\n\n painter.setPen(QPen(QtCore.Qt.red, 3, QtCore.Qt.SolidLine))\n painter.drawLine(self.x1, self.y1, self.x2, self.y2)\n\n\nclass MainWidget(QWidget):\n def __init__(self, prefix, name_filter=\"*\", ext=\"png\", Parent=None) -> None:\n \"\"\"Constructor\"\"\"\n super().__init__(Parent)\n\n self.__prefix = prefix\n self.__name_filter = name_filter\n self.__ext = ext\n self.__pose_prefix = osp.join(prefix + \"_feature\", \"pose_m\")\n\n self.__name_lst = glob(osp.join(prefix, f\"**/{name_filter}.{ext}\"), recursive=True)\n self.__name_lst = [x.replace(f\"{prefix}/\", \"\").replace(f\".{ext}\", \"\") for x in self.__name_lst]\n self.__name_lst.sort()\n\n if len(self.__name_lst) == 0:\n QMessageBox.critical(self, \"Error\", \"No image file with specified format is found\")\n return\n\n self.__img_panel_width = 512\n self.__img_panel_height = 512\n\n self.__InitView()\n self.__SetupConnection()\n\n self.__scale_ratio = 1.0\n self.__cur_pose = np.zeros(3)\n self.__index = -1\n self.__is_modifed = False\n self.on_btn_Next_Clicked()\n\n def __InitView(self):\n \"\"\"初始化界面\"\"\"\n # self.setFixedSize(640, 480)\n self.setWindowTitle(\"Pose Annotation\")\n\n top_layout = QVBoxLayout(self)\n top_layout.setSpacing(10)\n\n sub_layout = QHBoxLayout()\n sub_layout.setContentsMargins(5, 5, 5, 5)\n self.__label_prefix = QLabel(\"Image Filter\")\n self.__label_prefix.setFixedHeight(20)\n sub_layout.addWidget(self.__label_prefix)\n 
self.__edit_prefix = QLineEdit(osp.join(self.__prefix, f\"**/{self.__name_filter}.{self.__ext}\"))\n self.__edit_prefix.setFixedHeight(20)\n self.__edit_prefix.setReadOnly(True)\n sub_layout.addWidget(self.__edit_prefix)\n top_layout.addLayout(sub_layout)\n\n # 新建一个水平布局作为本窗体的主布局\n main_layout = QHBoxLayout()\n # 设置主布局内边距以及控件间距为10px\n main_layout.setSpacing(10)\n\n # 新建垂直子布局用于放置画板和提示栏\n sub_layout = QVBoxLayout()\n # 设置此子布局和内部控件的间距为5px\n sub_layout.setContentsMargins(5, 5, 5, 5)\n # 在主界面左侧放置画板\n self.__img_box = PainterLabel()\n self.__img_box.setGeometry(20, 20, self.__img_panel_width, self.__img_panel_height)\n self.__img_box.setAlignment(QtCore.Qt.AlignTop)\n sub_layout.addWidget(self.__img_box)\n self.__label_info = QLabel()\n self.__label_info.setFixedWidth(self.__img_panel_width - 20)\n self.__label_info.setFixedHeight(20)\n self.__label_info.setFrameShape(QFrame.Box)\n self.__label_info.setFrameShadow(QFrame.Shadow.Raised)\n sub_layout.addWidget(self.__label_info)\n main_layout.addLayout(sub_layout)\n\n # 新建垂直子布局用于放置按键\n sub_layout = QVBoxLayout()\n # 设置此子布局和内部控件的间距为5px\n sub_layout.setContentsMargins(5, 5, 5, 5)\n # image name\n ssub_layout = QHBoxLayout()\n self.__label_img_name = QLabel(\"Image Name\")\n # self.__label_img_name.setText(\"Image Name\")\n self.__label_img_name.setFixedHeight(20)\n ssub_layout.addWidget(self.__label_img_name)\n self.__edit_img_name = QLineEdit()\n self.__edit_img_name.setFixedHeight(20)\n self.__edit_img_name.setReadOnly(True)\n ssub_layout.addWidget(self.__edit_img_name)\n sub_layout.addLayout(ssub_layout)\n\n sub_layout.addWidget(QSplitter()) # 占位符\n\n # labeled fingerprint pose\n ssub_layout = QHBoxLayout()\n self.__label_fp_pose = QLabel(\"FP Pose\")\n # self.__label_fp_pose.setText(\"FP Pose\")\n self.__label_fp_pose.setFixedHeight(20)\n ssub_layout.addWidget(self.__label_fp_pose)\n self.__edit_center_x = QLineEdit()\n self.__edit_center_x.setFixedWidth(50)\n self.__edit_center_x.setFixedHeight(20)\n self.__edit_center_x.setReadOnly(True)\n ssub_layout.addWidget(self.__edit_center_x)\n self.__edit_center_y = QLineEdit()\n self.__edit_center_y.setFixedWidth(50)\n self.__edit_center_y.setFixedHeight(20)\n self.__edit_center_y.setReadOnly(True)\n ssub_layout.addWidget(self.__edit_center_y)\n self.__edit_center_angle = QLineEdit()\n self.__edit_center_angle.setFixedWidth(50)\n self.__edit_center_angle.setFixedHeight(20)\n self.__edit_center_angle.setReadOnly(True)\n ssub_layout.addWidget(self.__edit_center_angle)\n sub_layout.addLayout(ssub_layout)\n\n # prev or next button\n ssub_layout = QHBoxLayout()\n self.__btn_Prev = QPushButton(\"Prev\")\n ssub_layout.addWidget(self.__btn_Prev)\n self.__btn_Next = QPushButton(\"Next\")\n ssub_layout.addWidget(self.__btn_Next)\n sub_layout.addLayout(ssub_layout)\n\n # save buttion\n self.__btn_Save = QPushButton(\"Save\")\n sub_layout.addWidget(self.__btn_Save)\n\n main_layout.addLayout(sub_layout)\n\n top_layout.addLayout(main_layout)\n\n @property\n def cur_pose(self):\n return self.__cur_pose\n\n @cur_pose.setter\n def cur_pose(self, value):\n # update when value changed\n if np.any(self.__cur_pose != value):\n self.__edit_center_x.setText(f\"{np.rint(value[0] / self.__scale_ratio):.0f}\")\n self.__edit_center_y.setText(f\"{np.rint(value[1] / self.__scale_ratio):.0f}\")\n self.__edit_center_angle.setText(f\"{value[2]:.2f}\")\n self.__cur_pose = value\n self.__is_modifed = True\n\n def __SetupConnection(self):\n self.__btn_Prev.clicked.connect(self.on_btn_Prev_Clicked)\n 
self.__btn_Next.clicked.connect(self.on_btn_Next_Clicked)\n self.__btn_Save.clicked.connect(self.on_btn_Save_Clicked)\n self.__img_box.ready.connect(self.on_pose_Changed)\n\n def on_btn_Prev_Clicked(self):\n if self.__is_modifed:\n self.on_btn_Save_Clicked()\n\n self.__index = (self.__index - 1) % len(self.__name_lst)\n self.fetch_new_data()\n\n def on_btn_Next_Clicked(self):\n if self.__is_modifed:\n self.on_btn_Save_Clicked()\n\n self.__index = (self.__index + 1) % len(self.__name_lst)\n self.fetch_new_data()\n\n def on_btn_Save_Clicked(self):\n if not self.__is_modifed or np.all(self.cur_pose == 0):\n # QMessageBox.warning(self, \"Warning\", \"Initialized pose has not been changed\")\n return\n\n try:\n img_name = self.__name_lst[self.__index]\n fpath = osp.join(self.__pose_prefix, f\"{img_name}.txt\")\n if not osp.exists(osp.dirname(fpath)):\n os.makedirs(osp.dirname(fpath))\n with open(fpath, \"w\") as fp:\n x1 = np.rint(self.cur_pose[0] / self.__scale_ratio)\n y1 = np.rint(self.cur_pose[1] / self.__scale_ratio)\n fp.write(f\"{x1:.0f} {y1:.0f} {self.cur_pose[2]:.2f}\")\n self.__label_info.setText(f\"Save pose to {img_name} done\")\n except Exception as ex:\n self.__label_info.setText(ex)\n self.__is_modifed = False\n\n def on_pose_Changed(self, pose_pts):\n x1, y1 = pose_pts[0]\n x2, y2 = pose_pts[1]\n self.cur_pose = np.array([x1, y1, np.rad2deg(np.arctan2(x1 - x2, y1 - y2))])\n\n def fetch_new_data(self):\n img_name = self.__name_lst[self.__index]\n\n self.__edit_img_name.setText(img_name)\n self.draw_image(osp.join(self.__prefix, f\"{img_name}.{self.__ext}\"))\n if osp.exists(osp.join(self.__pose_prefix, f\"{img_name}.txt\")):\n tmp = np.loadtxt(osp.join(self.__pose_prefix, f\"{img_name}.txt\"))\n tmp[:2] = np.rint(tmp[:2] * self.__scale_ratio)\n self.cur_pose = tmp\n else:\n self.cur_pose = np.zeros(3)\n x1, y1 = self.cur_pose[:2]\n x2 = x1 - 100 * np.sin(np.deg2rad(self.cur_pose[2]))\n y2 = y1 - 100 * np.cos(np.deg2rad(self.cur_pose[2]))\n self.__img_box.draw_pose((x1, y1), (x2, y2))\n self.__is_modifed = False\n\n def draw_image(self, path):\n pixmap = QPixmap(path)\n img_width = pixmap.width()\n img_height = pixmap.height()\n\n margin = 20\n if img_width >= img_height:\n self.__scale_ratio = (self.__img_panel_width - margin) * 1.0 / img_width\n pixmap = pixmap.scaledToWidth(self.__img_panel_width - margin)\n else:\n self.__scale_ratio = (self.__img_panel_height - margin) * 1.0 / img_height\n pixmap = pixmap.scaledToHeight(self.__img_panel_height - margin)\n\n self.__img_box.setPixmap(pixmap)\n\n def Quit(self):\n self.close()\n\n\nif __name__ == \"__main__\":\n # Just an example, import this class in your own project, DO NOT MODIFY THIS CODE!\n app = QApplication(sys.argv)\n\n prefix = \"/home/duanyongjie/data/finger/ContactSerials/Hefei/rolled\"\n name_filter = \"*\"\n main_win = MainWidget(prefix=prefix, name_filter=name_filter, ext=\"png\")\n main_win.show()\n\n exit(app.exec_())\n","repo_name":"keyunj/fptools","sub_path":"fp_edit/manual_label_pose.py","file_name":"manual_label_pose.py","file_ext":"py","file_size_in_byte":10865,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23452866181","text":"in_file=open('A-large.in','r')\r\nt_cases=int(in_file.readline())\r\nneed=[]\r\nif ((t_cases<=100)and(t_cases>=1)):\r\n for reader in range(t_cases):\r\n aggr_sum=0\r\n frnd_cnt=0\r\n data_set=(in_file.readline()).split()\r\n shy_max=int(data_set[0])\r\n ppl_cnt=data_set[1]\r\n aggr_sum=int(ppl_cnt[0])\r\n for shy_level 
in range(1,shy_max+1):\r\n needed=shy_level-aggr_sum\r\n if needed>0:\r\n frnd_cnt+=needed\r\n aggr_sum+=needed\r\n aggr_sum+=int(ppl_cnt[shy_level])\r\n else:\r\n aggr_sum+=int(ppl_cnt[shy_level])\r\n need+=[frnd_cnt]\r\nout_file=open('OutputBig.txt','w')\r\nfor reader2 in range(t_cases):\r\n out_file.write(\"Case #\")\r\n out_file.write(str(reader2+1))\r\n out_file.write(\": \")\r\n out_file.write(str(need[reader2]))\r\n if reader2<99:\r\n out_file.write('\\n')\r\nout_file.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/2711.py","file_name":"2711.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32416371142","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\n## 获取html网页\n\ndef get_html(url):\n try:\n r = requests.get(url, timeout=30)\n r.raise_for_status()\n # 这里知道百度贴吧的编码是utf-8,爬取其他页面时建议使用:\n # r.encoding = r.apparent_encoding\n r.encoding = 'utf-8'\n return r.text\n except:\n return \"ERROR\"\n\n\n# 获取详细内容\ndef get_content(url):\n print(\"开始抓取本页内容!\")\n contents = []\n html = get_html(url)\n\n # 解析html, soup\n soup = BeautifulSoup(html, \"lxml\")\n\n # get Tags\n tags = soup.find_all(\"div\", class_=\"item\")\n # pprint.pprint(tags)\n\n for tag in tags:\n content = {}\n try:\n content['rank'] = tag.find(\"em\", class_=\"\").text.strip()\n content['title'] = tag.find(\"span\", class_=\"title\").text.strip()\n content['director'] = tag.find(\"p\", class_=\"\").text.strip().split(\" \")[0]\n content['starring'] = tag.find(\"p\", class_=\"\").text.strip().split(\" \")[1]\n content['time'] = tag.find(\"p\", class_=\"\").text.strip().split(\" \")[2]\n content['country'] = tag.find(\"p\", class_=\"\").text.strip().split(\" \")[3]\n content['genre'] = tag.find(\"p\", class_=\"\").text.strip().split(\" \")[4]\n content['star'] = tag.find('span', class_=\"rating_num\").text.strip()\n content['quote'] = tag.find('span', class_=\"inq\").text.strip()\n contents.append(content)\n except:\n \"Something Wrong happened!\"\n return contents\n\n\ndef out2txt(dict):\n with open('douban250.txt', 'a+', encoding='utf-8') as f:\n for content in dict:\n f.write(content['rank'] + \" \"\n + content['title'] + \" \"\n + content['director'] + \" \"\n + content['starring'] + \" \"\n + content['time'] + \" \"\n + content['country'] + \" \"\n + content['genre'] + \" \"\n + content['star'] + \" \"\n + content['quote'] + \" \"\n + \"\\n\")\n f.close()\n\n\ndef main(base_url, deep):\n url_list = []\n for i in range(deep):\n url_list.append(base_url + str(i * 25) + \"&filter=\")\n\n for url in url_list:\n content = get_content(url)\n out2txt(content)\n print(\"抓取完成!\")\n\n\nbase_url = \"https://movie.douban.com/top250?start=\"\ndeep = 10\n\nif __name__ == '__main__':\n main(base_url, deep)\n","repo_name":"yijigao/Python_scraper","sub_path":"Douban_movie_top250/douban_movie_250.py","file_name":"douban_movie_250.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31961852600","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/12/27 上午10:49\n# @Author : Hanxun Yu\n# @Email : \n# @File : rsa_util.py\n# @Software: PyCharm\n\nfrom sympy.ntheory.modular import crt\n\nimport modulo_util as m_u\nimport numpy as np\nimport random\nimport char_table\n\nmu = m_u.ModuloUtil\nct = char_table.CharTable()\n\n\nclass RSAUtil:\n def __init__(self, p, q, b):\n \"\"\"\n :param p:\n 
:param q:\n        :param b: if -1, b will be assigned a random value in (3, 9999)\n        :return:\n        \"\"\"\n        self.p = p\n        self.q = q\n        public_key = self.cal_public_key(p, q, b)\n        print(public_key)\n        self.a = public_key['a']\n        self.b = public_key['b']\n        self.n = public_key['n']\n\n    def encode_self(self, num_arr: list) -> list:\n        return self.encode_custom(self.b, self.n, num_arr)\n\n    def decode_self(self, num_arr: list) -> list:\n        return self.decode_custom(self.a, self.p, self.q, num_arr)\n\n    def encode_custom(self, b: int, n, num_arr: list) -> list:\n        ret = []\n        for num in num_arr:\n            cipher = mu.numModulo(mu.powModulo(num, b, n), n)\n            ret.append(cipher)\n        return ret\n\n    def decode_custom(self, a, p, q, num_arr: list) -> list:\n        ret = []\n        n = p * q\n        for num in num_arr:\n            plaintext = mu.numModulo(mu.powModulo(num, a, n), n)\n            ret.append(plaintext)\n        return ret\n\n    def cal_public_key(self, p, q, b) -> dict:\n        \"\"\"\n        :param p:\n        :param q:\n        :param b: if -1, b will be assigned a random value in (3, 9999)\n        :return:\n        \"\"\"\n        euler_number = (p - 1) * (q - 1)\n        print(\"phi(n):\", euler_number)\n        mod = euler_number\n\n        if b == -1:\n            while 1:\n                b = random.randint(3, 9999)\n                if np.gcd(b, mod) == 1:\n                    break\n\n        # if a gcd error is raised here, the chosen b is invalid\n        a = mu.numIntInverse(b, mod)\n        n = p * q\n        return {'n': n, 'b': b, 'a': a}\n\n\nif __name__ == \"__main__\":\n    rsa = RSAUtil(113, 127, 17)\n    print(\"encode 7:\", rsa.encode_self([7]))\n    print(\"decode 7:\", rsa.decode_self([7]))\n\n    print(\"encode 309:\", rsa.encode_self([309]))\n    print(\"decode 1134:\", rsa.decode_self([1134]))\n\n# Chinese Remainder Theorem implementation\n# m = [25, 26, 27]\n# v = [12, 9, 23]\n# print(crt(m, v))\n","repo_name":"Hanxun-Yu/MSE-ECNU-py","sub_path":"InformationSecurityAndTechnology/util/rsa_util.py","file_name":"rsa_util.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1727409240","text":"from pkg_resources import resource_filename\n\n__all__ = [\n    \"TRIES_TILL_TIMEOUT\",\n    \"USE_WORKSPACE_FILE_GROUP\",\n    \"VD18_IDS_FILE\",\n    \"VD18_URL\",\n    \"VD18_METS_EXT\",\n    \"WAIT_TIME_BETWEEN_SUBMITS\",\n    \"WAIT_TIME_BETWEEN_POLLS\",\n]\n\n# These are the VD18 constants\n# The vd18IDs.txt file contains METS IDs.\n# These IDs are used to build the METS URL to be submitted to the Operandi Server.\nVD18_IDS_FILE: str = resource_filename(__name__, \"assets/vd18IDs.txt\")\nVD18_URL: str = \"https://gdz.sub.uni-goettingen.de/mets/\"\nVD18_METS_EXT: str = \".mets.xml\"\n\n# Time waited between the POST requests to the OPERANDI Server\nWAIT_TIME_BETWEEN_SUBMITS: int = 15 # seconds\n# Time waited between each workflow job status check\nWAIT_TIME_BETWEEN_POLLS: int = 15 # seconds\n# Times to perform workflow job status checks before timeout\nTRIES_TILL_TIMEOUT: int = 30\n\nUSE_WORKSPACE_FILE_GROUP = \"DEFAULT\"\n","repo_name":"subugoe/operandi","sub_path":"src/harvester/operandi_harvester/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"40195305073","text":"#helpers.py\n\nfrom functools import wraps\nfrom flask import session, redirect\nimport sqlite3\nimport os\n\n#login required decorated function\ndef login_required(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        if session.get('user_id') is None:\n            return redirect('/login')\n        return f(*args, **kwargs)\n    return decorated_function\n\n#function to create users table\ndef 
create_users_table(dbFile: str):\n    conn = sqlite3.connect(dbFile)\n    c = conn.cursor()\n    c.execute('''CREATE TABLE IF NOT EXISTS users(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n                                                 username VARCHAR(20) NOT NULL,\n                                                 hash BINARY(255) NOT NULL)''')\n    conn.commit()\n    conn.close()\n\n#function to add user to database\ndef add_user(Username, Hash):\n    DB = 'databases/users.db'\n    try:\n        conn = sqlite3.connect(DB)\n        c = conn.cursor()\n        c.execute('''INSERT INTO users(username, hash) VALUES(?, ?)''', (Username, Hash))\n        conn.commit()\n        conn.close()\n        return redirect('/login')\n    except sqlite3.OperationalError as e:\n        print(e)\n\n#function to create databases\ndef initialise():\n    if not os.path.exists('databases'):\n        print(os.getcwd())\n        os.mkdir('databases')\n        sqlite3.connect('databases/users.db').close()\n        create_users_table(dbFile='databases/users.db')\n    else:\n        print('Already initialised')\n\n\n#function to return username password hash\ndef check_username_password_hash(Username):\n    DB = 'databases/users.db'\n    try:\n        conn = sqlite3.connect(DB)\n        c = conn.cursor()\n        c.execute('''SELECT hash FROM users WHERE username = ?''', (Username,))\n        found_hash = list(c.fetchall())\n        return found_hash\n    except sqlite3.OperationalError as e:\n        print(e)\n    except Exception as e:\n        print(e)\n","repo_name":"malgulam/100ProjectsOfCode","sub_path":"p2pfilesharing/previous_code/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"16604031907","text":"from db import get_db\n\nclass Group:\n    def __init__(self, name, expense=0):\n        self.group_name = name\n        self.expense = expense\n\n    @staticmethod\n    def get(group_name):\n        db = get_db()\n        group = db.execute(\n            \"SELECT * FROM chatgroup WHERE group_name = ?\",\n            (group_name,)\n        ).fetchone()\n        if not group:\n            return None\n\n        group = Group(\n            name = group[0], expense = group[1]\n        )\n        return group\n    \n    @staticmethod\n    def create(name):\n        db = get_db()\n\n        # handle empty group name\n        if len(name.strip()) == 0:\n            return None\n\n        db.execute(\n            \"INSERT INTO chatgroup (group_name, expense) \"\n            \"VALUES (?, 0)\",\n            (name,)\n        )\n        db.commit()\n        return name\n    \n    def list_members(self):\n        db = get_db()\n        members = db.execute(\n            \"SELECT user.email, user.name FROM user JOIN user_group ON user.email = user_group.user_email WHERE group_name = ?\",\n            (self.group_name,)\n        )\n        if not members:\n            return None\n        \n        member_dict = {}\n        for row in members:\n            member_dict[row[0]] = row[1]\n        \n        return member_dict\n    \n    def list_bills(self):\n        db = get_db()\n        bills = db.execute(\n            \"SELECT bill.bill_date FROM bill JOIN chatgroup ON bill.group_name = chatgroup.group_name WHERE chatgroup.group_name = ?\",\n            (self.group_name,)\n        )\n        if not bills:\n            return None\n        \n        bills_list = []\n        for row in bills:\n            bills_list.append(row[0])\n        \n        return bills_list\n\n    def list_bills_info(self):\n        db = get_db()\n        bills = db.execute(\n            \"SELECT bill_date, payer, bill.group_name, amount, description FROM bill JOIN chatgroup ON bill.group_name = chatgroup.group_name WHERE chatgroup.group_name = ?\",\n            (self.group_name,)\n        )\n        if not bills:\n            return None\n        \n        bills_list = []\n        for row in bills:\n            bills_list.append([row[0], row[1], row[2], row[3], row[4]])\n        \n        return bills_list\n\n    ","repo_name":"yimingyinqwqq/Splitter-Application","sub_path":"backend/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9016537206","text":"import csv\nimport random\nimport time\nfrom copy import deepcopy\nfrom typing import Callable, Tuple, List\n\nfrom BreadthFirstSearchNextLand import bfs_next_land, bfs_next_land_with_node_count\nfrom BreadthFirstSearchVisitOnce import (\n bfs_visit_once,\n bfs_visit_once_with_node_count,\n)\nfrom Constants import WATER, LAND, Grid\nfrom BreadthFirstSearchOnLand import (\n bfs_on_land,\n bfs_on_land_with_node_count,\n)\n\n\ndef test():\n name_to_method = {\n \"bfs_on_land\": bfs_on_land,\n \"bfs_visit_once\": bfs_visit_once,\n \"bfs_next_land\": bfs_next_land,\n }\n\n grid1 = [\n [\"1\", \"1\", \"1\", \"1\", \"0\"],\n [\"1\", \"1\", \"0\", \"1\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\"],\n ]\n grid2 = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"],\n ]\n grid3 = [\n [\"1\", \"1\", \"1\", \"1\", \"1\"],\n [\"0\", \"0\", \"0\", \"0\", \"1\"],\n [\"0\", \"0\", \"0\", \"0\", \"1\"],\n [\"0\", \"0\", \"0\", \"0\", \"1\"],\n ]\n grid4 = [\n [\"1\", \"1\", \"1\", \"1\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"1\"],\n [\"0\", \"0\", \"0\", \"0\", \"1\"],\n [\"0\", \"0\", \"0\", \"0\", \"1\"],\n ]\n grid5 = [\n [\"1\", \"1\", \"1\", \"1\", \"1\"],\n [\"1\", \"0\", \"0\", \"0\", \"1\"],\n [\"1\", \"0\", \"0\", \"0\", \"1\"],\n [\"1\", \"1\", \"1\", \"1\", \"1\"],\n ]\n grid6 = [\n [\"1\", \"1\", \"1\", \"0\", \"0\"],\n [\"0\", \"1\", \"0\", \"1\", \"1\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"1\", \"0\", \"1\", \"1\", \"1\"],\n ]\n grids = [grid1, grid2, grid3, grid4, grid5, grid6]\n\n for name, method in name_to_method.items():\n print(\"--- \" + name + \" ---\")\n for grid in grids:\n print(method(deepcopy(grid)))\n\n\ndef random_tests_with_stat_collection_for_all(\n num_tests: int, min_size: int, max_size: int\n):\n name_to_method = {\n \"bfs_on_land\": bfs_on_land_with_node_count,\n \"bfs_visit_once\": bfs_visit_once_with_node_count,\n \"bfs_next_land\": bfs_next_land_with_node_count,\n }\n\n for name, method in name_to_method.items():\n random_tests_with_stat_collection(num_tests, min_size, max_size, name, method)\n\n\ndef random_tests_with_stat_collection(\n num_tests: int,\n min_size: int,\n max_size: int,\n filename: str,\n solve_with_node_count: Callable[[List[List[str]]], Tuple[int, int]],\n):\n tests_file = open(filename + \".txt\", \"w\")\n\n with open(filename + \".csv\", \"w\") as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(\n [\n \"Problem Number\" \"Width\",\n \"Height\",\n \"Number Islands\",\n \"Nodes Visited\",\n \"Time (ms)\",\n \"Max Memory (MiB)\",\n ]\n )\n\n total_width = 0\n total_height = 0\n total_num_islands = 0\n total_num_nodes_visited = 0\n total_time = 0\n\n test_num = 1\n\n while test_num <= num_tests:\n width = random.randint(min_size, max_size)\n height = random.randint(min_size, max_size)\n grid = generate_random_grid(width, height)\n tests_file.write(\"Test \" + str(test_num) + \"\\n\")\n tests_file.write(\n \"\\n\".join([\"\".join([cell for cell in row]) for row in grid])\n )\n tests_file.write(\"\\n\\n\")\n\n t0 = time.time_ns() / 1000000\n num_islands, num_nodes = solve_with_node_count(grid)\n t1 = time.time_ns() / 1000000\n t = t1 - t0\n\n total_width += width\n total_height += height\n total_num_islands += num_islands\n total_num_nodes_visited += num_nodes\n total_time += t\n\n csvwriter.writerow([test_num, width, height, num_islands, num_nodes, 
t])\n\n test_num += 1\n\n csvwriter.writerow([])\n csvwriter.writerow(\n [\n \"Total Tests\",\n \"Average Width\",\n \"Average Height\",\n \"Average Number Islands\",\n \"Average Nodes Visited\",\n \"Average Time (ms)\",\n ]\n )\n\n csvwriter.writerow(\n [\n num_tests,\n total_width / num_tests,\n total_height / num_tests,\n total_num_islands / num_tests,\n total_num_nodes_visited / num_tests,\n total_time / num_tests,\n ]\n )\n tests_file.close()\n\n\ndef generate_random_grid(width: int, height: int) -> Grid:\n grid = [[WATER] * width for _ in range(height)]\n\n chance_for_land = random.random()\n\n for i in range(height):\n for j in range(width):\n r = random.random()\n if r <= chance_for_land:\n grid[i][j] = LAND\n\n return grid\n\n\nif __name__ == \"__main__\":\n random_tests_with_stat_collection_for_all(1000, 50, 100)\n","repo_name":"cjberson/island_detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4024481000","text":"n = int(input())\n\nx = 0\ny = 0\nj = 0\nmaxNum = 0\nresultNum = 0\n# i는 2부터 중간 값까지의 값을 가져서 i+i+j+j가 성립하도록 j를 구한다.\nfor i in range(2, n//2):\n \n # j의 값은 중간값(소수 포함 - i)로 만약 n의 값이 7이면, i는 2, j는 1.5가 됀다.\n j = n/2 - i\n \n if(i+i+j+j == n):\n \n if(maxNum < i*j):\n maxNum = i*j\n resultNum = i\n\nprint(resultNum)\n","repo_name":"JinleeJeong/Algorithm","sub_path":"20년 2월/1297.py","file_name":"1297.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41181884090","text":"\"\"\"\nEntry point for training DETR models\n\"\"\"\nimport os\nimport sys\nimport time\nimport yaml\nimport json\nimport random\nimport datetime\nimport warnings\nfrom pathlib import Path\nwarnings.filterwarnings('ignore')\n\nimport wandb\nimport click\nimport numpy as np\n\nimport torch\n\npackage_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nsys.path.insert(0, package_root)\n\nfrom sparse_detector.configs import build_dataset_config, build_detr_config, load_base_configs, build_trainer_config\nfrom sparse_detector.utils import misc as utils\nfrom sparse_detector.utils import distributed as dist_utils\nfrom sparse_detector.models import build_model\nfrom sparse_detector.models.utils import describe_model\nfrom sparse_detector.datasets.loaders import build_dataloaders\nfrom sparse_detector.engines.base import build_detr_optims, train_one_epoch, evaluate\nfrom sparse_detector.utils.logging import log_ap_to_wandb, log_to_wandb\nfrom sparse_detector.models.attention import VALID_ACTIVATION\n\n\n@click.command(\"train_detr\")\n@click.option('--detr-config-file', default='', help=\"Path to config file\")\n@click.option('--exp-name', default=None, help='Experiment name. 
Need to be set')\n@click.option('--seed', default=42, type=int)\n@click.option('--decoder-act', default='softmax', type=str, help='Activation function for the decoder MH cross-attention')\n@click.option('--coco-path', type=str)\n@click.option('--output-dir', default='checkpoints', help='path where to save, empty for no saving')\n@click.option('--resume-from-checkpoint', default='', help='resume from checkpoint')\n@click.option('--start-epoch', default=0, type=int, help='start epoch')\n@click.option('--epochs', default=300, type=int, help='number of training epochs')\n@click.option('--batch-size', default=6, type=int, help='batch size')\n@click.option('--num-workers', default=24, type=int)\n@click.option('--wandb-log/--no-wandb-log', default=True, help=\"Whether to enable logging to W&B\")\n@click.option('--wandb-group', default=None, help=\"The group for experiment on W&B\")\n@click.option('--wandb-id', default=None, help=\"Run ID for resume\")\n@click.pass_context\ndef main(ctx, detr_config_file, exp_name, seed, decoder_act, coco_path,\n output_dir, resume_from_checkpoint, start_epoch, epochs, \n batch_size, num_workers, wandb_log, wandb_group, wandb_id):\n # Load the base config and initialise distributed training mode first\n # to avoid multiple hassles in printing\n base_configs = load_base_configs()\n dist_config = dist_utils.init_distributed_mode(base_configs['distributed']['dist_url'])\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n\n print(\"Base config\", base_configs)\n cmd_params = ctx.params\n print(\"cmd_params\", cmd_params)\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n if cmd_params['decoder_act'] not in VALID_ACTIVATION:\n raise ValueError(f\"Unsupported decoder activation: {cmd_params['decoder_act']}\")\n\n detr_config = build_detr_config(cmd_params['detr_config_file'], params=cmd_params, device=device)\n print(\"DETR config\", detr_config)\n\n trainer_config = build_trainer_config(base_configs['trainer'], params=cmd_params)\n print(\"Trainer config\", trainer_config)\n\n dataset_config = build_dataset_config(base_configs['dataset'], params=ctx.params)\n\n wandb_run = None\n wandb_configs = None\n if dist_utils.is_main_process() and cmd_params['wandb_log']:\n print(\"Initialize WandB logging...\")\n wandb_configs = base_configs.get(\"wandb\")\n wandb_configs[\"name\"] = cmd_params['exp_name']\n if cmd_params['wandb_id'] is not None:\n wandb_configs[\"id\"] = cmd_params['wandb_id']\n wandb_configs[\"resume\"] = True\n\n if cmd_params['wandb_group'] is not None:\n wandb_configs[\"group\"] = cmd_params['wandb_group']\n\n config_to_log = {**trainer_config, **detr_config, **dataset_config}\n wandb_run = wandb.init(**wandb_configs, config=config_to_log)\n\n # Fix the seed for reproducibility\n seed = seed + dist_utils.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n print(\"Setup output directory\")\n exp_dir = Path(output_dir) / exp_name\n exp_dir.mkdir(parents=True, exist_ok=True)\n\n if dist_utils.is_main_process():\n config_to_dump = {\"base\": base_configs, \"detr\": detr_config, \"wandb\": wandb_configs, \"cmd_params\": cmd_params}\n with open(exp_dir / \"configs.yml\", \"w\") as f:\n yaml.dump(config_to_dump, f)\n\n print(\"Building DETR model...\")\n model, criterion, postprocessors = build_model(**detr_config)\n model.to(device)\n\n if wandb_run is not None:\n wandb_run.watch(model, log=\"gradients\")\n\n model_without_ddp = model\n if dist_config.distributed:\n model = 
torch.nn.parallel.DistributedDataParallel(model, device_ids=[dist_config.gpu])\n        model_without_ddp = model.module\n    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n    describe_model(model_without_ddp)\n\n    print(\"Building datasets and data loaders...\")\n    data_loader_train, sampler_train = build_dataloaders(\n        'train', dataset_config['coco_path'], dataset_config['batch_size'],\n        dist_config.distributed, dataset_config['num_workers']\n    )\n    data_loader_val, base_ds = build_dataloaders(\n        'val', dataset_config['coco_path'], dataset_config['batch_size'],\n        dist_config.distributed, dataset_config['num_workers']\n    )\n\n    print(\"Building optim...\")\n    optimizer, lr_scheduler = build_detr_optims(\n        model_without_ddp,\n        lr=trainer_config['lr'],\n        lr_backbone=detr_config['lr_backbone'],\n        lr_drop=trainer_config['lr_drop'],\n        weight_decay=trainer_config['weight_decay'],\n        lr_alpha=trainer_config['lr'] * 10,  # TODO: don't hardcode\n    )\n\n    global_step = 0  # Initialize the global step\n    if resume_from_checkpoint:\n        checkpoint = torch.load(resume_from_checkpoint, map_location='cpu')\n        model_without_ddp.load_state_dict(checkpoint['model'])\n        if 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\n            optimizer.load_state_dict(checkpoint['optimizer'])\n            start_epoch = checkpoint['epoch'] + 1\n            global_step = checkpoint['global_step'] + 1\n\n            if decoder_act == \"alpha_entmax\":\n                print(\"Adjusting lr_scheduler state from checkpoint to meet \"\n                      \"the setup of new experiment: not decaying alpha learning rates!\")\n                lr_state_dict = checkpoint['lr_scheduler']\n                lr_state_dict['_last_lr'][0] = trainer_config['lr'] * 10\n                print(lr_state_dict)\n                lr_scheduler.load_state_dict(lr_state_dict)\n            else:\n                lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n        print(f\"start_epoch = {start_epoch}; global_step = {global_step}\")\n        print(f\"Resuming from checkpoint {resume_from_checkpoint}\")\n\n    print(\"Start training...\")\n    start_time = time.time()\n    for epoch in range(start_epoch, epochs):\n        if dist_config.distributed:\n            sampler_train.set_epoch(epoch)\n        train_stats, global_step = train_one_epoch(\n            model, criterion, data_loader_train, optimizer, device, epoch,\n            trainer_config['clip_max_norm'], global_step=global_step, wandb_run=wandb_run, log_freq=base_configs['logging'].get('log_freq'),\n            monitor_alpha=(decoder_act == \"alpha_entmax\")\n        )\n        lr_scheduler.step()\n        if exp_dir:\n            checkpoint_paths = [exp_dir / 'checkpoint.pth']\n            # extra checkpoint before LR drop and every 10 epochs\n            if (epoch + 1) % trainer_config['lr_drop'] == 0 or (epoch + 1) % 10 == 0:\n                checkpoint_paths.append(exp_dir / f'checkpoint_{epoch:04}.pth')\n            for checkpoint_path in checkpoint_paths:\n                dist_utils.save_on_master({\n                    'model': model_without_ddp.state_dict(),\n                    'optimizer': optimizer.state_dict(),\n                    'lr_scheduler': lr_scheduler.state_dict(),\n                    'epoch': epoch,\n                    'hyperparams': ctx.params,\n                    'global_step': global_step,\n                }, checkpoint_path)\n        \n        # Logging epoch train stats to W&B\n        if dist_utils.is_main_process():\n            log_to_wandb(wandb_run, train_stats, epoch=epoch, prefix=\"train-epoch\")\n\n        test_stats, coco_evaluator = evaluate(\n            model, criterion, postprocessors, data_loader_val, base_ds, device, epoch, wandb_run=wandb_run\n        )\n        if dist_utils.is_main_process():\n            log_to_wandb(wandb_run, test_stats, epoch=epoch, prefix=\"val-epoch\")\n            log_ap_to_wandb(wandb_run, test_stats.get(\"coco_eval_bbox\"), epoch=epoch, prefix=\"val-AP\")\n\n        log_stats = {\n            **{f'train_{k}': v for k, v in 
train_stats.items()},\n **{f'test_{k}': v for k, v in test_stats.items()},\n 'epoch': epoch,\n 'n_parameters': n_parameters,\n 'global_step': global_step\n }\n\n if exp_dir and dist_utils.is_main_process():\n with (exp_dir / \"log.txt\").open(\"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n\n # for evaluation logs\n if coco_evaluator is not None:\n (exp_dir / 'eval').mkdir(exist_ok=True)\n if \"bbox\" in coco_evaluator.coco_eval:\n filenames = ['latest.pth']\n if epoch % 10 == 0:\n filenames.append(f'{epoch:03}.pth')\n for name in filenames:\n torch.save(coco_evaluator.coco_eval[\"bbox\"].eval, exp_dir / \"eval\" / name)\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n wandb.finish()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"erikdao/sparse-detector","sub_path":"sparse_detector/engines/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32273798446","text":"import unittest\nimport os\nimport socket\n\nif __name__ == \"__main__\":\n import utils\n utils.import_depends()\n\nfrom brokertest import TestBrokerCommand\n\nSW_HOSTNAME = \"utpgsw0.aqd-unittest.ms.com\"\n\n\nclass TestVlan(TestBrokerCommand):\n\n def getswip(self):\n return self.net.tor_net[10].usable[0]\n\n def test_001_addvlan714(self):\n command = [\"add_vlan\", \"--vlan=714\", \"--name=user_714\",\n \"--vlan_type=user\"]\n self.noouttest(command)\n\n command = \"show vlan --vlan 714\"\n out = self.commandtest(command.split(\" \"))\n self.matchoutput(out, \"Vlan: 714\", command)\n self.matchoutput(out, \"Name: user_714\", command)\n\n def test_001_addutpgsw(self):\n ip = self.getswip()\n\n self.dsdb_expect_add(SW_HOSTNAME, ip, \"xge49\",\n ip.mac)\n command = [\"add\", \"switch\", \"--type\", \"tor\",\n \"--switch\", SW_HOSTNAME, \"--rack\", \"ut3\",\n \"--model\", \"rs g8000\", \"--interface\", \"xge49\",\n \"--mac\", ip.mac, \"--ip\", ip]\n self.ignoreoutputtest(command)\n self.dsdb_verify()\n\n def test_010_pollutpgsw(self):\n command = [\"poll\", \"switch\", \"--vlan\", \"--switch\",\n SW_HOSTNAME]\n err = self.statustest(command)\n\n self.matchoutput(err, \"Using jump host nyaqd1.ms.com from service \"\n \"instance poll_helper/unittest to run CheckNet for \"\n \"switch utpgsw0.aqd-unittest.ms.com.\", command)\n\n self.matchoutput(err, \"vlan 5 is not defined in AQ. Please use \"\n \"add_vlan to add it.\", command)\n\n # Adding vlan 5 as unknown will suppress poll_switch vlan warning.\n def test_012_addvlan5(self):\n command = [\"add_vlan\", \"--vlan=5\", \"--name=user_5\",\n \"--vlan_type=unknown\"]\n self.noouttest(command)\n\n command = \"show vlan --vlan 5\"\n out = self.commandtest(command.split(\" \"))\n self.matchoutput(out, \"Vlan: 5\", command)\n self.matchoutput(out, \"Name: user_5\", command)\n\n def test_012_pollutpgsw(self):\n command = [\"poll\", \"switch\", \"--vlan\", \"--switch\",\n SW_HOSTNAME]\n err = self.statustest(command)\n\n self.matchoutput(err, \"Using jump host nyaqd1.ms.com from service \"\n \"instance poll_helper/unittest to run CheckNet for \"\n \"switch utpgsw0.aqd-unittest.ms.com.\", command)\n\n self.matchclean(err, \"vlan 5 is not defined in AQ. 
Please use \"\n \"add_vlan to add it.\", command)\n\n def test_015_searchswbyvlan(self):\n command = [\"search_switch\", \"--vlan=714\",\n \"--format=csv\"]\n out = self.commandtest(command)\n ip = self.getswip()\n self.matchoutput(out,\n \"utpgsw0.aqd-unittest.ms.com,%s,tor,ut3,ut,bnt,\"\n \"rs g8000,,xge49,%s\" % (ip, ip.mac), command)\n self.matchclean(out,\n \"ut3gd1r01.aqd-unittest.ms.com,4.2.5.8,bor,ut3,ut,hp,\"\n \"uttorswitch,SNgd1r01,,\", command)\n\n def test_020_faildelvlan(self):\n command = [\"del_vlan\", \"--vlan=714\"]\n errOut = self.badrequesttest(command)\n self.matchoutput(errOut,\n \"VlanInfo 714 is still in use and cannot be \"\n \"deleted.\", command)\n\n # Unknown vlans have no dependencies, can be deleted.\n def test_025_delvlan(self):\n command = [\"del_vlan\", \"--vlan=5\"]\n self.noouttest(command)\n\n command = [\"show_vlan\", \"--vlan=5\"]\n self.notfoundtest(command)\n\n def test_030_delutpgsw(self):\n self.dsdb_expect_delete(self.getswip())\n\n command = \"del switch --switch %s\" % SW_HOSTNAME\n self.noouttest(command.split(\" \"))\n\n plenary = os.path.join(self.config.get(\"broker\", \"plenarydir\"),\n \"switchdata\", \"%s.tpl\" % SW_HOSTNAME)\n self.failIf(os.path.exists(plenary),\n \"Plenary file '%s' still exists\" % plenary)\n\n self.dsdb_verify()\n\n def test_040_delvlan(self):\n command = [\"del_vlan\", \"--vlan=714\"]\n self.noouttest(command)\n\n command = [\"show_vlan\", \"--vlan=714\"]\n self.notfoundtest(command)\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestVlan)\n unittest.TextTestRunner(verbosity=2).run(suite)\n","repo_name":"gombasg/aquilon","sub_path":"tests/broker/test_vlan.py","file_name":"test_vlan.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"70588966275","text":"\"\"\" (c) This file is part of the course\r\n Mathematical Logic through Programming\r\n by Gonczarowski and Nisan.\r\n File name: code/predicates/semantics.py \"\"\"\r\n\r\nfrom predicates.syntax import *\r\nimport copy\r\nimport math\r\n\r\nclass Model:\r\n \"\"\" A model for first-order formulae: contains a universe - a set of\r\n elements, and a dictionary that maps every constant name to an element,\r\n every k-ary relation name to a set of k-tuples of elements, and every\r\n k-ary function name to a map from k-tuples of elements to an element \"\"\"\r\n\r\n def __init__(self, universe, meaning):\r\n assert type(universe) is set\r\n assert type(meaning) is dict\r\n self.universe = universe\r\n self.meaning = meaning\r\n\r\n def __repr__(self):\r\n return 'Universe=' + str(self.universe) + '; Meaning=' + str(self.meaning)\r\n \r\n def evaluate_term(self, term, assignment={}):\r\n \"\"\" Return the value of the given term in this model, where variables \r\n get their value from the given assignment \"\"\"\r\n assert term.variables().issubset(assignment.keys())\r\n # Task 7.7\r\n if is_constant(term.root):\r\n return self.meaning[term.root]\r\n elif is_variable(term.root):\r\n return assignment[term.root]\r\n else:\r\n arguments_evaluated = list()\r\n for argument in term.arguments:\r\n arguments_evaluated.append(self.evaluate_term(argument, assignment))\r\n return self.meaning[term.root][tuple(arguments_evaluated)]\r\n\r\n\r\n def evaluate_formula(self, formula, assignment={}):\r\n \"\"\" Return the value of the given formula in this model, where\r\n variables that are free in the formula get their values from the\r\n given assignment 
\"\"\"\r\n assert formula.free_variables().issubset(assignment.keys())\r\n # Task 7.8\r\n if is_relation(formula.root):\r\n arguments_evaluated = list()\r\n for argument in formula.arguments:\r\n arguments_evaluated.append(self.evaluate_term(argument, assignment))\r\n return tuple(arguments_evaluated) in self.meaning[formula.root]\r\n elif is_equality(formula.root):\r\n return self.evaluate_term(formula.first, assignment) == self.evaluate_term(formula.second, assignment)\r\n elif is_quantifier(formula.root):\r\n current_assignment = copy.deepcopy(assignment)\r\n if formula.root == \"A\":\r\n for item in self.universe:\r\n current_assignment[formula.variable] = item\r\n if not self.evaluate_formula(formula.predicate, current_assignment):\r\n return False\r\n return True\r\n elif formula.root == \"E\":\r\n for item in self.universe:\r\n current_assignment[formula.variable] = item\r\n if self.evaluate_formula(formula.predicate, current_assignment):\r\n return True\r\n return False\r\n elif is_unary(formula.root):\r\n return not self.evaluate_formula(formula.first, assignment)\r\n else:\r\n if formula.root == \"|\":\r\n return self.evaluate_formula(formula.first, assignment) | self.evaluate_formula(formula.second, assignment)\r\n elif formula.root == \"&\":\r\n return self.evaluate_formula(formula.first, assignment) & self.evaluate_formula(formula.second, assignment)\r\n elif formula.root == \"->\":\r\n return (not self.evaluate_formula(formula.first, assignment)) | self.evaluate_formula(\r\n formula.second, assignment)\r\n\r\n def is_model_of(self, formulae_repr):\r\n \"\"\" Return whether self a model of the formulae represented by the\r\n given list of strings. For this to be true, each of the formulae\r\n must be satisfied, if the formula has free variables, then it must\r\n be satisfied for every assignment of elements of the universe to\r\n the free variables \"\"\"\r\n # Task 7.9\r\n for formula_string in formulae_repr:\r\n current_formula = Formula.parse(formula_string)\r\n free_vars = current_formula.free_variables()\r\n all_models_of_free_vars = self.all_models(free_vars)\r\n for model in all_models_of_free_vars:\r\n if not self.evaluate_formula(current_formula, model):\r\n return False\r\n return True\r\n\r\n def all_models(self, variables):\r\n \"\"\" Return an iterator over all possible models over the variables in the\r\n given list of variables. 
The order of the models is lexicographic\r\n        according to the order of the variables in the given list, where False\r\n        precedes True \"\"\"\r\n        # Task 2.2\r\n        universe_as_list = list(self.universe)\r\n        list_of_models = []\r\n        basic_model = {}\r\n        list_of_variables = list(variables)\r\n        number_of_variables = len(list_of_variables)\r\n        last_item = len(self.universe) - 1\r\n        for variable in list_of_variables:\r\n            basic_model[variable] = 0\r\n        list_of_models.append(basic_model)\r\n        number_of_models = (math.pow(len(self.universe), number_of_variables)) - 1\r\n        current_model = basic_model\r\n        for model in range(int(number_of_models)):\r\n            current_model = copy.deepcopy(current_model)\r\n            current_variable_number = number_of_variables - 1\r\n            while current_model[list_of_variables[current_variable_number]] == last_item:\r\n                if current_variable_number < 0:\r\n                    break\r\n                else:\r\n                    current_variable_number -= 1\r\n            current_model[list_of_variables[current_variable_number]] += 1\r\n            while current_variable_number < number_of_variables - 1:\r\n                current_variable_number += 1\r\n                current_model[list_of_variables[current_variable_number]] = 0\r\n            list_of_models.append(current_model)\r\n        for model in list_of_models:\r\n            for key in model:\r\n                model[key] = universe_as_list[model[key]]\r\n        return iter(list_of_models)\r\n\r\n\r\n","repo_name":"bendavidelad/Logic","sub_path":"predicates/semantics.py","file_name":"semantics.py","file_ext":"py","file_size_in_byte":6251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23380072091","text":"def is_winning_line(line):\n    winner = line[0]\n    for i in xrange(4):\n        if winner == 'T':\n            winner = line[i]\n        if line[i] == 'T':\n            continue\n        if line[i] == '.' or line[i] != winner:\n            return False\n    return True\n\ndef complete(board):\n    for i in xrange(4):\n        for j in xrange(4):\n            if board[i][j] == '.':\n                return False\n    return True\n\ndef winner_of(line):\n    # there can only be one T\n    if line[0] == 'T':\n        return line[1]\n    return line[0]\n\ndef tic_easier(board):\n    # List of lines (horizontals/verticals/diagonal) where we can win\n    lines = board # horizontals\n    # verticals\n    lines += [[l[i] for l in board] for i in xrange(4)]\n    # diagonals\n    lines.append([board[i][i] for i in xrange(4)])\n    lines.append([board[i][3-i] for i in xrange(4)])\n    # winning lines\n    win = filter(is_winning_line, lines)\n    if win == []:\n        if complete(board):\n            return 'Draw'\n        else:\n            return 'Game has not completed'\n    return '{} won'.format(winner_of(win[0]))\n    \nif __name__ == '__main__':\n    n = int(raw_input())\n    for i in xrange(n):\n        board = [list(raw_input()) for _ in xrange(4)]\n        print('Case #{}: {}'.format(i+1, tic_easier(board)))\n        try:\n            raw_input()\n        except:\n            pass\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/1713.py","file_name":"1713.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7647163184","text":"print ('NAMA : ILHAM YOGA PRATAMA')\nprint ('NIM : 121140081')\nprint ('KELAS : RB')\nprint ('=============================')\nprint()\n\n# example class 'DataDiri' with the attributes 'nama', 'nim', 'kelas_PBO', 'jumlah_SKS'\nclass DataDiri:\n    def __init__(self, nama, nim, kelas_PBO, jumlah_SKS):\n        self.nama = nama # initialize the 'nama' attribute\n        self.nim = nim # initialize the 'nim' attribute\n        self.kelas_PBO = kelas_PBO # initialize the 'kelas_PBO' attribute\n        self.jumlah_SKS = jumlah_SKS # initialize the 'jumlah_SKS' attribute\n\n\n    # method 'tampilkan_data' displays the personal-data summary\n    def tampilkan_data(self):\n        print(str(self.nama), \"with NIM\", str(self.nim), \"from PBO class\", str(self.kelas_PBO), \"is taking semester 4 courses with a total of\", str(self.jumlah_SKS), \"SKS\")\n\n\n# personal data entered by the user\nnama = str(input(\"Enter name : \"))\nnim = str(input(\"Enter NIM : \"))\njumlah_SKS = str(input(\"Enter number of SKS : \"))\n\n\n# example of creating an object of the 'DataDiri' class and calling the 'tampilkan_data' method to display the personal-data summary\nmhs = DataDiri(nama, nim, \"RB\", jumlah_SKS) # create the 'mhs' object\nmhs.tampilkan_data() # call the 'tampilkan_data' method to display the personal-data summary\n","repo_name":"YogaaPratama/Prak-PBO","sub_path":"Tugas2/121140081_ILHAM YOGA PRATAMA_Prak2.py","file_name":"121140081_ILHAM YOGA PRATAMA_Prak2.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71899266754","text":"import pandas as pd\nimport torch\nimport tqdm\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom transformers import GPT2LMHeadModel\nfrom transformers import GPT2Tokenizer\n\nfrom dataset import StackOverflowGPTDataset\n\ndef train(chatData, model, optim, device):\n\n    epochs = 12\n\n    for i in range(epochs):\n        torch.cuda.empty_cache()\n        l = 0\n        for batch, data in tqdm.tqdm(enumerate(chatData), total=len(chatData)):\n            X, a = data[\"input_ids\"], data[\"attention_mask\"]\n            X = X.to(device)\n            a = a.to(device)\n            optim.zero_grad()\n            loss = model(X, attention_mask=a, labels=X).loss\n            loss.backward()\n            optim.step()\n            l += loss.item()\n        print(l/len(chatData))\n        torch.save(model.state_dict(), \"model_state.pt\")\ndevice = \"cuda\" if torch.cuda.is_available() else \"mps\" if torch.backends.mps.is_available() else \"cpu\"\nprint(\"Using device:\", device)\n\ntokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\ntokenizer.add_special_tokens({\n    \"eos_token\": \"<|endoftext|>\",\n    \"bos_token\": \"<|startoftext|>\",\n    \"sep_token\": \"<|sep|>\",\n    \"pad_token\": \"<|pad|>\"\n})\n\nmodel = GPT2LMHeadModel.from_pretrained(\"gpt2\")\nmodel.resize_token_embeddings(len(tokenizer))\nmodel = model.to(device)\ndef collate_fn(batch):\n    # Get the input_ids and attention_masks from the batch\n    input_ids = [item['input_ids'].tolist() for item in batch]\n    attention_masks = [item['attention_mask'].tolist() for item in batch]\n\n    # Pad the input_ids and attention_masks to the same length\n    max_length = 512\n    padded_input_ids = [seq + [0] * (max_length - len(seq)) for seq in input_ids]\n    padded_attention_masks = [seq + [0] * (max_length - len(seq)) for seq in attention_masks]\n\n    # Convert the padded input_ids and attention_masks to tensors\n    input_ids = torch.tensor(padded_input_ids)\n    attention_masks = torch.tensor(padded_attention_masks)\n\n    # Create a dictionary containing the input_ids, attention_masks, and labels\n    batch_dict = {'input_ids': input_ids, 'attention_mask': attention_masks}\n\n    return batch_dict\nquestions_df = pd.read_csv('data/Questions_cleaned.csv', encoding='latin-1')\nanswers_df = pd.read_csv('data/Answers_cleaned.csv', encoding='latin-1')\ndataset = StackOverflowGPTDataset(questions_df, answers_df, tokenizer)\nloader = DataLoader(dataset, batch_size=4, collate_fn=collate_fn)\n\nmodel.train()\n\noptim = Adam(model.parameters(), lr=1e-3)\n\nprint(\"training .... 
\")\ntrain(loader, model, optim, device)\n","repo_name":"aryankhatana01/gpt-overflow","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12307523558","text":"# Purpose: RCM complex CP preprocessing (Main program) Mohsen Ghanbari May 2020\r\n\r\n# First, we ingest two files: CH and CV. So the p_directory should be modified in the program. Also, set the parameters:\r\n# IS_THERE_BOXCAR_AVERAGING and IS_THERE_D_SAMPLING, as well as D_SAMPLING_COEFF_R, D_SAMPLING_COEFF_C, and BOXCAR_COEFF.\r\n# Then, the program generates the elements of the CP coherence matrix: c11, c12_real, c22, c12_imag\r\n# Then, if required, the box-car averaging and downsampling are performed.\r\n# The output is the elements of the CP coherence matrix that need to be converted to .bin files using the corresponding MATLAB script.\r\n# Subscene functionality added!\r\nimport tifffile as tiff\r\nimport numpy as np\r\nimport scipy.io\r\nfrom scipy import ndimage\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as colors\r\nfrom skimage.measure import block_reduce\r\nfrom tkinter import Tk\r\nfrom tkinter.filedialog import askdirectory\r\n\r\nIS_THERE_BOXCAR_AVERAGING = True\r\nIS_THERE_D_SAMPLING = False\r\n\r\n\r\nD_SAMPLING_COEFF_R = 2 # downsampling coefficient: the number of rows in the window in which the pixels are replaced with the average value\r\nD_SAMPLING_COEFF_C = 2 # downsampling coefficient: the number of columns in the window in which the pixels are replaced with the average value\r\n\r\nBOXCAR_COEFF = 5 # boxcar averaging coefficient: the size of the averaging window\r\n\r\n\r\n# p_directory = 'C:/Users/vip-mohsen/Desktop/Results_realCp/Winnipeg/Calibrated_by_PCI/without boxcar/'\r\np_directory = askdirectory(title='Select Folder') # shows dialog box and return the path\r\np_directory = p_directory + \"/\"\r\n\r\nhh_tiff = tiff.imread(p_directory + '*HH.tif')\r\nvv_tiff = tiff.imread(p_directory + '*VV.tif')\r\nhv_tiff = tiff.imread(p_directory + '*HV.tif')\r\n\r\nhh = hh_tiff[:,:,0] + 1j * hh_tiff[:,:,1]\r\nvv = vv_tiff[:,:,0] + 1j * vv_tiff[:,:,1]\r\nhv = hv_tiff[:,:,0] + 1j * hv_tiff[:,:,1]\r\n\r\nc11 = np.real(np.multiply(hh, np.conj(hh)))\r\nc22 = np.real(np.multiply(hv, np.conj(hv)))\r\nc33 = np.real(np.multiply(vv, np.conj(vv)))\r\nc12_real = np.real(np.multiply(hh, np.conj(hv)))\r\nc12_imag = np.imag(np.multiply(hh, np.conj(hv)))\r\nc13_real = np.real(np.multiply(hh, np.conj(vv)))\r\nc13_imag = np.imag(np.multiply(hh, np.conj(vv)))\r\nc23_real = np.real(np.multiply(hv, np.conj(vv)))\r\nc23_imag = np.imag(np.multiply(hv, np.conj(vv)))\r\n\r\n\r\n\r\n#taking a subscene\r\n# import cv2\r\n# img = cv2.imread(p_directory + \"image_for_labeling.png\")\r\n# row_start = 5091\r\n# row_end = 6091\r\n# col_start = 1327\r\n# col_end = 2127\r\n#\r\n# img_sub = img[row_start:row_end, col_start:col_end]\r\n# tiff.imwrite(p_directory + 'subscene.tif', img_sub)\r\n# c11 = c11[row_start:row_end, col_start:col_end]\r\n# c12_real = c12_real[row_start:row_end, col_start:col_end]\r\n# c22 = c22[row_start:row_end, col_start:col_end]\r\n# c12_imag = c12_imag[row_start:row_end, col_start:col_end]\r\n# c13_real = c13_real[row_start:row_end, col_start:col_end]\r\n# c33 = c33[row_start:row_end, col_start:col_end]\r\n# c13_imag = c13_imag[row_start:row_end, col_start:col_end]\r\n# c23_real = c23_real[row_start:row_end, col_start:col_end]\r\n# c23_imag = 
c23_imag[row_start:row_end, col_start:col_end]\r\n\r\nif IS_THERE_BOXCAR_AVERAGING:\r\n\r\n\r\n fig1 = plt.figure()\r\n fig2 = plt.figure()\r\n fig3 = plt.figure()\r\n\r\n ax_c11 = fig1.add_subplot(121)\r\n ax_c11.set_title('c11')\r\n ax_c22 = fig2.add_subplot(121)\r\n ax_c22.set_title('c22')\r\n ax_c33 = fig3.add_subplot(121)\r\n ax_c33.set_title('c33')\r\n\r\n ax_c11_filt = fig1.add_subplot(122)\r\n ax_c11_filt.set_title('c11-filtered')\r\n ax_c22_filt = fig2.add_subplot(122)\r\n ax_c22_filt.set_title('c22-filtered')\r\n ax_c33_filt = fig3.add_subplot(122)\r\n ax_c33_filt.set_title('c33-filtered')\r\n\r\n ax_c11.imshow(c11, cmap = 'gray', norm = colors.Normalize(vmin = 0, vmax =1))\r\n ax_c22.imshow(c22, cmap = 'gray', norm = colors.Normalize(vmin = 0, vmax =1))\r\n ax_c33.imshow(c33, cmap = 'gray', norm = colors.Normalize(vmin = 0, vmax =1))\r\n\r\n c11 = ndimage.uniform_filter(c11, size = BOXCAR_COEFF)\r\n c22 = ndimage.uniform_filter(c22, size=BOXCAR_COEFF)\r\n c33 = ndimage.uniform_filter(c33, size=BOXCAR_COEFF)\r\n c12_real = ndimage.uniform_filter(c12_real, size=BOXCAR_COEFF)\r\n c12_imag = ndimage.uniform_filter(c12_imag, size=BOXCAR_COEFF)\r\n c13_real = ndimage.uniform_filter(c13_real, size=BOXCAR_COEFF)\r\n c13_imag = ndimage.uniform_filter(c13_imag, size=BOXCAR_COEFF)\r\n c23_real = ndimage.uniform_filter(c23_real, size=BOXCAR_COEFF)\r\n c23_imag = ndimage.uniform_filter(c23_imag, size=BOXCAR_COEFF)\r\n\r\n\r\n\r\n ax_c11_filt.imshow(c11, cmap = 'gray', norm = colors.Normalize(vmin = 0, vmax =1))\r\n ax_c22_filt.imshow(c22, cmap = 'gray', norm = colors.Normalize(vmin = 0, vmax =1))\r\n ax_c33_filt.imshow(c33, cmap = 'gray', norm = colors.Normalize(vmin = 0, vmax =1))\r\n\r\nif IS_THERE_D_SAMPLING:\r\n\r\n fig1 = plt.figure()\r\n fig2 = plt.figure()\r\n fig3 = plt.figure()\r\n ax_c11 = fig1.add_subplot(121)\r\n ax_c11.set_title('c11')\r\n ax_c22 = fig2.add_subplot(121)\r\n ax_c22.set_title('c22')\r\n ax_c33 = fig3.add_subplot(121)\r\n ax_c33.set_title('c33')\r\n ax_c11_filt = fig1.add_subplot(122)\r\n ax_c11_filt.set_title('c11-downsampled')\r\n ax_c22_filt = fig2.add_subplot(122)\r\n ax_c22_filt.set_title('c22-downsampled')\r\n ax_c33_filt = fig3.add_subplot(122)\r\n ax_c33_filt.set_title('c33-downsampled')\r\n ax_c11.imshow(c11, cmap='gray', norm=colors.Normalize(vmin=0, vmax=1))\r\n ax_c22.imshow(c22, cmap='gray', norm=colors.Normalize(vmin=0, vmax=1))\r\n ax_c33.imshow(c33, cmap='gray', norm=colors.Normalize(vmin=0, vmax=1))\r\n\r\n c11 = block_reduce(c11, block_size=(D_SAMPLING_COEFF_R, D_SAMPLING_COEFF_C), func=np.mean)\r\n c22 = block_reduce(c22, block_size=(D_SAMPLING_COEFF_R, D_SAMPLING_COEFF_C), func=np.mean)\r\n c33 = block_reduce(c33, block_size=(D_SAMPLING_COEFF_R, D_SAMPLING_COEFF_C), func=np.mean)\r\n c12_real = block_reduce(c12_real, block_size=(D_SAMPLING_COEFF_R, D_SAMPLING_COEFF_C), func=np.mean)\r\n c12_imag = block_reduce(c12_imag, block_size=(D_SAMPLING_COEFF_R, D_SAMPLING_COEFF_C), func=np.mean)\r\n c13_real = block_reduce(c13_real, block_size=(D_SAMPLING_COEFF_R, D_SAMPLING_COEFF_C), func=np.mean)\r\n c13_imag = block_reduce(c13_imag, block_size=(D_SAMPLING_COEFF_R, D_SAMPLING_COEFF_C), func=np.mean)\r\n c23_real = block_reduce(c23_real, block_size=(D_SAMPLING_COEFF_R, D_SAMPLING_COEFF_C), func=np.mean)\r\n c23_imag = block_reduce(c23_imag, block_size=(D_SAMPLING_COEFF_R, D_SAMPLING_COEFF_C), func=np.mean)\r\n\r\n ax_c11_filt.imshow(c11, cmap = 'gray', norm = colors.Normalize(vmin = 0, vmax =1))\r\n ax_c22_filt.imshow(c22, cmap = 'gray', norm = 
colors.Normalize(vmin = 0, vmax =1))\r\n    ax_c33_filt.imshow(c33, cmap = 'gray', norm = colors.Normalize(vmin = 0, vmax =1))\r\n\r\nC3 = np.zeros((c11.shape[0], c11.shape[1], 9))\r\nC3[:,:,0] = c11\r\nC3[:,:,1] = c12_real\r\nC3[:,:,2] = c22\r\nC3[:,:,3] = c13_real\r\nC3[:,:,4] = c23_real\r\nC3[:,:,5] = c33\r\nC3[:,:,6] = c12_imag\r\nC3[:,:,7] = c13_imag\r\nC3[:,:,8] = c23_imag\r\ntiff.imwrite(p_directory + 'C3.tif', C3)\r\n\r\n# 0:C11, 1:C12_real, 2:C22, 3:C13_real, 4:C23_real, 5:C33, 6:C12_imag, 7:C13_imag, 8:C23_imag\r\ncovariance = {'c11':c11,'c12_real':c12_real, 'c22':c22, 'c13_real':c13_real, 'c23_real':c23_real, 'c33':c33,\r\n              'c12_imag':c12_imag, 'c13_imag':c13_imag, 'c23_imag':c23_imag}\r\n\r\nscipy.io.savemat(p_directory + 'CovarianceMatrixElements.mat', covariance)\r\n\r\n","repo_name":"m5ghanba/Preprocessing_complex_compact_and_quad_polsar_data","sub_path":"QP_Preprocessing_Downsampling_Multilookaveraging.py","file_name":"QP_Preprocessing_Downsampling_Multilookaveraging.py","file_ext":"py","file_size_in_byte":7442,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"17305678185","text":"import random\n\n\ndef ex25_2_1():\n    # Task 1. Car\n    #\n    # Write a Toyota class consisting of four static attributes:\n    #\n    # the car's color (for example, red),\n    # its price (one million),\n    # its maximum speed (200),\n    # its current speed (zero).\n    # Create three instances of the class and change each one's current speed to a random number from zero to 200.\n    class Toyota:\n        color = 'red'\n        price = 100000\n        max_speed = 200\n        current_speed = 0\n\n    first_toyota = Toyota()\n    second_toyota = Toyota()\n    third_toyota = Toyota()\n    first_toyota.current_speed = random.randint(0, 200)\n    second_toyota.current_speed = random.randint(0, 200)\n    third_toyota.current_speed = random.randint(0, 200)\n    print('1st toyota speed: {}\\n2nd toyota speed: {}\\n3rd toyota speed: {}'.format(first_toyota.current_speed,\n                                                                                   second_toyota.current_speed,\n                                                                                   third_toyota.current_speed))\n\n\ndef ex25_2_2():\n    class Monitor:\n        manufacturer = 'Samsung'\n        matrix = 'VA'\n        resolution = 'WQHD'\n        frequency = 60\n\n    class Headphones:\n        manufacturer = 'Sony'\n        sensitivity = 108\n        with_microphone = False\n\n    monitor_1 = Monitor()\n    monitor_2 = Monitor()\n    monitor_3 = Monitor()\n    monitor_4 = Monitor()\n\n    monitor_2.frequency = 144\n    monitor_3.frequency = 70\n\n    headphones_1 = Headphones()\n    headphones_2 = Headphones()\n    headphones_3 = Headphones()\n\n    headphones_2.with_microphone = True\n    headphones_3.with_microphone = True\n","repo_name":"Gegcuk/skillbox","sub_path":"python_for_data_science/lesson25_intro_to_OOP/ex25.2.py","file_name":"ex25.2.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"38224603062","text":"import time\n\nfrom setzer.app.service_locator import ServiceLocator\nfrom setzer.dialogs.dialog_locator import DialogLocator\n\n\nclass BuildSystemPresenter(object):\n    ''' Mediator between document and build_system. 
'''\n \n def __init__(self, document, build_system):\n self.document = document\n build_system.register_observer(self)\n\n '''\n *** notification handlers, get called by observed build system\n '''\n\n def change_notification(self, change_code, notifying_object, parameter):\n\n if change_code == 'building_started':\n self.document.change_build_state('building_in_progress')\n \n if change_code == 'reset_timer':\n self.document.build_widget.view.reset_timer()\n self.document.build_widget.view.label.set_text('0:00')\n\n if change_code == 'building_stopped':\n self.document.show_build_state('')\n self.document.change_build_state('idle')\n\n if change_code == 'building_finished':\n result_blob = parameter\n if result_blob['build'] != None or result_blob['forward_sync'] != None:\n if result_blob['build'] != None:\n try:\n self.document.preview.set_pdf_filename(result_blob['build']['pdf_filename'])\n except KeyError: pass\n self.document.add_change_code('pdf_updated')\n\n if result_blob['forward_sync'] != None:\n self.document.preview.set_synctex_rectangles(result_blob['forward_sync'])\n self.document.show_build_state('')\n\n if result_blob['build'] != None:\n build_blob = result_blob['build']\n\n if build_blob['error'] == 'interpreter_missing':\n self.document.show_build_state('')\n self.document.change_build_state('idle')\n if DialogLocator.get_dialog('interpreter_missing').run(build_blob['error_arg']):\n DialogLocator.get_dialog('preferences').run()\n return\n\n if build_blob['error'] == 'interpreter_not_working':\n self.document.show_build_state('')\n self.document.change_build_state('idle')\n if DialogLocator.get_dialog('building_failed').run(build_blob['error_arg']):\n DialogLocator.get_dialog('preferences').run()\n return\n\n build_blob['log_messages']['BibTeX'] = build_blob['bibtex_log_messages']\n self.document.set_build_log_items(build_blob['log_messages'])\n self.document.build_time = time.time() - self.document.last_build_start_time\n\n error_count = self.document.get_error_count()\n if error_count > 0:\n error_color_rgba = ServiceLocator.get_color_manager().get_theme_color('error_color')\n error_color = '#' + format(int(error_color_rgba.red * 255), '02x') + format(int(error_color_rgba.green * 255), '02x') + format(int(error_color_rgba.blue * 255), '02x')\n str_errors = ngettext('Failed ({amount} error)!', 'Failed ({amount} errors)!', error_count)\n message = str_errors.format(color=error_color, amount=str(error_count))\n self.document.show_build_state(message)\n else:\n self.document.show_build_state(_('Success!'))\n\n self.document.set_has_synctex_file(build_blob['has_synctex_file'])\n self.document.has_been_built = True\n\n elif result_blob['backward_sync'] != None:\n if not self.document.root_is_set:\n if result_blob['backward_sync']['filename'] == self.document.get_filename():\n self.document.set_synctex_position(result_blob['backward_sync'])\n elif self.document.is_root:\n workspace = ServiceLocator.get_workspace()\n document = workspace.open_document_by_filename(result_blob['backward_sync']['filename'])\n if document != None:\n document.set_synctex_position(result_blob['backward_sync'])\n\n self.document.change_build_state('idle')\n\n if result_blob['build'] != None:\n 
self.document.invalidate_build_log()\n\n\n","repo_name":"RaMathuZen/Setzer","sub_path":"setzer/document/latex/build_system/build_system_presenter.py","file_name":"build_system_presenter.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"23630045721","text":"\r\ninfile = open(\"A-small-attempt1.in\")\r\noutfile = open(\"A-small-attempt1.out\", \"w\")\r\n\r\n#infile = open(\"a.input.txt\")\r\n#outfile = open(\"a.output.txt\", \"w\")\r\n\r\nmapping = {\r\n    'y':'a',\r\n    'e':'o',\r\n    'q':'z',\r\n    'j':'u',\r\n    'p':'r',\r\n    'm':'l',\r\n    's':'n',\r\n    'l':'g',\r\n    'c':'e',\r\n    'k':'i',\r\n    'd':'s',\r\n    'x':'m',\r\n    'v':'p',\r\n    'd':'s',\r\n    'n':'b',\r\n    'r':'t',\r\n    'i':'d',\r\n    't':'w',\r\n    'a':'y',\r\n    'h':'x',\r\n    'w':'f',\r\n    'f':'c',\r\n    'o':'k',\r\n    'b':'h',\r\n    'g':'v',\r\n    'u':'j',\r\n    'z':'q'}\r\n\r\nn = int(infile.readline())\r\n\r\nfor casenum in range(0, n):\r\n\r\n    print (\"Case %d\" % casenum)\r\n    line = infile.readline().strip()\r\n\r\n    out = []\r\n    for c in line:\r\n        if mapping.has_key(c):\r\n            out.append(mapping[c])\r\n        else:\r\n            out.append(c)\r\n    out = \"\".join(out)\r\n\r\n    outfile.write(\"Case #%d: %s\\n\" % (casenum+1, out))\r\n    \r\ninfile.close()\r\noutfile.close()\r\n\r\nprint(\"ok\")\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/1222.py","file_name":"1222.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10525498088","text":"#!/usr/bin/env python\n\nfreqword = {}\n\nwith open(\"FREQ.txt\", \"r\") as file:\n    file.readline()\n    file.readline()\n    for line in file:\n        freqword[line.strip()] = 1\n\ndef check_word_is_freq(word):\n    return freqword.get(word) == 1\n\ndef freqword_is_in(text):\n    judge = False\n    for w in text.strip().split():\n        judge = judge or check_word_is_freq(w)\n    return judge\n\nif __name__ == '__main__':\n    if check_word_is_freq(\"hello\"):\n        print(\"Freq!\")\n    else:\n        print('\"hello\" is not freq.')\n    if check_word_is_freq(\"season\"):\n        print(\"Freq!\")\n    if freqword_is_in(\"this is a test\"):\n        print(\"Freq word is in.\")\n    print('Module name: {}'.format(__name__))","repo_name":"togoannotator/ta","sub_path":"app/load_tool/bin/check_freq.py","file_name":"check_freq.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36820934164","text":"# pylint: disable=W0212\n\"\"\"Chess History unit test module.\"\"\"\n\nimport unittest\n\nfrom chess.board.history import History\nfrom chess.piece.piece import Piece\n\n\nclass TestHistory(unittest.TestCase):\n\n    def setUp(self):\n        self.history = History()\n\n    def test_add(self):\n        self.history.add({})\n        self.assertEqual(len(self.history), 1)\n\n    def test_get_full_history(self):\n        for i in range(5):\n            self.history.add({str(i): i})\n        actual = self.history.all()\n        self.assertEqual(len(actual), 5)\n        self.assertEqual(actual[0], {\"0\": 0})\n        self.assertEqual(actual[4], {\"4\": 4, 'current': True})\n\n    def test_get_full_history_current_when_not_first(self):\n        for i in range(5):\n            self.history.add({str(i): i})\n        self.history.previous()\n        self.history.previous()\n        actual = self.history.all()\n        self.assertEqual(len(actual), 5)\n        self.assertEqual(actual[0], {\"0\": 0})\n        self.assertEqual(actual[1], {\"1\": 1})\n        self.assertEqual(actual[2], {\"2\": 2, 'current': True})\n        self.assertEqual(actual[3], 
{\"3\": 3})\n self.assertEqual(actual[4], {\"4\": 4})\n\n def test_get_full_history_current_when_no_history(self):\n actual = self.history.all()\n self.assertEqual(len(actual), 0)\n\n def test_get_full_history_current_when_at_first(self):\n for i in range(5):\n self.history.add({str(i): i})\n self.history.first()\n actual = self.history.all()\n self.assertEqual(len(actual), 5)\n self.assertEqual(actual[0], {\"0\": 0})\n self.assertEqual(actual[4], {\"4\": 4})\n\n def test_previous(self):\n for i in range(5):\n self.history.add({str(i): i})\n previous = self.history.previous()\n self.assertEqual({'4': 4}, previous)\n\n previous = self.history.previous()\n self.assertEqual({'3': 3}, previous)\n\n # can go back to beginning of history\n self.history.previous()\n self.history.previous()\n previous = self.history.previous()\n self.assertEqual({'0': 0}, previous)\n\n # doesn't go beyond initial history state\n previous = self.history.previous()\n self.assertEqual(None, previous)\n self.assertEqual(self.history._index, -1)\n\n def test_add_after_previous(self):\n for i in range(5):\n self.history.add({str(i): i})\n self.history.previous()\n previous = self.history.previous()\n self.assertEqual({'3': 3}, previous)\n self.assertEqual(5, len(self.history))\n self.assertEqual(2, self.history._index)\n self.history.add({\"sitting\": \"throne\"})\n self.assertEqual(4, len(self.history))\n\n def test_first(self):\n for i in range(5):\n self.history.add({str(i): i})\n self.history.first()\n self.assertEqual(-1, self.history._index)\n self.assertEqual(5, len(self.history))\n\n def test_next(self):\n for i in range(5):\n self.history.add({str(i): i})\n self.history.first()\n item = self.history.next()\n self.assertEqual({'0': 0}, item)\n item = self.history.next()\n self.assertEqual({'1': 1}, item)\n\n # beyond end of list\n self.history.next()\n self.history.next()\n self.history.next()\n item = self.history.next()\n self.assertEqual(None, item)\n self.assertEqual(4, self.history._index)\n\n def test_json(self):\n for i in range(3):\n self.history.add({str(i): i})\n data = self.history.json\n self.assertEqual(data, {'history': [{'0': 0}, {'1': 1}, {'2': 2}], 'initial_board': {}, 'index': 2})\n\n def test_construct_history_object(self):\n start = [0, 0]\n end = [4, 5]\n piece = Piece(\"name\", \"red\", [{\"directions\": [\"move\"]}])\n captures = History.construct_capture_obj({(5, 5): piece})\n record = History.construct_history_object(start, end, piece, captures)\n self.assertEqual(piece.moves, [{'directions': ['move']}])\n self.assertEqual(record, {\n 'start': start,\n 'end': end,\n 'piece': {'name': 'name', 'color': 'red', 'moves': [{'directions': ['move']}]},\n 'captures': [{'location': (5, 5), 'name': 'name', 'color': 'red', 'moves': [{'directions': ['move']}]}]})\n","repo_name":"theovoss/Chess","sub_path":"chess/tests/board/test_history.py","file_name":"test_history.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"19100916874","text":"import math\n\nlijst=[]\n\ndef findRoot(f, a, b, epsilon):\n if f(a)==0:\n return a\n if f(b)==0:\n return b\n if abs(b-a)>epsilon:\n #print(a,b)\n m=(a+b)/2\n #print(m)\n #print(f(m))\n lijst.append(m)\n\n if f(a)>0 and f(m)<0:\n #print([a,m])\n return findRoot(f,a,m,epsilon)\n \n\n elif f(a)<0 and f(m)>0:\n #print([a,m])\n return findRoot(f,a,m,epsilon)\n \n\n elif f(b)>0 and f(m)<0:\n #print([m,b])\n return findRoot(f,m,b,epsilon)\n \n\n elif f(b)<0 and 
f(m)>0:\n #print([m,b])\n return findRoot(f,m,b,epsilon)\n \n else:\n m=(a+b)/2\n return m\n\n#def f(x):\n # return x-1\n \n#root = findRoot(lambda x:x - 1,0,3,.1)\n\n\n\n\ndef findAllRoots(f,a,b,epsilon):\n nulpunten=[]\n aantal_intervallen= int((b-a)/epsilon)\n #print(aantal_intervallen)\n for i in range(aantal_intervallen):\n if f(a+(i+1)*epsilon)*f(a+i*epsilon)<=0:\n wortel=findRoot(f,a+i*epsilon, a+(i+1)*epsilon, epsilon)\n nulpunten=nulpunten+[wortel]\n i=i+1\n #print(nulpunten)\n return (list(set(nulpunten)))\n\n#def findAllRoots(f, a, b, epsilon):\n# nulpunten=[]\n# if f(a)==0:\n# nulpunten.append(a)\n# if f(b)==0:\n# nulpunten.append(b)\n# if abs(b-a)>epsilon:\n #print(a,b)\n# m=(a+b)/2\n #print(m)\n #print(f(m))\n# lijst.append(m)\n# if f(m)==0:\n# nulpunten.append(m)\n\n# if f(a)*f(m)<0:\n #print([a,m])\n# findALLRoots(f,a,m,epsilon)\n \n# if f(b)*f(m)<0:\n #print([m,b])\n# findALLRoots(f,m,b,epsilon)\n \n# else:\n# m=(a+b)/2\n# nulpunten.append(m)\n #findALLRoots(f,a,m-epsilon,epsilon)\n # findALLRoots(f, m+epsilon, b, epsilon)\n# print(nulpunten)\n # findALLRoots(f, a, b, epsilon)\n \n \n \n","repo_name":"kylavandenbogaerde/WISB256","sub_path":"Opdracht4/bisection.py","file_name":"bisection.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31067478776","text":"import xlsxwriter, os\nimport pandas as pd\nimport io\n\nparent_path = os.path.join(os.getcwd())\nsheet_name = 'جداول الطلاب'\n# excel_file = rf'{parent_path}\\{sheet_name}.xlsx'\ntime_cells_dict = {\n \"08\": \"BC\", \"09\": \"DE\", \"10\": \"FG\", \"11\": \"HI\",\n \"12\": \"JK\", \"13\": \"LM\", \"14\": \"NO\", \"15\": \"PQ\", \"16\": \"RS\", \"17\": \"TU\"\n}\nday_cells_dict = {\n \"الأحد\": \"14\", \"الاثنين\": \"18\", \"الثلاثاء\": \"22\", \"الأربعاء\": \"26\", \"الخميس\": \"30\"\n}\n\ndef get_term():\n term = dfss01['الفصل التدريبي'].iloc[0]\n return str(term)\n \n \ndef get_term_text(term):\n if term[-2:-1] == '1':\n return \"الأول\"\n elif term[-2:-1] == '2':\n return \"الثاني\"\n elif term[-2:-1] == '3':\n return \"الثالث\"\n\ndef studentName(userID):\n df_new = dfsf24[dfsf24['رقم الطالب'] == (userID)]\n return df_new['إسم الطالب'].iloc[0]\n\n# def comIDs():\n# IDs = list(set(dfsf24['رقم الطالب']))\n# return IDs\n\ndef get_lab_department(department='all'):\n \"\"\"Return computer ids\"\"\"\n if department != \"all\":\n IDs = dfsf24.loc[dfsf24['القسم'] == department, 'رقم الطالب'].unique().tolist()\n else:\n IDs = dfsf24['رقم الطالب'].unique().tolist()\n return IDs\n\n\ndef split(txt):\n res = [i.split('\\n') for i in txt][0]\n return list(map(str.strip, res))\n\ndef removeNonValidTimeSlot(timeslots, *arguments):\n Error = '18'\n for i in range(len(timeslots) - 1, -1, -1):\n if Error not in timeslots[i]:\n pass\n else:\n for x in arguments:\n x.pop(i)\n timeslots.pop(i)\n return timeslots\n\ndef merge_cells(timeslot, day):\n x = timeslot.split(\"-\")\n start_time = x[1].strip()\n end_time = x[0].strip()\n s = start_time[:2]\n e = end_time[:2]\n starting_cell = time_cells_dict[s][0]\n ending_cell = time_cells_dict[e][-1]\n day_row = day_cells_dict[day]\n start_and_end = starting_cell + ending_cell\n return f\"{starting_cell}{day_row}:{ending_cell}{int(day_row)+3}\", start_and_end\n\n \ndef ss01Details(userID):\n teachers, subjects, crn, classrooms, days, times = [], [], [], [], [], []\n df_new = dfsf24[dfsf24['رقم الطالب'] == (userID)]\n ref_subject_id = df_new['الرقم المرجعي'].tolist()\n for ref_id in 
ref_subject_id:\n df_temp = dfss01[dfss01['الرقم المرجعي'] == ref_id]\n teachers.extend(df_temp['اسم المدرب'].tolist())\n subjects.extend(df_temp['اسم المقرر'].tolist())\n crn.extend(df_temp['الرقم المرجعي'].tolist())\n times.extend(df_temp['الوقت'].tolist())\n classrooms.extend(df_temp['قاعة'].tolist())\n days.extend(df_temp['اليوم'].tolist())\n days = [d.strip() for d in days]\n major = df_new['القسم'].iloc[0]\n return teachers, subjects, crn, classrooms, times, days, major\n\n\ndef create_excel_file(student_id):\n\n worksheet = workbook.add_worksheet(f\"{student_id}\") # add a new worksheet\n \n \n teachers, subs, refs, labs, times, days, major = ss01Details(student_id)\n result = removeNonValidTimeSlot(times, subs, refs, labs, days)\n\n\n def total_hours(s, e):\n sum = ord(e) - ord(s) + 1\n sum //= 2 \n return sum\n \n def merge_format(back_color, size, font='black'):\n merge_format = workbook.add_format({\n 'bold': True,\n 'font_name': 'Calibri',\n 'font_size': f'{size}',\n 'border': 6,\n 'font_color':f'{font}',\n 'align': 'center',\n 'valign': 'vcenter',\n 'fg_color': f'{back_color}',\n 'text_wrap': True\n })\n return merge_format\n \n def no_Border(back_color, size, font='black'):\n no_border = workbook.add_format({\n 'bold': True,\n 'font_name': 'Calibri',\n 'font_size': f'{size}',\n 'border': 0,\n 'font_color':f'{font}',\n 'align': 'center',\n 'valign': 'vcenter',\n 'fg_color': f'{back_color}',\n })\n return no_border\n \n def merge_format2(back_color, size, font='black', border=3):\n merge_format = workbook.add_format({\n 'bold': True,\n 'font_name': 'Calibri',\n 'font_size': f'{size}',\n 'border': int(border),\n 'font_color':f'{font}',\n 'align': 'center',\n 'valign': 'vcenter',\n 'fg_color': f'{back_color}',\n 'text_wrap': True\n })\n return merge_format\n \n timeslots = []\n totalhours = []\n for t, d in zip(times, days):\n whatcell, traininghours = merge_cells(t, d)\n timeslots.append(whatcell)\n totalhours.append(total_hours(traininghours[0], traininghours[1]))\n\n list_of_alphabets = [\"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\"]\n for letter in list_of_alphabets:\n for i in range(14,34+1):\n worksheet.write(f\"{letter}{i}:{letter}{i}\", \"\", merge_format2(\"#FFFFFF\", 9))\n\n for teacher, sub, ref, lab, slot in zip(teachers, subs, refs, labs, timeslots):\n worksheet.merge_range(f\"{slot}\", f'{sub}\\n{ref}\\n{str(lab)[-3:]}\\n{teacher}', merge_format('#ADD5B0', '9'))\n\n \n \n colV = workbook.add_format()\n colV.set_left(6)\n \n line29 = workbook.add_format()\n line29.set_top(6)\n \n \n # Set column size\n worksheet.set_column('A:A', 5.71) \n worksheet.set_column('B:U', 2.86) \n\n # Insert image (tvtc)\n worksheet.insert_image('G1', 'icon/tvtc.jpg', {'x_scale': 0.3, 'y_scale': 0.3, 'x_offset': 0,'y_offset': 0})\n \n\n worksheet.merge_range(\"A1:F1\", \"المملكة العربية السعودية\" , no_Border('#FFFFFF', 9))\n worksheet.merge_range(\"A2:F2\", \"المؤسسة العامة للتدريب التقني والمهني\" , no_Border('#FFFFFF', 9))\n worksheet.merge_range(\"A3:F3\", \"الكلية التقنية بمحافظة حقل\" , no_Border('#FFFFFF', 9))\n\n worksheet.merge_range(\"M1:U1\", \"Kingdom of Saudi Arabia\" ,no_Border('#FFFFFF', 9))\n worksheet.merge_range(\"M2:U2\", \"Technical and Vocational Training Corporation\" ,no_Border('#FFFFFF', 9))\n worksheet.merge_range(\"M3:U3\", \"College of Technology in Haql\" ,no_Border('#FFFFFF', 9))\n \n worksheet.merge_range(\"A5:U6\", \"جدول المتدرب\" ,merge_format('#636467', 14, 
font='white'))\n \n worksheet.merge_range(\"A8:B8\", \"اسم المتدرب\" ,merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"C8:J8\", f\"{studentName(student_id)}\" ,merge_format('#FFFFFF', '9'))\n worksheet.merge_range(\"K8:M8\", \"رقم المتدرب\" ,merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"N8:P8\", f\"{student_id}\" ,merge_format('#FFFFFF', '9'))\n worksheet.merge_range(\"Q8:S8\", \"الفصل التدريبي\" ,merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"T8:U8\", get_term_text(get_term()) ,merge_format('#FFFFFF', '9'))\n\n worksheet.merge_range(\"A9:B9\", \"القسم\" ,merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"C9:J9\", major ,merge_format('#FFFFFF', '9'))\n worksheet.merge_range(\"K9:M9\", \"المؤهل\" ,merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"N9:U9\", \"الدبلوم\" ,merge_format('#FFFFFF', '9'))\n\n \n worksheet.write(\"A11\", \"المحاضرة\",merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"A12:A13\", \"الوقت\",merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"A14:A17\", \"الأحد\",merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"A18:A21\", \"الإثنين\",merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"A22:A25\", \"الثلاثاء\",merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"A26:A29\", \"الأربعاء\",merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"A30:A33\", \"الخميس\",merge_format('#2FBAB3', '9'))\n \n worksheet.merge_range(\"V14:V33\", \"\",colV)\n\n \n worksheet.merge_range(\"A34:U34\", \"\",line29)\n \n worksheet.merge_range(\"A35:C35\", \"ساعات الإتصال\",merge_format('#2FBAB3', '9'))\n worksheet.merge_range(\"D35:H35\", f\"{sum(totalhours)}\",merge_format('#FFFFFF', '9'))\n \n worksheet.merge_range(\"A36:C36\", \"اخر تعديل\",merge_format('#2FBAB3', '9'))\n import datetime\n now = datetime.datetime.now()\n worksheet.merge_range(\"D36:H36\", now.strftime(\"%I:%M%p - %d/%m/%Y\"),merge_format('#FFFFFF', '9'))\n \n row = 11\n t_start = [\"08:00\",\"09:00\",\"10:00\",\"11:00\",\"12:00\",\"13:00\",\"14:00\",\"15:00\",\"16:00\",\"17:00\"]\t\n t_end = [\"08:40\",\"09:40\",\"10:40\",\"11:40\",\"12:40\",\"13:40\",\"14:40\",\"15:40\",\"16:40\",\"17:40\"]\t\n \n\n\n for index, i in enumerate(time_cells_dict, start =1 ):\n worksheet.merge_range(f\"{time_cells_dict[i][0]}{row}:{time_cells_dict[i][1]}{row}\", f'{index}', merge_format('#2FBAB3', '9')) \n\n\n \n row += 1\n for i, s, e in zip(time_cells_dict, t_start, t_end ):\n worksheet.merge_range(f\"{time_cells_dict[i][0]}{row}:{time_cells_dict[i][1]}{row}\", f'{s}', merge_format('#2FBAB3', '9'))\n worksheet.merge_range(f\"{time_cells_dict[i][0]}{row+1}:{time_cells_dict[i][1]}{row+1}\", f'{e}', merge_format('#2FBAB3', '9'))\n \n \ndef run(file1, file2, department):\n global workbook, dfss01, dfsf24\n dfss01 = pd.read_csv(file1)\n dfsf24 = pd.read_csv(file2)\n # Check if the number of columns is 20, otherwise raise an exception\n if len(dfss01.columns) != 20:\n raise ValueError(\"Make sure you upload the correct file (SS01) from Rayat!\")\n if len(dfsf24.columns) != 20:\n raise ValueError(\"Make sure you upload the correct file (SF24) from Rayat!\")\n LIST_OF_STUDENT_ID = get_lab_department(department) \n output = io.BytesIO()\n workbook = xlsxwriter.Workbook(output, {'in_memory': True}) \n for STUDENT_ID in LIST_OF_STUDENT_ID:\n create_excel_file(STUDENT_ID) \n workbook.close()\n output.seek(0)\n return output\n \n\n \n \n \n \n \n \n \n \n \n \n \n \n 
\n","repo_name":"amsb14/CampusAutoAssistant","sub_path":"utils/generateStudentTables.py","file_name":"generateStudentTables.py","file_ext":"py","file_size_in_byte":9989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19269496478","text":"from Project import Project\nimport random\n\nclass Student(object):\n\t\"\"\"\n\tThe student object contains all the information about it.\n\t\"\"\"\n\tdef __init__(self, user_id:int, university:str, name:str, link:str, level:int, project_name:str, project_grades:list, mood_grades:list, match_state:int=0):\n\t\tsuper(Student, self).__init__()\n\t\t\n\t\tself._user_id:int = user_id\n\t\tself._university:str = university\n\t\tself._name:str = name\n\t\tself._link:str = link\n\t\tself._level:int = level\n\t\tself._project:Project = Project(project_name, project_grades)\n\t\tself._mood_grades:list = mood_grades\n\t\tself._match_state:int = match_state\n\n\tdef set_not_match(self):\n\t\tself._match_state = 0\n\n\tdef set_eval(self, is_eval):\n\t\tif is_eval:\n\t\t\tself._match_state = 1\n\t\telse:\n\t\t\tself._match_state = 2\n\t\n\tdef get_match_state(self):\n\t\treturn self._match_state\n\n\tdef get_user_id(self):\n\t\treturn self._user_id\n\n\tdef calc_average_mood(self): \n\t\tself._mood_grades = [int(i) for i in self._mood_grades if i != ',']\n\t\treturn sum(self._mood_grades) / len(self._mood_grades)\n\n\tdef calc_average_grade(self):\n\t\tself._project._grades = [int(i) for i in self._project._grades if i != ',']\n\t\treturn sum(self._project._grades) / len(self._project._grades)\n\n\tdef to_string(self):\n\t\treturn '''\\\nStudent: {0}\nUniversity: {1}\nLevel: {2}\nProject: {3}\nLink: {4}'''.format(\n\t\t\tself._name,\n\t\t\tself._university,\n\t\t\tself._level,\n\t\t\tself._project.get_name(),\n\t\t\tself._link\n\t\t\t)\n\n\tdef to_string_adm(self):\n\t\treturn '''\\\nStudent: {0}\nUniversity: {1}\nLevel: {2}\nProject: {3}\nLink: {4}\nAv. mood grade: {5}'''.format(\n\t\t\tself._name,\n\t\t\tself._university,\n\t\t\tself._level,\n\t\t\tself._project.get_name(),\n\t\t\tself._link,\n\t\t\tround(self.calc_average_mood(), 2)\n\t\t\t)\n\n\tdef to_string_teacher(self):\n\t\treturn '''\\\nStudent: {0}\nUniversity: {1}\nLevel: {2}\nProject: {3}\nLink: {4}\nAv. 
project grade: {5}'''.format(\n\t\t\tself._name,\n\t\t\tself._university,\n\t\t\tself._level,\n\t\t\tself._project.get_name(),\n\t\t\tself._link,\n\t\t\tround(self.calc_average_grade(), 2)\n\t\t\t)\n\n\tdef __str__(self):\n\t\treturn self.to_string()\n\n\tdef to_dictionary(self):\n\t\tmood_grades = [str(integer) for integer in self._mood_grades]\n\t\tproject_grades = [str(integer) for integer in self._project.get_grades()]\n\t\treturn {\n\t\t\t\"user_id\": self._user_id,\n\t\t\t\"university\": self._university,\n\t\t\t\"name\": self._name,\n\t\t\t\"link\": self._link,\n\t\t\t\"level\": self._level,\n\t\t\t\"mood_grades\": \",\".join(mood_grades),\n\t\t\t\"project_name\": self._project.get_name(),\n\t\t\t\"project_grades\": \",\".join(project_grades),\n\t\t\t\"match_state\" : self._match_state,\n\t\t}\n\nif (__name__ == \"__main__\"):\n\tstud = Student(\n\t\tuser_id = 312162559,\n\t\tuniversity = \"University of Oxford\",\n\t\tname = \"Daniil\",\n\t\tlink = \"t.me/zkerriga\",\n\t\tlevel = 1,\n\t\tproject_name = \"double integral\",\n\t\tproject_grades = [4, 4],\n\t\tmood_grades = [4, 5, 5]\n\t)\n\tprint(stud)\n\ndef create_student_from_name(user_id:int, input_name:str, username):\n\tprojects = ['Double integral', 'Differential equation']\n\tuniversitys = [\"University of Oxford\", \"California Institute of Technology\", \"University of Cambridge\", \"Yale University\", \"The University of Chicago\"]\n\tlevels = [1, 2]\n\tgrades = [1, 2, 3, 4, 5]\n\n\tif (username):\n\t\tlink = \"t.me/\" + username\n\telse:\n\t\tlink = \"t.me/\" + \"error_link\"\n\treturn Student(\n\t\tuser_id,\n\t\trandom.choice(universitys),\n\t\tinput_name,\n\t\tlink,\n\t\trandom.choice(levels),\n\t\trandom.choice(projects),\n\t\t[random.choice(grades), random.choice(grades), random.choice(grades)],\n\t\t[random.choice(grades), random.choice(grades), random.choice(grades)]\n\t)\n\n","repo_name":"zkerriga/p2p","sub_path":"Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"31973072586","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Articles\n\ndef index_blog2(request):\n articles = Articles.objects.order_by('-dat_e')\n context = {\n 'title': 'blog2',\n 'articles': articles\n }\n\n return render(request, 'blog2/index_blog2.html', context)\n\ndef detail2(request, blog_id):\n blog = get_object_or_404(Articles, pk=blog_id)\n context = {\n 'title': 'blog2',\n 'blog': blog\n }\n\n return render(request, 'blog2/detail2.html', context)","repo_name":"Aleksey1975/05062023","sub_path":"blog2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2477034295","text":"#\n# @lc app=leetcode id=26 lang=python3\n#\n# [26] Remove Duplicates from Sorted Array\n#\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n index = 0\n while index < len(nums) - 1:\n if nums[index] == nums[index + 1]:\n del nums[index]\n index -= 1\n index += 1\n return len(nums)\n","repo_name":"OhYoooo/Leetcode","sub_path":"python/26.remove-duplicates-from-sorted-array.py","file_name":"26.remove-duplicates-from-sorted-array.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38244791511","text":"from http.server import HTTPServer, 
SimpleHTTPRequestHandler\nimport ssl\nimport sys\n\nif len(sys.argv) < 5:\n    print('\\n(*) USAGE:\\t python3 {0} server_ip server_port key certificate'.format(sys.argv[0]))\n    print('(*) Example:\\t python3 {0} 10.1.10.99 443 cert.key cert.pem\\n'.format(sys.argv[0]))\n    sys.exit()\n\nhandler = HTTPServer((sys.argv[1], int(sys.argv[2])), SimpleHTTPRequestHandler)\nhandler.socket = ssl.wrap_socket (handler.socket,\n        keyfile=sys.argv[3],\n        certfile=sys.argv[4], server_side=True)\n\nwith handler as httpd:\n    print('\\n' + '-' * 80 + '\\n')\n    print('Server running on https://{0}:{1}\\n'.format(sys.argv[1], sys.argv[2]))\n    print('-' * 80 + '\\n')\n    httpd.serve_forever()\n","repo_name":"marstrander/BasicHTTPSserver","sub_path":"BasicHTTPSserver.py","file_name":"BasicHTTPSserver.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31838677562","text":"import cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread(\"ireland.jpg\")\r\nr = cv2.selectROI(img)  # returns (x, y, w, h)\r\nprint(r)\r\nimg1 = img\r\n# black out the selected rectangle: rows y..y+h, columns x..x+w\r\nfor i in range(r[1], r[1] + r[3]):\r\n    for j in range(r[0], r[0] + r[2]):\r\n        for k in range(0,3):\r\n            img1[i][j][k]=0\r\ncv2.imwrite('blk.jpg', img1)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n#crop = np.zeros([r[3]-r[1], r[2]-r[0], 3], dtype=np.uint8)","repo_name":"vishwesh-19/Digital-Image-Processing","sub_path":"Assign2_crop/assign2_crop.py","file_name":"assign2_crop.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41490367910","text":"class Node:\n    def __init__(self, data=None, next=None):\n        self.data = data\n        self.next = next\n\nclass Queue:\n    def __init__(self, head = None, tail = None):\n        self.head = head\n        self.tail = tail\n\n    def PrintAll(self):\n        ptr = self.head\n        while(ptr is not None):\n            print(ptr.data, end=' ')\n            ptr = ptr.next\n        print()\n\n    def Enqueue(self,value):\n        node = Node(value)\n        if(self.head is None):\n            self.head = node\n            self.tail = node\n        else:\n            self.tail.next = node\n            self.tail = node\n        self.PrintAll()\n\n    def Dequeue(self):\n        if(self.head is None):\n            return \"Queue is empty\"\n        value = self.head.data\n        self.head = self.head.next\n        if(self.head is None):\n            self.tail = None\n        return value\n\nqueue = Queue()\nqueue.Enqueue(1)\nqueue.Enqueue(2)\nprint(queue.Dequeue())\nprint(queue.Dequeue())\nprint(queue.Dequeue())","repo_name":"venkateshraizaday/DataStructures","sub_path":"Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72310567873","text":"from data_preprocess.standardizing import preprocess_df\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport random\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision import transforms\nimport numpy as np\nfrom utilities import DataLoader, PreProc\nimport para\nfrom tensorboardX import SummaryWriter\nimport torch.nn as nn\n\n\nLEARNING_RATE = 0.0001\nBATCH_SIZE = 32\nEPOCH = 30\n\nclass CustomDatasetFromPre(Dataset):\n    def __init__(self, data, transforms=None):\n\n        self.data = data\n        self.data = np.asarray(self.data).astype(np.float32)\n        self.transforms = transforms\n        # self.size = self.data.shape[0] * self.data.shape[1]\n        # self.zeros_num = int(self.size * percent)\n        # self.ones_num = self.size - 
    def __getitem__(self, index):\n        if self.transforms is not None:\n            data = self.transforms(self.data)\n        # integer division so the label row index is an int\n        label_pos = index + self.data.shape[1]//2 + 1\n        label = self.data[label_pos]\n        high_pos = index\n        low_pos = index + self.data.shape[1] + 1\n        # np.concatenate expects a tuple of arrays\n        train_data = np.concatenate((self.data[high_pos:label_pos-1,:], self.data[label_pos+1:low_pos, :]))\n        return (train_data, label)\n\n    def __len__(self):\n        return len(self.data) - self.data.shape[1] - 1\n\nclass CNN(torch.nn.Module):\n    def __init__(self):\n        super(CNN, self).__init__()\n        self.conv1 = nn.Sequential(  # input shape (1, 28, 28)\n            nn.Conv2d(\n                in_channels=1,  # input height\n                out_channels=16,  # n_filters\n                kernel_size=5,  # filter size\n                stride=1,  # filter movement/step\n                padding=2,  # padding=(kernel_size-1)/2 when stride=1\n            ),  # output shape (16, dimension, dimension)\n            nn.ReLU(),  # activation\n            nn.MaxPool2d(kernel_size=2),  # downsample over 2x2 windows, output shape (16, 14, 14)\n        )\n        self.conv2 = nn.Sequential(  # input shape (16, dimension/2, dimension/2)\n            nn.Conv2d(16, 32, 5, 1, 2),  # output shape (32, dimension/2, dimension/2)\n            nn.ReLU(),  # activation\n            nn.MaxPool2d(2),  # output shape (32, 7, 7)\n        )\n\n    def forward(self, x):\n        # pass the input through the two convolution blocks defined above\n        x = self.conv1(x)\n        x = self.conv2(x)\n        return x\n\n\nnet2 = torch.nn.Sequential(\n    torch.nn.Dropout(0.1),\n    torch.nn.Linear(84, 100),\n    torch.nn.ReLU(),\n    torch.nn.Linear(100, 200),\n    torch.nn.ReLU(),\n    torch.nn.Linear(200, 100),\n    torch.nn.ReLU(),\n    torch.nn.Linear(100, 84)\n)\nnet3 = torch.nn.Sequential(\n    torch.nn.Dropout(0.1),\n    torch.nn.Linear(84, 200),\n    torch.nn.ReLU(),\n    torch.nn.Linear(200, 500),\n    torch.nn.ReLU(),\n    torch.nn.Linear(500, 200),\n    torch.nn.ReLU(),\n    torch.nn.Linear(200, 84)\n)\n\n\nif __name__ == \"__main__\":\n    transformations = transforms.Compose([transforms.ToTensor()])\n    dl = DataLoader()\n    machine_num = 0  # index 0 is unit 001, index 1 is unit 002, and so on\n    import para\n    raw_data = dl(para.train_data,machine_num)  # load the data\n\n    data = raw_data[:,1:]  # drop the time column\n    pp = PreProc(dl.t[1:])\n    pp.fit(data)  # preprocessing\n    inputs = pp.transform(data)  # preprocessed output\n    print(np.any(np.isnan(inputs)))  # check for NaN values\n    # custom_data_from_csv = CustomDatasetFromCSV(csv_path='/Users/yangyucheng/Desktop/SCADA/train/201807_1.csv')\n    custom_data_from_pre = CustomDatasetFromPre(inputs)\n    dataset_loader = torch.utils.data.DataLoader(dataset=custom_data_from_pre,\n                                                 batch_size=BATCH_SIZE,\n                                                 shuffle=False)\n\n    # net2 (defined above) is the fully connected network that is actually trained below\n    optimizer = torch.optim.Adam(net2.parameters(), lr=LEARNING_RATE)\n    loss_func = torch.nn.MSELoss()\n    plt.ion()\n    plt.show()\n    writer = SummaryWriter()\n    iteration = 0\n    for epoch in range(EPOCH):\n        for step, (batch_x, batch_y) in enumerate(dataset_loader):\n            net2.zero_grad()\n            prediction = net2(batch_x)\n            loss = loss_func(prediction, batch_y)\n            # print('prediction ', prediction[0])\n            # print('label ', batch_y[0])\n            loss.backward()\n            optimizer.step()\n            iteration += 1\n            print('Epoch: ', epoch, '| Step: ', step, '| Loss', loss)\n            writer.add_scalar('data/loss', loss, iteration)\n    writer.export_scalars_to_json(\"./test.json\")\n    writer.close()\n    torch.save(net2, 'net.pkl')\n","repo_name":"Youngyi/YLB-tech","sub_path":"CNN/CNN_test.py","file_name":"CNN_test.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2439390435","text":"# If nose sees us\ndef test_mpi_send():\n    import os\n    # Run this file with two processors\n    assert 
os.popen('mpiexec -np 2 python /home/mrocklin/workspace/ape/ape/test/test_mpi_env_send.py').read() == \"True\"\n\nimport sys\nimport os\nsys.path.insert(0,os.path.abspath(\"/home/mrocklin/workspace/ape/\"))\n# If we're being run by someone (hopefully mpiexec)\nif __name__ == '__main__':\n\n from ape.env_manip import *\n from mpi4py import MPI\n import numpy as np\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n num_procs = MPI.COMM_WORLD.Get_size()\n\n if rank == 0:\n x = T.matrix('x')\n y = x+x*x\n env = theano.FunctionGraph([x], [y])\n s = pack(env)\n comm.send(s, dest=1, tag = 1234)\n\n if rank == 1:\n s = comm.recv(source = 0, tag = 1234)\n env = unpack(s)\n f = theano.function(env.inputs, env.outputs[0])\n sys.stdout.write(str(f(np.ones((5,5)).astype(np.float32)).sum() == 50))\n","repo_name":"mrocklin/ape","sub_path":"ape/test/test_mpi_env_send.py","file_name":"test_mpi_env_send.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"33708445390","text":"class Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n res = [1] * len(nums)\n prefix, postfix = 1, 1\n # go forward\n for i in range(len(nums)):\n res[i] *= prefix\n prefix *= nums[i]\n # go backward\n for i in range(len(nums)-1, -1, -1):\n res[i] *= postfix\n postfix *= nums[i]\n return res","repo_name":"x12hengyu/algorithm-notes","sub_path":"blind75/238_Product_of_Array_Except_Self.py","file_name":"238_Product_of_Array_Except_Self.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23585367511","text":"#!/usr/bin/python3\n\nPI = 3.14159265358979323846264338327950288\n\nT = int(input())\n\nfor case in range(1, T+1):\n maxSurface = -1\n N, K = map(int, input().split())\n pancakes = [None] * N\n for i in range(N):\n pancakes[i] = tuple(map(int, input().split()))\n pancakes.sort(key = lambda tup: tup[0])\n\n for i in range(K, N+1):\n firsts = sorted(pancakes[:i-1], key = lambda tup: tup[0] * tup[1], reverse=True)\n surface = pancakes[i-1][0]**2 + 2*pancakes[i-1][0]*pancakes[i-1][1]\n surface += sum(map(lambda tup: 2*tup[0]*tup[1], firsts[:K-1]))\n maxSurface = max(surface, maxSurface)\n\n print(\"Case #\" + str(case) + \": \" + str(PI * maxSurface))\n \n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/167.py","file_name":"167.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17792839878","text":"def binary_search(x, T): # 二分\n t = []\n low = 0\n length = len(T) - 1\n while low <= length:\n mid = int((low + length) / 2)\n if x < T[mid]:\n length = mid - 1\n elif x > T[mid]:\n low = mid + 1\n else:\n t.append(mid)\n for i in range(1,mid+1):\n if T[mid-i]==x:\n t.append(mid-i)\n else:\n break\n for j in range(1,length-mid+1):\n if T[mid+j]==x:\n t.append(mid+j)\n else:\n break\n return t\n\n\nT = [1, 2, 3, 4, 4, 5]\nx = input(\"输入查询的数:\")\nx = int(x)\nprint(binary_search(x, T))\n\n'''\n# 顺序查找\ndef sequence_search(list1, m):\n return [i for (i, m) in enumerate(list1) if m == x]\n\n\nT = [1, 2, 3, 4, 5, 5, 8]\nx = input(\"输入查询的数:\")\nx = int(x)\nif x in T:\n print(sequence_search(T, x))\nelse:\n 
print(\"0\")\n'''\n","repo_name":"starshine0618/analysis","sub_path":"test3/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17797915843","text":"#1) Write a program to reverse a string.\n# Sample input: “1234abcd”\n# Expected output: “dcba4321”\n\nstr1=input(\"enter a string:\")\nrev_str=str1[::-1]\nprint(rev_str)\n\n\n#output\n# enter a string:1234abcd\n# dcba4321\n#\n# Process finished with exit code 0\n#\n","repo_name":"deepa-karthik/Python_assignments","sub_path":"task4/task4_1.py","file_name":"task4_1.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17073980419","text":"from ..utils import rest\nfrom starkcore.utils.checks import check_date\nfrom starkcore.utils.api import from_api_json\nfrom starkcore.utils.resource import Resource\nfrom starkcore.utils.checks import check_datetime\nfrom ..utils.parse import parse_and_verify\nfrom ..boleto.log.__log import _resource as _boleto_log_resource\nfrom ..invoice.log.__log import _resource as _invoice_log_resource\nfrom ..deposit.log.__log import _resource as _deposit_log_resource\nfrom ..transfer.log.__log import _resource as _transfer_log_resource\nfrom ..taxpayment.log.__log import _resource as _tax_payment_log_resource\nfrom ..darfpayment.log.__log import _resource as _darf_payment_log_resource\nfrom ..boletoholmes.log.__log import _resource as _boleto_holmes_log_resource\nfrom ..brcodepayment.log.__log import _resource as _brcode_payment_log_resource\nfrom ..boletopayment.log.__log import _resource as _boleto_payment_log_resource\nfrom ..utilitypayment.log.__log import _resource as _utility_payment_log_resource\n\n\n_resource_by_subscription = {\n \"transfer\": _transfer_log_resource,\n \"invoice\": _invoice_log_resource,\n \"deposit\": _deposit_log_resource,\n \"boleto\": _boleto_log_resource,\n \"brcode-payment\": _brcode_payment_log_resource,\n \"boleto-payment\": _boleto_payment_log_resource,\n \"utility-payment\": _utility_payment_log_resource,\n \"darf-payment\": _darf_payment_log_resource,\n \"tax-payment\": _tax_payment_log_resource,\n \"holmes\": _boleto_holmes_log_resource,\n}\n\n\nclass Event(Resource):\n \"\"\"# Webhook Event object\n An Event is the notification received from the subscription to the Webhook.\n Events cannot be created, but may be retrieved from the Stark Bank API to\n list all generated updates on entities.\n ## Attributes (return-only):\n - id [string]: unique id returned when the event is created. ex: \"5656565656565656\"\n - log [Log]: a Log object from one of the subscribed services (TransferLog, InvoiceLog, DepositLog, BoletoLog, BoletoHolmesLog, BrcodePaymentLog, BoletoPaymentLog, UtilityPaymentLog, TaxPaymentLog or DarfPaymentLog)\n - created [datetime.datetime]: creation datetime for the notification event. ex: datetime.datetime(2020, 3, 10, 10, 30, 0, 0)\n - is_delivered [bool]: true if the event has been successfully delivered to the user url. ex: False\n - subscription [string]: service that triggered this event. ex: \"transfer\", \"utility-payment\"\n - workspace_id [string]: ID of the Workspace that generated this event. Mostly used when multiple Workspaces have Webhooks registered to the same endpoint. 
ex: \"4545454545454545\"\n \"\"\"\n\n def __init__(self, log, created, is_delivered, subscription, workspace_id, id):\n Resource.__init__(self, id=id)\n\n self.created = check_datetime(created)\n self.is_delivered = is_delivered\n self.subscription = subscription\n self.workspace_id = workspace_id\n self.log = log\n if subscription in _resource_by_subscription:\n self.log = from_api_json(resource=_resource_by_subscription[subscription], json=log)\n\n\n_resource = {\"class\": Event, \"name\": \"Event\"}\n\n\ndef get(id, user=None):\n \"\"\"# Retrieve a specific notification Event\n Receive a single notification Event object previously created in the Stark Bank API by its id\n ## Parameters (required):\n - id [string]: object unique id. ex: \"5656565656565656\"\n ## Parameters (optional):\n - user [Organization/Project object, default None]: Organization or Project object. Not necessary if starkbank.user was set before function call\n ## Return:\n - Event object with updated attributes\n \"\"\"\n return rest.get_id(resource=_resource, id=id, user=user)\n\n\ndef query(limit=None, after=None, before=None, is_delivered=None, user=None):\n \"\"\"# Retrieve notification Events\n Receive a generator of notification Event objects previously created in the Stark Bank API\n ## Parameters (optional):\n - limit [integer, default None]: maximum number of objects to be retrieved. Unlimited if None. ex: 35\n - after [datetime.date or string, default None]: date filter for objects created only after specified date. ex: datetime.date(2020, 3, 10)\n - before [datetime.date or string, default None]: date filter for objects created only before specified date. ex: datetime.date(2020, 3, 10)\n - is_delivered [bool, default None]: bool to filter successfully delivered events. ex: True or False\n - user [Organization/Project object, default None]: Organization or Project object. Not necessary if starkbank.user was set before function call\n ## Return:\n - generator of Event objects with updated attributes\n \"\"\"\n return rest.get_stream(\n resource=_resource,\n limit=limit,\n after=check_date(after),\n before=check_date(before),\n is_delivered=is_delivered,\n user=user,\n )\n\n\ndef page(cursor=None, limit=None, after=None, before=None, is_delivered=None, user=None):\n \"\"\"# Retrieve paged Events\n Receive a list of up to 100 Event objects previously created in the Stark Bank API and the cursor to the next page.\n Use this function instead of query if you want to manually page your requests.\n ## Parameters (optional):\n - cursor [string, default None]: cursor returned on the previous page function call\n - limit [integer, default 100]: maximum number of objects to be retrieved. It must be an integer between 1 and 100. ex: 50\n - after [datetime.date or string, default None]: date filter for objects created only after specified date. ex: datetime.date(2020, 3, 10)\n - before [datetime.date or string, default None]: date filter for objects created only before specified date. ex: datetime.date(2020, 3, 10)\n - is_delivered [bool, default None]: bool to filter successfully delivered events. ex: True or False\n - user [Organization/Project object, default None]: Organization or Project object. 
Not necessary if starkbank.user was set before function call\n    ## Return:\n    - list of Event objects with updated attributes\n    - cursor to retrieve the next page of Event objects\n    \"\"\"\n    return rest.get_page(\n        resource=_resource,\n        cursor=cursor,\n        limit=limit,\n        after=check_date(after),\n        before=check_date(before),\n        is_delivered=is_delivered,\n        user=user,\n    )\n\n\ndef delete(id, user=None):\n    \"\"\"# Delete a webhook Event entity\n    Delete a notification Event entity previously created in the Stark Bank API by its ID\n    ## Parameters (required):\n    - id [string]: Event unique id. ex: \"5656565656565656\"\n    ## Parameters (optional):\n    - user [Organization/Project object, default None]: Organization or Project object. Not necessary if starkbank.user was set before function call\n    ## Return:\n    - deleted Event object\n    \"\"\"\n    return rest.delete_id(resource=_resource, id=id, user=user)\n\n\ndef update(id, is_delivered, user=None):\n    \"\"\"# Update notification Event entity\n    Update notification Event by passing id.\n    If is_delivered is True, the event will no longer be returned on queries with is_delivered=False.\n    ## Parameters (required):\n    - id [string]: Event unique id. ex: \"5656565656565656\"\n    - is_delivered [bool]: If True and event hasn't been delivered already, event will be set as delivered. ex: True\n    ## Parameters (optional):\n    - user [Organization/Project object, default None]: Organization or Project object. Not necessary if starkbank.user was set before function call\n    ## Return:\n    - target Event with updated attributes\n    \"\"\"\n    payload = {\n        \"isDelivered\": is_delivered\n    }\n    return rest.patch_id(resource=_resource, id=id, user=user, payload=payload)\n\n\ndef parse(content, signature, user=None):\n    \"\"\"# Create single notification Event from a content string\n    Create a single Event object received from event listening at subscribed user endpoint.\n    If the provided digital signature does not check out with the StarkBank public key, a\n    starkbank.error.InvalidSignatureError will be raised.\n    ## Parameters (required):\n    - content [string]: response content from request received at user endpoint (not parsed)\n    - signature [string]: base-64 digital signature received at response header \"Digital-Signature\"\n    ## Parameters (optional):\n    - user [Organization/Project object, default None]: Organization or Project object. 
Not necessary if starkbank.user was set before function call\n ## Return:\n - Parsed Event object\n \"\"\"\n return parse_and_verify(\n content=content,\n signature=signature,\n user=user,\n resource=_resource,\n key=\"event\",\n )\n","repo_name":"starkbank/sdk-python","sub_path":"starkbank/event/__event.py","file_name":"__event.py","file_ext":"py","file_size_in_byte":8657,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"31395906252","text":"import os\nimport json\nimport asyncio\nimport requests\n\nfrom typing import List \n\nfrom web3 import Web3\nfrom web3.middleware import construct_sign_and_send_raw_middleware\n\nfrom Crypto.Hash import keccak\n\nfrom websockets.client import connect\n\nfrom dotenv import load_dotenv\n\n\nload_dotenv()\n\n\nclass ListingSniper:\n\n def __init__(self, token: str = None):\n\n self.token_to_snipe = token\n\n self.w3 = Web3(Web3.WebsocketProvider(os.environ.get(\"WSS_PROVIDER_URI\")))\n self.account = self.w3.eth.account.from_key(os.environ.get(\"PRIVATE_KEY\"))\n self.w3.middleware_onion.add(construct_sign_and_send_raw_middleware(self.account))\n\n self.universal_router_contract = self.w3.eth.contract(\n address=os.environ.get(\"UNIVERSAL_ROUTER_ADDRESS\")\n )\n \n bot_token = os.environ.get(\"TG_BOT_TOKEN\")\n chat_id = os.environ.get(\"BANTER_ID\")\n self.tg_bot_url = f\"https://api.telegram.org/bot{bot_token}/sendMessage?chat_id={chat_id}&text=\"\n\n with open(\"erc20_abi.json\") as file:\n self.erc20_abi = json.load(file)\n\n async def handle_event(self, event: dict):\n '''\n processes incoming events emitted by the uniswap factory contract.\n '''\n \n topics = event[\"params\"][\"result\"][\"topics\"] \n\n # the first (formally 0-th) entry of the list of topics encodes the eventtype itself.\n token_0 = self.w3.to_checksum_address(\"0x\" + topics[1][26:])\n token_1 = self.w3.to_checksum_address(\"0x\" + topics[2][26:])\n\n token0_contract = self.w3.eth.contract(token_0, abi=self.erc20_abi)\n token0_symbol = token0_contract.functions.symbol().call()\n\n token1_contract = self.w3.eth.contract(token_1, abi=self.erc20_abi)\n token1_symbol = token1_contract.functions.symbol().call()\n\n print(f\"pair created: ({token0_symbol}/{token1_symbol})\")\n \n sub_hash = event[\"params\"][\"subscription\"]\n\n url = self.tg_bot_url + f\"{token0_symbol} / {token1_symbol}\\n{token0_contract}\\n{token1_contract}\"\n requests.get(url) \n\n if token0_symbol == self.token_to_snipe or token1_symbol == self.token_to_snipe:\n \n if sub_hash == self.v3_sub_hash:\n # do v3 shit\n pass\n elif sub_hash == self.v2_sub_hash:\n pass # do v2 shit\n\n return\n \n @staticmethod\n async def subscribe(socket, address: str, topics: List[str] = []):\n data = dict(\n jsonrpc=\"2.0\", \n id=1, \n method=\"eth_subscribe\", \n params=[\"logs\", dict(address=address, topics=topics)]\n )\n await socket.send(json.dumps(data))\n\n @staticmethod\n def encode_event_sig(sig: str):\n k = keccak.new(digest_bits=256)\n k.update(sig.encode())\n return \"0x\" + str(k.hexdigest())\n \n async def run(self):\n\n v3 = os.environ.get(\"V3_FACTORY_ADDRESS\")\n v2 = os.environ.get(\"V2_FACTORY_ADDRESS\")\n\n async with connect(os.environ.get(\"ARBITRUM_WSS_URI\")) as socket:\n pool_created_topic = self.encode_event_sig(\n \"PoolCreated(address,address,uint24,int24,address)\"\n )\n\n # subscribing to the v3 first ...\n await self.subscribe(socket, v3, [pool_created_topic])\n v3_sub_res = await socket.recv()\n self.v3_sub_hash = 
json.loads(v3_sub_res)[\"result\"]\n            print(\"subscribed to v3 events:\", self.v3_sub_hash)\n\n            # now the v2 event\n            await self.subscribe(socket, v2, [])\n            v2_sub_res = await socket.recv()\n            self.v2_sub_hash = json.loads(v2_sub_res)[\"result\"]\n\n            print(\"subscribed to v2 events:\", self.v2_sub_hash)\n\n            while 1:\n\n                try:\n                    msg = await asyncio.wait_for(socket.recv(), timeout=15)\n                    event = json.loads(msg)\n                    await self.handle_event(event)\n                \n                except Exception:  # keep listening even if a single message fails\n                    pass\n\nif __name__ == \"__main__\":\n    sniper = ListingSniper()\n    asyncio.run(sniper.run())\n","repo_name":"gemsandfrens/lpscanner","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41985989582","text":"from re import S\nfrom app import app, db\nfrom common.AppException import AppException\nfrom model.StockTrade import StockTrade  # used by BuscadorOperaciones.obt_historial_oper below\nfrom model.StockSymbol import StockSymbol\nfrom model.orden import OrdenModel\nfrom model.posicion import PosicionModel\nfrom model.TipoModel import TipoModel\n\nfrom reader.posicion import PosicionReader\nfrom reader.operacion import OperacionReader\nfrom reader.calendariodiario import CalendarioDiarioReader\n\nfrom datetime import datetime, date, time, timedelta\nfrom common.Response import Response\nfrom common.Error import Error\nfrom sqlalchemy import desc\nfrom sqlalchemy.orm import join\nimport sqlalchemy.sql.functions as func\nfrom sqlalchemy.sql import extract\nfrom controller.base import Base\n\n\nclass OperacionManager(Base): \n    def __init__(self):\n        pass\n\n    def _get_handler(self, asset_type=\"equity\"):\n        return self.handlers[asset_type]\n\n    def get_operaciones(self, args={}):\n        records = PosicionReader.get_operaciones(self.usuario.id)\n        return Response().from_raw_data(records)\n\n    def get_rentabilidad_ult30dias(self, args={}): \n        fechas = {}\n        rows = []\n\n        d = timedelta(days=30)\n        fch_actual = date.today() \n        fch_desde = fch_actual - d \n        \n        records = OperacionReader.get_rentabilidad_diaria(self.usuario.id, fch_desde=fch_desde, fch_hasta=fch_actual)\n\n        for elem in records:\n            rows.append({\n                \"fch_transaccion\":elem.fch_transaccion,\n                \"imp_rentabilidad\": 0 if elem.imp_rentabilidad is None else elem.imp_rentabilidad\n            })\n\n        return Response().from_raw_data(rows)\n\n    def get_rentabilidades_x_periodo(self, args={}):\n        records = []\n\n        # return for the most recent day\n        rent_ultdia = OperacionReader.get_rentabilidad_ultdia(usuario_id=self.usuario.id)\n\n        records.append({\n            \"periodo\":\"Dia - {0}\".format(rent_ultdia.fch_transaccion.strftime(\"%d/%m/%Y\")),\n            \"imp_rentabilidad\": rent_ultdia.imp_rentabilidad\n        })\n\n        # return for the most recent week\n        anyo, semana, imp_rentabilidad = OperacionReader.get_rentabilidad_ultsemana(usuario_id=self.usuario.id)\n        records.append({\n            \"periodo\": \"Semana - {0}/{1}\".format(anyo, semana),\n            \"imp_rentabilidad\": imp_rentabilidad\n        })\n\n        # return for the most recent month\n        anyo, mes, imp_rentabilidad = OperacionReader.get_rentabilidad_ultmes(usuario_id=self.usuario.id)\n        records.append({\n            \"periodo\": \"Mes - {0}/{1}\".format(anyo, mes),\n            \"imp_rentabilidad\": imp_rentabilidad\n        })\n\n        # return for the most recent year\n        anyo, imp_rentabilidad = OperacionReader.get_rentabilidad_ultanyo(usuario_id=self.usuario.id)\n        records.append({\n            \"periodo\": \"Año - {0}\".format(anyo),\n            \"imp_rentabilidad\": imp_rentabilidad\n        })\n\n        return Response().from_raw_data(records)\n\nclass EliminadorEntryPoint:\n    def __init__(self):\n        pass\n\n    def procesar(self, 
args={}):\n try:\n self._validar(args)\n opers = self._collect(args)\n eliminador = Eliminador()\n eliminador.procesar(opers)\n db.session.commit()\n return Response(msg=\"Las Operaciones se han eliminado correctamente\").get()\n except Exception as e:\n db.session.rollback()\n return Response().from_exception(e)\n\n def _validar(self, args={}):\n errors = []\n if \"del_opers\" not in args:\n errors.append(\"El parámetro 'del_opers' no ha sido enviado\")\n \n if len(errors) > 0:\n raise AppException(msg=\"Se han encontrado errores de validación\",errors=errors)\n\n def _collect(self, args={}):\n return args[\"del_opers\"] \n \n\nclass BuscadorOperaciones:\n def __init__(self):\n pass\n\n def obt_historial_oper(self, args={}):\n try:\n query = db.session.query(\n StockTrade.id,\n StockTrade.num_operacion,\n StockTrade.order_id,\n StockTrade.num_orden,\n StockTrade.asset_type,\n StockTrade.symbol,\n StockTrade.trade_type,\n TipoModel.tipo_nombre.label(\"tipo_oper_nombre\"),\n StockTrade.cantidad,\n StockTrade.saldo,\n StockTrade.trade_date,\n StockTrade.trade_month,\n StockTrade.imp_accion,\n StockTrade.imp_operacion,\n StockTrade.imp_accion_origen,\n StockTrade.realized_gl \n ).outerjoin(TipoModel, StockTrade.trade_type == TipoModel.tipo_id)\n query = query.order_by(StockTrade.trade_date.desc(),StockTrade.symbol.asc(),StockTrade.num_operacion.desc())\n data = query.all()\n return Response(raw_data=data).get()\n except Exception as e:\n return Response().from_exception(e)\n\n\n\n\n","repo_name":"ToxidSeed/bagholdercuy","sub_path":"controller/operacion.py","file_name":"operacion.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74445209795","text":"from models import *\nimport torch.nn as nn\n# from metrics import CharacterErrorRate,ExactMatch\nfrom typing import List\nfrom models.score import exact_match_score, edit_distance\nfrom models.models import ResNet_Transformer\nimport transformers\nimport torch\n\ndef get_device(cuda_index:int):\n \"\"\"\n 选择合适的device\n \"\"\"\n if cuda_index==-1:\n device = torch.device('cpu')\n return device\n device = torch.device('cuda:{}'.format(cuda_index)) if torch.cuda.is_available() else torch.device('cpu')\n \n return device\n\nclass Lit_Resnet_Transformer():\n def __init__(\n self,\n args,\n d_model: int,\n dim_feedforward: int,\n nhead: int,\n dropout: float,\n num_decoder_layers: int,\n max_output_len: int,\n sos_index: int,\n eos_index: int,\n pad_index: int,\n unk_index: int,\n num_classes: int,\n lr: float = 0.001,\n weight_decay: float = 0.0001,\n milestones: List[int] = [5],\n gamma: float = 0.1,\n ):\n\n self.lr = lr\n self.weight_decay = weight_decay\n self.milestones = milestones\n self.gamma = gamma\n self.args = args\n self.models = ResNet_Transformer(\n args,\n d_model, dim_feedforward,\n nhead, dropout,\n num_decoder_layers,\n max_output_len,\n sos_index, eos_index,\n pad_index, num_classes,\n ).to(get_device(args.cuda_index))\n self.ignore_indices = [sos_index, pad_index, eos_index, unk_index]\n self.loss_fn = nn.CrossEntropyLoss(ignore_index=pad_index)\n\n\n def cal_loss(self, batch):\n imgs, targets,mask_target,length = batch\n if mask_target is not None:\n logits = self.models(imgs, mask_target[:, :-1],length) #original mask_target[:, :-1]\n else:\n logits = self.models(imgs, targets[:, :-1],length)\n loss = self.loss_fn(logits, targets[:, 1:])\n\n #self.log(\"train/loss\", loss)\n return loss\n \n def 
cal_loss_editDistance_exactMatch(self, batch):\n with torch.no_grad():\n imgs, targets,mask_target,length = batch\n if mask_target is not None:\n logits = self.models(imgs, mask_target[:, :-1],length)\n else:\n logits = self.models(imgs, targets[:, :-1],length)\n # logits = self.models(imgs, targets[:, :-1],lengths)\n loss = self.loss_fn(logits, targets[:, 1:]) # 算loss始终和真正的target比较\n # val_cer = self.val_cer(logits, targets)\n #self.log(\"val/loss\", loss, on_step=False, on_epoch=True, prog_bar=True)\n\n preds = self.models.predict(imgs)\n val_edit_distance = edit_distance(preds, targets,self.ignore_indices)\n val_exact_match = exact_match_score(preds, targets, self.ignore_indices)\n # print(val_edit_distance, val_exact_match)\n return loss, val_edit_distance, val_exact_match\n\n def configure_optimizers(self):\n optimizer = torch.optim.AdamW(self.models.parameters(), lr=self.lr, weight_decay=self.weight_decay)\n # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.milestones, gamma=self.gamma)\n scheduler = transformers.get_cosine_schedule_with_warmup( \n optimizer,\n num_warmup_steps=self.args.step_per_epoch//2, #int(1000*len(train_loader)/2128.875)\n num_training_steps = self.args.epoches*(self.args.step_per_epoch)\n )\n return optimizer, scheduler\n \n\n \n","repo_name":"Andrew-wong-ty/MathImg2LaTeX","sub_path":"models/lit_models.py","file_name":"lit_models.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"10372741187","text":"#%%\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom wind_interpolate import Interpolation\nfrom numba import njit\nimport time\nfrom pyscipopt import Model,quicksum\nimport math\n#%%\nclass DataLoader:\n \n def __init__(self,DAResultFolder,RTDataFolder):\n self.RTDataFolder=RTDataFolder\n self.DAResultFolder=DAResultFolder\n \n def readWind(self,fileName,U,interval):\n \"\"\"\n 函数功能:从文件中读取实际运行日风电功率\n 输入参数: \n fileName RTDataFolder路径下的风电数据文件名\n U:风电机组总数\n 返回值:\n W_rt 24x1800 风电场整体功率\n W_perUnit 24*(60/interval)x interval*30 单台风机功率\n W_rt_min 24x1 每个断面风机最大功率最小值\n \"\"\" \n wind_dataset=pd.read_excel(self.RTDataFolder+fileName)\n W=wind_dataset.values[10:34,2]\n W=Interpolation(W,43200) #三次样条插值,从24个点变为24*1800个点\n W_perUnit=W.reshape(( int(24*(60/interval)),int(interval*30)))\n W_rt=W*U #拓展到整个风电场的功率预测\n W_rt=W_rt.reshape((24,1800)) #行代表第几个小时,列代表第几个小时的第几个2s\n W_rt_min=np.zeros(24,dtype='float32') #每小时的风电最大出力预测的最小值\n for t in range(24):\n W_rt_min[t]=W_rt[t,:].min()\n return W_perUnit,W_rt,W_rt_min\n \n \n def readRegD(self,fileName):\n \"\"\"\n 函数功能:从文件中读取实际运行日调频信号\n 输入参数: fileName RTDataFolder路径下的调频信号数据文件名\n 返回值: regD 24x1800\n \"\"\"\n regD_dataset=pd.read_excel(self.RTDataFolder+fileName) #调频信号\n regD=regD_dataset.values[0:43200:1,2] #取自PJM市场2015-01-01\n regD=regD.reshape(24,1800)\n regD=np.float32(regD)\n return regD\n \n def readWindRegEps(self,fileName):\n \"\"\"\n #读取风电调频误差\n 返回值:eps_wind_reg 24x1800\n \"\"\"\n with open(self.RTDataFolder+fileName,\"rb\") as f:\n eps_wind_reg=pickle.load(f) #风电调频误差 均值为0标准差为0.05的正态分布\n eps_wind_reg=eps_wind_reg.reshape(24,1800)\n \n return eps_wind_reg\n \n \n def readV(self):\n with open(self.DAResultFolder+\"V.pkl\",\"rb\") as f:\n V=pickle.load(f)\n return V\n \n def readStateSpace(self):\n with open(self.DAResultFolder+\"T.pkl\",\"rb\") as f:\n T=pickle.load(f)\n with open(self.DAResultFolder+\"E.pkl\",\"rb\") as f:\n E=pickle.load(f)\n return T,E\n 
def readActionSpace(self):\n        with open(self.DAResultFolder+\"P_wda.pkl\",\"rb\") as f:\n            P_wda=pickle.load(f)\n        with open(self.DAResultFolder+\"B_reg.pkl\",\"rb\") as f:\n            B_reg=pickle.load(f)\n        with open(self.DAResultFolder+\"P_bda.pkl\",\"rb\") as f:\n            P_bda=pickle.load(f)\n        return B_reg,P_wda,P_bda\n\nclass WINDUC:\n    \n    def __init__(self,UCResultFolder,t,numUnit):\n        self.UCResultFolder=UCResultFolder\n        self.t=t\n        self.numUnit=numUnit\n        \n    def readSolU(self):\n        \"\"\"\n        Purpose: read the on/off states of the wind turbines in the previous cross-section from file\n        Input: filePath, the path of the file\n        Returns: a list [u1, u2, ... ui]\n        \"\"\"\n        with open(self.UCResultFolder+str(self.t-1)+\".txt\") as f:\n            tmp1=f.read()\n        tmp2=tmp1.split(\"\\n\")\n        tmp3=[float(tmp2[i]) for i in range(len(tmp2))]\n        return tmp3\n\n    def windUC(self,P_all,interval):\n        \"\"\"\n        Wind-turbine unit commitment\n        \n        Parameters\n        ----------\n        t: the current cross-section\n        P_all : real-time wind power of the current cross-section at 2-s resolution\n        interval: length of one cross-section in minutes (should divide 60 evenly)\n        Returns\n        -------\n        the wind-turbine cost of the current cross-section\n        \n        \"\"\"\n        t=self.t\n        U=self.numUnit\n        model=Model(\"UC in halfHour\"+str(t))\n        \n        if t==0: # initial state: all units on\n            u_lastTime=[1 for i in range(U)] # on/off states of the units at the previous step\n        else:\n            # fetch the on/off states of the previous step\n            u_lastTime=self.readSolU()\n        \n        # add the variables\n        print(\"正在添加第\"+str(t)+\"/\"+str(24*(60/interval)-1)+\"个风机组合优化模型变量\")\n        x,y,u,p={},{},{},{}\n        for i in range(U):\n            u[i]=model.addVar(vtype=\"B\",name=\"u(%s)\"%i)\n            for k in range(int(interval*30)):\n                x[i,k]=model.addVar(vtype=\"B\",name=\"x(%s,%s)\"%(i,k))\n                y[i,k]=model.addVar(vtype=\"B\",name=\"y(%s,%s)\"%(i,k))\n                p[i,k]=model.addVar(vtype=\"C\",name=\"p(%s,%s)\"%(i,k),lb=0,ub=1.5)\n        \n        z=model.addVar(\"z\") # linearised expression of the objective\n        \n        # set the objective\n        model.setObjective(z,sense=\"minimize\")\n        model.addCons(\n            quicksum(quicksum(u[i]*(x[i,k]*a+y[i,k]*b)*2 for k in range(int(interval*30)))+u[i]*(1-u_lastTime[i])*c + u_lastTime[i]*(1-u[i])*d for i in range(U))==z \n            )\n        \n        # constraints\n        print(\"正在添加第\"+str(t)+\"/\"+str(24*(60/interval)-1)+\"个风机组合优化模型约束\")\n        for i in range(U):\n            for k in range(int(interval*30)):\n                model.addCons(x[i,k]+y[i,k]==1) # each unit is in exactly one power segment\n                model.addCons(x[i,k]*(p[i,k]-1.4)<=0) # unit is in power segment 1\n                model.addCons(y[i,k]*(p[i,k]-1.4)>=0) # unit is in power segment 2\n                model.addCons(p[i,k]<=W_perUnit[t,k]) # unit power below the forecast maximum output\n                # force the unit power to 0 when u[i]=0\n                model.addCons(p[i,k]<=0+u[i]*1.5)\n                model.addCons(p[i,k]>=0-u[i]*1.5)\n        \n        for k in range(int(interval*30)): \n            model.addCons(quicksum(p[i,k] for i in range(U))==P_all[k]) # the committed units must reproduce the wind dispatch\n        \n        # solve the model\n        print(\"正在求解第\"+str(t)+\"/\"+str(24*(60/interval)-1)+\"个风机组合优化模型\")\n        start=time.time()\n        model.optimize()\n        sol = model.getBestSol()\n        print(\"第\"+str(t)+\"/\"+str(24*(60/interval)-1)+\"个风机组合优化模型求解完成,用时\"+str(-start+time.time())+\"秒\")\n        \n        # export the solution\n        with open(self.UCResultFolder+str(t)+\".txt\",\"w\") as f:\n            for i in range(U):\n                f.write(str(sol[u[i]]))\n                if i < U-1:\n                    f.write(\"\\n\")\n        # the cost of this cross-section is the objective value of the committed schedule\n        return model.getObjVal()\n\n
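# A minimal usage sketch of WINDUC (commented out; the values below are hypothetical):\n# uc = WINDUC(\"result/D/windUC/windUC_hour\", t=0, numUnit=20)\n# cost_0 = uc.windUC(P_all=np.ones(900)*10.0, interval=30)  # 900 points of 2-s wind power\n\n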
def rollOut(t,e):\n    \"\"\"\n    One-step rollout for state (t, e): enumerate every action (b_reg, p_wda, p_bda),\n    score it as the immediate reward plus the value function of the successor state,\n    and return the best action found.\n    \"\"\"\n    bestsumR=-np.inf\n    bestb_reg,bestp_wda,bestp_bda=0,0,0\n    bestImmediateR=-1000000\n    bestE_next=E[e]\n    for b_reg in range(B_reg.size):\n        for p_wda in range(P_wda.size):\n            for p_bda in range(P_bda.size):\n                if P_wda[p_wda] > W_rt_min[t]:\n                    immediateR=-1000000\n                    cumR=-1000000\n                else:\n                    # up/down regulation capacity reserved by the wind farm and the ESS\n                    Pw_rup=min(W_rt_min[t]-P_wda[p_wda],maxPwUp)\n                    Pw_rdn=min(P_wda[p_wda],maxPwDown)\n                    Pb_rup=Pb_max-P_bda[p_bda]\n                    Pb_rdn=Pb_max+P_bda[p_bda]\n                    \n                    # up/down regulation capacity constraint\n                    if Pw_rup+Pb_rup < B_reg[b_reg] or Pw_rdn+Pb_rdn < B_reg[b_reg]:\n                        immediateR=-1000000\n                        cumR=-1000000\n                    else:\n                        # ESS regulation power and charge/discharge cost\n                        cost_b=0\n                        P_breg=0.0\n                        E_next=E[e]\n                        for k in range(1800):\n                            if regD_rt[t,k]*B_reg[b_reg]>min(W_rt[t,k]-P_wda[p_wda],maxPwUp): # up-regulation demand exceeds the wind up-regulation capacity\n                                P_breg=regD_rt[t,k]*B_reg[b_reg]-min(W_rt[t,k]-P_wda[p_wda],maxPwUp)+eps_wind_reg[t,k] # the ESS covers the shortfall\n                            elif regD_rt[t,k]*B_reg[b_reg]<-Pw_rdn: # down-regulation demand exceeds the maximum down-regulation capacity\n                                P_breg=regD_rt[t,k]*B_reg[b_reg]+Pw_rdn+eps_wind_reg[t,k] # the ESS charges to cover the shortfall\n                            else:\n                                P_breg=eps_wind_reg[t,k] # the ESS covers the wind regulation error\n                            \n                            # ESS charge/discharge cost\n                            P_b=P_bda[p_bda]+P_breg\n                            if P_b>=0: # the net ESS direction is discharging\n                                deltaE=((P_b)/eta)*0.0005556 # energy change over these 2 s\n                                cost_b+=((deltaE*Cb_in)/(2*N_max*E_max)) # discharging cost over these 2 s\n                            elif P_b < 0: # the net ESS direction is charging\n                                deltaE=((P_b)*eta)*0.0005556 # energy change over these 2 s\n                                cost_b+=(((-deltaE)*Cb_in)/(2*N_max*E_max)) # charging cost over these 2 s\n                            \n                            # ESS energy update, numerical integration at 2-s resolution\n                            E_next=E_next-deltaE\n                        \n                        # ESS capacity constraint\n                        if E_next < E_min or E_next > E_max: # the stored energy went out of bounds\n                            immediateR=-1000000\n                            cumR=-1000000\n                        else: \n                            # immediate reward of the current action\n                            r_eng=lambda_eng[t]*(P_bda[p_bda]+P_wda[p_wda]) # energy-market income\n                            r_reg=(lambda_cap[t]+lambda_mil[t]*R[t])*K_aver*B_reg[b_reg] # regulation-market income\n                            immediateR=r_eng+r_reg-cost_b\n                            \n                            # find which discrete state E_next corresponds to\n                            E_next=round(E_next,1)\n                            for m in range(E.size):\n                                if abs(E_next - E[m])<=0.01:\n                                    e_next=m\n                            \n                            # time transition\n                            if t==23:\n                                t_next=t\n                            elif t<23:\n                                t_next=t+1\n                            \n                            # cumulative reward of following pi from the successor state onwards\n                            cumR=V[t_next,e_next]\n                        \n                        # total score\n                        sumR=immediateR+cumR\n                        \n                        # keep the best action found so far\n                        if sumR > bestsumR:\n                            \n                            bestsumR=sumR\n                            \n                            bestb_reg=b_reg\n                            bestp_wda=p_wda\n                            bestp_bda=p_bda\n                            \n                            bestImmediateR=immediateR\n                            bestE_next=E_next\n    \n    return bestb_reg,bestp_wda,bestp_bda,bestImmediateR,bestE_next\n\n
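# Note on the scoring above: infeasible actions (energy basepoint above the forecast\n# minimum, insufficient regulation capacity, or stored energy outside [E_min, E_max])\n# receive a large negative score, so the best-action comparison can never select them.\n\n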
#%% Parameter setup\nDAResultFolder=\"result/D-1/\"\nRTDataFolder=\"data/D/\"\ndataloader=DataLoader(DAResultFolder,RTDataFolder)\n\n# wind farm parameters\ninterval=30 # unit-commitment interval in minutes\nPw_max=30 # rated capacity, MW \nnumUnit=20 # number of wind turbines\neps_wind_reg=dataloader.readWindRegEps(\"eps_wind_reg.pkl\")\nW_perUnit,W_rt,W_rt_min=dataloader.readWind(\"west_wind_farm.xlsx\", U=numUnit,interval=interval)\nclimbingUpRate=0.2 # ramp-up rate\nclimbingDownRate=0.15 # ramp-down rate\nmaxPwUp=climbingUpRate*Pw_max # maximum upward power change in 1 h\nmaxPwDown=climbingDownRate*Pw_max # maximum downward power change in 1 h\nCw_in=8000000 # purchase cost, CNY\nCw_in_perUnit=Cw_in\na=(12*Cw_in_perUnit)/(1.3e8*60) # operating cost of power segment 1\nb=(17*Cw_in_perUnit)/(9.0e7*60) # operating cost of power segment 2\nc=(12*Cw_in_perUnit)/(1.3e8) # start-up cost\nd=(2.5*Cw_in_perUnit)/(1.3e8) # shut-down cost\n\n\n# battery ESS parameters\nPb_max=6 # maximum charge/discharge power, MW\nE_max=6.0 # rated capacity, MWh\nE_min=0.9\neta=0.95 # charge/discharge efficiency\nN_max=5000 # maximum number of cycles at 100% DOD\nCb_in=10800000 # initial investment cost, 10.8 million CNY\n\n\n# actual regulation signal\nregD_rt=dataloader.readRegD(\"regD.xlsx\")\n\n# market parameters\nis_dollar=1 # price unit flag: 0 for CNY, 1 for USD\nexchangeRate=6.86 # USD exchange rate\nprice_dataset=pd.read_excel(\"data/market price/From_IEEE2017.xlsx\") # energy and regulation market prices\nlambda_eng=np.float32(price_dataset.values[1::1,1])*(1+is_dollar*(exchangeRate-1)) # day-ahead energy price, CNY/MWh\n\n# regulation market parameters\nlambda_cap=np.float32(price_dataset.values[1::1,3])*(1+is_dollar*(exchangeRate-1)) # regulation capacity price, CNY/MWh\nlambda_mil=np.float32(price_dataset.values[1::1,5])*(1+is_dollar*(exchangeRate-1)) # regulation mileage price, CNY/MWh\nk1=5 # performance score k1\nk2=0 # performance score k2\nk3=0.95 # performance score k3\nK_aver=np.float32(0.25*(2*k1+k2+k3)*0.95) # composite performance score\n# per-unit regulation mileage of each hour\nR=np.zeros(24,dtype='float32') # MW\nfor t in range(24):\n    tmp=0\n    for i in range(1800):\n        if i< 1799:\n            tmp+=abs(regD_rt[t,i+1]-regD_rt[t,i])\n    R[t]=tmp/1800\n#%% MDP elements\n\nPenalty=-10000000 # penalty term\n\nT,E=dataloader.readStateSpace()\nB_reg,P_wda,P_bda=dataloader.readActionSpace()\nV=dataloader.readV()\n\n# build a best-policy table for looking up the optimal action in state (t, e)\n\"\"\"\nbestPi=[[i for j in range(E.size)]for i in range(T.size)]\nfor t in range(T.size):\n    for e in range(E.size):\n        bestPi[t][e]=np.unravel_index(np.argmax(pi[t,e,:,:,:]),pi[t,e,:,:,:].shape)\n\"\"\"\n\n#%% Run the test\n\ne=np.where(E==3)[0][0] # initial ESS state \nsoc_record=np.zeros(24,dtype='float32') # hourly ESS SOC record\nB_ENG=np.zeros(24,dtype='float32') # hourly energy bid record\nB_REG=np.zeros(24,dtype='float32') # hourly regulation capacity bid record\nP_WDA=np.zeros(24,dtype='float32') # hourly wind energy basepoint record\nP_BDA=np.zeros(24,dtype='float32') # hourly ESS energy basepoint record\nP_WREG=np.zeros((24,1800),dtype='float32') # per-2s wind regulation power record for every hour\nP_BREG=np.zeros((24,1800),dtype='float32') # per-2s ESS regulation power record for every hour\nI_ENG=np.zeros(24,dtype='float32') # hourly energy-market income\nI_REG=np.zeros(24,dtype='float32') # hourly regulation-market income\nI_SUM=np.zeros(24,dtype='float32') # hourly total income\nC_BES=np.zeros(24,dtype='float32') # hourly ESS cost\nC_WIND=np.zeros(24,dtype='float32') # hourly wind cost\nP_WRUP=np.zeros(24,dtype='float32') # reserved wind up-regulation capacity \nsumR=0 # cumulative income\n\nfor t in range(T.size):\n    \n    # timing\n    start=time.time()\n    \n    # record the hourly ESS state\n    soc_record[t]=E[e]/E_max\n    \n    \n    # rolling optimisation with the rollout algorithm\n    b_reg,p_wda,p_bda,immediateR,E_next=rollOut(t,e)\n    \n    # show the algorithm runtime\n    print(\"用时: {} s\".format(time.time()-start))\n    \n    # record the action\n    B_REG[t]=B_reg[b_reg]\n    P_WDA[t]=P_wda[p_wda]\n    P_BDA[t]=P_bda[p_bda]\n    \n    # record the reserved up-regulation capacity\n    P_WRUP[t]=W_rt_min[t]-P_WDA[t]\n    \n    # record the wind regulation power\n    for k in range(1800):\n        Pw_rup=min(W_rt[t,k]-P_wda[p_wda],maxPwUp)\n        Pw_rdn=min(P_wda[p_wda],maxPwDown)\n        \n        if regD_rt[t,k]*B_reg[b_reg]>=Pw_rup: # up-regulation demand exceeds the wind up-regulation capacity\n            P_WREG[t,k]=Pw_rup\n            P_BREG[t,k]=regD_rt[t,k]*B_reg[b_reg]-Pw_rup+eps_wind_reg[t,k]\n            \n        elif regD_rt[t,k]*B_reg[b_reg]<=-Pw_rdn: # down-regulation demand exceeds the wind down-regulation capacity\n            P_WREG[t,k]=-Pw_rdn\n            P_BREG[t,k]=regD_rt[t,k]*B_reg[b_reg]+Pw_rdn+eps_wind_reg[t,k] \n        else:\n            P_WREG[t,k]=regD_rt[t,k]*B_reg[b_reg] # track the regulation demand signal\n            P_BREG[t,k]=eps_wind_reg[t,k] \n    \n    # record the income\n    B_ENG[t]=P_WDA[t]+P_BDA[t] \n    I_ENG[t]=lambda_eng[t]*B_ENG[t] # energy-market income\n    I_REG[t]=(lambda_cap[t]+lambda_mil[t]*R[t])*K_aver*B_REG[t] # regulation-market income\n    I_SUM[t]=immediateR # total income\n    \n    # state transition\n    E_next=round(E_next,1)\n    e_next=np.where(E==E_next)[0][0]\n    e=e_next\n    \n\n# save the income results\npickle.dump(I_ENG,open(\"result/D/I_ENG.pkl\",\"wb\"))\npickle.dump(I_REG,open(\"result/D/I_REG.pkl\",\"wb\"))\npickle.dump(C_BES,open(\"result/D/C_BES.pkl\",\"wb\"))\n\n
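# The dumped results can be reloaded later for analysis, e.g. (a small sketch):\n# with open(\"result/D/I_ENG.pkl\", \"rb\") as f:\n#     I_ENG_loaded = pickle.load(f)\n\n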
#%% Visualise the results\n\nplt.rcParams['font.sans-serif']=['SongNTR'] # render CJK labels correctly\nplt.rcParams['axes.unicode_minus']=False # render minus signs correctly\n\n# plotting\nplt.bar(np.arange(24),P_WDA,width=0.6,label=\"Wind Energy Basepoint\",color=\"#2878b5\")\nplt.bar(np.arange(24),P_WRUP,width=0.6,label=\"Regulation Capacity Reserved\",color=\"#9ac9db\",bottom=P_WDA)\nplt.plot(np.arange(24),P_BDA,label=\"ESS Energy Basepoint \",color=\"#b8e994\",linewidth=2,marker=\".\")\nplt.plot(np.arange(0,24),W_rt_min,color=\"#c82423\",label=\"Wind Power Forecast\",linewidth=2,marker=\".\")\nplt.plot(np.arange(24),B_ENG,color=\"#f8ac8c\",label=\"Energy Market Bidding\",linewidth=2,marker=\".\")\nplt.plot(np.arange(24),B_REG,color=\"#ff8884\",label=\"Regulation Market Bidding\",linewidth=2,marker=\".\")\n\n# remove the top and right frame lines\nax=plt.gca() # gca: get current axis\n# hide the right and top spines\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\n\n# configure the legend\nplt.legend(loc=\"upper right\",ncol=2,frameon=False)\n# add the x and y axis labels\nplt.xlabel(\"Time/h\",size=16,font='SongNTR')\nplt.ylabel(\"Power/MW\",size=16,font='SongNTR')\nplt.xticks(np.arange(0,24,2),family=\"Times New Roman\")\nplt.yticks(np.arange(0,20,2),family=\"Times New Roman\")\n\nplt.grid()\nplt.savefig(\"figure/D/result.png\",dpi=500)\n\nplt.show()\n\n#%% Operating state of a single cross-section\nP_WREG=P_WREG.reshape(24,1800)\nP_BREG=P_BREG.reshape(24,1800)\nfor t in range(2):\n    print(\"正在生成第\"+str(t)+\"小时断面风储运行状态图(\"+str(t)+\"/23)\")\n    plt.figure()\n    P_WDA_draw=np.ones(1800,dtype='float32')*P_WDA[t]\n    plt.plot(np.arange(1800),W_rt[t,:],label=\"Maximum Power Output\",color=\"#fdcb6e\")\n    plt.plot(np.arange(1800),B_REG[t]*regD_rt[t,:]+P_WDA[t],label=\"Actual Regulation Demand\",color=\"#e17055\")\n    plt.bar(np.arange(1800),P_WDA_draw,width=1,label=\"Wind Energy Basepoint\",color=\"#dfe6e9\")\n    plt.bar(np.arange(1800),P_WREG[t,:],width=1,bottom=P_WDA_draw,label=\"Wind Regulation Power\",color=\"#74b9ff\")\n    plt.bar(np.arange(1800),P_BREG[t,:],width=1,bottom=P_WDA_draw+P_WREG[t,:],label=\"ESS Regulation Power\",color=\"#00b894\") \n    #plt.title(\"第\"+str(t)+\"个小时风储运行状态(MW)\")\n    plt.xlabel(\"Time/s\",size=15,font='SongNTR')\n    plt.ylabel(\"Power/MW\",size=15)\n    plt.xticks(np.arange(0,1800+150,150),family=\"Times New Roman\")\n    plt.yticks(np.arange(0,35,5),family=\"Times New Roman\")\n    plt.grid()\n    plt.legend(frameon=False)\n    path=\"figure/D/Cross-section\"+str(t)\n    plt.savefig(path,dpi=500)\n#%% Wind-turbine unit commitment\nUCResultFolder=\"result/D/windUC/windUC_hour\"\nP_WREG=P_WREG.reshape(int((60/interval)*24),int(interval*30))\nCw=0\nfor j in range(int((60/interval)*24)):\n    windUCOptimizer=WINDUC(UCResultFolder,j,numUnit)\n    P_all=P_WDA[(j*interval)//60]+P_WREG[j,:] # total wind power at 2-s resolution; (j*interval)//60 is the hour cross-section j falls in\n    Cw+=windUCOptimizer.windUC(P_all,interval) # commit the wind turbines\n    \n#%% Visualise the unit-commitment result\nUCResultFolder=\"result/D/windUC/windUC_hour\"\nX=np.ones((numUnit,72)) # rows: unit number, columns: commitment intervals\nX[1,2]=0\n'''\nfor t in range(72):\n    with open(UCResultFolder+str(t)+\".txt\") as f:\n        tmp1=f.read()\n    tmp2=tmp1.split(\"\\n\")\n    tmp3=[float(tmp2[i]) for i in range(len(tmp2))]\n    X[:,t]=tmp3\n'''  \n    \nplt.imshow(X,cmap=\"gray\",origin=\"upper\")\nplt.grid()  \nplt.xlabel(\"Time/20min\",size=12,font='SongNTR')\nplt.ylabel(\"Unit Number\",size=12,font='SongNTR')\nplt.xticks(np.arange(0,72+1,1),font='SongNTR')\nplt.yticks(np.arange(0,numUnit+1,2),font='SongNTR')\n\nplt.savefig(\"figure/windUC.png\",dpi=500)  \n\n#%% Income statistics\n\nprint(\"能量市场收益:\"+str(I_ENG.sum())+\"元\")\nprint(\"调频市场收益:\"+str(I_REG.sum())+\"元\")\nprint(\"储能成本:\"+str(-(I_SUM.sum()-I_REG.sum()-I_ENG.sum()))+\"元\")\nprint(\"风电成本\"+str(Cw)+\"元\")\nprint(\"总收益:\"+str(I_SUM.sum()-Cw)+\"元\")\n    ","repo_name":"BigdogManLuo/Wind-ESS-Optimization-in-Frequency-Regulation-Market","sub_path":"base/02_RealTimeOpt.py","file_name":"02_RealTimeOpt.py","file_ext":"py","file_size_in_byte":20696,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39175386194","text":"from autocorrect import Speller\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom rdflib.plugins.sparql.parser import SelectQuery\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom articles.es_documents import ArticleDocument\nfrom articles.models import WebResource\nfrom rdf_ontologies.RDF_graph import my_graph\nfrom search.constants import bi_encoder\nfrom search.faiss_index import text_index\nfrom search.semantic_search import search, search_faiss\nfrom search.serializers import WebResourceSerializer\nfrom search.topics_separator import get_web_resource_tags\nfrom search.utils import get_similar_web_resources\n\n\n# Create your views here.\nclass Search(TemplateView):\n    template_name = 'search/results_list.html'\n\n    # def perform_spellcheck\n    def get_results(self, query, top_k):\n        return search_faiss(query, top_k=top_k, index=text_index, model=bi_encoder)\n\n    def get_context_data(self, **kwargs):\n        NIKOMY_NE_KAZHI = 1\n        context = super().get_context_data(**kwargs)\n        query = self.request.GET.get('query')\n        context['page'] = page = self.request.GET.get('page', 1)\n        context['query'] = query\n        if query:\n            # results = search(query, top_k=15, model=bi_encoder)\n            top_k = 60 + NIKOMY_NE_KAZHI\n            results = self.get_results(query, top_k)\n        else:\n            results = []\n\n        context['page'] = page = int(page)\n        context['results'] = results[(page - 1) * 15 + 
NIKOMY_NE_KAZHI]\n context['num_pages'] = len(results) // 15\n context['num_pages_range'] = range(1, context['num_pages'] + 1)\n if query:\n spell = Speller('uk')\n corrected_query = spell(query)\n if corrected_query != query:\n context['corrected_query'] = corrected_query\n return context\n\n\n# class ImagesSearch(TemplateView):\nclass SearchBetweenSimilar(Search):\n\n def get_results(self, query, top_k):\n results = search_faiss(query, top_k=61, index=text_index, model=bi_encoder)\n urls = [f'<{result.url}>' for result in results]\n sparql_query = SelectQuery(\n f\"\"\"\n PREFIX voc: \n SELECT ?url\n WHERE {{\n ?url voc:має_спільну_тему_з IN ({urls})\n }}\n \"\"\"\n )\n results = my_graph.query(sparql_query)\n results = [ArticleDocument.get(id=result[0]) for result in results]\n return results\n\n\nclass FindTags(APIView):\n def get(self, *args, **kwargs):\n web_resource_id = kwargs['wr_id']\n wr = WebResource.objects.get(pk=web_resource_id)\n tags = get_web_resource_tags(wr)\n return Response({'tags': tags})\n\n\nclass SimilarWebResources(ListAPIView):\n serializer_class = WebResourceSerializer\n\n def get_queryset(self):\n web_resource_id = self.kwargs['wr_id']\n wr = WebResource.objects.get(pk=web_resource_id)\n results = get_similar_web_resources(wr)\n return results\n\n\nclass GraphSearch(TemplateView):\n template_name = 'search/graph.html'\n\n\ndef get_connected_objects(graph, subject):\n objects = graph.predicate_objects(subject)\n return objects\n\n\nclass GraphSearchAPI(APIView):\n def get(self, *args, **kwargs):\n query = self.request.GET.get('query')\n # get all subjects/predicates/objects that contain query and all connected nodes for this nodes\n sparql_query = f\"\"\"\n PREFIX voc: \n SELECT ?subject ?predicate ?object\n WHERE {{\n ?subject ?predicate ?object .\n FILTER (regex(?subject, \"{query}\", \"i\") || regex(?predicate, \"{query}\", \"i\") || regex(?object, \"{query}\", \"i\"))\n }}\n \"\"\"\n results = my_graph.query(sparql_query)\n\n nodes = list()\n connections = list()\n for result in results:\n current_subject = result[0]\n d3_node = {'id': current_subject, 'group': current_subject, 'value': 2}\n if not d3_node in nodes:\n nodes.append(d3_node)\n d3_node_index = nodes.index(d3_node)\n # d3_connection = {'source': result[0], 'target': result[2], 'value': result[1]}\n # if not d3_connection in connections:\n # connections.add(d3_connection)\n connected_objects = get_connected_objects(my_graph, current_subject)\n for pred, obj in connected_objects:\n object_node = {'id': obj, 'group': obj, 'value': 2}\n if not object_node in nodes:\n nodes.append(object_node)\n object_node_index = nodes.index(object_node)\n d3_connection = {'source': d3_node_index, 'target': object_node_index, 'value': 2, 'predicate': pred}\n if not d3_connection in connections:\n connections.append(d3_connection)\n\n return Response({'nodes': list(nodes), 'links': list(connections)})\n","repo_name":"andriyprodan/oogleg","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3898341519","text":"import random\n\nintentos = 0\n\nprint(\"Hola! 
¿Cual es tu nombre?\")\nmiNombre = input()\n\nnumero = random.randint(1,50)\nprint(\"Bueno, \" + miNombre + \", estoy pensando un numero entre 1 y 50\")\n\nfor intentos in range(6):\n    print(\"Adivina.\")\n    adiv = input()\n    adiv = int(adiv)\n\n    if adiv < numero:\n        print(\"Muy bajo\")\n\n    if adiv > numero:\n        print(\"Muy alto\")\n\n    if intentos == 3:\n        if (numero % 2 == 0):\n            print(\"El numero es par\")\n        else:\n            print(\"El numero es impar\")\n\n    if adiv == numero:\n        break\n\nif adiv == numero:\n    intentos = str(intentos + 1)\n    print(\"Bien, \" + miNombre + \"! Adivinaste mi numero en \" + intentos + \" intentos\")\n\nif adiv != numero:\n    numero = str(numero)\n    print(\"Perdiste, el numero que estaba pensado era \" + numero + \".\")\n","repo_name":"franala/python2020","sub_path":"Practica2/ejercicio9.py","file_name":"ejercicio9.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2108469384","text":"'''\nIterator:\n--> Iterator in Python is simply an object that can be iterated upon.\nAn object which will return data, one element at a time.\nIterating Through an Iterator:\n--> We use the next() function to manually iterate through all the items of an\niterator. When we reach the end and there is no more data to be returned,\nit will raise the StopIteration Exception.\n'''\nmy_list = [4, 7, 0, 3]\n# get an iterator using iter()\nmy_iter = iter(my_list)\n# iterate through it using next()\nprint(next(my_iter))\nprint(next(my_iter))\nprint(my_iter.__next__())  # Alternative way to use next() Function\nprint(my_iter.__next__())\n# This will raise error, no items left\n# next(my_iter)\n\n# Working of for loop for Iterators:\n# a for loop builds one iterator with iter() and then calls next() on it repeatedly\nlist1 = [2, 4, 6, 8, 10]\niter1 = iter(list1)\n\nprint(next(iter1))\nprint(next(iter1))\nprint(next(iter1))\nprint(next(iter1))\nprint(next(iter1))\n\n# Building Custom Iterator:\n\n\nclass PowTwo:\n    \"\"\"Class to implement an iterator\n    of powers of two\"\"\"\n\n    def __init__(self, max=0):\n        self.max = max\n\n    def __iter__(self):\n        self.n = 0\n        return self\n\n    def __next__(self):\n        if self.n <= self.max:\n            result = 2 ** self.n\n            self.n += 1\n            return result\n        else:\n            # as the notes above say, signal the end of the iteration\n            raise StopIteration\n\n\n# create an object\nnumbers = PowTwo(3)\n\n# create an iterable from the object\ni = iter(numbers)\n\n# Using next to get to the next iterator element\nprint(next(i))\nprint(next(i))\nprint(next(i))\nprint(next(i))\n# print(next(i))\n\n# Python Infinite Iterator:\n\n\nclass InfIter:\n    \"\"\"Infinite iterator to return all\n    odd numbers\"\"\"\n\n    def __iter__(self):\n        self.num = 1\n        return self\n\n    def __next__(self):\n        num = self.num\n        self.num += 2\n        return num\n\n\na = iter(InfIter())\nprint(next(a))\nprint(next(a))\nprint(next(a))\nprint(next(a))","repo_name":"Karthi2245/MCS_0038_Core_Python.","sub_path":"_17_iterator_and_generator/My_Notes/_01_Iterator.py","file_name":"_01_Iterator.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26888378039","text":"try:\n    import pyurdme\nexcept ImportError:\n    import warnings\n    warnings.warn(\"Package 'pyurdme' cannot be found and is required for PySB/pyURDME simulations. 
See XXX for further details.\")\nfrom pysb.simulate import Simulator\nfrom pysb.bng import generate_equations\nimport re\nimport sympy\nimport numpy as np\nimport itertools\nimport pysb\n\ndef _translate_parameters(model, param_values=None):\n # Error check\n if param_values is not None and len(param_values) != len(model.parameters):\n raise Exception(\"len(param_values) must equal len(model.parameters)\")\n unused = model.parameters_unused()\n param_list = (len(model.parameters)-len(unused)) * [None]\n count = 0\n for i,p in enumerate(model.parameters):\n if p not in unused:\n if param_values is not None:\n val=param_values[i]\n else:\n val=p.value\n param_list[count] = pyurdme.Parameter(name=p.name, expression=val)\n count += 1\n return param_list\n\ndef _translate_species(model, initial_dist, dif0=None, y0=None):\n # Error check\n \n \n if y0 and len(y0) != len(model.species):\n raise Exception(\"len(dif0) and len(y0) must equal len(model.species)\") \n species_list = len(model.species) * [None]\n for i,sp in enumerate(model.species):\n val = 0.\n if dif0:\n val=dif0[i]\n else:\n for id in model.diffusivities:\n if str(id[0]) == str(sp):\n val=id[1]\n species_list[i] = pyurdme.Species(name=\"__s%d\" % i, diffusion_constant=val)\n\n distribution_list = len(initial_dist.keys()) * [None]\n for i,sp in enumerate(model.species):\n if str(sp) in initial_dist.keys():\n val = 0.\n if y0:\n val=y0[i]\n else:\n for ic in model.initial_conditions:\n if str(ic[0]) == str(sp):\n val=np.round(ic[1].value)\n distribution_list[i] = [initial_dist[str(sp)],{'__s%d'%i:val}]\n else: pass\n\n return species_list, distribution_list\n\n \ndef _translate_reactions(model):\n rxn_list = len(model.reactions) * [None]\n for n,rxn in enumerate(model.reactions):\n reactants = {}\n products = {}\n # reactants \n for r in rxn[\"reactants\"]:\n r = \"__s%d\" % r\n if r in reactants:\n reactants[r] += 1\n else:\n reactants[r] = 1\n # products\n for p in rxn[\"products\"]:\n p = \"__s%d\" % p\n if p in products:\n products[p] += 1\n else:\n products[p] = 1\n # replace terms like __s**2 with __s*(__s-1)\n rate = str(rxn[\"rate\"])\n pattern = \"(__s\\d+)\\*\\*(\\d+)\"\n matches = re.findall(pattern, rate)\n for m in matches:\n repl = m[0]\n for i in range(1,int(m[1])):\n repl += \"*(%s-%d)\" % (m[0],i)\n rate = re.sub(pattern, repl, rate)\n # expand expressions\n for e in model.expressions:\n rate = re.sub(r'\\b%s\\b' % e.name, '('+sympy.ccode(e.expand_expr(model))+')', rate)\n # replace observables w/ sums of species\n for obs in model.observables:\n obs_string = ''\n for i in range(len(obs.coefficients)):\n if i > 0: obs_string += \"+\"\n if obs.coefficients[i] > 1: \n obs_string += str(obs.coefficients[i])+\"*\"\n obs_string += \"__s\"+str(obs.species[i])\n if len(obs.coefficients) > 1: \n obs_string = '(' + obs_string + ')'\n rate = re.sub(r'%s' % obs.name, obs_string, rate)\n # create reaction\n rxn_list[n] = pyurdme.Reaction(name = 'rule%s' % str(rxn[\"rule\"]),\\\n reactants = reactants,\\\n products = products,\\\n propensity_function = rate)\n return rxn_list\n \ndef _translate(model, mesh, initial_dist, param_values=None, dif0=None, y0=None):\n \n \n urdme_model = pyurdme.URDMEModel(model.name)\n urdme_model.add_species(_translate_species(model, initial_dist, dif0, y0)[0])\n urdme_model.mesh = mesh\n urdme_model.add_parameter(_translate_parameters(model, param_values))\n urdme_model.add_reaction(_translate_reactions(model))\n initial_d_info = _translate_species(model, initial_dist, dif0, y0)[1]\n for id in 
initial_d_info:\n getattr(urdme_model, id[0][0])({urdme_model.get_species(id[1].keys()[0]):id[1].values()[0]}, id[0][1])\n \n return urdme_model\n\nclass PyurdmeSimulator(Simulator):\n \n def __init__(self, model, tspan=None, mesh=None, initial_dist=None, cleanup=True, verbose=False):\n super(PyurdmeSimulator, self).__init__(model, tspan, verbose)\n generate_equations(self.model, cleanup, self.verbose)\n \n def run(self, tspan=None, mesh = None, initial_dist=None, param_values=None, dif0=None, y0=None, solver = 'nsm'):\n\n\n if tspan is not None:\n self.tspan = tspan\n elif self.tspan is None:\n raise Exception(\"'tspan' must be defined.\")\n \n if mesh is not None:\n self.mesh = mesh\n elif self.mesh is None:\n raise Exception(\"Mesh must be defined.\")\n \n if initial_dist is not None:\n self.initial_dist = initial_dist\n elif self.initial_dist is None:\n raise Exception(\"Initial distribution of proteins must be defiened\") \n \n \n urdme_model = _translate(self.model, self.mesh, self.initial_dist, param_values, dif0, y0)\n urdme_model.timespan(self.tspan)\n \n result = urdme_model.run(report_level=1)\n \n # species\n \n trajectories = np.zeros((len(result['sol'].keys()),len(result.get_timespan())))\n for i, sp in enumerate(result['sol'].keys()):\n trajectories[i] = np.sum(result.get_species(sp),axis=1)\n trajectories = trajectories.T\n \n self.y = trajectories\n \n # output time points (in case they aren't the same tspan, which is possible in BNG)\n time = result.get_timespan()\n self.tout = np.tile(time,(len(self.y),1))\n\n # observables and expressions\n self._calc_yobs_yexpr(param_values)\n \n self.simulation = result\n \n def _calc_yobs_yexpr(self, param_values=None):\n super(PyurdmeSimulator, self)._calc_yobs_yexpr()\n \n def get_yfull(self):\n return super(PyurdmeSimulator, self).get_yfull() \n\n\ndef run_pyurdme(model, tspan, mesh, initial_dist, param_values=None, dif0=None, y0=None, verbose=True):\n \"\"\"Runs pyurdme using PySB\n the initial distribution should be a dict:\n initial_dist={DISC(bf=None):['set_initial_condition_scatter', *arguments, i.e point=[0.5,0.5]]}\"\"\"\n sim = PyurdmeSimulator(model, verbose=verbose)\n sim.run(tspan, mesh, initial_dist, param_values , dif0, y0)\n return sim.simulation\n","repo_name":"LoLab-MSM/pysb-stochkit_pyurdme","sub_path":"pysb_pyurdme.py","file_name":"pysb_pyurdme.py","file_ext":"py","file_size_in_byte":7088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72963459075","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nFigure 3: Flow-reconstructed Hernquist accelerations.\n\nCreated: May 2021\nAuthor: A. P. 
Naik\n\"\"\"\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom os.path import exists\n\nsys.path.append(\"../src\")\nfrom cbe import calc_accel_CBE\nfrom utils import diff_DF, sample_velocities\nfrom constants import M_sun, kpc, G\nfrom ml import load_flow, calc_DF_ensemble\n\n\nif __name__ == \"__main__\":\n\n # check if plot data exists, otherwise generate\n dfile = \"fig3_data.npz\"\n if not exists(dfile):\n\n # hernquist params and scalings\n M = 1e+10 * M_sun\n a = 5 * kpc\n u_q = 10 * a\n u_p = np.sqrt(2 * G * M / a)\n\n # set up grid of (real space) points along x axis\n Nx = 50\n x = np.linspace(0, 20, Nx + 1)[1:] * a\n y = np.zeros_like(x)\n z = np.zeros_like(x)\n pos = np.stack((x, y, z), axis=-1)\n\n # exact acceleration\n a_exact = -(G * M / a**2) / (1 + x / a)**2\n\n # load flows\n n_flows = 30\n flows0 = []\n flows1 = []\n flows10 = []\n flowsaniso = []\n for i in range(n_flows):\n fname = f\"../nflow_models/hq_iso_orig/{i}_best.pth\"\n flows0.append(load_flow(fname, 6, 8, 64))\n fname = f\"../nflow_models/hq_iso_1pc/{i}_best.pth\"\n flows1.append(load_flow(fname, 6, 8, 64))\n fname = f\"../nflow_models/hq_iso_10pc/{i}_best.pth\"\n flows10.append(load_flow(fname, 6, 8, 64))\n fname = f\"../nflow_models/hq_aniso_orig/{i}_best.pth\"\n flowsaniso.append(load_flow(fname, 6, 8, 64))\n\n # loop over spatial points, get accel at each point\n a_iso0 = np.zeros_like(a_exact)\n a_iso1 = np.zeros_like(a_exact)\n a_iso10 = np.zeros_like(a_exact)\n a_aniso = np.zeros_like(a_exact)\n Nv = 1000\n v_esc = np.sqrt(2 * G * M / (x + a))\n for i in tqdm(range(Nx)):\n\n # set up grid of phase space points\n ve = v_esc[i]\n p = sample_velocities(Nv, v_max=0.9 * ve, pos=pos[i],\n vt_min=0.1 * ve, vx_min=0.2 * ve)\n q = np.tile(pos[i], [Nv, 1])\n\n # get acceleration from recon DF\n for j in range(4):\n model = [a_iso0, a_iso1, a_iso10, a_aniso][j]\n flows = [flows0, flows1, flows10, flowsaniso][j]\n df_args = {'u_q': u_q, 'u_p': u_p, 'flows': flows}\n f_ref = calc_DF_ensemble(q[0], p[0], **df_args)\n gradxf, gradvf = diff_DF(q, p, df_func=calc_DF_ensemble,\n df_args=df_args)\n gradxf /= f_ref\n gradvf /= f_ref\n model[i] = calc_accel_CBE(p, gradxf, gradvf)[0]\n\n # rescale data\n u = G * M / a**2\n x = x / a\n y0 = -a_exact / u\n y1 = -a_iso0 / u\n y2 = -a_iso1 / u\n y3 = -a_iso10 / u\n y4 = -a_aniso / u\n\n # save data file\n np.savez(dfile, x=x, y0=y0, y1=y1, y2=y2, y3=y3, y4=y4)\n\n else:\n # load data file\n data = np.load(dfile)\n x = data['x']\n y0 = data['y0']\n y1 = data['y1']\n y2 = data['y2']\n y3 = data['y3']\n y4 = data['y4']\n\n # set up figure\n fig = plt.figure(figsize=(3.3, 3.4), dpi=150)\n left = 0.16\n bottom = 0.11\n top = 0.99\n right = 0.99\n dX = (right - left)\n dY1 = (top - bottom) * 0.65\n dY2 = (top - bottom) * 0.35\n ax1 = fig.add_axes([left, bottom + dY2, dX, dY1])\n ax2 = fig.add_axes([left, bottom, dX, dY2])\n\n # plot settings\n plt.rcParams['text.usetex'] = True\n plt.rcParams['font.family'] = 'serif'\n plt.rcParams['font.size'] = 9\n plt.rcParams['ytick.labelsize'] = 8\n plt.rcParams['xtick.labelsize'] = 8\n sargs = {'zorder': 10, 's': 4}\n labels = [\n 'Exact',\n r'Isotropic, $\\sigma=0$',\n r'Isotropic, $\\sigma = 1\\%$',\n r'Isotropic, $\\sigma = 10\\%$',\n r'Anisotropic, $\\sigma = 0$'\n ]\n\n # plot\n c = plt.cm.Spectral(np.linspace(0, 1, 10))\n c1 = c[0]\n c2 = c[2]\n c3 = c[7]\n c4 = c[9]\n ax1.plot(x, y0, label=labels[0], c='k', lw=2)\n ax1.scatter(x, y1, label=labels[1], c=c1[None], **sargs)\n ax1.scatter(x, y2, 
label=labels[2], c=c2[None], **sargs)\n ax1.scatter(x, y3, label=labels[3], c=c3[None], **sargs)\n ax1.scatter(x, y4, label=labels[4], c=c4[None], **sargs)\n ax2.plot(x, y1 / y0 - 1, c=c1, lw=2)\n ax2.plot(x, y2 / y0 - 1, c=c2, lw=2)\n ax2.plot(x, y3 / y0 - 1, c=c3, lw=2)\n ax2.plot(x, y4 / y0 - 1, c=c4, lw=2)\n xlim = ax2.get_xlim()\n ax2.plot([xlim[0], xlim[1]], [0, 0], c='k', ls='dotted')\n ax2.set_xlim(xlim)\n\n # scales, labels etc.\n ax1.set_yscale('log')\n ax2.set_xlabel(r\"$x\\ /\\ a$\")\n ax1.set_ylabel(r\"$(\\partial \\Phi / \\partial x)\\ / \\ (GM/a^2)$\")\n ax2.set_ylabel(r\"Model / Exact - 1\")\n ax1.yaxis.set_label_coords(-0.135, 0.5)\n ax2.yaxis.set_label_coords(-0.135, 0.5)\n ax1.legend(frameon=False)\n ax1.tick_params(which='both', top=True, right=True, direction='inout')\n ax2.tick_params(which='both', top=True, right=True, direction='inout')\n ax2.set_ylim(-0.1, 0.1)\n\n # save figure\n fig.savefig(\"fig3_accs.pdf\")\n","repo_name":"aneeshnaik/HernquistFlows","sub_path":"figures/fig3_accs.py","file_name":"fig3_accs.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10150496009","text":"#!/usr/bin/env python3\n\nfrom __future__ import annotations\n\nfrom typing import cast\n\nimport logging\nimport os\nimport sys\n\nif sys.version_info < (3, 9):\n sys.exit('Gajim needs Python 3.9+')\n\nimport subprocess\nfrom pathlib import Path\n\nfrom setuptools import setup\nfrom setuptools.command.build_py import build_py as _build\nfrom setuptools.command.install import install as _install\n\n\nDataFilesT = list[tuple[str, list[str]]]\n\n\nMAN_FILES = [\n 'gajim.1',\n 'gajim-remote.1'\n]\nMETA_FILES = [\n ('data/org.gajim.Gajim.desktop', 'share/applications', '--desktop'),\n ('data/org.gajim.Gajim.appdata.xml', 'share/metainfo', '--xml')]\n\n\nTRANS_DIR = Path('po')\nTRANS_TEMPLATE = TRANS_DIR / 'gajim.pot'\nREPO_DIR = Path(__file__).resolve().parent\nBUILD_DIR = REPO_DIR / 'build'\n\nALL_LINGUAS = sorted([lang.stem for lang in TRANS_DIR.glob('*.po')])\n\n\nlogging.basicConfig(level='INFO', format='%(levelname)s: %(message)s')\nlog = logging.getLogger()\n\n\ndef newer(source: Path, target: Path) -> bool:\n if not source.exists():\n raise ValueError('file \"%s\" does not exist' % source.resolve())\n if not target.exists():\n return True\n\n from stat import ST_MTIME\n mtime1 = source.stat()[ST_MTIME]\n mtime2 = target.stat()[ST_MTIME]\n\n return mtime1 > mtime2\n\n\ndef build_translation() -> None:\n for lang in ALL_LINGUAS:\n po_file = TRANS_DIR / f'{lang}.po'\n mo_file = BUILD_DIR / 'mo' / lang / 'LC_MESSAGES' / 'gajim.mo'\n mo_dir = mo_file.parent\n if not (mo_dir.is_dir() or mo_dir.is_symlink()):\n mo_dir.mkdir(parents=True)\n\n if newer(po_file, mo_file):\n subprocess.run(['msgfmt',\n str(po_file),\n '-o',\n str(mo_file)],\n cwd=REPO_DIR,\n check=True)\n\n log.info('Compiling %s >> %s', po_file, mo_file)\n\n\ndef install_trans(data_files: DataFilesT) -> None:\n for lang in ALL_LINGUAS:\n mo_file = str(BUILD_DIR / 'mo' / lang / 'LC_MESSAGES' / 'gajim.mo')\n target = f'share/locale/{lang}/LC_MESSAGES'\n data_files.append((target, [mo_file]))\n\n\ndef build_man() -> None:\n '''\n Compress Gajim manual files\n '''\n newdir = BUILD_DIR / 'man'\n if not (newdir.is_dir() or newdir.is_symlink()):\n newdir.mkdir()\n\n for man in MAN_FILES:\n filename = Path('data') / man\n man_file_gz = newdir / (man + '.gz')\n if man_file_gz.exists():\n if newer(filename, man_file_gz):\n 
man_file_gz.unlink()\n else:\n continue\n\n import gzip\n # Binary io, so open is OK\n with open(filename, 'rb') as f_in,\\\n gzip.open(man_file_gz, 'wb') as f_out:\n f_out.writelines(f_in)\n log.info('Compiling %s >> %s', filename, man_file_gz)\n\n\ndef install_man(data_files: DataFilesT) -> None:\n man_dir = BUILD_DIR / 'man'\n target = 'share/man/man1'\n\n for man in MAN_FILES:\n man_file_gz = str(man_dir / (man + '.gz'))\n data_files.append((target, [man_file_gz]))\n\n\ndef build_intl() -> None:\n '''\n Merge translation files into desktop and mime files\n '''\n base = BUILD_DIR\n\n for filename, _, option in META_FILES:\n newfile = base / filename\n newdir = newfile.parent\n if not (newdir.is_dir() or newdir.is_symlink()):\n newdir.mkdir()\n merge(Path(filename + '.in'), newfile, option)\n\n\ndef install_intl(data_files: DataFilesT) -> None:\n for filename, target, _ in META_FILES:\n data_files.append((target, [str(BUILD_DIR / filename)]))\n\n\ndef merge(in_file: Path,\n out_file: Path,\n option: str,\n po_dir: str = 'po') -> None:\n '''\n Run the msgfmt command.\n '''\n if in_file.exists():\n cmd = (('msgfmt %(opt)s -d %(po_dir)s --template %(in_file)s '\n '-o %(out_file)s') %\n {'opt': option,\n 'po_dir': po_dir,\n 'in_file': in_file,\n 'out_file': out_file})\n if os.system(cmd) != 0:\n msg = ('ERROR: %s was not merged into the translation files!\\n' %\n out_file)\n raise SystemExit(msg)\n log.info('Compiling %s >> %s', in_file, out_file)\n\n\nclass Build(_build):\n def run(self):\n build_translation()\n if sys.platform != 'win32':\n build_man()\n build_intl()\n _build.run(self)\n\n\nclass Install(_install):\n def run(self):\n data_files = cast(DataFilesT, self.distribution.data_files) # pyright: ignore # noqa: E501\n install_trans(data_files)\n if sys.platform != 'win32':\n install_man(data_files)\n install_intl(data_files)\n _install.run(self) # pyright: ignore\n\n\n# only install subdirectories of data\ndata_files_app_icon = [\n ('share/icons/hicolor/scalable/apps',\n ['gajim/data/icons/hicolor/scalable/apps/org.gajim.Gajim.svg']),\n ('share/icons/hicolor/scalable/apps',\n ['gajim/data/icons/hicolor/scalable/apps/org.gajim.Gajim-symbolic.svg'])\n]\n\ndata_files: DataFilesT = data_files_app_icon\n\nsetup(\n cmdclass={\n 'build_py': Build,\n 'install': Install,\n },\n entry_points={\n 'console_scripts': [\n 'gajim-remote = gajim.gajim_remote:main',\n ],\n 'gui_scripts': [\n 'gajim = gajim.gajim:main',\n ]\n },\n data_files=data_files\n)\n","repo_name":"SWAG-MLG-420/Gajim_SWAG_MLG_M.O.D","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8675558183","text":"import eyed3 as e3\nimport os, json\nfrom openpyxl import Workbook\nfrom openpyxl.styles import colors\nfrom openpyxl.styles import Font, Color\n\n#base functions\ndef type_mp3(name):\n return name.endswith(\".mp3\")\n\ndef auto_size(worksheet):\n for col in worksheet.columns:\n max_length = 0\n column = col[0].column_letter # Get the column number\n for cell in col:\n try: # Necessary to avoid error on empty cells\n if len(str(cell.value)) > max_length:\n max_length = len(cell.value)\n except:\n pass\n adjusted_width = (max_length + 2) * 1.2\n worksheet.column_dimensions[column].width = adjusted_width\n #adjust dimension\n\n\n\ndef songs_read(root_path):\n #file_list extraction\n \n files_list = os.listdir(root_path)\n\n songs_list = list(filter(type_mp3, files_list))\n\n #export data 
to excel file\n wb = Workbook()\n ws = wb.active\n\n ws.title = \"Songs\"\n ws.freeze_panes = \"A4\"\n\n #Column names\n ws.append([\"\", \"Total songs: \", len(songs_list)])\n ws.append([\"\"])\n ws.append([\"id\", \"File name\", \"Title\", \"Artist\", \"Album\", \"Album Artist\"])\n\n for song in songs_list:\n song_info = [\"\"] * 6\n song_file = e3.load(root_path + song)\n \n song_info[0] = song\n song_info[1] = song\n song_info[2] = song_file.tag.title\n song_info[3] = song_file.tag.artist\n song_info[4] = song_file.tag.album\n song_info[5] = song_file.tag.album_artist\n\n ws.append(song_info)\n\n #Editing\n ws.row_dimensions[3].font = Font(name=\"Calibri\", size=12, bold=True)\n auto_size(ws)\n ws.column_dimensions['A'].hidden= True\n\n wb.save(root_path + \"songs.xlsx\")\n\n print(\"File 'songs.xlsx' generated\")\n","repo_name":"c-brugo/song-mng","sub_path":"songs_data_read.py","file_name":"songs_data_read.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2827640760","text":"from tkinter import Tk,Button,Label,Entry,Scrollbar,Text,END,Y,LEFT,RIGHT,Frame, INSERT,X,HORIZONTAL,BOTTOM,TOP,BOTH,TRUE,NONE\nfrom tkinter import ttk\nimport copy\nimport webbrowser\nimport time\nimport subprocess\nfrom tkinter.colorchooser import askcolor\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.filedialog import askdirectory\nimport xml.etree.ElementTree as ET\nfrom xml.etree.ElementTree import Element, SubElement, tostring\nfrom xml.dom import minidom\nimport xml.dom.minidom\nfrom xml.etree import ElementTree\nimport os\nfrom Nodo import Nodo\nfrom Lista import Lista\nfrom Evento import Evento\n\neventos = Lista()\n\ndef ctrlEvent(event):\n if(12==event.state and event.keysym=='c' ):\n return\n else:\n return \"break\"\n\ndef documento():\n path = 'ensayo.pdf'\n subprocess.Popen([path], shell=True)\n\ndef ayuda():\n ayuda = Tk()\n ayuda.title(\"Ayuda\")\n ancho_ventana = 403\n alto_ventana = 160\n ayuda.configure(bg=\"#B3B3B3\")\n x_ventana = raiz.winfo_screenwidth() // 2 - ancho_ventana // 2\n y_ventana = raiz.winfo_screenheight() // 2 - alto_ventana // 2\n posicion = str(ancho_ventana) + \"x\" + str(alto_ventana) + \"+\" + str(x_ventana) + \"+\" + str(y_ventana)\n ayuda.geometry(posicion)\n b1 = Button(ayuda,text=\"Datos del Desarrollador\",command=datos,bg = \"#2D9AB7\",fg = \"#FFFFFF\",font=(\"Lucida Console\",17))\n b1.place(x=30,y=30)\n b2 = Button(ayuda,text=\"Documentacion\",command=documento,bg = \"#2D9AB7\",fg = \"#FFFFFF\",font=(\"Lucida Console\",17))\n b2.place(x=100,y=90)\n ayuda.mainloop()\n\ndef datos():\n dato = Tk()\n dato.title(\"Datos del desarrolador\")\n tk = Label(dato,text=\"Edwin Estuardo Reyes Reyes \\n201709015\\n 4to Semestre \",bg = \"#2D9AB7\",fg = \"#FFFFFF\",font=(\"Lucida Console\",17))\n tk.place(x=10,y=30)\n ancho_ventana = 390\n alto_ventana = 120\n dato.configure(bg=\"#2D9AB7\")\n x_ventana = raiz.winfo_screenwidth() // 2 - ancho_ventana // 2\n y_ventana = raiz.winfo_screenheight() // 2 - alto_ventana // 2\n posicion = str(ancho_ventana) + \"x\" + str(alto_ventana) + \"+\" + str(x_ventana) + \"+\" + str(y_ventana)\n dato.geometry(posicion)\n\ndef carga():\n root = Tk()\n root.withdraw()\n root.wm_attributes(\"-topmost\", 1)\n archivo = askopenfilename(filetypes =((\"Archivo XML\", \"*.xml\"),(\"Todos Los Archivos\",\"*.*\")),title = \"Busque su archivo.\")\n root.update()\n root.destroy() \n f = open (archivo,\"r\") \n mensaje = f.read() \n 
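For reference, the <EVENTOS>/<EVENTO> envelope that the character-level scanner below walks through can also be split with the already-imported ElementTree. A minimal sketch, under the assumption that each EVENTO body is plain text (the function and variable names here are illustrative only):

import re
import xml.etree.ElementTree as ET

def eventos_desde_xml(ruta):
    # Yield (raw text, date, e-mail addresses) per <EVENTO>; the date and
    # address patterns mirror what the hand-written state machine accepts.
    root = ET.parse(ruta).getroot()
    for ev in root.iter('EVENTO'):
        texto = ev.text or ''
        fecha = re.search(r'\d{1,2}/\d{1,2}/\d{2,4}', texto)
        correos = re.findall(r'\S+@\S+', texto)
        yield texto, (fecha.group(0) if fecha else ''), correos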
Texto1.config(state='normal')\n Texto1.delete(\"1.0\",\"end\")\n Texto1.insert(INSERT,mensaje)\n Texto1.config(state='disable')\n f.close()\n x = 0\n state = 0\n auxiliar = ''\n palabra = ''\n while(True):\n actual = mensaje[x]\n if state == 0:\n if actual == '<':\n state = 1\n x = x + 1\n else:\n x = x + 1\n elif state == 1: #analiza la palabra inicial\n if actual == '>' :\n if auxiliar =='EVENTOS':\n state = 2\n auxiliar = ''\n x = x + 1\n else:\n state = 0 \n x = x + 1\n auxiliar = ''\n else:\n auxiliar = auxiliar + actual\n x = x + 1 \n elif state == 2: \n if actual == '<':\n state = 3\n x = x + 1\n else:\n x = x + 1\n elif state == 3: # analiza la palabra evento\n if actual == '>':\n if auxiliar == 'EVENTO':\n auxiliar = ''\n state = 4\n fecha = ''\n reportado = ''\n afectados = Lista()\n error = ''\n x = x + 1\n elif auxiliar == '/EVENTOS':\n break\n else:\n auxiliar = ''\n state = 2\n x = x + 1\n else:\n auxiliar = auxiliar + actual\n x = x + 1\n elif state == 4: #empieza a analizar en todos los eventos\n if actual == ',':\n state = 5\n x = x + 1\n else:\n x = x + 1\n elif state == 5: #analiza la fecha\n if actual == '1' or actual == '2' or actual == '3' or actual == '4' or actual == '5' or actual == '6' or actual == '7' or actual == '8' or actual == '9' or actual == '0' or actual == '/':\n auxiliar = auxiliar + actual\n x = x + 1\n elif ord(actual) == 32:\n x = x + 1\n else:\n fecha = auxiliar\n auxiliar = ''\n x = x + 1\n state = 6\n elif state == 6: #analiza reportado\n if actual == ':':\n x = x + 1\n valido = False\n state = 7\n else:\n x = x + 1\n elif state == 7 :\n if ord(actual) == 32 or actual == '>' or actual == '<' or actual == '\"':\n if valido != True:\n auxiliar = ''\n x = x + 1\n elif ord(actual) == 10:\n reportado = auxiliar\n state = 8\n auxiliar = ''\n valido = False\n else:\n if actual == '@':\n valido = True\n auxiliar = auxiliar + actual\n x = x + 1\n elif state == 8: # empieza analizar usuarios afectados\n if actual == ':':\n x = x + 1\n valido = False\n state = 9\n else:\n x = x + 1\n elif state == 9 :\n #print(actual)\n #time.sleep(1)\n if ord(actual) == 32 or actual == '>' or actual == '<' or actual == '\"':\n if valido != True:\n auxiliar = ''\n x = x + 1\n elif ord(actual) == 10 :\n if mensaje[x-1] != ',':\n nodo = Nodo(auxiliar)\n afectados.Agregar(nodo)\n state = 10\n auxiliar = ''\n valido = False\n x = x + 1\n else:\n auxiliar = ''\n valido = False\n x = x + 1\n elif actual == ',':\n valido = False\n nodo = Nodo(auxiliar)\n afectados.Agregar(nodo)\n x = x + 1\n elif ord(actual) == 9:\n x = x + 1\n else:\n if actual == '@':\n valido = True\n auxiliar = auxiliar + actual\n x = x + 1\n elif state == 10:\n if actual == ':':\n x = x + 1\n state = 11\n else:\n x = x + 1\n elif state == 11 :\n if ord(actual) >= 48 and ord(actual) <= 57:\n auxiliar = auxiliar + actual\n x = x + 1\n elif actual == '-':\n error = auxiliar\n state = 2\n auxiliar = '' \n nodo = Evento(fecha,reportado,afectados,error)\n nodos = Nodo(nodo)\n eventos.Agregar(nodos) \n x = x + 1 \n else:\n x = x + 1\n print(\"salio\")\n eventos.Print()\n\n\n \n \n\ndef enviar():\n print(\"\")\n\n\nraiz = Tk()\nraiz.title(\"Principal\")\nancho_ventana = 1280\nalto_ventana = 720\nraiz['bg'] = '#B3B3B3'\nx_ventana = raiz.winfo_screenwidth() // 2 - ancho_ventana // 2\ny_ventana = raiz.winfo_screenheight() // 2 - alto_ventana // 2\nposicion = str(ancho_ventana) + \"x\" + str(alto_ventana) + \"+\" + str(x_ventana) + \"+\" + str(y_ventana)\nraiz.geometry(posicion)\ntamaño = 
17\nraiz.iconbitmap(\"codificacion.ico\")\nbotonCargar = Button(raiz,text=\"CARGAR ARCHIVO\",command=carga,bg = \"#2D9AB7\",fg = \"#FFFFFF\",font=(\"Lucida Console\",tamaño))\nbotonCargar.place(x=230,y=20)\nbotonOperacion = Button(raiz,text=\"PETICIONES\",bg = \"#2D9AB7\",fg = \"#FFFFFF\",font=(\"Lucida Console\",tamaño))\nbotonOperacion.place(x=550,y=20)\nbotonAyuda = Button(raiz,text=\"AYUDA\",command=ayuda,bg = \"#2D9AB7\",fg = \"#FFFFFF\",font=(\"Lucida Console\",tamaño))\nbotonAyuda.place(x=1150,y=20)\nlabel1 = Label(raiz,text=\"ENTRADA\",bg=\"#B3B3B3\",fg=\"#FFFFFF\",font=(\"Lucida Console\",17))\nlabel1.place(x=250,y=170)\n\nbotonEnviar = Button(raiz,text=\"ENVIAR\",command=enviar,bg=\"#04a30c\",fg=\"#FFFFFF\",font=(\"Lucida Console\",17))\nbotonEnviar.place(x=250,y=100)\nbotonReset = Button(raiz,text=\"RESET\",bg=\"#c90d0d\",fg=\"#FFFFFF\",font=(\"Lucida Console\",17))\nbotonReset.place(x=950,y=100)\n\nframe = Frame(raiz)\nframe.place(x=40,y=200)\nXScroll_izquierda = Scrollbar(frame,orient='horizontal')\nYScroll_izquierda = Scrollbar(frame)\nXScroll_izquierda.pack(side=BOTTOM,fill=X)\nYScroll_izquierda.pack(side=LEFT,fill=Y)\nTexto1 = Text(frame, height=29, width=65, wrap=NONE, xscrollcommand=XScroll_izquierda.set, yscrollcommand=YScroll_izquierda.set)\nTexto1.pack(side=TOP, fill=BOTH, expand=TRUE)\nXScroll_izquierda.config(command=Texto1.xview)\nYScroll_izquierda.config(command=Texto1.yview)\nTexto1.pack(side=\"left\")\nTexto1.bind(\"<Key>\", lambda e: ctrlEvent(e))\nTexto1.config(state='disable')\n\nlabel2 = Label(raiz,text=\"SALIDA\",bg=\"#B3B3B3\",fg=\"#FFFFFF\",font=(\"Lucida Console\",17))\nlabel2.place(x=950,y=170)\nfram2 = Frame(raiz)\nfram2.place(x=700,y=200)\nXScroll_derecha = Scrollbar(fram2,orient='horizontal')\nYScroll_derecha = Scrollbar(fram2)\nXScroll_derecha.pack(side=BOTTOM,fill=X)\nYScroll_derecha.pack(side=LEFT,fill=Y)\nTexto2 = Text(fram2,height=29,width=65, wrap=NONE, xscrollcommand=XScroll_derecha.set, yscrollcommand=YScroll_derecha.set)\nTexto2.pack(side=TOP, fill=BOTH, expand=TRUE)\nXScroll_derecha.config(command=Texto2.xview)\nYScroll_derecha.config(command=Texto2.yview)\nTexto2.pack(side=\"left\")\nTexto2.bind(\"<Key>\", lambda e: ctrlEvent(e))\nTexto2.config(state='disable')\n\nraiz.mainloop()","repo_name":"EstuardoReyes/IPC2_Proyecto3_201709015","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":10014,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"31212485848","text":"class Square():\r\n\t\r\n\tsquare_list = []\r\n\t\r\n\tdef __init__(self, s1):\r\n\t\tself.s1 = s1\r\n\t\tself.square_list.append(self.s1)\r\n\t\r\n\tdef calculate_perimeter(self):\r\n\t\tself.perimeter = self.s1 * 4\r\n\t\tprint(\"The perimeter of this square is: \" + str(self.perimeter))\t\r\n\r\nsq1 = Square(10)\r\nsq2 = Square(15)\r\nsq3 = Square(30)\r\nsq4 = Square(50)\r\n\r\nprint(Square.square_list)\r\nsq1.calculate_perimeter()\r\n","repo_name":"willr786/TSTP","sub_path":"stp_c14_1.py","file_name":"stp_c14_1.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23027889165","text":"'''\n    reader for [INI]tial value [C]ondition\n'''\n\n__author__ = ('Tom Streubel',) # alphabetical order of surnames\n__credits__ = tuple() # alphabetical order of surnames\n\n'''\n    imports\n    =======\n'''\nfrom simulator.netgraph import network_dae, base_edge, node\nfrom simulator.resources.units import 
pressure_to_bar, flow_to_kilogramm_per_second\nfrom simulator.resources.auxiliary import DimensionError\nfrom simulator.readerWriter.read_config_yaml import configuration\n\nimport numpy as np\n\nimport csv\n\nfrom typing import Union, List\n\n\ndef create_inic(net : network_dae, dictInic : Union[List, None] = None,\n deadPressure : float = 50.0, deadPressure_unit : str = 'bar',\n deadFlow : float = 0.0, deadFlow_unit : str = 'kg/s',\n x0 : Union[np.ndarray, None] = None) -> np.ndarray:\n '''\n :param net:\n :param dictInic:\n :param deadPressure: pressure in the dead state [50.0 'bar' is default]\n :param deadPressure_unit:\n :param deadFlow: flow in the dead state [0.0 'kg/s' is default and recommended]\n :param deadFlow_unit:\n :param x0: shape fitting vector to update or None (the latter is default)\n :return:\n '''\n\n '''\n if no state x0 to update was provided a dead state will be created.\n '''\n deadPressure = pressure_to_bar(deadPressure, deadPressure_unit)\n deadFlow = flow_to_kilogramm_per_second(deadFlow, deadFlow_unit)\n\n if x0 is None:\n x0 = np.zeros(shape = (net.dim,))\n for node_types in [net.all_node_types, net.all_hidden_node_types]:\n for node_type in node_types:\n for nodeInstance in net.typeReg[node_type]:\n x0[nodeInstance.p_press_id] = deadPressure\n\n for edge_types in [net.all_edge_types, net.all_hidden_edge_types]:\n for edge_type in edge_types:\n for edgeInstance in net.typeReg[edge_type]:\n x0[edgeInstance.qL_leftFlow_id] = deadFlow\n x0[edgeInstance.qR_rightFlow_id] = deadFlow\n else:\n if x0.shape[0] != net.dim:\n raise DimensionError(\"x0 provided doesn't fit dim : {}\".format(net.dim))\n\n\n '''\n this function can return a dead state if no dictInic is provided. \n So the following code is executed only if a dictInic was provided\n '''\n if not (dictInic is None):\n # ''' gather norm density '''\n # for dictRow in dictInic:\n # if dictRow['!'] == '!':\n # continue\n #\n # # if dictRow['Object'] == '_SYS' and dictRow['Parameter'] == 'RHO_0':\n # # net.gasMix.rho_0 = (float(dictRow['Value']), dictRow['Unit'])\n\n ''' actually filling out x0 by entries from dictInic (which comes from e.g. 
a csv) '''\n for dictRow in dictInic:\n if dictRow['!'] == '!':\n continue\n\n obj = dictRow['Object']\n param = dictRow['Parameter']\n try:\n val = float(dictRow['Value'])\n except:\n val = dictRow['Value']\n unit = dictRow['Unit']\n\n if obj in net.nameReg:\n net_elem_instance : Union[node, base_edge] = net.nameReg[obj]\n if 'node' in net_elem_instance.type:\n nodeInstance : node = net_elem_instance\n\n if param == 'P':\n x0[nodeInstance.p_press_id] = pressure_to_bar(val, unit)\n\n elif 'edge' in net_elem_instance.type:\n edgeInstance : base_edge = net_elem_instance\n\n if param == 'M':\n x0[edgeInstance.qL_leftFlow_id] = flow_to_kilogramm_per_second(val, unit,\n density = net.gasMix.rho_0,\n density_unit = net.gasMix.rho_0_unit)\n elif param == 'ML':\n x0[edgeInstance.qL_leftFlow_id] = flow_to_kilogramm_per_second(val, unit,\n density = net.gasMix.rho_0,\n density_unit = net.gasMix.rho_0_unit)\n elif param == 'MR':\n x0[edgeInstance.qR_rightFlow_id] = flow_to_kilogramm_per_second(val, unit,\n density = net.gasMix.rho_0,\n density_unit = net.gasMix.rho_0_unit)\n\n ''' dealing with hidden objects '''\n for edge_type in net.all_hidden_edge_types:\n for edgeInstance in net.typeReg[edge_type]:\n linked_edge : base_edge = net.nameReg[edgeInstance.linked_to]\n if edgeInstance.linked_on == 'left':\n x0[edgeInstance.qL_leftFlow_id] = x0[linked_edge.qL_leftFlow_id]\n x0[edgeInstance.qR_rightFlow_id] = x0[linked_edge.qL_leftFlow_id]\n else:\n x0[edgeInstance.qL_leftFlow_id] = x0[linked_edge.qR_rightFlow_id]\n x0[edgeInstance.qR_rightFlow_id] = x0[linked_edge.qR_rightFlow_id]\n\n for node_type in net.all_hidden_node_types:\n for nodeInstance in net.typeReg[node_type]:\n linked_node : node = net.nameReg[nodeInstance.linked_to]\n while hasattr(linked_node, \"linked_to\"): linked_node = net.nameReg[linked_node.linked_to]\n x0[nodeInstance.p_press_id] = x0[linked_node.p_press_id]\n\n return x0\n\n\ndef retrieve_inic_csv(config : configuration) -> List:\n with open(config.path_in_inic_csv, mode='r') as eventList:\n eventListRows = csv.DictReader(eventList, delimiter = config.delimiter)\n\n return list(eventListRows)\n\n\n''' shortcut '''\ndef create_inic_csv(net : network_dae, config : configuration,\n deadPressure : float = 50.0, deadPressure_unit : str = 'bar',\n deadFlow : float = 0.0, deadFlow_unit : str = 'kg/s',\n x0 : Union[np.ndarray, None] = None) -> np.ndarray:\n return create_inic(net = net, dictInic = retrieve_inic_csv(config),\n deadPressure = deadPressure, deadPressure_unit = deadPressure_unit,\n deadFlow = deadFlow, deadFlow_unit = deadFlow_unit,\n x0 = x0)\n","repo_name":"berlinade/python-transient-gas-network-simulator","sub_path":"simulator/readerWriter/read_inic_csv.py","file_name":"read_inic_csv.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23613090651","text":"\"\"\"M=int(raw_input())\nP=float(raw_input())\nX=int(raw_input())\n\ndef millionaire(round_num,probab,probab1,now,now1):\n\tif round_num<=M:\n\t\tif now<=X:\n\t\t\tprint \"<\",probab+probab1*P,now\n\t\t\treturn millionaire(round_num+1,probab+probab1*P,probab1*(1-P),now+now1/2,now1/2)\n\t\telse:\n\t\t\tprint \">\",probab,now\n\t\t\treturn millionaire(round_num+1,probab,probab1*(1-P),now-now1/2,now1/2)\n\telse:\n\t\treturn probab\n\t\t\n\t\nprint millionaire(0,0,1,500000,500000)\"\"\"\nimport sys\n\nI=int(sys.stdin.readline())\nfor i in 
range(I):\n\tcount=0\n\tB=1\n\tO=1\n\tBl=[]\n\tOl=[]\n\tBp=0\n\tOp=0\n\tT=map(str,sys.stdin.readline().split(\" \"))\n\tT[-1]=T[-1][:-1]\n\tfor j in range(1,len(T),2):\n\t\tif T[j]==\"B\":\n\t\t\tBl.append(T[j+1])\n\t\telse:\n\t\t\tOl.append(T[j+1])\n\tfor j in range(2,len(T),2):\n\t\tflago=0\n\t\tflagb=0\n\t\twhile T[j]!=0:\n\t\t\tif B==int(T[j]) and T[j-1]==\"B\":\n\t\t\t\tT[j]=0\n\t\t\t\tBl.pop(0)\n\t\t\t\tflagb=1\n\t\t\telif O==int(T[j]) and T[j-1]==\"O\":\n\t\t\t\tT[j]=0\n\t\t\t\tOl.pop(0)\n\t\t\t\tflago=1\n\t\t\tif flagb==0 and len(Bl)!=0:\n\t\t\t\tif B<int(Bl[0]):\n\t\t\t\t\tB+=1\n\t\t\t\telif B>int(Bl[0]):\n\t\t\t\t\tB-=1\n\t\t\tif flago==0 and len(Ol)!=0:\n\t\t\t\tif O<int(Ol[0]):\n\t\t\t\t\tO+=1\n\t\t\t\telif O>int(Ol[0]):\n\t\t\t\t\tO-=1\n\t\t\tcount+=1\n\tsys.stdout.write(\"Case #\")\n\tsys.stdout.write(str(i+1)+\": \")\n\tsys.stdout.write(str(count)+\"\\n\")\n\"\"\"\tfor j in range(1,len(T),2):\n\t\tif T[j]==\"O\":\n\t\t\tOl+=T[j+1]\n\t\telif T[j]==\"B\":\n\t\t\tBl+=T[j+1]\n\t\t\t\n\tprint Bl,Ol\n\twhile Bl!=[] or Ol!=[]:\n\t\t\n\t\tif len(Bl)!=0:\n\t\t\tq=int(Bl[0])\n\t\t\tprint Bl,Ol\n\t\t\tif B==q and (Op-Bp==1):\n\t\t\t\tBl.pop(0)\n\t\t\t\tBp+=1\n\t\t\telif B<q:\n\t\t\t\tB+=1\n\t\t\telif B>q:\n\t\t\t\tB-=1\n\t\t\n\t\tif len(Ol)!=0:\n\t\t\tw=int(Ol[0])\n\t\t\tif O==w and (Op-Bp==0):\n\t\t\t\tOl.pop(0)\n\t\t\t\tOp+=1\n\t\t\telif O<w:\n\t\t\t\tO+=1\n\t\t\telif O>w:\n\t\t\t\tO-=1\n\t\t\tcount+=1\n\t\tprint Bl,Ol\"\"\"\n\t#print \"Case #%d:\"%i+1,\n\t#print count","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_74/1192.py","file_name":"1192.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"24969334858","text":"import socket # 导入 socket 模块\nimport time\nfrom threading import Thread\nimport re\n\nADDRESS = ('', 20437) # 绑定地址\ng_socket_server = None # 负责监听的socket\ng_conn_pool = [] # 连接池\n\nclass Device_que(object):\n    def __init__(self,IMEI):\n        self.imei = IMEI\n        self.sendmessage = 0\n        self.upd10x = None\n        self.upd20x = None\n        self.upd30x = None\n        self.updatemax = 0\n\n\ndef start():\n    name = input('请输入待升级的IMEI文件名:')\n    r_file = open(name,'r')\n    while True:\n        oneline = r_file.readline().strip('\\n')\n        if 0==len(oneline):\n            r_file.close()\n            break\n        # de = 'de'+oneline\n        #创建IMEI对象\n        # de = Device_que(oneline)\n        dict1[oneline]= Device_que(oneline)\n        # print(dict1)\n\n    # print(device)\n\ndef initserver():\n    global g_socket_server\n    g_socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 创建 socket 对象\n    g_socket_server.bind(ADDRESS)\n    g_socket_server.listen(512) # 最大等待数(有很多人理解为最大连接数,其实是错误的)\n    print('服务器已启动,等待客户连接')\n\ndef listion():\n    client,_ = g_socket_server.accept()\n    g_conn_pool.append(client)\n    # print(client)\n    # print('*'*88)\n    # print(_)\n    th = Thread(target=message_handle,args=(client,_,))\n    #设置守护线程\n    th.setDaemon(True)\n    th.start()\n\ndef message_handle(client,_):\n    device = None  # not known until the first message is parsed\n    while True:\n        now = int(time.time())\n        timeArray = time.localtime(now)\n        otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n\n        raw = client.recv(2048)\n        if len(raw) == 0:\n            client.close()\n            # 删除连接\n            g_conn_pool.remove(client)\n            print(str(_)+\"客户端下线了。\")\n            if device is not None:\n                print(device.imei+\"设备下线了。\")\n            break\n        raw_split = raw.decode(encoding='utf8').split(',')\n        with open(\"1.txt\", \"a\", encoding=\"UTF-8\") as f:\n            f.write(str(otherStyleTime)+str(_)+'收到的原始log:'+raw.decode(encoding='utf8')+'\\n')\n        # print(raw_split[2]) #IMEI\n        # print(raw_split[4]) #升级过程\n        #增加判断,如果检测有ACK回复或者'100',则不发送升级指令\n        # device = 'de'+raw_split[2]\n        # if raw_split[2] not in dict1.keys():\n        #     print('break')\n        #     break\n        device = 
dict1[raw_split[2]]\n device = dict1[re.findall('86\\d{13}',str(raw.decode(encoding='utf8')))[0]]\n # device = Device_que(raw_split[2]) #调试使用\n # print(raw_split[0][-5:])\n # print(device.sendmessage)\n if raw_split[0][-5:] == 'GTUPD'or device.sendmessage==1:\n device.sendmessage = 1\n # print('upd shoudo')\n\n if device.sendmessage != 1:\n client.sendall('AT+GTUPD=zk600,0,,10,0,http://iot.spin.pm:8080/ninebot/firmware/iot/R05A03V02.enc,,0,0,,,,FFFF$'.encode(encoding='utf8'))\n device.sendmessage=1\n with open(\"1.txt\", \"a\", encoding=\"UTF-8\") as f:\n f.write(str(otherStyleTime)+str(_) +'向'+ re.findall('86\\d{13}',str(raw.decode(encoding='utf8')))[0]\n +'发送的指令:' + 'AT+GTUPD=zk600,0,,10,0,http://iot.spin.pm:8080/ninebot/firmware/iot/R05A03V02.enc,,0,0,,,,FFFF$' + '\\n')\n\n # if raw_split[4]=='301' and raw_split[0][-5:] == 'GTUPD':\n # print('*'*12)\n # time.sleep(3)\n # client.sendall('AT+GTBSI=zk600,iot.spin.pm,8383,FFFF$'.encode(encoding='utf8'))\n # with open(\"1.txt\", \"a\", encoding=\"UTF-8\") as f:\n # f.write(str(otherStyleTime)+str(_) + re.findall('86\\d{13}',str(raw.decode(encoding='utf8')))[0]+\n # '升级成功,并发送的指令:' + 'AT+GTBSI=zk600,iot.spin.pm,8383,FFFF$' + '\\n')\n #根据升级协议,分别打印出对应log到指令文件中\n if raw_split[0][-5:] == 'GTUPD':\n if raw_split[4]=='301':\n time.sleep(3)\n client.sendall('AT+GTBSI=zk600,iot.spin.pm,8383,FFFF$'.encode(encoding='utf8'))\n with open(\"1.txt\", \"a\", encoding=\"UTF-8\") as f:\n f.write(str(otherStyleTime)+str(_) + re.findall('86\\d{13}',str(raw.decode(encoding='utf8')))[0]+\n '升级成功,并发送的指令:' + 'AT+GTBSI=zk600,iot.spin.pm,8383,FFFF$' + '\\n')\n with open(\"log2.txt\", \"a\", encoding=\"UTF-8\") as f:\n f.write(str(otherStyleTime)+'\\t'+re.findall('86\\d{13}',str(raw.decode(encoding='utf8')))[0]+'\\t'+\n 'Update successed:'+'\\t'+raw_split[4]+ '\\n')\n with open(\"E:/Server/FotaToolV0.06/FotaToolV0.06 8594/project/Server/deltabin/log2.txt\", \"a\", encoding=\"UTF-8\") as f:\n f.write(str(otherStyleTime)+'\\t'+\n 'Update successed:'+'\\t'+ re.findall('86\\d{13}',str(raw.decode(encoding='utf8')))[0]+'\\n')\n\n elif raw_split[4] in ['101','102','103','202','302','303']:\n # print('upadtemax'+str(device.updatemax) )\n # print(device.updatemax < 2)\n #控制最多下发升级指令的次数,如果发现升级失败,尝试多发一次升级指令\n if device.updatemax < 3:\n # 间隔2min之后再下发升级指令\n print('-----:'+str(device.updatemax+1))\n time.sleep(int(12*(device.updatemax+1)))\n print('休眠时间是:'+str((13*(device.updatemax+1))))\n device.sendmessage=0\n device.updatemax+=1\n # print('upadtemax'+str(device.updatemax))\n with open(\"log2.txt\", \"a\", encoding=\"UTF-8\") as f:\n f.write(str(otherStyleTime)+'\\t'+re.findall('86\\d{13}',str(raw.decode(encoding='utf8')))[0]+'\\t'+\n 'Upgrade failed:'+'\\t'+raw_split[4]+ '\\n')\n\n elif raw_split[4] in ['100','200','201','300']:\n with open(\"log2.txt\", \"a\", encoding=\"UTF-8\") as f:\n f.write(str(otherStyleTime)+'\\t'+re.findall('86\\d{13}',str(raw.decode(encoding='utf8')))[0]+'\\t'+\n 'Upgrading:'+'\\t'+raw_split[4]+ '\\n')\n\n\n\n\n\n\n print(str(_)+'收到的原始log:'+raw.decode(encoding='utf8'))\n w_file.write(str(_)+'收到的原始log:'+raw.decode(encoding='utf8')+'\\n')\n\n\n\n\n\n\nif __name__ == '__main__':\n global dict1, w_file,j\n j=0\n dict1 ={}\n start()\n w_file = open('123.txt','w')\n w_file.write('1111')\n\n while True:\n # start()\n try:\n initserver()\n listion()\n # print(j)\n if j == len(dict1.keys()):\n w_file.close()\n exit()\n except:\n print(KeyError)\n\n","repo_name":"Dylan-bai/Dylan_tools","sub_path":"update sw server_V3.0.py","file_name":"update sw 
server_V3.0.py","file_ext":"py","file_size_in_byte":6902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4708190296","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 20 12:00:47 2018\n\n@author: doru\n\"\"\"\n\n# gene expression should be at data.X\n# gene names should be at data.var_names\n# cell names should be in data.obs_names and must be unique\n# categories (the meta data) should be columns at data.obs\n# any exception from the above assumptions will result in errors\n\nimport sys\nfile_name = sys.argv[1]\noutput_folder = sys.argv[2]\nprint(\"printing file_name\")\nprint(file_name)\nprint(\"printing output folder\")\nprint(output_folder)\n\nimport matplotlib; matplotlib.use('Agg');\nimport scanpy.api as sc\nfrom os.path import join\nfrom scipy.io import mmwrite\n\ndata = sc.read(file_name)\nprint(\"printing data\")\nprint(data)\n\n# save expression data\nexpression_data_filename = join(output_folder, 'expression')\ngene_expression = data.X\nmmwrite(expression_data_filename, gene_expression)\n\n# save gene names\ngene_names_file = join(output_folder, \"gene_names.csv\")\ndata.var_names.to_series().to_csv(gene_names_file)\n\n# save cell names\ncell_names_file = join(output_folder, \"cell_names.csv\")\ndata.obs_names.to_series().to_csv(cell_names_file)\n \n# get categories\nmeta_data = join(output_folder, \"meta_data.csv\")\ndata.obs.to_csv(meta_data)\n\n","repo_name":"haniffalab/scRNA-seq_analysis","sub_path":"pipelines/24_scanpy_to_seurat/scanpy_to_seurat.py","file_name":"scanpy_to_seurat.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"6075369345","text":"import json\n\n#每个line为一个视频数据,分为可访问(正常视频)的和不可访问的(已被删除的视频)\nglobal normal\nnormal = 0\nglobal un_normal\nun_normal = 0\n\n#以csv的格式存放清洗数据, 文件名text.csv\nfile = open(\"text.csv\",\"a\")\nfile.write(\"视频编号,观看数,收藏数,投币数,推荐数\\n\")\n\n#数据的第一次清洗\ndef clear(dicte):\n\tglobal normal\n\tglobal un_normal\n\tif dicte[\"message\"] != \"0\":\n\t\tun_normal += 1#计数反常视频\n\telse:\n\t\tnormal += 1#计数\n\t\tdata = dicte[\"data\"]\n\n\t\taid = str(data[\"aid\"])#视频编号\n\t\tview = str(data[\"view\"])#观看数\t\n\t\tfavorite = str(data[\"favorite\"])#收藏数\n\t\tcoin = str(data[\"coin\"])#硬币数\n\t\tlike = str(data[\"like\"])#点赞数\n\n\t\t#存放清洗数据\n\t\tconent = aid+\",\"+view+\",\"+favorite+\",\"+coin+\",\"+like\n\t\tfile.write(conent)\n\t\tfile.write('\\n')\n\t\t\n\nf = open(\"text.txt\",\"r+\") #打开原始数据\nfor line in f:\n\tif line != \"获取异常\":\n\t#每次处理一行,数据为字典样式\n\t\tdicte = json.loads(line)\n\t\tclear(dicte)\nprint(normal)\nprint(un_normal)\nf.close()\nfile.close()","repo_name":"Alucard-DKS/bilibili-users","sub_path":"clear.py","file_name":"clear.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31223205437","text":"from RFC import RFC\nfrom time import sleep\nimport deviceChange\nimport threading\nimport Queue\nimport mysql.connector as db\nimport thread\nimport glob\nimport logging\nimport logging.handlers\nimport os\nimport re\n\n\n\n\ndeviceCheck = deviceChange.device()\ndeviceCheck.base_devices()\ndeviceCheck.start()\nmacaddress = []\ndeviceList = []\n\n\n\nclass sqlQueue(threading.Thread):\n def __init__(self, switch, type):\n self.mostrecent = {}\n self.mostrecentstat = {}\n self.mostrecentstat_id = {}\n self.scriptconfig = {}\n self.requestdict = {}\n 
self.status = {}\n        self.counter = 0\n        self.pushlock = True\n        self.pulllock = True\n\n        threading.Thread.__init__(self)\n        #connection = db.MySQLConnection(host=self.adress, user=self.login, passwd=self.passw, db=self.datab)\n        self.switch = switch\n        self.type = type\n        self.pullcounter = 0\n        self.pullmax = 10\n\n        self.pushcounter = 0\n        self.pushmax = 10\n        self.running = True\n\n        self.mostrecentMode = {}\n        self.mostrecentProc = {}\n        self.entryIdCounter = {}\n        self.mostrecentupload = {}\n        self.maclist = []\n\n        self.newDevices = []\n        self.newDeviceToggle = False\n\n        self.devicelistMAC = []\n\n    def stopfn(self):\n        print(\"stopping\")\n        self.switch = False\n        exit()\n\n    def startfn(self):\n        self.switch = True\n\n    def run(self):\n        if self.type == \"push\":\n            self.pushqueue = Queue.Queue()\n            self.push()\n        elif self.type == \"pull\":\n            self.pullqueue = Queue.Queue()\n            self.pull()\n        elif self.type == \"most recent\":\n            self.mostRecent()\n        elif self.type == \"script\":\n            self.scriptConfig()\n        else:\n            raise Exception(\"Invalid type\")\n\n    def push(self):\n        while self.switch:\n            sleep(0.5)\n            if (self.pushqueue.qsize() > 0) and (self.pushcounter <= self.pushmax) and self.pushlock:\n                self.pushlock = False\n                thread.start_new_thread(self.push_thread,())\n                sleep(0.5)\n\n    def push_req(self, data, type):\n        entry = [data, type]\n        self.pushqueue.put(entry, False)\n        return\n\n    def push_thread(self):\n        self.pushcounter += 1\n        if (self.pushqueue.qsize() == 0):\n            self.pushcounter -= 1\n            return\n        # each entry is [data, size] as built by push_req()\n        data, size = self.pushqueue.get_nowait()\n        self.pushlock = True\n        connection = db.MySQLConnection(host=cfg.mysql['adress'], user=cfg.mysql['login'], passwd=cfg.mysql['passw'], db=cfg.mysql['datab'])\n        cursor = connection.cursor()\n        if size == \"large\":\n            for i in data:\n                cursor.execute(i)\n            connection.commit()\n        elif size == \"small\":\n            cursor.execute(data)\n            connection.commit()\n        self.pushqueue.task_done()\n        self.pushcounter -= 1\n        cursor.close()\n        connection.close()\n\n    def pull(self):\n        while self.switch:\n            sleep(0.5)\n            if (self.pullqueue.qsize() > 0) and (self.pullcounter <= self.pullmax) and (self.pulllock):\n                self.pulllock = False\n                thread.start_new_thread(self.pull_thread, ())\n\n    def pull_req(self, id, request, type):\n        if id not in self.status:\n            self.status[id] = False\n        if id not in self.requestdict:\n            self.requestdict[id] = \"\"\n        entry = [id, request]\n        self.pullqueue.put(entry, False)\n\n    def pull_thread(self):\n        self.pullcounter += 1\n        if (self.pullqueue.qsize() == 0):\n            self.pullcounter -= 1\n            return\n        id, request = self.pullqueue.get_nowait()\n        self.pulllock = True\n        connection = db.MySQLConnection(host=cfg.mysql['adress'], user=cfg.mysql['login'], passwd=cfg.mysql['passw'], db=cfg.mysql['datab'])\n        cursor = connection.cursor()\n\n        try:\n            cursor.execute(request)\n        except:\n            self.pullqueue.task_done()\n            cursor.close()\n            connection.close()\n            self.pullcounter -= 1\n            return  # bail out; the cursor is already closed\n        returnval = []\n        for item in cursor:\n            returnval.append(item)\n\n        self.requestdict[id] = returnval\n        self.status[id] = True\n        self.pullqueue.task_done()\n        cursor.close()\n        connection.close()\n        self.pullcounter -= 1\n\n    def pull_result(self, id):\n        self.status[id] = False\n        return self.requestdict.get(id, \"none\")\n\n    def currentstatus(self, id):\n        #pullstatus\n        return self.status.get(id, False)\n\n    def mostRecentSetup(self, scid):\n        self.mostrecentstat_id[scid] = False\n        self.mostrecentMode[scid] = False\n\n\n    def mostRecent(self):\n        while self.switch:\n            connection = 
db.MySQLConnection(host=cfg.mysql['adress'], user=cfg.mysql['login'], passwd=cfg.mysql['passw'], db=cfg.mysql['datab'])\n cursor = connection.cursor()\n sleep(0.5)\n cursor.execute(\"select count(*) from scan\")\n for item in cursor:\n count = item[0]\n if count > self.counter:\n self.counter = count\n cursor.execute(\"select macaddress, scan_data, time_stamp, entry_id from scan ORDER BY entry_id DESC limit 1\")\n for itemd in cursor:\n mac, data, time, entry_id = itemd\n if mac not in self.maclist:\n query = (\"insert into buffer (entry_id, scan_data, macaddress) values (0, '[0.0]', %s)\" % (mac))\n self.tableSetup(mac, \"buffer\", query)\n self.maclist.append(mac)\n self.newDevices.append(mac)\n self.newDeviceToggle = True\n \n listup = [entry_id, data, time]\n self.mostrecent[mac] = listup\n scid1 = (\"Peak%s\" % mac)\n scid2 = (\"Delta%s\" % mac)\n self.mostrecentstat_id[scid1] = True\n self.mostrecentstat_id[scid2] = True\n self.entryIdCounter[entry_id] = 0\n thread.start_new_thread(self.mostRecentUpload, (entry_id, mac, data))\n cursor.close()\n connection.close()\n\n def mostRecentScriptStatus(self, scid, status):\n self.mostrecentMode[scid] = status\n\n\n def mostRecentUpload(self, entry_id, mac, data):\n scidPeak = (\"Peak%s\" % mac)\n scidDelta = (\"Delta%s\" % mac)\n scidentryPeak = (\"Peak%s%s\" % (mac, entry_id))\n scidentryDelta = (\"Delta%s%s\" % (mac, entry_id))\n val = 0\n timeout = 0\n scidPeakToggle = False\n scidDeltaToggle = False\n while True:\n sleep(0.5)\n if self.mostrecentMode.get(scidPeak, False):\n val += 1\n scidPeakToggle = True\n if self.mostrecentMode.get(scidDelta, False):\n val += 1\n scidDeltaToggle = True\n compareval = self.entryIdCounter.get(entry_id, 100)\n if compareval == val:\n connection = db.MySQLConnection(host=cfg.mysql['adress'], user=cfg.mysql['login'], passwd=cfg.mysql['passw'], db=cfg.mysql['datab'])\n cursor = connection.cursor()\n updateitem = (\"Update buffer set entry_id = %s, scan_data = '%s' where macaddress = %s\" % (entry_id, data, mac))\n cursor.execute(updateitem)\n if scidPeakToggle:\n updateitemPeak = self.mostrecentupload.get(scidentryPeak, \"\")\n cursor.execute(updateitemPeak)\n if scidDeltaToggle:\n updateitemDelta = self.mostrecentupload.get(scidentryDelta, \"\")\n cursor.execute(updateitemDelta)\n connection.commit()\n print(\"%s-%s; Uploaded bulk\" % (entry_id, mac))\n cursor.close()\n connection.close()\n return\n \n else:\n val = 0\n timeout += 1\n\n if timeout == 300:\n print(\"timed out on %s\" % entry_id)\n self.mostrecentMode[scidPeak] = False\n self.mostrecentMode[scidDelta] = False\n return\n\n\n def mostRecentCompleted(self, entry_id, scid, request):\n scidentry = (\"%s%s\" % (scid, entry_id))\n val = self.entryIdCounter.get(entry_id, 0)\n val += 1\n self.entryIdCounter[entry_id] = val\n self.mostrecentupload[scidentry] = request\n\n \n def mostRecentStatus(self, id, type_id):\n return self.mostrecentstat_id.get(type_id, False)\n\n def mostRecentGet(self, id, type_id):\n self.mostrecentstat_id[type_id] = False\n return self.mostrecent.get(id,\"False\")\n \n def newDevicesGet(self):\n templist = []\n for items in self.newDevices:\n templist.append(items)\n self.newDevices = []\n self.newDeviceToggle = False\n return templist\n\n def newDevicesStatus(self):\n if self.newDeviceToggle:\n self.newDeviceToggle = False\n return True\n if not self.newDeviceToggle:\n return False\n\n\n def tableSetup(self, mac, script, query):\n connection = db.MySQLConnection(host=cfg.mysql['adress'], user=cfg.mysql['login'], 
passwd=cfg.mysql['passw'], db=cfg.mysql['datab'])\n        cursor = connection.cursor()\n        cursor.execute(\"select count(*) from %s where macaddress = %s\" % (script, mac))\n        for updatecount in cursor:\n            if updatecount[0] == 0:\n                cursor.execute(query)\n                connection.commit()\n            elif updatecount[0] > 1:\n                cursor.execute(\"delete from %s where macaddress = %s\" % (script, mac))\n                cursor.execute(query)\n                connection.commit()\n        cursor.close()\n        connection.close()\n\n    def scriptConfig(self):\n        while self.switch:\n            connection = db.MySQLConnection(host=cfg.mysql['adress'], user=cfg.mysql['login'], passwd=cfg.mysql['passw'], db=cfg.mysql['datab'])\n            cursor = connection.cursor()\n            cursor.execute(\"select macaddress, average_state, average_span, average_accuracy, peakhold_state, peakhold_toggle, deltalog_state, deltalog_scan, difflog_state, downsamp_state, downsamp_level from ScriptConfig\")\n            for items in cursor:\n                mac = items[0]\n                if mac not in self.devicelistMAC:\n                    self.devicelistMAC.append(mac)\n                self.scriptconfig[mac] = items\n            cursor.close()\n            connection.close()\n            sleep(5)\n\n    def scriptConfigGet(self, id):\n        return self.scriptconfig.get(id, False)\n\n    def stopAll(self):\n        self.running = False\n\n\nlogging.basicConfig(filename='logs/main.log',level=logging.DEBUG,format='%(asctime)s %(message)s')\nlogging.info('Started main.py')\nlogger = logging.getLogger()  # shared logger handed to each RFC worker\nDeviceDict = {}\n\npushqueue = sqlQueue(True, \"push\")\npullqueue = sqlQueue(True, \"pull\")\nmostrecentqueue = sqlQueue(True, \"most recent\")\nscriptqueue = sqlQueue(True, \"script\")\n\npushqueue.start()\npullqueue.start()\nmostrecentqueue.start()\nscriptqueue.start()\n\nprint(\"Setting up systems\")\nsleep(2)\n#setup device check and RFC\nif mostrecentqueue.newDevicesStatus():\n    new_devices = mostrecentqueue.newDevicesGet()\n    for items in new_devices:\n        print(\"new device found: %s\" % items)\n        items = RFC(items, pushqueue, pullqueue, mostrecentqueue, scriptqueue, logger)\n        items.start()\n        deviceList.append(items)\n\n\nwhile True:\n    try:\n        if mostrecentqueue.newDevicesStatus():\n            for items in mostrecentqueue.newDevicesGet():\n                print(\"new device found: %s\" % items)\n                items = RFC(items, pushqueue, pullqueue, mostrecentqueue, scriptqueue, logger)\n                items.start()\n                deviceList.append(items)\n\n    except (KeyboardInterrupt, SystemExit):\n        pushqueue.stopAll()\n        pullqueue.stopAll()\n        mostrecentqueue.stopAll()\n        scriptqueue.stopAll()\n        for i in range(len(deviceList)):\n            deviceList[i].stop_all()\n","repo_name":"Ludicrux/RF-Scout","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"5475612193","text":"class Solution:\n    def maxResult(self, nums: List[int], k: int) -> int:\n        # work backwards: dp[i] is the best score starting at index i.\n        # \"options\" holds indices i+1..i+k as a deque whose dp values\n        # stay in decreasing order, so options[0] is always the best jump.\n        from collections import deque\n        n = len(nums)\n        dp = [0] * n\n        dp[-1] = nums[-1]\n        options = deque([n - 1])\n        for i in range(n - 2, -1, -1):\n            while options[0] > i + k:\n                # too far to reach from i in one jump\n                options.popleft()\n            dp[i] = nums[i] + dp[options[0]]\n            while options and dp[options[-1]] <= dp[i]:\n                # dominated: never a better option than dp[i]\n                options.pop()\n            options.append(i)\n        return dp[0]
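# Quick local check of the solution above against the problem's published
# examples; assumes `from typing import List` is in scope (as on LeetCode):
if __name__ == "__main__":
    s = Solution()
    assert s.maxResult([1, -1, -2, 4, -7, 3], 2) == 7
    assert s.maxResult([10, -5, -2, 4, 0, 3], 3) == 17
    assert s.maxResult([1, -5, -20, 4, -1, 3, -6, -3], 2) == 0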
","repo_name":"sirbobthemarvelous/leetcode","sub_path":"Python Jump Game VI WIP.py","file_name":"Python Jump Game VI WIP.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"42323105898","text":"from nv_ui import *\nimport numpy as np\nimport os\nimport cv2\nfrom conf import *\nfrom utils import *\nfrom log import nv_logger\nimport threading\nfrom queue import Queue\nfrom analyze import analyze\nfrom threading import Thread\nfrom multi_tracker import *\nlog = nv_logger().log_msg\n\ntxt = \"\"\"\nTODO: nv.py (.local nv3):\n\t1. determine if face recognition box and person detection box exist with overlap.\n\t2. test intersection/union (url1='https://answers.opencv.org/question/67091/how-to-find-if-2-rectangles-are-overlapping-each-other/')\n\t3. make union/intersection functions(url='https://answers.opencv.org/question/90455/how-to-perform-intersection-or-union-operations-on-a-rect-in-python/')\n\tdef union(a,b):\n\t    x = min(a[0], b[0])\n\t    y = min(a[1], b[1])\n\t    w = max(a[0]+a[2], b[0]+b[2]) - x\n\t    h = max(a[1]+a[3], b[1]+b[3]) - y\n\t    return (x, y, w, h)\n\t\n\tdef intersection(a,b):\n\t    x = max(a[0], b[0])\n\t    y = max(a[1], b[1])\n\t    w = min(a[0]+a[2], b[0]+b[2]) - x\n\t    h = min(a[1]+a[3], b[1]+b[3]) - y\n\t    if w<0 or h<0: return () # or (0,0,0,0) ?\n\t    return (x, y, w, h)\n\t\nTODO: Tracker:\n\t1. implement into nv.py, using self.is_tracking and frame skip to avoid detection time.\n\t2. explore possibility of using centroid tracking, possibly create a centroid multi_tracker class\nTODO: io/queue class object - handles reading and replacing message objects to the queue\n\tholds command, process (detection), and image feed queues.\n\tpass object to all threads, ensure each thread can get instructions/resources\n\tfrom that object ALONE.\n\tOpening communication/registration of sender/receiver:\n\t\tNeeds to contain the pid of script file and thread type (object.thread_type property)\nTODO: Clean up imports. either entire cv2 and specify targets with '.'s, or get anal and specify all of which we're using... no reason for both. Me bein' lazy.\nTODO: integrate object tracking algorithm (centroid???)\nTODO: PTZ():\n\t1. integrate serial servo controller\n\t2. integrate ipcamera ptz requests control\n\t3. design, implement, debug simulated ptz control via zoom/crop\n\t4. design, implement, debug virtual gamer/control \n\"\"\"\nlog(txt, 'info')\n#input(\"READ FIRST!!! 
Press a key to continue...\")\nclass nv(threading.Thread):\n\tdef __init__(self, camera_id, cap=None, run_ui=True):\n\t\tself.run_ui = run_ui\n\t\tconf = readConf()\n\t\tif cap is None:\n\t\t\tsrc = conf['cameras'][camera_id]['src']['url']\n\t\t\tself.cap = cv2.VideoCapture(src)\n\t\telse:\n\t\t\tself.cap = cap.cap\n\t\tself.camera_id = camera_id\n\t\tself.opts = conf['cameras'][self.camera_id]\n\t\tself.processing = self.opts['processing']\n\t\tself.fps = 30\n\t\tself.w = None\n\t\tself.h = None\n\t\tself.maxsize = None\n\t\tself.has_ptz = self.opts['has_ptz']\n#\t\tif self.ptz == None:\n#\t\t\tself.ptz = ptz_control()\n#\t\t\tself.ptz.set_speed(self.ptz_speed)\n#\t\telse:\n#\t\t\tself.ptz = None\n#\t\tif self.run_ui:\n#\t\t\tprint(\"Running ui..\")\n#\t\t\tself.ui = self.start_ui()\n#\t\t\tprint(\"Ui running!\")\n#\t\tif self.has_ptz:\n#\t\t\tself.ui.ptz.events = True\n#\t\t\tself.preset = None\n#\t\t\ttour_dest = 0\n\t\tself.runloop = False\n\t\tif self.processing:\n\t\t\tself.det_thread = self.setup_detection()\n\t\telse:\n\t\t\tself.det_thread = None\n\t\tself.tryct = 500\n\t\tself.tries = 0\n\t\tself.is_tracking = False\n\t\tself.mt = multi_tracker(camera_id=self.camera_id)\n\t\tself.trackers = {}\n\t\tlog(f\"nv.init():Display loop initialized!\", 'info')\n\n\n\tdef setup_detection(self):\n\t\tself.maxsize = self.opts['maxsize']\n\t\tself.in_q = Queue(maxsize=self.maxsize)\n\t\tself.out_q = Queue(maxsize=self.maxsize)\n\t\t#if self.has_ptz:\n\t\t#\tif self.ui.ptz is None:\n\t\t#\t\tself.ui.ptz = ptz_control(self.camera_id)\n\t\t#\tt = Thread(target=analyze, args=(self.camera_id,self.in_q,self.out_q,self.ui.ptz))\n\t\t#else:\n\t\tt = Thread(target=analyze, args=(self.camera_id,self.in_q,self.out_q))\n\t\tt.setDaemon(True)\n\t\treturn t\n\n\tdef det_send(self, img):\n\t\tif not self.in_q.full():\n\t\t\tself.in_q.put_nowait(img)\n\t\t\treturn True\n\t\telse:\n\t\t\tif self.tries < self.tryct:  # strict <, so the clear branch below is reachable\n\t\t\t\tself.tries += 1\n\t\t\t\t#print(f\"attempts:{self.tries}/{self.tryct}, full:{self.in_q.full()}, empty:{self.in_q.empty()}, unfinished:{self.in_q.unfinished_tasks}, maxsize:{self.in_q.maxsize}, qsize:{self.in_q.qsize()}\")\n\t\t\t\treturn True\n\t\t\telif self.tries == self.tryct:\n\t\t\t\tlog(f\"nv.det_send:Detection input has been too full, too long! This taint good...\", 'error')\n\t\t\t\ttry:\n\t\t\t\t\twhile not self.in_q.empty():\n\t\t\t\t\t\tself.in_q.get_nowait()\n\t\t\t\t\t\tself.in_q.task_done()\n\t\t\t\t\tlog(f\"nv.det_send:Detection queue cleared!\", 'warning')\n\t\t\t\t\tself.tries = 0  # reset the counter after a successful clear\n\t\t\t\t\treturn True\n\t\t\t\texcept Exception as e:\n\t\t\t\t\ttxt = f\"nv.det_send:Tried to clear queue, failed, wtf????\"\n\t\t\t\t\traise Exception('Shit Be Broken', txt)\n\n\tdef det_recv(self):\n\t\tif not self.out_q.empty():\n\t\t\tdets = self.out_q.get_nowait()\n\t\t\ttry:\n\t\t\t\tself.out_q.task_done()\n\t\t\t\treturn True, dets\n\t\t\texcept Exception as e:\n\t\t\t\tlog(f\"nv.det_recv:Got image but couldn't flag as finished! Unknown error ({e})\", 'error')\n\t\t\t\treturn False, None\n\t\telse:\n\t\t\treturn False, None\n\n\tdef test_inner(self, det1, det2):\n\t\t#tests whether one box is inside another. 
(tracking a face and the person at the same time)\n\t\tif det1.area > det2.area:#test which box is larger, identify each\n\t\t\touter = det1\n\t\t\tinner = det2\n\t\telse:#det2 is at least as large; ties keep det2 as the outer box\n\t\t\touter = det2\n\t\t\tinner = det1\n\n\t\t#if the coordinates of inner exist completely inside of outer, then it's a face.\n\t\tif inner.l >= outer.l and inner.r <= outer.r and inner.t >= outer.t and inner.b <= outer.b:\n\t\t\t#set detected face name to person detection's class_name (overwrite)\n\t\t\touter.class_name = inner.class_name\n\t\t\treturn True, outer\n\t\telse:\n\t\t\treturn False, None\n\n\n\tdef nv_run(self):\n\t\tself.nv_thread = Thread(target=self.run)\n\t\tself.nv_thread.setDaemon(True)\n\t\tself.nv_thread.start()\n\n\tdef run(self):\n\t\tif self.processing:\n\t\t\tself.det_thread.start()\n\t\tlog(f\"nv.init():Starting nv loop...\", 'info')\n\t\tcv2.namedWindow('image', cv2.WINDOW_NORMAL)\n\t\tlog(f\"nv.init():Display loop started!\", 'info')\n\t\tself.runloop = True\n\t\twhile self.runloop:\n\t\t\tret, img = self.cap.read()\n\t\t\tif ret:\n\t\t\t\tif self.processing:\n\t\t\t\t\tif not self.is_tracking:\n\t\t\t\t\t\tret = self.det_send(img)\n\t\t\t\t\t\tif not ret:\n\t\t\t\t\t\t\tlog(f\"nv.run:Unable to send to detector! Aborting...\", 'error')\n\t\t\t\t\t\t\tself.runloop = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tdret, dets = self.det_recv()\n\t\t\t\t\t\tif dret:\n\t\t\t\t\t\t\tfor det in dets:\n\t\t\t\t\t\t\t\t#print(\"detection type:\", det.type)\n\t\t\t\t\t\t\t\tself.is_tracking, self.trackers = self.mt.add(img=img, box=det.box, class_name=det.class_name, confidence=det.confidence, max_age=174)\n\t\t\t\t\t\t\t\tif self.is_tracking and self.trackers != {}:\n\t\t\t\t\t\t\t\t\timg = self.draw_on_image(img, self.trackers)\n\t\t\t\t\telse:#if currently tracking an object...\n\t\t\t\t\t\tself.is_tracking, self.trackers = self.mt.update(img=img)\n\t\t\t\t\t\t#print(\"tracking:\", self.is_tracking)\n\t\t\t\t\t\tif self.is_tracking:\n\t\t\t\t\t\t\timg = self.draw_on_image(img, self.trackers)\n\t\t\t\tcv2.imshow('image', img)\n\t\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\t\tself.stop()\n\t\t\t\t\tbreak\n\t\t\t\tif self.w is None and self.h is None:\n\t\t\t\t\tself.w, self.h = int(img.shape[1]), int(img.shape[0])\n\t\t\t\t\tcv2.resizeWindow('image', (self.w, self.h))\n\n\tdef stop(self):\n\t\tself.cap.release()\n\t\texit()\n\n\tdef draw_on_image(self, img, trackers=None):\n\t\tif trackers is None:\n\t\t\ttrackers = self.trackers\n\t\tdrawn = img.copy()\n\t\tfor tid in trackers.keys():\n\t\t\tt = trackers[tid]\n\t\t\t#print(f\"box={t.box}, name={t.class_name}, size={t.size}, center={t.center}, area={t.area}\")\n\t\t\t#draw on the copy, not the source frame\n\t\t\tdrawn = cv2.rectangle(drawn, (int(t.l), int(t.t)), (int(t.r), int(t.b)), t.color, t.line_size)\n\t\t\ty = t.t - 30\n\t\t\tcoords = (int(t.l), int(y))\n\t\t\tdrawn = cv2.putText(drawn, t.class_name, coords, t.font, t.font_scale, t.color, t.line_size, t.line_type)\n\t\treturn drawn\n\n\t#get all current class properties (use instead of opts dict)\n\tdef get_props(self):\n\t\treturn self.__dict__\n\n\t#update a class property to change functionality on the fly\n\tdef set_props(self, prop, val=None):\n\t\ttry:\n\t\t\tself.__dict__[prop] = val\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\tlog(f\"DISPLAY_THREAD:{self.camera_id}:PROPERTY_SET:Var={prop}, Value:{val} Error:{e}\", 'info')\n\t\t\treturn False\n\nif __name__ == \"__main__\":\n\timport sys\n\ttry:\n\t\tcamera_id = 
int(sys.argv[1])\n\texcept (IndexError, ValueError):\n\t\tcamera_id = 0\n\tfrom capture import *\n\tfrom nv import nv\n\tcap = capture(camera_id)\n\td = nv(camera_id, cap)\n\td.nv_run()\n\t#ui = nv_ui(camera_id)\n\twhile True:\n\t\t#ui.ui_loop()\n\t\tpass\n","repo_name":"darthmonkey2004/nv3","sub_path":"nv.py","file_name":"nv.py","file_ext":"py","file_size_in_byte":8296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18768059028","text":"'''\nCreated on 23.02.2018\nDatabase interface tests which are testing\ncorrectness of the tables' schemas and correctness of tables' creation\n\nReference: Code taken and modified from PWP2018 exercise\n'''\n\nimport sqlite3, unittest\n\nfrom src.db import engine, connection, constants\n\n#Path to the database file, different from the deployment db\nDB_PATH = 'db/goalz_test.db'\nENGINE = engine.Engine(DB_PATH)\n\nINITIAL_USERS_SIZE = 6\nINITIAL_GOALS_SIZE = 9\nINITIAL_RESOURCES_SIZE = 5\n\nclass CreatedTablesTestCase(unittest.TestCase):\n    '''\n    Test cases for the created tables.\n    '''\n    #INITIATION AND TEARDOWN METHODS\n    @classmethod\n    def setUpClass(cls):\n        '''\n        Creates the database structure. Removes first any preexisting database file.\n        '''\n        print(\"Testing \", cls.__name__)\n        ENGINE.remove_database()\n        ENGINE.create_tables()\n\n    @classmethod\n    def tearDownClass(cls):\n        '''Remove the testing database'''\n        print(\"Testing ENDED for \", cls.__name__)\n        ENGINE.remove_database()\n\n    def setUp(self):\n        '''\n        Populates the database\n        '''\n        try:\n            #This method loads the initial values from goalz_data_dump.sql\n            ENGINE.populate_tables()\n            #Creates a Connection instance to use the API\n            self.connection = ENGINE.connect()\n        except Exception:\n            #For instance if there is an error while populating the tables;\n            #clean up and re-raise so the failure is visible instead of a missing connection\n            ENGINE.clear()\n            raise\n\n    def tearDown(self):\n        '''\n        Close underlying connection and remove all records from database\n        '''\n        self.connection.close()\n        ENGINE.clear()\n\n    def test_users_table_schema(self):\n        '''\n        Checks that the USERS table has the right schema.\n        '''\n        print('('+self.test_users_table_schema.__name__+')', \\\n              self.test_users_table_schema.__doc__)\n\n        con = self.connection.con\n        with con:\n            c = con.cursor()\n\n            # Retrieve column information\n            # Every column will be represented by a tuple with the following attributes:\n            # (id, name, type, notnull, default_value, primary_key)\n            c.execute('PRAGMA TABLE_INFO({})'.format('users'))\n\n            # Collect names in a list\n            result = c.fetchall()\n            names = [tup[1] for tup in result]\n            types = [tup[2] for tup in result]\n            real_names=['user_id','nickname','registration_date','password']\n            real_types=['INTEGER','TEXT','INTEGER','TEXT']\n            # Check that names and types are correct\n            self.assertEqual(names, real_names) \n            self.assertEqual(types, real_types)\n\n    def test_users_table_created(self):\n        '''\n        Checks that the USERS table initially contains INITIAL_USERS_SIZE number of users (check goalz_data_dump.sql).\n        '''\n        print('('+self.test_users_table_created.__name__+')', \\\n              self.test_users_table_created.__doc__)\n        #Create the SQL Statement\n        keys_on = 'PRAGMA foreign_keys = ON'\n        query = 'SELECT * FROM users'\n        #Get the sqlite3 con from the Connection instance\n        con = self.connection.con\n        with con:\n            #Cursor and row initialization\n            con.row_factory = sqlite3.Row\n            cur = con.cursor()\n            #Provide support for foreign keys\n            cur.execute(keys_on)\n            #Execute main SQL Statement\n            cur.execute(query)\n            users = cur.fetchall()\n            #Assert\n            self.assertEqual(len(users), 
INITIAL_USERS_SIZE)\n\n def test_user_profile_table_schema(self):\n '''\n Checks that the USER_PROFILE table has the right schema.\n '''\n print('(' + self.test_user_profile_table_schema.__name__ + ')', \\\n self.test_user_profile_table_schema.__doc__)\n\n con = self.connection.con\n with con:\n c = con.cursor()\n\n # Retrieve column information\n # Every column will be represented by a tuple with the following attributes:\n # (id, name, type, notnull, default_value, primary_key)\n c.execute('PRAGMA TABLE_INFO({})'.format('user_profile'))\n\n # Collect names in a list\n result = c.fetchall()\n names = [tup[1] for tup in result]\n types = [tup[2] for tup in result]\n real_names = ['user_profile_id', 'user_id', 'firstname', 'lastname', 'email', 'website', 'rating', 'age', 'gender']\n real_types = ['INTEGER', 'INTEGER', 'TEXT', 'TEXT', 'TEXT', 'TEXT', 'REAL', 'INTEGER', 'TEXT']\n # Check that names and types are correct\n self.assertEqual(names, real_names)\n self.assertEqual(types, real_types)\n\n # Check that foreign keys are correctly set\n foreign_keys = [('users', 'user_id', 'user_id')]\n c.execute('PRAGMA FOREIGN_KEY_LIST({})'.format('user_profile'))\n result = c.fetchall()\n result_filtered = [(tup[2], tup[3],tup[4]) for tup in result]\n for tup in result_filtered:\n self.assertIn(tup, foreign_keys)\n\n def test_user_profile_table_created(self):\n '''\n Checks that the USER_PROFILE table initially contains INITIAL_USERS_SIZE number of user_profiles (check goalz_data_dump.sql).\n '''\n print('(' + self.test_user_profile_table_created.__name__ + ')', \\\n self.test_user_profile_table_created.__doc__)\n # Create the SQL Statement\n keys_on = 'PRAGMA foreign_keys = ON'\n query = 'SELECT * FROM user_profile'\n # Get the sqlite3 con from the Connection instance\n con = self.connection.con\n with con:\n # Cursor and row initialization\n con.row_factory = sqlite3.Row\n cur = con.cursor()\n # Provide support for foreign keys\n cur.execute(keys_on)\n # Execute main SQL Statement\n cur.execute(query)\n user_profiles = cur.fetchall()\n # Assert\n self.assertEqual(len(user_profiles), INITIAL_USERS_SIZE)\n\n def test_goals_table_schema(self):\n '''\n Checks that the GOALS table has the right schema.\n '''\n print('(' + self.test_goals_table_schema.__name__ + ')', \\\n self.test_goals_table_schema.__doc__)\n\n con = self.connection.con\n with con:\n c = con.cursor()\n\n # Retrieve column information\n # Every column will be represented by a tuple with the following attributes:\n # (id, name, type, notnull, default_value, primary_key)\n c.execute('PRAGMA TABLE_INFO({})'.format('goals'))\n\n # Collect names in a list\n result = c.fetchall()\n names = [tup[1] for tup in result]\n types = [tup[2] for tup in result]\n real_names = ['goal_id', 'parent_id', 'user_id', 'title', 'topic', 'description', 'deadline', 'status']\n real_types = ['INTEGER', 'INTEGER', 'INTEGER', 'TEXT', 'TEXT', 'TEXT', 'INTEGER', 'INTEGER']\n # Check that names and types are correct\n self.assertEqual(names, real_names)\n self.assertEqual(types, real_types)\n\n # Check that foreign keys are correctly set\n foreign_keys = [('goals', 'parent_id', 'goal_id'),\n ('users', 'user_id', 'user_id')]\n c.execute('PRAGMA FOREIGN_KEY_LIST({})'.format('goals'))\n result = c.fetchall()\n result_filtered = [(tup[2], tup[3], tup[4]) for tup in result]\n for tup in result_filtered:\n self.assertIn(tup, foreign_keys)\n\n def test_goals_table_created(self):\n '''\n Checks that the GOALS table initially contains INITIAL_GOALS_SIZE number of goals 
(check goalz_data_dump.sql).\n        '''\n        print('(' + self.test_goals_table_created.__name__ + ')', \\\n              self.test_goals_table_created.__doc__)\n        # Create the SQL Statement\n        keys_on = 'PRAGMA foreign_keys = ON'\n        query = 'SELECT * FROM goals'\n        # Get the sqlite3 con from the Connection instance\n        con = self.connection.con\n        with con:\n            # Cursor and row initialization\n            con.row_factory = sqlite3.Row\n            cur = con.cursor()\n            # Provide support for foreign keys\n            cur.execute(keys_on)\n            # Execute main SQL Statement\n            cur.execute(query)\n            goals = cur.fetchall()\n            # Assert\n            self.assertEqual(len(goals), INITIAL_GOALS_SIZE)\n\n    def test_resources_table_schema(self):\n        '''\n        Checks that the RESOURCES table has the right schema.\n        '''\n        print('(' + self.test_resources_table_schema.__name__ + ')', \\\n              self.test_resources_table_schema.__doc__)\n\n        con = self.connection.con\n        with con:\n            c = con.cursor()\n\n            # Retrieve column information\n            # Every column will be represented by a tuple with the following attributes:\n            # (id, name, type, notnull, default_value, primary_key)\n            c.execute('PRAGMA TABLE_INFO({})'.format('resources'))\n\n            # Collect names in a list\n            result = c.fetchall()\n            names = [tup[1] for tup in result]\n            types = [tup[2] for tup in result]\n            real_names = ['resource_id', 'goal_id', 'user_id', 'title', 'link', 'topic', 'description', 'required_time', 'rating']\n            real_types = ['INTEGER', 'INTEGER', 'INTEGER', 'TEXT', 'TEXT', 'TEXT', 'TEXT', 'INTEGER', 'REAL']\n            # Check that names and types are correct\n            self.assertEqual(names, real_names)\n            self.assertEqual(types, real_types)\n\n            # Check that foreign keys are correctly set\n            foreign_keys = [('goals', 'goal_id', 'goal_id'),\n                            ('users', 'user_id', 'user_id')]\n            c.execute('PRAGMA FOREIGN_KEY_LIST({})'.format('resources'))\n            result = c.fetchall()\n            result_filtered = [(tup[2], tup[3], tup[4]) for tup in result]\n            for tup in result_filtered:\n                self.assertIn(tup, foreign_keys)\n\n    def test_resources_table_created(self):\n        '''\n        Checks that the RESOURCES table initially contains INITIAL_RESOURCES_SIZE number of resources (check goalz_data_dump.sql).\n        '''\n        print('(' + self.test_resources_table_created.__name__ + ')', \\\n              self.test_resources_table_created.__doc__)\n        # Create the SQL Statement\n        keys_on = 'PRAGMA foreign_keys = ON'\n        query = 'SELECT * FROM resources'\n        # Get the sqlite3 con from the Connection instance\n        con = self.connection.con\n        with con:\n            # Cursor and row initialization\n            con.row_factory = sqlite3.Row\n            cur = con.cursor()\n            # Provide support for foreign keys\n            cur.execute(keys_on)\n            # Execute main SQL Statement\n            cur.execute(query)\n            resources = cur.fetchall()\n            # Assert\n            self.assertEqual(len(resources), INITIAL_RESOURCES_SIZE)\n\n    def test_foreign_keys_status(self):\n        '''\n        Checks that the foreign keys support is set and unset correctly.\n        '''\n        print('(' + self.test_foreign_keys_status.__name__ + ')', \\\n              self.test_foreign_keys_status.__doc__)\n        # Checks that foreign key support is activated\n        success = self.connection.set_foreign_keys_support()\n        if success:\n            fk_status = self.connection.check_foreign_keys_status()\n            self.assertTrue(fk_status)\n\n        # Checks that foreign key support is deactivated\n        success = self.connection.unset_foreign_keys_support()\n        if success:\n            fk_status = self.connection.check_foreign_keys_status()\n            self.assertFalse(fk_status)\n\nif __name__ == '__main__':\n    print('Start running database tests')\n    
unittest.main()\n","repo_name":"ChouaibHamek/goalz_api","sub_path":"test/database_api_tests_tables.py","file_name":"database_api_tests_tables.py","file_ext":"py","file_size_in_byte":12041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34742618579","text":"from common.helpers.common_dependency_helper import register_common_mox_dependencies\nfrom common.utilities.inversion_of_control import dependencies, Dependency\nfrom core.common.utilities.errors import BadRequestError, NotFoundError, DataError\nfrom core.common.utilities.helpers import generate_id\nfrom core.service.svc_main.implementation.service_endpoints.endpoint_field_data import STORE_COUNT_DB_FIELDS\nfrom core.service.svc_main.implementation.service_endpoints.endpoint_helpers.store_count_file_updater import StoreCountFileUpdater\nimport mox\n\n\n__author__ = 'vgold'\n\n\nclass StoreCountFileUpdaterTests(mox.MoxTestBase):\n\n def setUp(self):\n\n # call parent set up\n super(StoreCountFileUpdaterTests, self).setUp()\n\n # register mock dependencies\n register_common_mox_dependencies(self.mox)\n\n # Set mock attributes on WorkflowService instance for calls to record\n self.mock = self.mox.CreateMock(StoreCountFileUpdater)\n self.mock.main_access = self.mox.CreateMockAnything()\n self.mock.main_access.wfs = self.mox.CreateMockAnything()\n self.mock.main_access.mds = self.mox.CreateMockAnything()\n self.mock.main_param = self.mox.CreateMockAnything()\n self.mock.main_param.mds = self.mox.CreateMockAnything()\n self.mock.StoreCountTimeSeriesMaintainer = self.mox.CreateMockAnything()\n\n # Set mock attributes on WorkflowService instance for calls to ignore\n self.mock.cfg = Dependency(\"MoxConfig\").value\n self.mock.logger = Dependency(\"FlaskLogger\").value\n\n # Create caller context\n self.context = {\n \"user_id\": 1,\n \"source\": \"test_store_count_file_updater.py\"\n }\n\n self.mock.context = self.context\n\n def doCleanups(self):\n\n super(StoreCountFileUpdaterTests, self).doCleanups()\n dependencies.clear()\n\n ##########################################################################\n # StoreCountFileUpdater._validate_request_data()\n\n def test_validate_request_data(self):\n\n updater = StoreCountFileUpdater.__new__(StoreCountFileUpdater)\n\n correct_request_data1 = {\n \"company_id\": 1,\n \"e_store_count_t_1\": 1,\n \"f_A_t_1_needs_review\": 1,\n \"f_E_t_1_needs_review\": 1\n }\n\n correct_request_data2 = {\n \"company_id\": 1,\n \"e_store_count_t_1\": \"1\",\n \"f_A_t_1_needs_review\": 1,\n \"f_E_t_1_needs_review\": 1\n }\n\n updater.request_data = {}\n with self.assertRaises(BadRequestError):\n updater._validate_request_data()\n\n updater.request_data = dict(correct_request_data1, company_id=\"\")\n with self.assertRaises(BadRequestError):\n updater._validate_request_data()\n\n updater.request_data = dict(correct_request_data1, e_store_count_t_1=\"\")\n with self.assertRaises(BadRequestError):\n updater._validate_request_data()\n\n updater.request_data = dict(correct_request_data1, f_A_t_1_needs_review=\"\")\n with self.assertRaises(BadRequestError):\n updater._validate_request_data()\n\n updater.request_data = dict(correct_request_data1, f_E_t_1_needs_review=\"\")\n with self.assertRaises(BadRequestError):\n updater._validate_request_data()\n\n updater.request_data = dict(correct_request_data1, e_store_count_t_1=\"asdf\")\n with self.assertRaises(BadRequestError):\n updater._validate_request_data()\n\n updater.request_data = correct_request_data1\n result = 
updater._validate_request_data()\n self.assertEqual(result, updater)\n\n updater.request_data = correct_request_data2\n result = updater._validate_request_data()\n self.assertEqual(result, updater)\n\n ##########################################################################\n # StoreCountFileUpdater._get_store_count_file_to_update()\n\n def test_get_store_count_file_to_update__with_file(self):\n\n file_id = str(generate_id())\n company_id = str(generate_id())\n\n params = \"params\"\n self.mock.main_param.mds.create_params(origin=\"_get_store_count_file_to_update\", resource=\"find_entities_raw\",\n query=mox.IgnoreArg(), entity_fields=mox.IgnoreArg(), flatten=True).AndReturn({\"params\": params})\n\n mds_file = 'file'\n self.mock.main_access.mds.call_find_entities_raw(\"file\", params).AndReturn([mds_file])\n\n self.mox.ReplayAll()\n\n self.mock.STORE_COUNT_DB_FIELDS = STORE_COUNT_DB_FIELDS\n self.mock.file_id = file_id\n self.mock.company_id = company_id\n result = StoreCountFileUpdater._get_store_count_file_to_update(self.mock)\n\n self.assertEqual(result, self.mock)\n self.assertEqual(mds_file, self.mock.file)\n\n def test_get_store_count_file_to_update__no_file(self):\n\n file_id = str(generate_id())\n company_id = str(generate_id())\n\n params = \"params\"\n self.mock.main_param.mds.create_params(origin=\"_get_store_count_file_to_update\", resource=\"find_entities_raw\",\n query=mox.IgnoreArg(), entity_fields=mox.IgnoreArg(), flatten=True).AndReturn({\"params\": params})\n\n self.mock.main_access.mds.call_find_entities_raw(\"file\", params).AndReturn([])\n\n self.mox.ReplayAll()\n\n self.mock.STORE_COUNT_DB_FIELDS = STORE_COUNT_DB_FIELDS\n self.mock.file_id = file_id\n self.mock.company_id = company_id\n with self.assertRaises(NotFoundError):\n StoreCountFileUpdater._get_store_count_file_to_update(self.mock)\n\n def test_get_store_count_file_to_update__multiple_files(self):\n\n file_id = str(generate_id())\n company_id = str(generate_id())\n\n params = \"params\"\n self.mock.main_param.mds.create_params(origin=\"_get_store_count_file_to_update\", resource=\"find_entities_raw\",\n query=mox.IgnoreArg(), entity_fields=mox.IgnoreArg(), flatten=True).AndReturn({\"params\": params})\n\n self.mock.main_access.mds.call_find_entities_raw(\"file\", params).AndReturn([1, 1])\n\n self.mox.ReplayAll()\n\n self.mock.STORE_COUNT_DB_FIELDS = STORE_COUNT_DB_FIELDS\n self.mock.file_id = file_id\n self.mock.company_id = company_id\n with self.assertRaises(DataError):\n StoreCountFileUpdater._get_store_count_file_to_update(self.mock)\n\n ##########################################################################\n # StoreCountFileUpdater._update_store_count_file()\n\n def test_update_store_count_file(self):\n\n a_store_count_t_1 = 8\n\n f_A_t_1_needs_review = 1\n f_E_t_1_needs_review = 1\n e_store_count_t_1 = 10\n\n _file = {\n 'data.a_store_count_t_1': a_store_count_t_1\n }\n\n file_id = generate_id()\n\n file_data = {\n \"data.f_A_t_1_needs_review\": f_A_t_1_needs_review,\n \"data.f_E_t_1_needs_review\": f_E_t_1_needs_review,\n \"data.e_store_count_t_1\": e_store_count_t_1,\n \"data.a_store_count_t_1\": a_store_count_t_1,\n \"data.delta_end\": a_store_count_t_1 - e_store_count_t_1,\n \"data.delta_end_percent\": float(a_store_count_t_1 - e_store_count_t_1) / a_store_count_t_1\n }\n\n self.mock.main_access.mds.call_update_entity('file', file_id, self.context, field_data = file_data)\n\n self.mox.ReplayAll()\n\n self.mock.f_A_t_1_needs_review = f_A_t_1_needs_review\n self.mock.f_E_t_1_needs_review = 
f_E_t_1_needs_review\n        self.mock.e_store_count_t_1 = e_store_count_t_1\n        self.mock.file = _file\n        self.mock.file_id = file_id\n        result = StoreCountFileUpdater._update_store_count_file(self.mock)\n\n        self.assertEqual(result, self.mock)\n\n    ##########################################################################\n    # StoreCountFileUpdater._maintain_store_count_time_series()\n\n    def test_maintain_store_count_time_series(self):\n\n        company_id = 1\n        company_ids = [company_id]\n\n        maintainer = self.mox.CreateMockAnything()\n        self.mock.StoreCountTimeSeriesMaintainer(company_ids, self.context).AndReturn(maintainer)\n        maintainer.run()\n\n        self.mox.ReplayAll()\n\n        self.mock.company_id = company_id\n        result = StoreCountFileUpdater._maintain_store_count_time_series(self.mock)\n\n        self.assertEqual(result, self.mock)\n\n","repo_name":"erezrubinstein/aa","sub_path":"tests/unit_tests/core_tests/service_tests/main_tests/test_store_count_file_updater.py","file_name":"test_store_count_file_updater.py","file_ext":"py","file_size_in_byte":8493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8680031782","text":"#Grab a key from an external file with the format \"[key_name]:[key]\"\nkeyfile_name = \"keyfile\"\n\ndef get_key(key_name):\n\twith open(keyfile_name) as f:\n\t\tkeylist = f.readlines()\n\tfor x in keylist:\n\t\t#split only on the first colon so key values containing ':' survive intact\n\t\tif x.split(\":\", 1)[0] == key_name:\n\t\t\treturn x.split(\":\", 1)[1].strip()\n\tprint(\"Key \" + key_name + \" not found.\")\n\treturn None\n","repo_name":"sircinnamon/senseHAT","sub_path":"keyGrabber.py","file_name":"keyGrabber.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23188756475","text":"#Tarea View\nimport json\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters, viewsets, status\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\nfrom django.db.models import Sum\nfrom django.core.files import File\nfrom api.permisos import CatedraticoUser, EstudianteUser\n\nfrom api.models import Tarea\nfrom api.models import AsignacionCatedraticoCurso\nfrom api.serializers import TareaSerializer, TareaRegistroSerializer\n\nclass TareaViewset(viewsets.ModelViewSet):\n    queryset = Tarea.objects.filter(activo=True)\n    #define permissions for this resource\n    permission_classes = [CatedraticoUser | EstudianteUser]\n\n    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n    filter_fields = (\"tituloTarea\",)\n    search_fields = (\"tituloTarea\",)\n    ordering_fields = (\"tituloTarea\",)\n\n    def get_serializer_class(self):\n        \"\"\"Define the serializer for the API\"\"\"\n        if self.action == 'retrieve':\n            return TareaSerializer\n        #default for every other action so DRF never receives None\n        return TareaRegistroSerializer\n\n    def create(self, request):\n        try:\n            data = request.data\n            archivo = data.get(\"archivo\")\n            datos = json.loads(data[\"data\"])\n            \n            #validate the incoming data with the serializer\n            serializer = TareaRegistroSerializer(data=datos)\n            \n            if serializer.is_valid():\n                #insert the data after validation\n                Tarea.objects.create(\n                    tituloTarea = datos.get(\"tituloTarea\"),\n                    descripcion = datos.get(\"descripcion\"),\n                    fechaHoraEntrega = datos.get(\"fechaHoraEntrega\"),\n                    valorTarea = datos.get(\"valorTarea\"),\n                    archivo = File(archivo),\n                    asignacion = AsignacionCatedraticoCurso.objects.get(pk = datos.get(\"id\"))\n                )\n                #a plain dict is JSON-serializable, unlike the set literal used before\n                return Response({'detail': 'Registro creado'}, status=status.HTTP_200_OK)\n            #invalid data: report the serializer errors instead of returning a misleading 200\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n        except Exception as e:\n            return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n\n    def update(self, request, pk):\n        try:\n            \n            data = request.data\n            archivo = data.get(\"archivo\")\n            datos = json.loads(data[\"data\"])\n\n            #update the existing record\n            tarea = Tarea.objects.get(pk=pk)\n            if archivo is not None:\n                if tarea.archivo is not None:\n                    tarea.archivo.delete()\n                tarea.archivo = File(archivo)\n            \n            tarea.tituloTarea = datos.get(\"tituloTarea\")\n            tarea.descripcion = datos.get(\"descripcion\")\n            tarea.fechaHoraEntrega = datos.get(\"fechaHoraEntrega\")\n            tarea.valorTarea = datos.get(\"valorTarea\")\n            tarea.save()\n\n            return Response({'detail': 'registro modificado'}, status=status.HTTP_200_OK)\n        except Exception as e:\n            return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n    \n\n    def list(self, request):\n        id = request.query_params.get(\"id\")\n        listar = Tarea.objects.filter(asignacion = id, activo=True)\n        #paginate the result\n        paginador = PageNumberPagination()\n        resultado_pagina = paginador.paginate_queryset(listar, request)\n        serializer = TareaSerializer(resultado_pagina, many=True)\n        return paginador.get_paginated_response(serializer.data)\n\n\n    def destroy(self, request, pk):\n        try:\n            tarea = Tarea.objects.get(pk=pk)\n            if tarea.archivo is not None:\n                tarea.archivo.delete()\n            tarea.delete()\n            return Response({'detail': 'registro eliminado'}, status=status.HTTP_200_OK)\n        except Exception as e:\n            return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n\n    @action(detail=False, methods=[\"get\"])\n    def sumarTarea(self, request):\n        id = request.query_params.get(\"id\")\n        suma = Tarea.objects.filter(asignacion = id, activo=True).aggregate(Sum('valorTarea'))\n        return Response(suma, status=status.HTTP_200_OK)\n\n\n    @action(methods=[\"get\"], detail=False)\n    def tarea_curso(self, request):\n        id = request.query_params.get(\"id_asignacion\")\n        tareas = Tarea.objects.filter(asignacion = id, activo = True).order_by('-creado')\n        \n        #paginate the result\n        paginador = PageNumberPagination()\n        resultado_pagina = paginador.paginate_queryset(tareas, request)\n        serializer = TareaSerializer(resultado_pagina, many=True)\n        return paginador.get_paginated_response(serializer.data)\n\n\n    @action(methods=[\"get\"], detail=False)\n    def nota_tarea(self, request):\n        id = request.query_params.get(\"id_tarea\")\n        nota = Tarea.objects.get(pk=id)\n        dato = {\n            'nota': nota.valorTarea\n        }\n        return Response(dato, status=status.HTTP_200_OK)\n\n","repo_name":"raulIxc85/Proyecto-Aula-Virtual","sub_path":"api/viewsets/tarea.py","file_name":"tarea.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25407888181","text":"from gpiozero import DigitalInputDevice, DigitalOutputDevice\nfrom guizero import App, Text, PushButton\nimport time\n\nmold_open = DigitalInputDevice(4) #assign input pin (GPIO4 or Pin 7) to mold open signal\nejector_fire = DigitalInputDevice(17) #assign input pin (GPIO17 or Pin 11) to ejector fire signal\nalarm_pin = DigitalOutputDevice(27) #assign output pin (GPIO27 or Pin 12) to alarm signal\nalarm_reset_pin = DigitalOutputDevice(23) #assign output pin (GPIO23 or Pin 16) to alarm reset signal\nalarm_button = DigitalInputDevice(22) #assign input pin (GPIO22 or Pin 15) to alarm reset button\n\n\ndef check_button():\n    #if 
button.is_pressed: capture() #if button is pressed run \"capture\"\n    t = time.localtime() #grab the current time\n    current_time = (str(t[3])+\":\"+str(t[4])+\":\"+str(t[5])) #format it as (hour:minute:second)\n    \n    if mold_open.value == 1: #if the mold open signal line is on\n        print(\"\") #add an empty line\n        print(current_time +\": mold open\") #report signal status\n        mold_open_text.value = 'mold open: 1'\n    else:\n        mold_open_text.value = 'mold open: 0'\n    \n    if ejector_fire.value == 1: #if the ejector fire signal line is on\n        print(\"\") #add an empty line\n        print(current_time +\": ejector fire\") #report signal status\n        ejector_fire_text.value = 'ejector fire: 1'\n    else:\n        ejector_fire_text.value = 'ejector fire: 0'\n\napp = App(title='main', layout='auto', width = 250, height = 150) #create the main application window\napp.when_closed=quit #when the close button is pressed on the main window, stop the program\nmold_open_text = Text(app, text='mold open: 0')\nejector_fire_text = Text(app, text='ejector fire: 0')\nalarm_but = PushButton(app, command=lambda: alarm_pin.blink(on_time=0.1,n=1), text=\"Simulate Alarm\") #add alarm button\nreset_but = PushButton(app, command=lambda: alarm_reset_pin.blink(on_time=0.1,n=1), text=\"Reset Alarm\") #add reset button\napp.repeat(1, check_button)\napp.display()","repo_name":"cameroncircowestfalltechnik/Image_Compare","sub_path":"Wiring_Check.py","file_name":"Wiring_Check.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72334456194","text":"import re\nimport requests as rq \nfrom bs4 import BeautifulSoup\nimport traceback\n\nfrom storage import session_scope, Movie, MovieAward\nfrom constants import OMDB_API_KEY, WIKI_BASE\n\n\ndef get_omdb_info(imdb_id):\n    return rq.get(f'http://www.omdbapi.com/?i={imdb_id}&apikey={OMDB_API_KEY}&tomatoes=true').json()\n\ndef get_imdb_id_from_wiki(wiki_url):\n    soup = BeautifulSoup(rq.get(WIKI_BASE + wiki_url).text, 'lxml')\n    ext_links = soup.find('span', {'class': 'mw-headline', 'id': 'External_links'})\n    link = ext_links.findNext('a')\n    while link:\n        match = re.search('imdb.com/title/([\\w]+)', link.get('href', ''))\n        if match:\n            return match.group(1)\n        link = link.findNext('a')\n\n    raise Exception(f\"No urls found for {wiki_url}\")\n\ndef get_clean_list(items):\n    if items == 'N/A':\n        return None\n    return [x.strip() for x in items.split(',')]\n\ndef fetch_movie_info(chunk_size=10):\n    with session_scope() as session:\n        current_wiki_urls = set([x.movie_wiki_url for x in session.query(Movie.movie_wiki_url).all()])\n        movie_wiki_urls = set([x[0] for x in session.query(MovieAward.movie_wiki_url).distinct().all()])\n        urls_to_fetch = list(movie_wiki_urls - current_wiki_urls)\n        for i in range(0, len(urls_to_fetch), chunk_size):\n            print(f\"Fetched {i} / {len(urls_to_fetch)} movies\")\n            fetched_movies = []\n            for wiki_url in urls_to_fetch[i:i+chunk_size]:\n                info = None #reset per movie so the except block can't report a stale response\n                try:\n                    imdb_id = get_imdb_id_from_wiki(wiki_url)\n                    print(f\"{wiki_url} | {imdb_id}\")\n                    info = get_omdb_info(imdb_id)\n                    ratings = info.get('Ratings')\n                    imdb_rating = None\n                    if ratings:\n                        imdb_rating = next(iter([rat['Value'] for rat in ratings if rat['Source'] == 'Internet Movie Database']))\n                    if imdb_rating:\n                        imdb_rating = float(imdb_rating.split('/')[0])\n                    if not imdb_rating:\n                        print(f\"\\nWARNING: No rating for id={imdb_id}\")\n\n                    if info.get('imdbVotes') == 'N/A':\n                        imdb_votes = None\n                    else:\n                        imdb_votes = int(info.get('imdbVotes').replace(',', ''))\n\n                    
fetched_movies.append(Movie(\n                        imdb_id=imdb_id,\n                        movie_wiki_url=wiki_url,\n                        title=info.get('Title'),\n                        release_year=info.get('Year'),\n                        directors=get_clean_list(info.get('Director')),\n                        actors=get_clean_list(info.get('Actors')),\n                        runtime=info.get('Runtime'),\n                        genres=get_clean_list(info.get('Genre')),\n                        countries=get_clean_list(info.get('Country')),\n                        imdb_rating=imdb_rating,\n                        imdb_votes=imdb_votes,\n                        box_office=info.get('BoxOffice'),\n                        rt_url=info.get('tomatoURL'),\n                    ))\n                except Exception:\n                    print(f\"\\n\\n\\nERROR, problem with url={wiki_url}\")\n                    print(info)\n                    traceback.print_exc()\n                    print(\"\\n\\n\\n\")\n\n            session.bulk_save_objects(fetched_movies)\n            session.commit()\n\nfetch_movie_info()\n","repo_name":"Buzdygan/movie_analysis","sub_path":"src/get_movie_info.py","file_name":"get_movie_info.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23564552294","text":"\"\"\" helper function\n\nauthor baiyu\n\"\"\"\n\nimport sys\n\nimport numpy\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\n\ndef get_network(args):\n    \"\"\" return given network\n    \"\"\"\n    # import the model class that matches args.net from the models package\n    # and return an instance of the requested network\n\n    if args.net == 'vgg16':\n        from models.vgg import vgg16_bn\n        net = vgg16_bn()\n    elif args.net == 'vgg13':\n        from models.vgg import vgg13_bn\n        net = vgg13_bn()\n    elif args.net == 'vgg11':\n        from models.vgg import vgg11_bn\n        net = vgg11_bn()\n    elif args.net == 'vgg19':\n        from models.vgg import vgg19_bn\n        net = vgg19_bn()\n    elif args.net == 'densenet121':\n        from models.densenet import densenet121\n        net = densenet121()\n    elif args.net == 'densenet161':\n        from models.densenet import densenet161\n        net = densenet161()\n    elif args.net == 'densenet169':\n        from models.densenet import densenet169\n        net = densenet169()\n    elif args.net == 'densenet201':\n        from models.densenet import densenet201\n        net = densenet201()\n    elif args.net == 'googlenet':\n        from models.googlenet import googlenet\n        net = googlenet()\n    elif args.net == 'inceptionv3':\n        from models.inceptionv3 import inceptionv3\n        net = inceptionv3()\n    elif args.net == 'inceptionv4':\n        from models.inceptionv4 import inceptionv4\n        net = inceptionv4()\n    elif args.net == 'inceptionresnetv2':\n        from models.inceptionv4 import inception_resnet_v2\n        net = inception_resnet_v2()\n    elif args.net == 'xception':\n        from models.xception import xception\n        net = xception()\n    elif args.net == 'resnet18':\n        from models.resnet import resnet18\n        net = resnet18()\n    elif args.net == 'resnet34':\n        from models.resnet import resnet34\n        net = resnet34()\n    elif args.net == 'resnet50':\n        from models.resnet import resnet50\n        net = resnet50()\n    elif args.net == 'resnet101':\n        from models.resnet import resnet101\n        net = resnet101()\n    elif args.net == 'resnet152':\n        from models.resnet import resnet152\n        net = resnet152()\n    elif args.net == 'preactresnet18':\n        from models.preactresnet import preactresnet18\n        net = preactresnet18()\n    elif args.net == 'preactresnet34':\n        from models.preactresnet import preactresnet34\n        net = preactresnet34()\n    elif args.net == 'preactresnet50':\n        from models.preactresnet import preactresnet50\n        net = preactresnet50()\n    elif args.net == 'preactresnet101':\n        from models.preactresnet import preactresnet101\n        net = preactresnet101()\n    elif args.net == 'preactresnet152':\n        from models.preactresnet import preactresnet152\n        net = 
preactresnet152()\n    elif args.net == 'resnext50':\n        from models.resnext import resnext50\n        net = resnext50()\n    elif args.net == 'resnext101':\n        from models.resnext import resnext101\n        net = resnext101()\n    elif args.net == 'resnext152':\n        from models.resnext import resnext152\n        net = resnext152()\n    elif args.net == 'shufflenet':\n        from models.shufflenet import shufflenet\n        net = shufflenet()\n    elif args.net == 'shufflenetv2':\n        from models.shufflenetv2 import shufflenetv2\n        net = shufflenetv2()\n    elif args.net == 'squeezenet':\n        from models.squeezenet import squeezenet\n        net = squeezenet()\n    elif args.net == 'mobilenet':\n        from models.mobilenet import mobilenet\n        net = mobilenet()\n    elif args.net == 'mobilenetv2':\n        from models.mobilenetv2 import mobilenetv2\n        net = mobilenetv2()\n    elif args.net == 'nasnet':\n        from models.nasnet import nasnet\n        net = nasnet()\n    elif args.net == 'attention56':\n        from models.attention import attention56\n        net = attention56()\n    elif args.net == 'attention92':\n        from models.attention import attention92\n        net = attention92()\n    elif args.net == 'seresnet18':\n        from models.senet import seresnet18\n        net = seresnet18()\n    elif args.net == 'seresnet34':\n        from models.senet import seresnet34\n        net = seresnet34()\n    elif args.net == 'seresnet50':\n        from models.senet import seresnet50\n        net = seresnet50()\n    elif args.net == 'seresnet101':\n        from models.senet import seresnet101\n        net = seresnet101()\n    elif args.net == 'seresnet152':\n        from models.senet import seresnet152\n        net = seresnet152()\n\n    else:\n        print('the network name you have entered is not supported yet')\n        # no matching model: print an error message\n        sys.exit()\n        # and stop the program\n\n    if args.gpu: #use_gpu\n        # check whether the GPU should be used\n        net = net.cuda()\n        # move the network to CUDA for GPU computation\n\n    return net\n    # return the model\n\n\ndef get_training_dataloader(mean, std, batch_size=16, num_workers=2, shuffle=True):\n    \"\"\" return training dataloader\n    Args:\n        mean: mean of cifar100 training dataset\n        std: std of cifar100 training dataset\n        path: path to cifar100 training python dataset\n        batch_size: dataloader batchsize\n        num_workers: dataloader num_workers\n        shuffle: whether to shuffle\n    Returns: train_data_loader:torch dataloader object\n    \"\"\"\n    # build the training dataloader\n    # apply image transformations\n\n    transform_train = transforms.Compose([\n        # transformations for the training images\n        #transforms.ToPILImage(),\n        transforms.RandomCrop(32, padding=4),\n        # randomly crop the image to 32x32\n        transforms.RandomHorizontalFlip(),\n        # randomly flip the image horizontally\n        transforms.RandomRotation(15),\n        # randomly rotate the image within 15 degrees\n        transforms.ToTensor(),\n        # convert the image from numpy to a tensor\n        transforms.Normalize(mean, std)\n        # normalize the image\n    ])\n    cifar100_training = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)\n    # load the CIFAR-100 training set (downloads it if missing)\n    cifar100_training_loader = DataLoader(\n        cifar100_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n    # wrap the dataset in a DataLoader\n\n    return cifar100_training_loader\n    # return the training loader\n\ndef get_test_dataloader(mean, std, batch_size=16, num_workers=2, shuffle=True):\n    \"\"\" return test dataloader\n    Args:\n        mean: mean of cifar100 test dataset\n        std: std of cifar100 test dataset\n        path: path to cifar100 test python dataset\n        batch_size: dataloader batchsize\n        num_workers: dataloader num_workers\n        shuffle: whether to shuffle\n    Returns: cifar100_test_loader:torch dataloader object\n    \"\"\"\n    # build the test dataloader\n    # apply image transformations\n\n    transform_test = transforms.Compose([\n        # transformations for the test images\n        transforms.ToTensor(),\n        # convert the image to a tensor\n        transforms.Normalize(mean, std)\n        # normalize the image\n    ])\n\n    
cifar100_test = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)\n    # load the CIFAR-100 test set (downloads it if missing)\n    cifar100_test_loader = DataLoader(\n        cifar100_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n    # wrap the dataset in a DataLoader\n\n    return cifar100_test_loader\n    # return the test loader\n\ndef compute_mean_std(cifar100_dataset):\n    \"\"\"compute the mean and std of cifar100 dataset\n    Args:\n        cifar100_training_dataset or cifar100_test_dataset\n        which is derived from torch.utils.data\n\n    Returns:\n        a tuple contains mean, std value of entire dataset\n    \"\"\"\n    # compute the mean and std of the dataset\n\n    data_r = numpy.dstack([cifar100_dataset[i][1][:, :, 0] for i in range(len(cifar100_dataset))])\n    # collect the r-channel data of the dataset\n    data_g = numpy.dstack([cifar100_dataset[i][1][:, :, 1] for i in range(len(cifar100_dataset))])\n    # collect the g-channel data of the dataset\n    data_b = numpy.dstack([cifar100_dataset[i][1][:, :, 2] for i in range(len(cifar100_dataset))])\n    # collect the b-channel data of the dataset\n    mean = numpy.mean(data_r), numpy.mean(data_g), numpy.mean(data_b)\n    # mean of the r, g, b channels\n    std = numpy.std(data_r), numpy.std(data_g), numpy.std(data_b)\n    # std of the r, g, b channels\n\n    return mean, std\n    # return the mean and std of the dataset\n\nclass WarmUpLR(_LRScheduler):\n    \"\"\"warmup_training learning rate scheduler\n    Args:\n        optimizer: optimizer(e.g. SGD)\n        total_iters: total_iters of warmup phase\n    \"\"\"\n    # scheduler that warms the learning rate up from zero\n    def __init__(self, optimizer, total_iters, last_epoch=-1):\n        # constructor\n\n        self.total_iters = total_iters\n        # total number of warmup iterations\n        super().__init__(optimizer, last_epoch)\n        # call the base _LRScheduler constructor\n\n    def get_lr(self):\n        \"\"\"we will use the first m batches, and set the learning\n        rate to base_lr * m / total_iters\n        \"\"\"\n        # compute the current learning rate\n        return [base_lr * self.last_epoch / (self.total_iters + 1e-8) for base_lr in self.base_lrs]\n        # return the current learning rate\n","repo_name":"dandan3029/1.image-classification","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8314148743","text":"from collections import deque\nimport einops\nimport gym\nimport numpy as np\nfrom stable_baselines3.common.atari_wrappers import (\n    NoopResetEnv,\n    FireResetEnv,\n    EpisodicLifeEnv,\n    MaxAndSkipEnv,\n    ClipRewardEnv,\n    WarpFrame,\n)\nimport torch\n\n\nATARI_ENVS = [\"Breakout\", \"Pong\", \"SpaceInvaders\", \"StarGunner\"]\n\n\ndef preprocess_env(env, episodic_life=True):\n    env = NoopResetEnv(env, noop_max=30)\n    env = FireResetEnv(env)\n    if episodic_life:\n        env = EpisodicLifeEnv(env)\n    env = MaxAndSkipEnv(env, skip=4)\n    env = ClipRewardEnv(env)\n    env = WarpFrame(env)\n    env = FrameStack(env, n_frames=4)\n    return env\n\n\ndef batchify(state, add_channel_dim=False):\n    batch_obs = torch.tensor(np.array(state), dtype=torch.float32).unsqueeze(0)\n    if add_channel_dim:\n        batch_obs = einops.rearrange(\n            batch_obs, \"b i j c -> b c i j\"\n        )  # batch, channel, row, column\n    return batch_obs\n\n\nclass FrameStack(gym.Wrapper):\n    def __init__(self, env, n_frames):\n        \"\"\"Stack n_frames last frames.\n        Returns lazy array, which is much more memory efficient.\n        See Also\n        --------\n        stable_baselines.common.atari_wrappers.LazyFrames\n        :param env: (Gym Environment) the environment\n        :param n_frames: (int) the number of frames to stack\n        \"\"\"\n        gym.Wrapper.__init__(self, env)\n        self.n_frames = n_frames\n        self.frames = deque([], maxlen=n_frames)\n        shp = env.observation_space.shape\n        self.observation_space = gym.spaces.Box(\n            low=0,\n            high=255,\n            shape=(shp[0], shp[1], shp[2] * 
n_frames),\n dtype=env.observation_space.dtype,\n )\n\n def reset(self, **kwargs):\n obs = self.env.reset(**kwargs)\n for _ in range(self.n_frames):\n self.frames.append(obs)\n return self._get_ob()\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.frames.append(obs)\n return self._get_ob(), reward, done, info\n\n def _get_ob(self):\n assert len(self.frames) == self.n_frames\n return LazyFrames(list(self.frames))\n\n\nclass ScaledFloatFrame(gym.ObservationWrapper):\n def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.observation_space = gym.spaces.Box(\n low=0, high=1.0, shape=env.observation_space.shape, dtype=np.float32\n )\n\n def observation(self, observation):\n # careful! This undoes the memory optimization, use\n # with smaller replay buffers only.\n return np.array(observation).astype(np.float32) / 255.0\n\n\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"\n This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n This object should only be converted to np.ndarray before being passed to the model.\n :param frames: ([int] or [float]) environment frames\n \"\"\"\n self._frames = frames\n self._out = None\n\n def _force(self):\n if self._out is None:\n self._out = np.concatenate(self._frames, axis=2)\n self._frames = None\n return self._out\n\n def __array__(self, dtype=None):\n out = self._force()\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n def __len__(self):\n return len(self._force())\n\n def __getitem__(self, i):\n return self._force()[i]\n","repo_name":"bengreenberg5/dqn","sub_path":"dqn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22734063091","text":"import DQNAgentDdqnNei as DQNAgent\nfrom gensim.models import Word2Vec\nimport numpy as np\n\n \n \n \n \n \ndef main():\n\n word2VecModel= Word2Vec.load('yumodel').wv\n \n wordVectorLen=400#this is yumodel's len size\n \n neighborLen=10 #this is neighbor list's length\n \n word2Idx, wordEmbeddings=DQNAgent.build_embed( word2VecModel,wordVectorLen)\n\n str0Id=word2Idx[\"yes\"]\n str1Id=word2Idx[\"no\"]\n\n\n\n\n matrixDict={}\n\n agent = DQNAgent.DQNAgentClass( wordEmbeddings, neighborLen, matrixDict)############need to modification\n \n textFeatue=[]\n similarFeature=[]\n nei1Feature=[]\n nei2Feature=[]\n nei3Feature=[]\n \n textFeatue.append([str0Id]+[2]+[0]*4)\n similarFeature.append([0]*10)\n nei1Feature.append([0]*10)\n nei2Feature.append([0]*10)\n nei3Feature.append([0]*10)\n \n \n similarFeature=np.asarray(similarFeature)\n nei1Feature=np.asarray(nei1Feature)\n nei2Feature=np.asarray(nei2Feature)\n nei3Feature=np.asarray(nei3Feature)\n\n act_values = agent.model.predict([textFeatue,similarFeature, nei1Feature, nei2Feature, nei3Feature])\n \n print(act_values)\n ##################################\n textFeatue=[]\n similarFeature=[]\n nei1Feature=[]\n nei2Feature=[]\n nei3Feature=[]\n \n textFeatue.append([str1Id]+[2]+[0]*4)\n similarFeature.append([0]*10)\n nei1Feature.append([0]*10)\n nei2Feature.append([0]*10)\n nei3Feature.append([0]*10)\n \n \n similarFeature=np.asarray(similarFeature)\n nei1Feature=np.asarray(nei1Feature)\n nei2Feature=np.asarray(nei2Feature)\n nei3Feature=np.asarray(nei3Feature)\n\n act_values = 
agent.model.predict([textFeatue,similarFeature, nei1Feature, nei2Feature, nei3Feature])\n    \n    print(act_values)\n    \n    \n    \n    \n    \n\n\n\nif __name__ == '__main__':\n    main() ","repo_name":"AndroidTestBugReport/Android-test","sub_path":"rf/dindroid-code/collect_word_analysis.py","file_name":"collect_word_analysis.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5134668145","text":"\"\"\"FinancialControlAPI URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom RevenueExpenseControl.views import *\nfrom rest_framework import routers, permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\n\nschema_view = get_schema_view(\n    openapi.Info(\n        title=\"Financial Control API\",\n        default_version='v1',\n        description=\"An API for Financial Control\",\n        terms_of_service=\"#\",\n        contact=openapi.Contact(email=\"kalimarapeleteiro@gmail.com\"),\n        license=openapi.License(name=\"BSD License\"),\n    ),\n    public=True,\n    permission_classes=[permissions.AllowAny],\n)\n\nrouter = routers.DefaultRouter()\nrouter.register('receitas', ReceitasViewSet, basename='Receitas')\nrouter.register('despesas', DespesasViewSet, basename='Despesas')\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', include(router.urls)),\n    # NOTE: the <...> path converters were stripped during text extraction;\n    # the parameter names below are best-guess reconstructions\n    path('receitas/<int:pk>/', ReceitaEspecificaViewSet.as_view()),\n    path('despesas/<int:pk>/', DespesaEspecificaViewSet.as_view()),\n    path('receitas/<int:ano>/<int:mes>/', ReceitaMesViewSet.as_view()),\n    path('despesas/<int:ano>/<int:mes>/', DespesaMesViewSet.as_view()),\n    path('resumo/<int:ano>/<int:mes>/', ResumoViewSet.as_view()),\n    path('doc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-swagger-ui')\n]\n","repo_name":"KalimaraPeleteiro/Financial_Control_API","sub_path":"FinancialControlAPI/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25321582284","text":"# coding: utf-8\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torchvision\nimport torchvision.transforms as T\nimport PIL\n\nimport numpy as np\n\n\nSQUEEZENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)\nSQUEEZENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)\n\ndef preprocess(img, size=512):\n    transform = T.Compose([\n        T.Resize(size),# T.Scale was deprecated and renamed to T.Resize\n        T.ToTensor(),\n        T.Normalize(mean=SQUEEZENET_MEAN.tolist(),\n                    std=SQUEEZENET_STD.tolist()),\n        T.Lambda(lambda x: x[None]),\n    ])\n    return transform(img)\n\ndef deprocess(img):\n    transform = T.Compose([\n        T.Lambda(lambda x: x[0]),\n        T.Normalize(mean=[0, 0, 0], std=[1.0 / s for s in SQUEEZENET_STD.tolist()]),\n        T.Normalize(mean=[-m for m in SQUEEZENET_MEAN.tolist()], std=[1, 1, 1]),\n        T.Lambda(rescale),\n        T.ToPILImage(),\n    ])\n    return transform(img)\n\ndef rescale(x):\n    low, high = x.min(), x.max()\n    x_rescaled = 
(x - low) / (high - low)\n    return x_rescaled\n\ndtype = torch.FloatTensor\n\n# Load the pre-trained SqueezeNet model.\ncnn = torchvision.models.squeezenet1_1(pretrained=True).features\ncnn.type(dtype)\n\n# We don't want to train the model any further, so we tell PyTorch not to waste\n# computation computing gradients on parameters we're never going to update.\nfor param in cnn.parameters():\n    param.requires_grad = False\n\ndef extract_features(x, cnn):\n    \"\"\"\n    Use the CNN to extract features from the input image x.\n\n    \"\"\"\n    features = []\n    prev_feat = x\n    for i, module in enumerate(cnn._modules.values()):\n        next_feat = module(prev_feat)\n        features.append(next_feat)\n        prev_feat = next_feat\n    return features\n\ndef content_loss(content_weight, content_current, content_original):\n    \"\"\"\n    Compute the content loss for style transfer.\n\n    \"\"\"\n    _, C, H, W = content_current.size()\n\n    F = content_current.view(C, H*W)\n    P = content_original.view(C, H*W)\n    \n    loss = content_weight * (torch.sum((F - P)**2))\n    \n    return loss\n\ndef gram_matrix(features, normalize=True):\n    \"\"\"\n    Compute the Gram matrix from features.\n\n    \"\"\"\n    N, C, H, W = features.size()\n    F = features.view(N, C, H*W)\n    gram = torch.mm(F[0,:,:], F[0,:,:].transpose(1,0))\n    if normalize == True:\n        gram /= (H*W*C)\n    gram = gram.unsqueeze(0)# Add back first dimension\n    \n    return gram\n\ndef style_loss(feats, style_layers, style_targets, style_weights):\n    \"\"\"\n    Computes the style loss at a set of layers.\n\n    \"\"\"\n    loss = Variable(torch.zeros(1))# renamed from style_loss to avoid shadowing the function name\n    for i, layer in enumerate(style_layers):\n        loss += style_weights[i] * torch.sum((gram_matrix(feats[layer]) - style_targets[i])**2)\n    \n    return loss\n\ndef tv_loss(img, tv_weight):\n    \"\"\"\n    Compute total variation loss.\n\n    \"\"\"\n    loss = tv_weight * (torch.sum((img[:,:,:,1:]-img[:,:,:,:-1])**2)+torch.sum((img[:,:,1:,:]-img[:,:,:-1,:])**2))\n    return loss\n\ndef style_transfer(content_image, style_image, image_size, style_size, content_layer, content_weight,\n                   style_layers, style_weights, tv_weight, init_random = False):\n    # Extract features for the content image\n    content_img = preprocess(PIL.Image.open(content_image), size=image_size)\n    content_img_var = Variable(content_img.type(dtype))\n    feats = extract_features(content_img_var, cnn)\n    content_target = feats[content_layer].clone()\n\n    # Extract features for the style image\n    style_img = preprocess(PIL.Image.open(style_image), size=style_size)\n    style_img_var = Variable(style_img.type(dtype))\n    feats = extract_features(style_img_var, cnn)\n    style_targets = []\n    for idx in style_layers:\n        style_targets.append(gram_matrix(feats[idx].clone()))\n\n    # Initialize output image to content image or noise\n    if init_random:\n        img = torch.Tensor(content_img.size()).uniform_(0, 1)\n    else:\n        img = content_img.clone().type(dtype)\n\n    img_var = Variable(img, requires_grad=True)\n\n    # optimization hyperparameters\n    initial_lr = 3.0\n    decayed_lr = 0.1\n    decay_lr_at = 180\n    \n    optimizer = torch.optim.Adam([img_var], lr=initial_lr)\n    \n    for t in range(200):\n        if t < 190:\n            img.clamp_(-1.5, 1.5)\n        optimizer.zero_grad()\n        feats = extract_features(img_var, cnn)\n        \n        # Compute loss\n        c_loss = content_loss(content_weight, feats[content_layer], content_target)\n        s_loss = style_loss(feats, style_layers, style_targets, style_weights)\n        t_loss = tv_loss(img_var, tv_weight)\n        loss = c_loss + s_loss + t_loss\n        \n        loss.backward()\n\n        # Perform gradient descents on our image values\n        if t == decay_lr_at:\n            optimizer = 
torch.optim.Adam([img_var], lr=decayed_lr)\n        optimizer.step()\n\n    return deprocess(img.cpu())\n","repo_name":"illiteratexz/Style_transfer_pyProject","sub_path":"style_transfer.py","file_name":"style_transfer.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7780331564","text":"import listen\nimport icmd\nimport speech_recognition as sr\nimport speak\nimport mailer\n\n\nwhile True:\n    print(\"slucham\")\n    print(\"\\033c\", end=\"\")\n    # listen for wake word\n    word = listen.igor_listen()\n\n    try:\n        # if wake word is activated, start giving commands\n        # test each keyword explicitly; a bare string literal is always truthy\n        if 'głośniej' in word or 'ciszej' in word:\n            icmd.igor_volume(word)\n        if 'ahoj' in word:\n            speak.playsound.playsound(r'audio/static/command.mp3')\n            try:\n                # give voice command to trigger functions\n                command = listen.igor_listen()\n                # stop the assistant\n                if 'koniec' in command:\n                    exit()\n                # report current time\n                if 'godzina' in command:\n                    time_report = icmd.igor_time()\n                    speak.igor_speak(time_report)\n                # report current date\n                if 'dzień' in command:\n                    date_report = icmd.igor_date()\n                    speak.igor_speak(date_report)\n                # open search in google\n                if 'szukaj' in command:\n                    speak.playsound.playsound(r'audio/static/search.mp3')\n                    search = listen.igor_listen()\n                    search_report = icmd.igor_search(search)\n                    speak.igor_speak(search_report)\n                # report weather\n                if 'pogoda' in command:\n                    speak.playsound.playsound(r'audio/static/weather.mp3')\n                    location = listen.igor_listen()\n                    weather_report = icmd.igor_weather(location)\n                    speak.igor_speak(weather_report)\n                # shopping list\n                if 'zakupy' in command:\n                    speak.igor_speak('Otwieram liste zakupów')\n                    products_list = []\n                    while True:\n                        speak.igor_speak('Jaki produkt dodać?')\n                        item = listen.igor_listen()\n                        if item != 'koniec':\n                            products_list.append(item)\n                        else:\n                            break\n                    speak.igor_speak('Co teraz?')\n                    shopping_list_option = listen.igor_listen()\n                    if 'poczta' in shopping_list_option:\n                        mailer.igor_mail(products_list)\n                        speak.igor_speak('Wysłane')\n                if 'muzyka' in command:\n                    speak.igor_speak('Co włączyć?')\n                    youtube_search = listen.igor_listen()\n                    icmd.igor_youtube(youtube_search)\n                if 'encyklopedia' in command:\n                    speak.playsound.playsound(r'audio/static/search.mp3')\n                    wikipedia_search = listen.igor_listen()\n                    wikipedia_report = icmd.igor_wikipedia(wikipedia_search)\n                    speak.igor_speak(wikipedia_report)\n            # error handling - unknown command\n            except sr.UnknownValueError:\n                print('Nie rozumiem polecenia')\n            # error handling - problem with request\n            except sr.RequestError:\n                print('request error')\n    # error handling - no wake word\n    except TypeError:\n        print('brak komend')","repo_name":"jeremiasz-goti/igor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7579361668","text":"import sys\n\n\nclass Node:\n\n    def __init__(self, character='', frequency=0, ht=None):\n        self.character = character\n        self.frequency = frequency\n        self.left = None\n        self.right = None\n        self.huffman = ht\n\n    def __str__(self):\n        return '({}, {})'.format(self.character, self.frequency)\n\n\nclass minHeap:\n\n    def __init__(self):\n        self.root = None\n        self.size = 0\n\n    def recursive_print(self, node):\n        if node:\n            output = node.__str__()\n            if node.left:\n                output = self.recursive_print(node.left) + ' <- ' + output\n            if node.right:\n                output += ' -> ' + self.recursive_print(node.right)\n            return output\n        return \"\"\n\n    def __str__(self):\n        
node = self.root\n\n return self.recursive_print(node)\n\n def swap(self, node1, node2):\n temp_char = node2.character\n temp_freq = node2.frequency\n temp_ht = node2.huffman\n\n node2.character = node1.character\n node2.frequency = node1.frequency\n node2.huffman = node1.huffman\n\n node1.character = temp_char\n node1.frequency = temp_freq\n node1.huffman = temp_ht\n\n def percolate_up(self, path_to_parent, child):\n parent = self.root\n\n if child == self.root:\n return\n\n for digit in path_to_parent:\n if digit == '0':\n parent = parent.left\n else:\n parent = parent.right\n\n if child.frequency < parent.frequency:\n self.swap(parent, child)\n self.percolate_up(path_to_parent[:-1], parent)\n\n def add_node(self, child):\n if not self.root:\n self.root = child\n self.size = 1\n return\n\n self.size += 1\n\n \"\"\"Calculate location of next node by converting the new size to a binary representation.\n bin(num) returns a string starting in \"0b\" and the first digit after that is unused\"\"\"\n path = bin(self.size)[3:]\n parent = self.root\n\n for digit in path[:-1]: # Stop on the last level\n if digit == '0':\n parent = parent.left\n else:\n parent = parent.right\n\n if path[-1] == '0':\n parent.left = child\n else:\n parent.right = child\n\n if child.frequency < parent.frequency:\n self.percolate_up(path[:-1], child)\n\n def pop_min(self):\n \"\"\"Calculate location of next node by converting the new size to a binary representation.\n bin(num) returns a string starting in \"0b\" and the first digit after that is unused\"\"\"\n path_to_latest_child = bin(self.size)[3:]\n min = Node(self.root.character, self.root.frequency, self.root.huffman)\n explorer = self.root\n parent = self.root\n\n if self.size == 1:\n self.root = None\n else:\n for digit in path_to_latest_child:\n parent = explorer\n if digit == '0':\n explorer = explorer.left\n else:\n explorer = explorer.right\n\n # Remove what will be the root(lowest node) after swap on line 118\n if str(path_to_latest_child)[-1] == '0':\n parent.left = None\n else:\n parent.right = None\n\n self.swap(self.root, explorer)\n\n # Heapify: make sure the heap retains its properties, now that we have swapped a new node\n # to the root\n node = self.root\n while node:\n if node.left and node.frequency > node.left.frequency:\n self.swap(node, node.left)\n node = node.left\n elif node.right and node.frequency > node.right.frequency:\n self.swap(node, node.right)\n node = node.right\n else:\n break\n\n self.size -= 1\n\n return min\n\n\nclass huffman_Tree:\n\n def __init__(self, root=None):\n self.root = root\n self.left_child = None\n self.right_child = None\n\n def add_children(self, first, second):\n self.left_child = first\n self.right_child = second\n\n def gen_cipher_recursive(self, node, cipher, path):\n char = node.character\n if char:\n cipher[char] = path\n else:\n self.gen_cipher_recursive(node.huffman.left_child, cipher, path + \"0\")\n self.gen_cipher_recursive(node.huffman.right_child, cipher, path + \"1\")\n\n return\n\n \"\"\" Recursively fill a dictionary(cipher) with the codes as we traverse the huffman tree\"\"\"\n def gen_cipher(self, cipher):\n if self.left_child and self.right_child:\n self.gen_cipher_recursive(self.left_child, cipher, \"0\")\n self.gen_cipher_recursive(self.right_child, cipher, \"1\")\n else:\n return dict()\n\n\ndef huffman_encoding(data):\n output = ''\n min_heap = minHeap()\n freqs = dict()\n\n for char in data:\n if char not in freqs:\n freqs[char] = 1\n else:\n freqs[char] += 1\n\n for (key, value) in 
freqs.items():\n        cur = Node(key, value)\n        min_heap.add_node(cur)\n\n    while min_heap.size > 1:  # while there are at least two elements in minHeap\n        first = min_heap.pop_min()\n        second = min_heap.pop_min()\n\n        tree = huffman_Tree()\n        tree.root = Node(frequency=first.frequency + second.frequency, ht=tree)\n\n        tree.add_children(first, second)\n\n        min_heap.add_node(tree.root)\n\n    cipher = freqs.copy()\n    tree = min_heap.root\n    if tree:\n        if tree.huffman:\n            tree.huffman.gen_cipher(cipher)\n        else:\n            for key in cipher:\n                cipher[key] = \"0\"\n\n    for char in data:\n        output += cipher.get(char)\n\n    return output, tree\n\n\ndef huffman_decoding(data, tree):\n    output = ''\n    cur = tree\n\n    if cur and cur.huffman:\n        for bit in data:\n            if bit == '0':\n                cur = cur.huffman.left_child\n            else:\n                cur = cur.huffman.right_child\n\n            if cur and cur.character:\n                output += cur.character\n                cur = tree\n    else:  # there is no huffman tree, because there is only one letter\n        if cur:\n            for bit in data:\n                output += cur.character\n\n    return output\n\n\nif __name__ == \"__main__\":\n    test = \"AAAAAAABBBCCCCCCCDDEEEEEE\"\n\n    encoded_data, tree = huffman_encoding(test)\n\n    a_great_sentence = \"The bird is the word\"\n\n    print(\"The size of the data is: {}\\n\".format(sys.getsizeof(a_great_sentence)))\n    print(\"The content of the data is: {}\\n\".format(a_great_sentence))\n\n    encoded_data, tree = huffman_encoding(a_great_sentence)\n\n    print(\"The size of the encoded data is: {}\\n\".format(sys.getsizeof(int(encoded_data, base=2))))\n    print(\"The content of the encoded data is: {}\\n\".format(encoded_data))\n\n    decoded_data = huffman_decoding(encoded_data, tree)\n\n    print(\"The size of the decoded data is: {}\\n\".format(sys.getsizeof(decoded_data)))\n    print(\"The content of the decoded data is: {}\\n\".format(decoded_data))\n    print(\"===================================================================================\")\n\n    a_smaller_sentence = \"\"\n\n    print(\"The size of the data is: {}\\n\".format(sys.getsizeof(a_smaller_sentence)))\n    print(\"The content of the data is: {}\\n\".format(a_smaller_sentence))\n\n    encoded_data, tree = huffman_encoding(a_smaller_sentence)\n\n    #print(\"The size of the encoded data is: {}\\n\".format(sys.getsizeof(int(encoded_data, base=2))))\n    print(\"The content of the encoded data is: {}\\n\".format(encoded_data))\n\n    decoded_data = huffman_decoding(encoded_data, tree)\n\n    print(\"The size of the decoded data is: {}\\n\".format(sys.getsizeof(decoded_data)))\n    print(\"The content of the decoded data is: {}\\n\".format(decoded_data))\n    print(\"===================================================================================\")\n\n    the_greatest_sentence = \"The important thing is not to stop questioning. Curiosity has its own reason for existing. 
One cannot help but be in awe when one contemplates the mysteries of eternity, of life, of the marvellous structure of reality.\"\n\n print(\"The size of the data is: {}\\n\".format(sys.getsizeof(the_greatest_sentence)))\n print(\"The content of the data is: {}\\n\".format(the_greatest_sentence))\n\n encoded_data, tree = huffman_encoding(the_greatest_sentence)\n\n print(\"The size of the encoded data is: {}\\n\".format(sys.getsizeof(int(encoded_data, base=2))))\n print(\"The content of the encoded data is: {}\\n\".format(encoded_data))\n\n decoded_data = huffman_decoding(encoded_data, tree)\n\n print(\"The size of the decoded data is: {}\\n\".format(sys.getsizeof(decoded_data)))\n print(\"The content of the encoded data is: {}\\n\".format(decoded_data))\n print(\"===================================================================================\")\n\n\n an_edgy_sentence = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n\n print(\"The size of the data is: {}\\n\".format(sys.getsizeof(an_edgy_sentence)))\n print(\"The content of the data is: {}\\n\".format(an_edgy_sentence))\n\n encoded_data, tree = huffman_encoding(an_edgy_sentence)\n\n print(\"The size of the encoded data is: {}\\n\".format(sys.getsizeof(int(encoded_data, base=2))))\n print(\"The content of the encoded data is: {}\\n\".format(encoded_data))\n\n decoded_data = huffman_decoding(encoded_data, tree)\n\n print(\"The size of the decoded data is: {}\\n\".format(sys.getsizeof(decoded_data)))\n print(\"The content of the encoded data is: {}\\n\".format(decoded_data))\n print(\"===================================================================================\")\n","repo_name":"plumedrift/Project2","sub_path":"Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":9816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3781635868","text":"# 图像二值化 我们只关心图像的形状 不关心颜色\n# 将图像居中处理 提高正确率\nimport numpy as np\nimport struct\ndef get_images():\n filename = './dataset/train-images.idx3-ubyte'\n binfile = open(filename, 'rb')\n buffers = binfile.read()\n magic_num, pic_num, height, width = struct.unpack_from('>4I', buffers, 0)\n offset = struct.calcsize('>4I')\n pics_size = pic_num * height * width\n data_fmt = f'>{pics_size}B'\n imgs = struct.unpack_from(data_fmt, buffers, offset)\n img_array = np.reshape(imgs, (pic_num, height, width))\n return img_array\n\nif __name__ == '__main__':\n imgs = get_images()\n binary_images = []\n # 遍历所有图片\n for img in imgs:\n image_left = 28\n image_right = 0\n image_top = 28\n image_bottom = 0\n for row in range(28):\n for col in range(28):\n if img[row][col] > 0:\n img[row][col] = 1\n image_left = col if col < image_left else image_left\n image_right = col if col < image_right else image_right\n image_top = row if row < image_top else image_top\n image_bottom = row if row < image_bottom else image_bottom\n # 计算有效图像的高度和宽度\n new_height = image_bottom - image_top + 1\n new_width = image_right -image_left + 1\n # 构建一个全为B的二维图像矩阵\n dataMat = np.zeros((28, 28))\n # 计算偏移量\n left = (28 - new_width) // 2\n top = (28 - new_height) //2\n\n # 居中操作\n for row in range(new_height):\n for col in range(new_width):\n dataMat[row+top][col+left] = img[row+image_top][col+image_left]\n # 将图像降至一维\n dataMat = np.reshape(dataMat, (-1))\n binary_images.append(dataMat)\n","repo_name":"zanglin02/pycharm","sub_path":"python基础/d06手写数字识别/01 课上代码/f004 binary.py","file_name":"f004 
binary.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31795435755","text":"from libs.function import Interval, Function\nfrom libs.breaking_points import calculate_at_point\nfrom .discontinuous_func import support_discontinuous\n\n\n@support_discontinuous\ndef integrate(func: Function, interval: Interval, partition_count: int) -> float:\n left, right = interval\n step = (right - left) / partition_count\n x = left + step\n result = 0\n for _ in range(partition_count - 1):\n result += calculate_at_point(func, x)\n x += step\n result = (result * 2 + calculate_at_point(func, left) + calculate_at_point(func, right)) * step / 2\n return result\n","repo_name":"raineduc/comp_math","sub_path":"lab3/methods/trapezoidal_method.py","file_name":"trapezoidal_method.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30062774533","text":"import os\r\nimport discord\r\nimport json\r\nfrom decouple import config\r\nfrom discord.ext import commands\r\nfrom tasks.table import Table\r\nfrom tasks.category import Category\r\nfrom tasks.quest import Quest\r\nfrom tasks.tk import Tk\r\n\r\n\r\nclass Query(commands.Cog):\r\n \"\"\"\r\n -->>> Works with Query <<<--\r\n\r\n !create ammo - Create category of tables all ammunition.\r\n \"\"\"\r\n\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n self.AMMO_CAT_NAME = config(\"AMMO_CAT_NAME\")# Discord Category name\r\n self.CALIBERS = config(\"CALIBERS\")# Chosen based on JSON file 'name' to filter categories\r\n self.CALIBERS_NAME = config(\"CALIBERS_NAME\")# table at https://escapefromtarkov.fandom.com/wiki/Ballistics\r\n self.CH_NAME_ONLY = config(\"CH_NAME_ONLY\")# For creating and deleting ds channels only\r\n\r\n self.DS_USERS_TK = config(\"DS_USERS_TK\")\r\n \r\n # str -> list\r\n self.CALIBERS = list(map(lambda x: x.split(',')[-1], json.loads(self.CALIBERS)))\r\n # self.CALIBERS = self.CALIBERS[5:7]# tests - ['12/70', '20/70']\r\n\r\n self.CALIBERS_NAME = list(map(lambda x: x.split(',')[-1], json.loads(self.CALIBERS_NAME)))\r\n # self.CALIBERS_NAME = self.CALIBERS_NAME[5:7] # test - ['12x70mm', '20x70mm']\r\n\r\n # DS text channel only\r\n self.CH_NAME_ONLY = list(map(lambda x: x.split(',')[-1], json.loads(self.CH_NAME_ONLY)))\r\n # self.CH_NAME_ONLY = self.CH_NAME_ONLY[5:7]\r\n \r\n\r\n # Create Ammo\r\n async def create_ammo(self, ctx):\r\n try:\r\n ammo_cat = await Category.create_ammo_category(self, ctx, self.AMMO_CAT_NAME)# Create Category\r\n await Table.create_ammo_table(self, ctx, ammo_cat, self.CH_NAME_ONLY, self.CALIBERS_NAME)# Create Ammo Table\r\n\r\n except Exception as error:\r\n await ctx.send(\"Oops... Not possible to create or find Category.\")\r\n print(\"--> Error: Query - Not possible to create or find Category. <--\")\r\n print(error)\r\n\r\n\r\n # Create TK\r\n # async def create_tk(self, ctx):\r\n # try:\r\n # ...\r\n # await Tk.create_tk_table(self, ctx, self.DS_USERS_TK)\r\n\r\n # except Exception as error:\r\n # await ctx.send(\"Oops... Not possible to create or find Little TK table.\")\r\n # print(\"--> Error: Query - Not possible to create or find Little TK table. 
<--\")\r\n # print(error)\r\n \r\n\r\n\r\n # Delete Ammo\r\n async def delete_ammo(self, ctx):\r\n try:\r\n ammo_cat = discord.utils.get(ctx.guild.categories, name=self.AMMO_CAT_NAME)# Get Ammo Category\r\n await Table.delete_ammo_table(self, ctx, ammo_cat, self.CH_NAME_ONLY)# Delete Ammo Tables\r\n await Category.delete_ammo_category(self, ctx, ammo_cat, self.AMMO_CAT_NAME)# Delete Category\r\n \r\n except Exception as error:\r\n await ctx.send(\"Oops... Not possible to delete Ammo table.\")\r\n print(\"--> Error: Query - Ammo table not deleted. <--\")\r\n print(error)\r\n\r\n \r\n # Update Ammo\r\n async def update_ammo(self, ctx):\r\n try:\r\n ammo_cat = await Category.create_ammo_category(self, ctx, self.AMMO_CAT_NAME)# Create Category\r\n await Table.update_ammo_table(self, ctx, ammo_cat, self.CH_NAME_ONLY, self.CALIBERS_NAME)# Update Ammo Table\r\n\r\n except Exception as error:\r\n await ctx.send(\"Oops... Not possible to create or find Category.\")\r\n print(\"--> Error: Query - Not possible to create or find Category. <--\")\r\n print(error)\r\n\r\n \r\n # Clear Update Ammo\r\n async def clear_update_ammo(self, ctx):\r\n try:\r\n ammo_cat = await Category.create_ammo_category(self, ctx, self.AMMO_CAT_NAME)# Create Category\r\n await Table.clear_update_ammo_table(self, ctx, ammo_cat, self.CH_NAME_ONLY, self.CALIBERS_NAME)# Update Ammo Table\r\n\r\n except Exception as error:\r\n await ctx.send(\"Oops... Not possible to create or find Category.\")\r\n print(\"--> Error: Query - Not possible to create or find Category. <--\")\r\n print(error)\r\n\r\n\r\n # Quests\r\n async def show_quest(self, ctx, option):\r\n try:\r\n await Quest.create_quest(self, ctx, option)# Create Quest\r\n\r\n except Exception as error:\r\n await ctx.send(\"Oops... Not possible to show or find Quest.\")\r\n print(\"--> Error: Query - Not possible to show or find Quest. <--\")\r\n print(error)\r\n\r\n\r\n # Create Ammo Command\r\n @commands.command(aliases=['creates', 'c'], help=\"Creates table for the option selected. Arguments: 'ammo'.\")\r\n async def create(self, ctx, option=''):\r\n try:\r\n if option == 'ammo':\r\n await self.create_ammo(ctx)\r\n else:#'ammo', 'maps', 'quests','traders', 'hideout', 'item_preset', 'items'\r\n await ctx.reply(\"No data selected. Please choose one option: `'ammo'`. and type `!create 'option'`.\")\r\n \r\n except Exception as error:\r\n print(\"--> Error: Query - Command !create. <--\")\r\n print(error)\r\n\r\n\r\n # Create TK Command - \"async def create(self, ctx):\" makes create ammo unusable\r\n # @commands.command(name=\"tk\", help=\"Creates the Little TK table. No Arguments.\")\r\n # async def create(self, ctx):\r\n # try:\r\n # await self.create_tk(ctx)\r\n \r\n # except Exception as error:\r\n # print(\"--> Error: Query - Command !tk. <--\")\r\n # print(error)\r\n\r\n\r\n # Delete Ammo Command\r\n @commands.command(aliases=['deletes', 'del', 'd'], help=\"Deletes the whole Category for the option selected. Arguments: 'ammo'.\")\r\n async def delete(self, ctx, option=''):\r\n try:\r\n if option == 'ammo':\r\n await self.delete_ammo(ctx)\r\n else:#'ammo', 'maps', 'quests','traders', 'hideout', 'item_preset', 'items'\r\n await ctx.reply(\"No data selected. Please choose one option: `'ammo'`. and type `!delete 'option'`.\")\r\n \r\n except Exception as error:\r\n print(\"--> Error: Query - Command !delete. <--\")\r\n print(error)\r\n\r\n\r\n # Update Ammo Command\r\n @commands.command(aliases=['updates', 'u'], help=\"Show or Clear Updates for option selected. 
Arguments: '', 'ammo', 'clear'.\")\r\n async def update(self, ctx, option='ammo'):\r\n try:\r\n if option == 'ammo':# Update\r\n await self.update_ammo(ctx)\r\n elif option == 'clear':# Clear update\r\n await self.clear_update_ammo(ctx)\r\n else:#'ammo', 'maps', 'quests','traders', 'hideout', 'item_preset', 'items'\r\n await ctx.reply(\"No data selected. Please choose one option: `''`, `'ammo'` or `'clear'`. and type `!update 'option'`.\")\r\n\r\n except Exception as error:\r\n print(\"--> Error: Query - Command !update. <--\")\r\n print(error)\r\n \r\n\r\n # Quests Command\r\n @commands.command(aliases=['quests', 'q'], help=\"Show the Quests as commanded. Arguments: 'kappa'.\")\r\n async def quest(self, ctx, option='', option2=''):\r\n try:\r\n if option != '':\r\n await self.show_quest(ctx, option)\r\n else:#'', 'all', 'kappa','lvl xx', 'item'\r\n await self.show_quest(ctx, option)\r\n # await ctx.reply(\"No data selected. Please choose one option: `'all'`, `'kappa'`, `'lvl'` or `'item'`. and type `!quest 'option'`.\")\r\n await ctx.reply(\"No data selected. Please choose one option: `'all'`, `'kappa'` or `'text'`. and type `!quest 'option'`.\")\r\n \r\n except Exception as error:\r\n print(\"--> Error: Query - Command !quest. <--\")\r\n print(error)\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Query(bot))\r\n","repo_name":"Oracon/tarkov","sub_path":"commands/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":7820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13652278829","text":"from base_doc import BaseDoc\nfrom process_config import add_history\nimport subprocess\n\nclass Devices(BaseDoc):\n def get_rev_list(self):\n revs=[]\n initial_revs = super(Devices, self).get_rev_list()\n for rev in initial_revs:\n doc = self.db.get(self.doc['_id'], rev=rev)\n if doc['changed_by'] == 'user':\n revs.append(doc['_rev'])\n return revs\n\n def reload_hostapd(self):\n cmd = ['/etc/init.d/hostapd', 'reload']\n res = subprocess.Popen(cmd)\n\n def remove_from_hostapd_blacklist(self, mac):\n mac_str = \"%s\\n\" % (mac)\n with open('/etc/hostapd.deny', 'r+') as hsd:\n lines = hsd.readlines()\n if mac_str in lines:\n lines.remove(mac_str)\n hsd.seek(0)\n hsd.writelines(lines)\n hsd.truncate()\n self.reload_hostapd()\n\n def undo(self):\n rev_list = self.get_rev_list()\n res = \"\"\n if len(rev_list) > 0:\n doc = self.db.get(self.doc['_id'], rev=rev_list[0])\n self.doc['device_name'] = doc['device_name']\n self.doc['device_type'] = doc['device_type']\n self.doc['name'] = doc['name']\n self.doc['action'] = doc['action']\n self.doc['changed_by'] = 'user'\n res = self.db.save_doc(self.doc, force_update=True)\n else:\n self.doc['_deleted'] = True\n self.remove_from_hostapd_blacklist(self.doc['mac_address'])\n res = self.db.save_doc(self.doc, force_update=True)\n doc_arr = [{'doc_id': self.doc['_id'], 'doc_rev': res['rev'], 'doc_collection': self.doc['collection'], 'action': 'delete'}]\n add_history.add_history_item(\"Device removed\", \"%s has been removed\" % self.doc['device_name'], doc_arr, undoable=False)\n\n return res['rev']\n","repo_name":"rjspencer1989/phd_code","sub_path":"undo/doc_types/devices.py","file_name":"devices.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74335365955","text":"import argparse\nimport wandb\nfrom os.path import join\nfrom os import makedirs\nimport importlib\n\n\ndef main(args):\n 
\"\"\"Set up pipeline for training.\"\"\"\n # Imports\n task = importlib.import_module(\"tasks.%s\" % args.task_name)\n factory = task.Factory(resume=args.resume, resume_dir=args.resume_dir)\n\n # Make logger and log directory\n makedirs(join(\"..\", \"runs\", \"%s\" % args.task_name), exist_ok=True)\n wandb.init(project=args.project_name,\n group=args.group_name,\n id=args.run_id,\n resume=args.resume,\n config=factory.fconf,\n dir=join(\"..\", \"runs\", \"%s\" % args.task_name)\n )\n run = wandb.run\n\n with run:\n optimizer = factory.make_optimizer(logger=run)\n print(\"Train\")\n optimizer.train(args.epochs, 100)\n factory.post_processing(optimizer)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--task_name\", type=str)\n parser.add_argument(\"--group_name\", type=str, default=None)\n parser.add_argument(\"--epochs\", type=int, default=20000)\n parser.add_argument(\"--project_name\", type=str, default=\"SynapseGAN\")\n parser.add_argument(\"--resume\", type=bool, default=False)\n parser.add_argument(\"--run_id\", type=str, default=None)\n parser.add_argument(\"--resume_dir\", type=str, default=None)\n main(parser.parse_args())\n","repo_name":"mackelab/synapsegan","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23589684961","text":"import numpy as np\nfrom itertools import combinations\nfrom math import ceil, floor, pi\nfrom functools import lru_cache\n\n\ndef get_num_rider(N, C, tickets):\n tickets_per_cust = np.zeros(C)\n tickets_per_seat = np.zeros(N)\n for seat, c in tickets:\n tickets_per_cust[c] += 1\n tickets_per_seat[seat] += 1\n\n density = np.cumsum(tickets_per_seat) / (np.arange(N)+1)\n min_rides = np.max(density)\n min_rides = max(min_rides, max(tickets_per_cust))\n\n return min_rides\n\n\n\ndef rollercoaster(N, C, M, tickets):\n \"\"\"\n N: seats\n C: customers\n M: tickets\n tickets: (position, buyer) sorted\n \"\"\"\n num_rides = get_num_rider(N, C, tickets)\n\n tickets_per_cust = np.zeros(C)\n tickets_per_seat = np.zeros(N)\n for seat, c in tickets:\n tickets_per_cust[c] += 1\n tickets_per_seat[seat] += 1\n\n promotions_per_seat = np.maximum(0, tickets_per_seat - num_rides)\n num_promotions = sum(promotions_per_seat)\n\n # seats = np.zeros((num_rides, N))\n # customer_in_ride = np.zeros((num_rides, C))\n # num_rides = 0\n #\n # for seat, c in tickets:\n # ride = 0\n # while seats[ride, seat] or customer_in_ride[ride]:\n # ride += 1\n\n\n\n return num_rides, num_promotions\n\n\nif __name__ == '__main__':\n # PATH_IN = 'sample.in'\n PATH_IN = 'B-small-attempt0.in'\n # PATH_IN = 'A-large-practice.in'\n PATH_OUT = PATH_IN[:-3] + '.out'\n\n f_in = open(PATH_IN, 'r')\n f_out = open(PATH_OUT, 'w')\n\n T = int(f_in.readline())\n for t in range(T):\n line = f_in.readline().split()\n\n N = int(line[0])\n C = int(line[1])\n M = int(line[2])\n print(N, C, M)\n\n tickets = []\n for i in range(M):\n line = f_in.readline().split()\n tickets.append((int(line[0])-1, int(line[1])-1))\n\n tickets = sorted(tickets)\n print(tickets)\n\n runs, promos = rollercoaster(N, C, M, tickets)\n # runs = get_num_rider(N, C, tickets)\n\n print('Case #%i: %i %i' % (t + 1, runs, promos))\n f_out.write('Case #%i: %i %i\\n' % (t + 1, runs, 
promos))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_213/108.py","file_name":"108.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34879020477","text":"print (\"Hi Player, I'm Computer. Let's play Rock, Paper, Scissors!\")\r\nimport random\r\nRunning = True\r\nwhile Running:\r\n print (\"Rock = R\\nPaper = P\\nScissors = S\")\r\n Options = (\"R\", \"P\", \"S\")\r\n UserChoice = input(\"Rock, Paper, Scissors?\\nSHOOT!!! ('R', 'P', 'S'):\").upper()\r\n if UserChoice not in Options:\r\n print (\"Invalid input, please try again.\\n\")\r\n continue \r\n ComputerChoice = random.choice(Options)\r\n print (f\"\\nPlayer ({UserChoice}): CPU ({ComputerChoice}).\")\r\n\r\n if UserChoice == ComputerChoice:\r\n print (\"Both players picked \" + UserChoice + \". It's a draw.\\nPlay again.\\n\")\r\n continue\r\n elif UserChoice == \"R\":\r\n if ComputerChoice == \"S\":\r\n print (\"Rock crushes scissors, YOU WIN. Hippie!\")\r\n else:\r\n print(\"Paper covers rock. COMPUTER WINS, sowie.\")\r\n elif UserChoice == \"S\":\r\n if ComputerChoice == \"P\":\r\n print (\"Scissors cuts paper. YOU WIN. Yaay!\")\r\n else:\r\n print (\"Rock crushes scissors. COMPUTER WINS, sowie.\")\r\n elif UserChoice == \"P\":\r\n if ComputerChoice == \"R\":\r\n print (\"Paper covers rock, YOU WIN. Swoosh!\")\r\n else:\r\n print (\"Scissors cuts paper. COMPUTER WINS, sowie.\")\r\n Running = False\r\n","repo_name":"AmyAphiar/Zuri-Tasks","sub_path":"Rock, Paper, Scissors.py","file_name":"Rock, Paper, Scissors.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14755399474","text":"#!/usr/bin/env python3\n\nimport os\nimport sqlite3\n\ndb=sqlite3.connect(\"dbase/pigpio.sqlite\")\n\nc=db.cursor()\n\nc.execute(\"select file_name from pigpio\")\n\nnames = c.fetchall()\n\nfor n in names:\n os.system(\"bin/html.py {0} >HTML/{0}.html\".format(n[0]))\n print(n[0])\n\nc.close()\n\ndb.close()\n\n","repo_name":"joan2937/pigpio","sub_path":"DOC/bin/build_site.py","file_name":"build_site.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":1330,"dataset":"github-code","pt":"61"} +{"seq_id":"26145798154","text":"from django.urls import path, include\nfrom . 
import views\nfrom rest_framework.routers import DefaultRouter\n\n\nrouter = DefaultRouter()\nrouter.register(\"books\", views.BooksViewSet, basename=\"books\")\nrouter.register(\"users\", views.UserViewSet, basename=\"users\")\nrouter.register('add_to_cart', views.CartItemViewSet, basename=\"add_to_cart\")\nrouter.register(\"cart\", views.CartViewSet, basename=\"cart\")\nrouter.register(\"orders\", views.OrderViewSet, basename=\"orders\")\nrouter.register(\"delivery\", views.DeliveryViewSet, basename=\"delivery\")\n\nurlpatterns = [\n    path('api/', include(router.urls))\n]","repo_name":"Zubaydullo/e-bookshop_backend","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14119806147","text":"# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\n\nfrom dash.dependencies import Input, Output, State\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nLogisticRegression(solver='lbfgs')  # no-op: this instance is created and immediately discarded\n\n\nimport plotly.figure_factory as ff\nimport pandas as pd\nimport numpy as np\nimport io\nimport requests\nimport base64  # used by parse_contents below\nimport datetime  # used by parse_contents below\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\napp.config['suppress_callback_exceptions']=True\n\n# Setting up the data infrastructure\n\n\n# Goal: take in any dataset that fits the format, parse it into a df structure, apply TPOT to it,\n# and use the resulting ML model to predict risk scores.\n\n\nurl = \"https://raw.githubusercontent.com/Hoytgong/vgmed/master/finalBCdata.csv\"\ns = requests.get(url).content\ndf = pd.read_csv(io.StringIO(s.decode('utf-8')))\ndf_id = df.drop('phenotype', axis=1)\nfeatures = df_id.drop('patient_id', axis=1).values\n\ntraining_features, testing_features, training_target, testing_target = \\\n    train_test_split(features, df['phenotype'].values, random_state=42)\n\nexported_pipeline = ExtraTreesClassifier(bootstrap=False, random_state = 42, criterion=\"entropy\", max_features=0.1, min_samples_leaf=18, min_samples_split=20, n_estimators=100)\nexported_pipeline.fit(training_features, training_target)\n\ntesting_features_cancer = testing_features[testing_target == 1,:]\ntesting_features_healthy = testing_features[testing_target != 1,:]\n\nprob_healthy = exported_pipeline.predict_proba(testing_features_healthy)[:,1]\nprob_cancer = exported_pipeline.predict_proba(testing_features_cancer)[:,1]\n\nhist_data = [prob_healthy, prob_cancer]\ngroup_labels = ['Healthy', 'Breast Cancer']\ncolors = ['#3A4750', '#F64E8B']\n\n## Actual GUI\napp.layout = html.Div(id='test', children=[\n    html.H1('VGMED'),\n    html.H6('Predictive Gene Editing Variant Dashboard'),\n\n    dcc.Upload(\n        id='upload-data',\n        children=html.Div([\n            'Drag and Drop or ',\n            html.A('Select Files')\n        ]),\n        style={\n            'width': '100%',\n            'height': '60px',\n            'lineHeight': '60px',\n            'borderWidth': '1px',\n            'borderStyle': 'dashed',\n            'borderRadius': '5px',\n            'textAlign': 'center',\n            'margin': '10px'\n        },\n        multiple=True\n    ),\n    html.Div(id='output-data-upload'),\n\n\n    html.Label('Subject ID'),\n    dcc.Dropdown(id='subject-id',\n                 options=[{'label': i, 'value': i} for i in df['patient_id'].values.tolist()]),\n    dcc.RadioItems(\n        id='editing-choice',\n        options=[{'label': i, 
'value': i} for i in ['Edit SNP', 'Optimize SNP']]\n ),\n html.Button(id='submit-button', n_clicks=0, children='Submit'),\n\n dcc.Graph(id='population-distribution-graph'),\n\n html.Div(id='branch')\n\n])\n\n\n###### Defining Callback methods #####\n\n## Distribution Figure\n@app.callback(\n Output('population-distribution-graph', 'figure'),\n [Input('submit-button', 'n_clicks')],\n [State('subject-id', 'value')])\ndef update_graph(clicks, sub_id):\n x_row = df_id.loc[df_id.patient_id == int(sub_id), :]\n x_features = x_row.drop('patient_id', axis=1).values\n x_pos = exported_pipeline.predict_proba(x_features)[:, 1][0]\n\n fig = ff.create_distplot(hist_data, group_labels, bin_size=.35, curve_type='normal', show_hist=False, colors=colors)\n fig['layout'].update(title='Risk Score Distribution of All Patients')\n fig['layout'].update(shapes=[{'type': 'line', 'x0': x_pos, 'y0': 0, 'x1': x_pos, 'y1': 22,\n 'line': {'color': '#F64E8B','width': 2}, }])\n return fig\n\n\n## Defining the parse ability\ndef parse_contents(contents, filename, date):\n content_type, content_string = contents.split(',')\n\n decoded = base64.b64decode(content_string)\n try:\n if 'csv' in filename:\n # Assume that the user uploaded a CSV file\n df = pd.read_csv(\n io.StringIO(decoded.decode('utf-8')))\n elif 'xls' in filename:\n # Assume that the user uploaded an excel file\n df = pd.read_excel(io.BytesIO(decoded))\n except Exception as e:\n print(e)\n return html.Div([\n 'There was an error processing this file.'\n ])\n\n return html.Div([\n html.H5(filename),\n html.H6(datetime.datetime.fromtimestamp(date)),\n\n dash_table.DataTable(\n data=df.to_dict('records'),\n columns=[{'name': i, 'id': i} for i in df.columns]\n ),\n\n html.Hr(), # horizontal line\n\n # For debugging, display the raw contents provided by the web browser\n html.Div('Raw Content'),\n html.Pre(contents[0:200] + '...', style={\n 'whiteSpace': 'pre-wrap',\n 'wordBreak': 'break-all'\n })\n ])\n\n@app.callback(Output('output-data-upload', 'children'),\n [Input('upload-data', 'contents')],\n [State('upload-data', 'filename'),\n State('upload-data', 'last_modified')])\ndef update_output(list_of_contents, list_of_names, list_of_dates):\n if list_of_contents is not None:\n children = [\n parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n return children\n\n\n## Branched callback to nest children of either Optimize SNP or Edit SNP\n@app.callback(\n Output('branch', 'children'),\n [Input('submit-button', 'n_clicks')],\n [State('subject-id', 'value'),\n State('editing-choice', 'value')])\ndef return_optimized(clicks, sub_id, editing_choice):\n if editing_choice == 'Optimize SNP':\n dff = optimization_table(int(sub_id))\n table = ff.create_table(dff, height_constant=10)\n return html.Div([dcc.Graph(id='table', figure=table)\n ])\n elif editing_choice == 'Edit SNP':\n return html.Div([\n html.Label('Select SNP to modify'),\n dcc.Dropdown(\n id='SNP-dropdown',\n options=[{'label': i, 'value': i} for i in list(df.columns.values)[1:-1]]\n ),\n html.Div(id='text'),\n\n html.Label('Edit SNP to...'),\n dcc.Dropdown(id='SNP-values'),\n\n html.Button(id='indiv-submit', n_clicks=0, children='Edit'),\n html.Div(id='hidden')\n ])\n else:\n return -1\n\n\n## Individual Editing steps\n@app.callback(\n Output('text', 'children'),\n [Input('subject-id', 'value'), Input('SNP-dropdown', 'value')])\ndef return_curr_SNP_value(sub_id, SNP):\n df = pd.read_csv(io.StringIO(s.decode('utf-8')))\n value = df.loc[df.patient_id == int(sub_id), 
str(SNP)].values[0]\n return 'Your current SNP value is \"{}\"'.format(value)\n\n\n@app.callback(\n Output('SNP-values', 'options'),\n [Input('subject-id', 'value'),\n Input('SNP-dropdown', 'value')])\ndef return_editing_options(sub_id, SNP):\n df = pd.read_csv(io.StringIO(s.decode('utf-8')))\n list = [0, 1, 2]\n value = df.loc[df.patient_id == int(sub_id), str(SNP)].values[0]\n list.remove(value)\n return [{'label': i, 'value': i} for i in list]\n\n@app.callback(\n Output('hidden', 'children'),\n [Input('indiv-submit', 'n_clicks')],\n [State('subject-id', 'value'),\n State('SNP-dropdown', 'value'),\n State('SNP-values', 'value')])\ndef individual_editing(in_clicks, sub_id, SNP, new_SNP_val):\n if in_clicks > 0:\n indiv_df = individual_editing(sub_id, SNP, new_SNP_val)\n indiv_table = ff.create_table(indiv_df, height_constant=10)\n return html.Div([dcc.Graph(id='indiv-table', figure=indiv_table)])\n\n#Obtaining beta values\nlistofbetas = []\nfor i in range(training_features.shape[1]):\n logit = LogisticRegression()\n logit.fit(training_features, training_target)\n beta = logit.coef_.flatten()[i]\n listofbetas.append(beta)\nbeta_matrix = np.matrix(listofbetas).transpose()\n\n\n## Put Logit regressor here\ndef individual_editing(sub_id, SNP, new_SNP_val):\n all_info = []\n x_row = df_id.loc[df_id.patient_id == int(sub_id), :]\n x_features = x_row.drop('patient_id', axis=1).values\n\n original_prob = exported_pipeline.predict_proba(x_features)[:, 1][0] ############\n og_risk_score = x_features * beta_matrix\n og_risk_score = np.array(og_risk_score)[0][0]\n\n\n ori_val = x_row.loc[:, SNP].values[0]\n\n x_row.loc[:, SNP].values[0] = new_SNP_val\n new_x_features = x_row.drop('patient_id', axis=1).values\n new_prob = exported_pipeline.predict_proba(new_x_features)[:, 1][0] #########\n new_risk_score = new_x_features * beta_matrix\n new_risk_score = np.array(new_risk_score)[0][0]\n\n all_info.append(sub_id)\n all_info.extend((og_risk_score, new_risk_score, og_risk_score - new_risk_score))\n all_info.extend((SNP, ori_val, new_SNP_val))\n\n final_df = pd.DataFrame(all_info).T\n final_df.columns = ['Participant Idx', 'Original Risk Score', 'New Risk Score', 'Risk Delta', 'SNP Name',\n 'Ori. 
SNP Value', 'New SNP Value']\n return final_df\n\n\n## Function defining optimization table, individual editing works, not optimization\n\n## Put Logit regressor here\ndef optimization_table(sub_id):\n SNP_names = df.columns.values.tolist()[1:-1]\n n_snps = len(SNP_names)\n\n all_info = []\n x_row = df_id.loc[df_id.patient_id == int(sub_id), :]\n x_features = x_row.drop('patient_id', axis=1).values\n # original_prob = exported_pipeline.predict_proba(x_features)[:, 1][0] ##########\n og_risk_score = x_features * beta_matrix\n og_risk_score = np.array(og_risk_score)[0][0]\n\n mylistup = []\n mylistdown = []\n\n for snp in range(0, x_features.size):\n x_features[0][snp] = (x_features[0][snp] + 1) % 3\n scoreup = x_features * beta_matrix\n scoreup = np.array(scoreup)[0][0]\n mylistup.append(scoreup)\n\n x_features[0][snp] = (x_features[0][snp] - 2) % 3 # -2+1=-1\n scoredown = x_features * beta_matrix\n scoredown = np.array(scoredown)[0][0]\n mylistdown.append(scoredown)\n\n x_features[0][snp] = (x_features[0][snp] + 1) % 3 # change back\n\n # for snp in range(0, x_features.size):\n # x_features[0][snp] = (x_features[0][snp] + 1) % 3\n # probup = exported_pipeline.predict_proba(x_features.reshape(1, -1))[0, 1] ######\n # mylistup.append(probup)\n #\n # x_features[0][snp] = (x_features[0][snp] - 2) % 3 # -2+1=-1\n # probdown = exported_pipeline.predict_proba(x_features.reshape(1, -1))[0, 1] #######\n # mylistdown.append(probdown)\n #\n # x_features[0][snp] = (x_features[0][snp] + 1) % 3 # change back\n\n upbumplist = mylistup - og_risk_score\n downbumplist = mylistdown - og_risk_score\n uplist = upbumplist.tolist()\n downlist = downbumplist.tolist()\n\n completelist = uplist + downlist + [0] # 0 for no change\n min_index = np.argmin(completelist)\n min_value = min(completelist)\n best_score = min_value + og_risk_score\n all_info.append(sub_id) # participant idx\n\n all_info.extend((og_risk_score, best_score, og_risk_score - best_score)) #######\n if (min_index == (n_snps * 2 + 1)): # no change is best\n all_info.extend((\"N/A\", \"N/A\", \"N/A\"))\n else:\n SNP_idx = min_index % n_snps\n all_info.extend((SNP_names[SNP_idx], int(x_features[0][SNP_idx]),\n int((x_features[0][SNP_idx] - 1 + 2 * float(min_index <= n_snps)) % 3)))\n\n final_df = pd.DataFrame(all_info).T\n final_df.columns = ['Participant Idx', 'Original Risk Score', 'Best Possible Score', 'Risk Decrease', 'SNP Name',\n 'Ori. 
SNP Value', 'New SNP Value']\n return final_df\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"Hoytgong/vgmed","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72046555714","text":"from django import views\nfrom django.urls import path\nfrom rest_framework import routers\nfrom django.contrib.auth import views as auth_views\nfrom .views import *\n# from .views import index, VideoViewFilm, VideoViewMusic,VideoViewSerie,VideoViewDocumentaire, VideoDetail, VideoSearch \n# from serializers import MusicViewSerializer\n\n# router = routers.DefaultRouter()\n# router.register('music', MusicView)\n# router.register('film', FilmView)\n# router.register('episode', EpisodeView)\n# router.register('documantaire', DocumentaireView)\n# router.register('serie', SerieView)\n\nurlpatterns = [\n path('', homepage, name='home'),\n path('home/', home, name='homepage'),\n path('bar/', testbar, name='testbar'),\n path('add-favourite/', favorite, name='favourite'),\n path('favouriteList/', FavouriteVideo.as_view(), name='list_favourite'),\n\n path('add-watch-later/', watch_later, name='watch_later'),\n path('watchLaterList/', watchLaterVideo.as_view(), name='list_watchLater'),\n \n path('submit_review_film//', submit_review_film, name='submit_review_film'),\n path('submit_review_music//', submit_review_music, name='submit_review_music'),\n path('submit_review_episode//', submit_review_episode, name='submit_review_episode'),\n path('submit_review_documentaire//', submit_review_documentaire, name='submit_review_documentaire'),\n\n \n # path('register/', register, name='register'),\n path('inscription/', inscription, name='inscription'),\n path('profile/', profile_detail, name='profil_detail'),\n path('update_profile/',update_profile , name='update_user_profile'),\n path('update/',UserUpdateView.as_view() , name='profil_update'),\n\n\n\n # path('password/', auth_views.PasswordChangeView.as_view(template_name='account/password_change.html'), name='change_password'),\n path('password/', change_password, name='change_password'),\n path('password_success/', password_success, name='password_success'),\n \n path('search/', search_video, name='search_video'),\n path('listvideo/', listvideo, name='video_list'),\n path('listmusic/', listmusic, name='music_list'),\n path('listfilm/', listfilm, name='film_list'),\n path('listdocumentaire/', listdocumentaire, name='documentaire_list'),\n path('listepisode/', listepisode, name='episode_list'),\n path('listserie/', listserie, name='serie_list'),\n\n path('musicdetail/', musicdetail, name='music_detail'),\n path('filmdetail/', filmdetail, name='film_detail'),\n path('documentairedetail/', documentairedetail, name='documentaire_detail'),\n path('episodedetail/', episodedetail, name='episode_detail'),\n path('seriedetail/', seriedetail, name='serie_detail'),\n\n path('createchaine', create_chaine, name='create_chaine'),\n\n # path('views/', CountView, name='Number_Visite'),\n\n path('music/', showmusic, name='music'),\n path('film//', showfilm, name='film'),\n path('episode/', showepisode, name='episode'),\n path('documentaire/', showdocumentaire, name='documentaire'),\n \n path('film/commentaire//', commentFilm, name='comment_film'),\n path('music/commentaire//', commentMusic, name='comment_music'),\n path('episode/commentaire//', commentEpisode, name='comment_episode'),\n 
path('documentaire/commentaire//',commentDocumentaire, name='comment_documentaire'),\n\n \n ]\n\n \n # path('', index, name='index'),\n # path('film/', FilmView.as_view, name='film_view'),\n # path('music/', MusicView.as_view, name='video_view_music'),\n # path('serie/', EpisodeView.as_view, name='episode_View'),\n # path('documentaire/', DocumentaireView.as_view, name='documentaire_View'),\n # path('search/', SerieView.as_view, name='video_search'),\n\n\n# urlpatterns = [\n# path('', index, name='index'),\n# #path('Film /detail/', views.VideoView.as_view(), name='video'),\n# path('film/', VideoViewFilm, name='video_view_film'),\n# path('music/', VideoViewMusic, name='video_view_music'),\n# path('serie/', VideoViewSerie, name='video_view_serie'),\n# path('documentaire/', VideoViewDocumentaire, name='video_view_documentaire'),\n# path('search/', VideoSearch.as_view, name='video_search'),\n# path('', VideoDetail.as_view(), name='video_detail'),\n\n# ]\n","repo_name":"mouad-sellak/streaming-django","sub_path":"video/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6158759348","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.template import RequestContext\nfrom songs import forms, models\n\n@login_required\ndef add(request):\n if request.method == 'POST':\n form = forms.SongForm(request.POST)\n if form.is_valid():\n song = form.save(commit=False)\n song.state = 0\n song.suggested_by = request.user\n song.save()\n vote = models.Vote(user=request.user, song=song, vote=0)\n vote.save()\n return HttpResponseRedirect('/songs')\n else:\n form = forms.SongForm()\n return render(request, 'add.html', {'form': form},\n context_instance=RequestContext(request))\n\n@login_required\ndef delete(request, song_id):\n song = models.Song.objects.get(pk=song_id)\n if song and song.suggested_by == request.user:\n votes = models.Vote.objects.filter(song=song)\n for v in votes:\n v.delete()\n song.delete()\n return HttpResponseRedirect('/songs')\n\n return HttpResponseForbidden()\n\n@login_required\ndef vote(request, song_id, vote):\n song = models.Song.objects.get(pk=song_id)\n if not song:\n return HttpResponseBadRequest()\n\n vote_obj = models.Vote.objects.all().filter(song=song, user=request.user)\n if vote_obj:\n vote_obj = vote_obj[0]\n if vote == 'x':\n vote_obj.delete()\n return HttpResponse()\n else:\n vote_obj = models.Vote()\n vote_obj.user = request.user\n vote_obj.song = song\n\n vote_obj.vote = int(vote)\n vote_obj.save()\n return HttpResponse()\n\n@login_required\ndef arrange(request, song_id):\n song = models.Song.objects.get(pk=song_id)\n if not song:\n return HttpResponseBadRequest()\n\n if not request.user.has_perm('songs.arrange'):\n return HttpResponseBadRequest()\n\n song.has_willing_arranger = True\n song.save()\n return HttpResponse()\n\n@login_required\ndef index(request):\n songs = models.Song.objects.all()\n all_votes = models.Vote.objects.all()\n states = models.Song.STATES\n dictionary = {k: songs.filter(state=states.index(k))\n for k in states if 'proposed' != k}\n proposed = [{'song': s} for s in songs.filter(state=states.index('proposed'))]\n\n if request.GET.get('filter') == 'novote':\n # only show songs that the user hasn't voted on yet\n proposed = [p for p in proposed\n if not 
all_votes.filter(song=p['song'], user=request.user)]\n\n for v in proposed:\n votes = all_votes.filter(song=v['song'])\n vote_counts = [0, 0, 0]\n for vote in votes:\n vote_counts[vote.vote] += 1\n v['votes'] = vote_counts\n user_vote = all_votes.filter(song=v['song'], user=request.user)[:1]\n v['user_vote'] = user_vote.get().vote if user_vote else -1\n dictionary['proposed'] = sorted(proposed, key=lambda x: x['song'].score(), reverse=True)\n\n dictionary['is_arranger'] = request.user.has_perm('songs.arrange')\n\n return render(request, 'list.html', dictionary,\n context_instance=RequestContext(request))\n","repo_name":"craigatron/acaadmin","sub_path":"songs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"41055653086","text":"import sentry_sdk\nfrom fastapi import FastAPI\nfrom fastapi.exceptions import RequestValidationError\nfrom fastapi.middleware.trustedhost import TrustedHostMiddleware\nfrom sentry_sdk.integrations.asgi import SentryAsgiMiddleware\nfrom starlette.exceptions import HTTPException as StarletteHTTPException\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.middleware.httpsredirect import HTTPSRedirectMiddleware\n\nfrom .api.api_v1.api import api_router as api_v1_router\nfrom .core.config import Settings, get_settings\nfrom .errors import log_http_error, log_unhandled_exception, log_validation_error\nfrom .middleware import LoggingMiddleware, StructlogLoggingMiddlewareFactory\n\n\nclass FastAPIStarterTemplate:\n app: FastAPI\n settings: Settings\n\n def __init__(self) -> None:\n self.settings = self.init_settings()\n self.app = self.init_app(self.settings)\n\n def create_app(self) -> FastAPI:\n self.configure_logging()\n self.configure_error_handlers()\n self.configure_default_routes()\n self.configure_middleware()\n self.configure_sentry()\n return self.app\n\n def init_settings(self) -> Settings:\n return get_settings()\n\n def init_app(self, settings: Settings) -> FastAPI:\n return FastAPI(\n title=settings.APP_TITLE,\n description=settings.APP_DESCRIPTION,\n root_path=settings.ROOT_PATH,\n openapi_url=settings.OPENAPI_URL,\n docs_url=settings.DOCS_URL,\n redoc_url=settings.REDOC_URL,\n )\n\n def configure_logging(self) -> None:\n LoggingMiddleware(\n self.app,\n factory=StructlogLoggingMiddlewareFactory(),\n log_level=self.settings.LOG_LEVEL,\n dev=self.settings.LOG_DEV,\n )\n\n def configure_error_handlers(self) -> None:\n self.app.add_exception_handler(StarletteHTTPException, log_http_error)\n self.app.add_exception_handler(RequestValidationError, log_validation_error)\n self.app.add_exception_handler(Exception, log_unhandled_exception)\n\n def configure_default_routes(self) -> None:\n self.app.include_router(api_v1_router, prefix=self.settings.API_V1_STR)\n\n def configure_middleware(self) -> None:\n if self.settings.ALLOWED_HOSTS:\n self.app.add_middleware(\n TrustedHostMiddleware,\n allowed_hosts=[str(host) for host in self.settings.ALLOWED_HOSTS],\n )\n if self.settings.CORS_ALLOW_ORIGINS:\n self.app.add_middleware(\n CORSMiddleware,\n allow_origins=[str(origin) for origin in self.settings.CORS_ALLOW_ORIGINS],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n if self.settings.HTTPS_FORCE_REDIRECT:\n self.app.add_middleware(HTTPSRedirectMiddleware)\n\n def configure_sentry(self) -> None:\n if self.settings.SENTRY_DSN:\n SentryAsgiMiddleware(self.app)\n # pylint: 
disable=abstract-class-instantiated\n            sentry_sdk.init(\n                dsn=self.settings.SENTRY_DSN,\n                environment=self.settings.ENVIRONMENT,\n                debug=self.settings.SENTRY_DEBUG,\n                sample_rate=self.settings.SENTRY_SAMPLE_RATE,\n                traces_sample_rate=self.settings.SENTRY_TRACES_SAMPLE_RATE,\n            )\n","repo_name":"filipsnastins/fastapi-starter","sub_path":"src/fastapi_starter/app_factory.py","file_name":"app_factory.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12760444363","text":"#Design a program that receives two numbers using argv arguments\r\n#and prints the following arithmetical operations. (Python)\r\n\r\nimport sys\r\n\r\nx=int(sys.argv[1]) # first number\r\ny=int(sys.argv[2]) # second number\r\n\r\nsum=x+y # add\r\nprint(\"The sum is:\",sum)\r\n\r\nsub=x-y # subtract\r\nprint(\"The subtraction is:\",sub)\r\n\r\ntimes=x*y # multiply\r\nprint(\"The multiplication is:\",times)\r\n\r\ndiv=x/y # divide\r\nprint(\"The division is:\",div)\r\n\r\n","repo_name":"therichermx/StructuredPrograming2A","sub_path":"Unit2/Proyect/A2.py","file_name":"A2.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23454528601","text":"import sys\n\ndef minInvite(line):\n    sMax, people = line.split(\" \")\n    sMax = int(sMax)\n    numStanding, count = int(people[0]), 0\n    for i in range(1, sMax + 1):\n        extra = i - numStanding\n        if extra > 0:\n            count += extra\n            numStanding += extra\n        numStanding += int(people[i])\n    return count\n\n\ndef run():\n    inFile = sys.argv[1]\n    lines = [line.strip() for line in open(inFile)]\n    T = int(lines[0])\n\n    for i in range(1, T+1):\n        numInvited = minInvite(lines[i])\n        print(\"Case #\" + str(i) + \": \" + str(numInvited))\n\nrun()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/3268.py","file_name":"3268.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39075259071","text":"from django.shortcuts import render,redirect\nfrom consultapp.forms import *\nfrom consultapp.models import *\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate,logout,login\n\n# Create your views here.\ndef home(request):\n    return render(request,'consult/home.html')\n\ndef Login(request):\n    error=''\n    if request.method=='POST':\n        u=request.POST['uname']\n        p=request.POST['pwd']\n        user=authenticate(username=u,password=p)\n        try:\n            if user.is_staff:\n                login(request,user)\n                error='no'\n            else:\n                error='yes'\n        except:\n            error='yes'\n    d={'error':error}\n    return render(request,'consult/login.html',d)\n\ndef Logout(request):\n    if not request.user.is_staff:\n        return redirect('login')\n    logout(request)\n    return redirect('login')\n\ndef contactus(request):\n    return render(request,'consult/contactus.html')\n\ndef aboutus(request):\n    return render(request,'consult/aboutus.html')\n\ndef addorganisation(request):\n    if not request.user.is_staff:\n        return redirect(\"login\")\n    form=OrganisationForm()\n    if request.method==\"POST\":\n        form=OrganisationForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect('vieworganisation')\n    d={\"form\":form}\n    return render(request,'consult/addorganisation.html',d)\n\ndef vieworganisation(request):\n    if not request.user.is_staff:\n        return redirect('login')\n    org=Organisation.objects.all()\n    d={'org':org}\n    return 
render(request,'consult/view_organisation.html',d)\n\ndef deleteorganisation(request,pk):\n    if not request.user.is_staff:\n        return redirect('login')\n    org=Organisation.objects.get(id=pk)\n    org.delete()\n    return redirect('vieworganisation')\n\ndef addexperts(request):\n    if not request.user.is_staff:\n        return redirect(\"login\")\n    form=ExpertForm()\n    if request.method==\"POST\":\n        form=ExpertForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect('viewexperts')\n    d={\"form\":form}\n    return render(request,'consult/addexperts.html',d)\n\ndef viewexperts(request):\n    if not request.user.is_staff:\n        return redirect('login')\n    exp=Expert.objects.all()\n    d={'exp':exp}\n    return render(request,'consult/viewexperts.html',d)\n\ndef deleteexpert(request,pk):\n    if not request.user.is_staff:\n        return redirect('login')\n    exp=Expert.objects.get(id=pk)\n    exp.delete()\n    return redirect('viewexperts')\n\ndef editexpert(request,pk):\n    if not request.user.is_staff:\n        return redirect('login')\n    #if viewexperts(request):\n    exp=Expert.objects.get(id=pk)\n    if request.method==\"POST\":\n        form=ExpertForm(request.POST,instance=exp)\n        if form.is_valid():\n            form.save()\n            return redirect('viewexperts')\n    return render(request,'consult/update.html',{'exp':exp})\n    #elif vieworganisation(request):\ndef editoraganisation(request,pk):\n    org=Organisation.objects.get(id=pk)\n    if request.method==\"POST\":\n        form=OrganisationForm(request.POST,instance=org)\n        if form.is_valid():\n            form.save()\n            return redirect('vieworganisation')\n    return render(request,'consult/updateorganisation.html',{'org':org})\n","repo_name":"dasankit1411/my_repo2","sub_path":"consultant/consultapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74196848193","text":"import discord\r\n\r\nimport persistent\r\nimport last_fb_post\r\n\r\npersistent.lock.acquire()\r\ntry:\r\n    persistent.load_data()\r\nexcept FileNotFoundError:\r\n    persistent.create_datafile()\r\npersistent.lock.release()\r\n\r\n\r\nclient = discord.Client()\r\n\r\n\r\ndef get_main_channel(server):\r\n    try:\r\n        text_channels = [chan for chan in server.channels if chan.type == discord.ChannelType.text]\r\n\r\n        for chan in text_channels:\r\n            if chan.name == 'general' or chan.name == 'glowny':\r\n                return chan\r\n    except:\r\n        return None\r\n\r\n    return None\r\n\r\n\r\ndef get_news_channel(server):\r\n    try:\r\n        text_channels = [chan for chan in server.channels if chan.type == discord.ChannelType.text]\r\n\r\n        for chan in text_channels:\r\n            if chan.name == 'ogloszenia':\r\n                return chan\r\n    except:\r\n        return None\r\n\r\n    return None\r\n\r\n\r\ndef get_pstr(server):\r\n    pointemoji = None\r\n    emojiname = None\r\n\r\n    if server.name.lower() == 'rasputin':\r\n        emojiname = 'putin'\r\n    elif server.name.lower() == 'v-santos.pl':\r\n        emojiname = 'vsantos'\r\n\r\n    if emojiname:\r\n        for emoji in server.emojis:\r\n            if emoji.name == emojiname:\r\n                pointemoji = emoji\r\n                break\r\n\r\n    if pointemoji:\r\n        return '\\n' + str(pointemoji) + ' '\r\n    else:\r\n        return '\\n- '\r\n\r\n\r\ndef is_worthy(command_author):\r\n\r\n    # Maarrk\r\n    if command_author.id == '176918774618914826':\r\n        return True\r\n    # Toyer\r\n    if command_author.id == '256169168884203520':\r\n        return True\r\n\r\n    roles = command_author.roles\r\n    if len(roles) > 0:\r\n        for role in roles:\r\n            if role.name.lower() == 'testrole':\r\n                return True\r\n            if role.name.lower() == 'zarząd':\r\n                return 
True\n if role.name.lower() == 'support':\n return True\n if role.name.lower() == 'moderator':\n return True\n\n return False\n\n\n@client.event\nasync def on_message(message):\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n\n # message is meant for this bot\n if message.content.lower().startswith(('vbot', 'v-bot')):\n\n words = message.content.split()\n if len(words) > 1:\n if words[1].lower() == 'sens':\n\n pstr = get_pstr(message.server)\n\n msgs = ['**Zamierzamy zgodnie z planem uruchomić zarówno serwer voice i tekstowy ponieważ:**',\n 'Decyzja o kształcie serwera nie była podjęta w 15 minut, owszem przemyśleliśmy to',\n 'Wiemy co powoduje podział graczy na dwa serwery',\n '''Jesteśmy dopiero w fazie Alfa. Dla nieoczytanych wklejam z Wikipedii:\n*Alpha software can be unstable and could cause crashes or data loss. Alpha software may not contain all of the features that are planned for the final version.*''',\n 'Serwer nie jest w ostatecznym kształcie, wybraliśmy opcję która pozwoli nam zebrać najwięcej wiedzy',\n 'Mamy chętnych do gry na obu wersjach: *Vox populi, vox Dei*']\n\n msg = pstr.join(msgs)\n await client.send_message(message.channel, msg)\n return\n\n if words[1].lower() == 'faq':\n msg = '**Najczęściej zadawane pytania:** https://www.facebook.com/vsantosrp/posts/1121559141309289'\n await client.send_message(message.channel, msg)\n return\n\n if words[1].lower() == 'fb':\n post_content = last_fb_post.get_new_post()\n\n if post_content:\n chan = get_news_channel(message.server)\n msg = '**Pojawił się nowy post na fanpage V-Santos!**\\n' + post_content\n\n if chan:\n await client.send_message(chan, msg)\n return\n else:\n await client.send_message(message.channel, msg)\n return\n\n else:\n await client.send_message(message.author, 'Nie pobrano żadnego nowego posta z fb')\n if not message.channel.type == discord.ChannelType.private:\n await client.delete_message(message)\n return\n\n\n if words[1].lower() == 'data':\n # only authorised users can use this command\n if not is_worthy(message.author):\n await client.send_message(message.author, 'Nie masz uprawnien do tej komendy')\n if not message.channel.type == discord.ChannelType.private:\n await client.delete_message(message)\n return\n\n msg = 'Nazwa kanału: %s' % message.channel.name\n await client.send_message(message.channel, msg)\n return\n\n if words[1].lower() == 'join':\n # only authorised users can use this command\n if not is_worthy(message.author):\n await client.send_message(message.author, 'Nie masz uprawnien do tej komendy')\n if not message.channel.type == discord.ChannelType.private:\n await client.delete_message(message)\n return\n\n if len(words) >= 2:\n member_joined = message.server.get_member_named(words[2])\n if member_joined:\n await on_member_join(member_joined)\n else:\n await client.send_message(message.author, 'Nie znalazłem użytkownika \"%s\" '\n 'wśród obecnie aktywnnych.'\n 'Spróbuj wpisać jego @mention po \"join\"' % words[2])\n if not message.channel.type == discord.ChannelType.private:\n await client.delete_message(message)\n else:\n await client.send_message(message.author, 'Podaj nazwę użytkownika po \"join\"')\n if not message.channel.type == discord.ChannelType.private:\n await client.delete_message(message)\n return\n\n if words[1].lower() == 'maul':\n # only authorised users can use this command\n if not is_worthy(message.author):\n await client.send_message(message.author, 'Nie masz uprawnien do tej komendy')\n if not message.channel.type == 
discord.ChannelType.private:\n await client.delete_message(message)\n return\n\n maul = 'Maul'\n maul_member = message.server.get_member_named('Maul#4420')\n if maul_member:\n maul = maul_member.mention\n\n msg = 'Użytkownik %s 10 lipca 2017 o 20:30 napisał:\\n' \\\n '***Wywalcie tego vbota to jest zmarnowanie produktywności ludzkiej ' \\\n 'na coś co i tak nie ma praktycznego zastosowania***' % maul\n\n await client.send_message(message.channel, msg)\n return\n\n # msg = message.author.mention + ' nie zrozumiałem polecenia\\n' \\\n # 'Ale nie lękaj się, V-Bot czuwa'\n # await client.send_message(message.channel, msg)\n await client.send_message(message.author, 'Nie zrozumiałem polecenia \"%s\"' % message.content)\n if not message.channel.type == discord.ChannelType.private:\n await client.delete_message(message)\n return\n\n elif len(words) == 1:\n msg = message.author.mention + ' nie lękaj się, V-Bot czuwa'\n await client.send_message(message.channel, msg)\n\n\n@client.event\nasync def on_member_join(member):\n chan = get_main_channel(member.server)\n tada = '\\U0001F389'\n msg = '%s **Witamy nowego użytkownika %s!** %s \\n Ekipa V-Santos życzy Ci miłej gry na naszym serwerze' \\\n % (tada, member.mention, tada)\n\n if chan:\n await client.send_message(chan, msg)\n\n msgs_priv = ['**Witaj na naszym serwerze, jestem V-Bot**',\n 'Wyślę Ci kilka rzeczy które mogą się przydać na początku:',\n 'Link do forum: *(musisz tam założyć konto żeby zagrać na V-Santos)*',\n 'Nasz fanpage na Facebook: ',\n '**Najczęściej zadawane pytania:** ']\n\n await client.send_message(member, '\\n'.join(msgs_priv))\n return\n\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\n\nclient.run('put-token-here-if-you-ever-use-it-again')\n","repo_name":"Maarrk/v-bot","sub_path":"command_response.py","file_name":"command_response.py","file_ext":"py","file_size_in_byte":9175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37639070702","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2016年12月14日\n\n@author: who8736\n'''\n\nimport numpy as np\nfrom flask import render_template, redirect, url_for\nfrom flask import send_file\nfrom bokeh.embed import components\nfrom bokeh.resources import INLINE\n# from bokeh.util.string import encode_utf8\n\nfrom plot import plotKline, BokehPlot\nfrom plot import PlotProfitsInc\n# from report import report1 as guzhiReport\nfrom report import reportValuation\nfrom report import reportIndex\nfrom sqlrw import (getChiguList, getGuzhiList, getYouzhiList,\n getStockName, readLastTTMPE, readCurrentClose,\n readCurrentPEG, readPERate, readStockKline, readIndexKline,\n readStockList, writeChigu, readValuationSammary,\n readProfitsIncAdf)\nfrom misc import tsCode\nfrom . 
import app\nfrom .forms import StockListForm\n\n\n@app.route('/')\ndef index():\n    testText = 'testText, 中文'\n    return render_template('index.html', testText=testText)\n\n\n@app.route('/stocklist', methods=[\"GET\", \"POST\"])\ndef setStockList():\n    chigu = getChiguList()\n    chiguStr = \"|\".join([i for i in chigu])\n    form = StockListForm()\n    # form.stockList = stockListStr\n    if form.validate_on_submit():\n        chiguStr = form.stockList.data\n        # print(stockListStr)\n        chigu = chiguStr.split(\"|\")\n        chigu = [tsCode(ts_code) for ts_code in chigu]\n        allstocks = readStockList().ts_code.to_list()\n        checkFlag = True\n        for ts_code in chigu:\n            if ts_code not in allstocks:\n                checkFlag = False\n                print('%s is not a valid ts_code' % ts_code)\n                break\n        if checkFlag:\n            print('all ok')\n            writeChigu(chigu)\n            return redirect(url_for('index'))\n    return render_template('stocklist.html',\n                           form=form,\n                           stockListStr=chiguStr)\n\n\n@app.route('/reporttype/<typeid>')\ndef reportnav(typeid):\n    if typeid == 'chigu':\n        stockList = getChiguList()\n    elif typeid == 'youzhi':\n        stockList = getYouzhiList()\n    else:\n        stockList = getGuzhiList()\n\n    stockReportList = []\n    for ts_code in stockList:\n        stockName = getStockName(ts_code)\n        stockClose = readCurrentClose(ts_code)\n        pe = readLastTTMPE(ts_code)\n        peg = readCurrentPEG(ts_code)\n        pe200, pe1000 = readPERate(ts_code)\n        stockReportList.append([ts_code, stockName,\n                                stockClose, pe, peg, pe200, pe1000])\n    return render_template('reportnav.html', stockList=stockReportList)\n\n\n# @app.route('/report/<ts_code>')\n# def reportView(ts_code):\n#     stockItem = guzhiReport(ts_code)\n#     # reportstr = reportstr[:20]\n#     # reportstr = 'test'\n#     return render_template('report.html',\n#                            stock=stockItem)\n\n\n@app.route('/valuationtype/<typeid>')\ndef valuationNav(typeid):\n    df = readValuationSammary()\n    if typeid == 'chigu':\n        df = df[df['ts_code'].isin(getChiguList())]\n    elif typeid == 'youzhi':\n        df = df[(df.pf >= 5) & (df.pe < 30)]\n    # stockReportList = np.array(df).tolist()\n    return render_template('valuationnav.html', stocksDf=df)\n\n\n@app.route('/valuation/<ts_code>')\ndef valuationView(ts_code):\n    stockItem = reportValuation(ts_code)\n    # reportstr = reportstr[:20]\n    # reportstr = 'test'\n    return render_template('valuation.html',\n                           stock=stockItem)\n\n\n@app.route('/test')\ndef test():\n    stocks = readProfitsIncAdf()\n    print('profits_inc_adf')\n    print(stocks.head())\n    return render_template('test.html', stocks=stocks)\n\n\n@app.route('/test1')\ndef test1():\n    return render_template('test1.html')\n\n\n@app.route('/test2')\ndef test2():\n    stocks = readProfitsIncAdf()\n    return render_template('test2.html', stocks=stocks)\n\n\n@app.route('/klineimg/<ts_code>')\ndef klineimg(ts_code):\n    plotImg = plotKline(ts_code)\n    plotImg.seek(0)\n    return send_file(plotImg,\n                     attachment_filename='img.png',\n                     as_attachment=True)\n\n\n@app.route('/stockklineimgnew/<ts_code>')\ndef stockklineimgnew(ts_code):\n    df = readStockKline(ts_code, days=1000)\n    return _klineimg(ts_code, df)\n\n\n@app.route('/indexklineimgnew/<ID>')\ndef indexklineimgnew(ID):\n    df = readIndexKline(ID, days=3000)\n    return _klineimg(ID, df)\n\n\ndef _klineimg(ID, df):\n    # grab the static resources\n    js_resources = INLINE.render_js()\n    css_resources = INLINE.render_css()\n\n    plotImg = BokehPlot(ID, df)\n    scripts, div = components(plotImg.plot())\n    # return render_template(\"plotkline.html\", the_div=div, the_script=scripts)\n    html = render_template(\n        'plotkline.html',\n        plot_script=scripts,\n        plot_div=div,\n        js_resources=js_resources,\n        css_resources=css_resources,\n    )\n    # return
@app.route('/profitsinc/<ts_code>')\ndef profitsIncImg(ts_code):\n    js_resources = INLINE.render_js()\n    css_resources = INLINE.render_css()\n\n    plotImg = PlotProfitsInc(ts_code,\n                             startDate='20150331',\n                             endDate='20191231')\n    scripts, div = components(plotImg.plot())\n    # return render_template(\"plotkline.html\", the_div=div, the_script=scripts)\n    html = render_template(\n        'plotkline.html',\n        plot_script=scripts,\n        plot_div=div,\n        js_resources=js_resources,\n        css_resources=css_resources,\n    )\n    return html\n\n\n@app.route('/indexinfo/<ID>')\ndef indexInfo(ID):\n    stockItem = reportIndex(ID)\n    return render_template('indexinfo.html',\n                           stock=stockItem)\n","repo_name":"who8736/stockdatamanage","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24334112815","text":"# Write a program that prints a set of elements. On the first line, you will receive two numbers - n and m,\r\n# separated by a single space - representing the lengths of two separate sets. On the next n + m lines,\r\n# you will receive n numbers, which are the numbers in the first set, and m numbers, which are in the second set.\r\n# Find all the unique elements that appear in both and print them on separate lines (the order does not matter).\r\n#\r\nn, m = [int(x) for x in input().split(\" \")]\r\nfirst = set()\r\nsecond = set()\r\nfor _ in range(n):\r\n    first.add(int(input()))\r\nfor _ in range(m):\r\n    second.add(int(input()))\r\nfinal = first.intersection(second)\r\nfor el in final:\r\n    print(el)","repo_name":"TomaMishev/Python_3_Advanced","sub_path":"2. Tuples and Sets/Excercises/P02_set_of_elements.py","file_name":"P02_set_of_elements.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20678497025","text":"import cv2\nfrom ultralytics import YOLO\nimport time\n\nimg_pth = \"/Users/kimjunho/Desktop/컴퓨터비전3-1/[CV]A3/img_example.JPG\"\n\nmodel = YOLO(\"yolov8n.pt\")\n \nstart_time = time.time()\nresults = model(source=img_pth)\nend_time = time.time()\nexecution_time = end_time - start_time\n\nres_plotted = results[0].plot()\n\ncv2.imshow(\"result\", res_plotted)\ncv2.waitKey(0)\n\nprint(f\"Object detection executed in {execution_time:.4f} seconds.\")","repo_name":"junho2000/uni_computervision","sub_path":"assignment3/yolov8.py","file_name":"yolov8.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20963034403","text":"# imports\nimport os\nimport discord\nfrom discord.ext.commands import Bot\nfrom keepAlive import keep_alive\n\nfrom PIL import Image, ImageFont, ImageDraw\nfrom io import BytesIO\nimport functions as f\n\n#on startup\nkeep_alive()\nmy_secret = os.environ['token']\nclient = Bot(command_prefix=\"!\")\n\n# ready\n@client.event\nasync def on_ready():\n    print(f'{client.user} has connected to Discord!')\n\n#only use client.command\n@client.command()\nasync def ping(ctx):\n    await ctx.reply(\"pong!\")\n\n@client.command()\nasync def avatar(ctx, a):\n    print(a)\n    disallowed_characters = \"\"\n    for character in disallowed_characters:\n\t    a = a.replace(character, \"\")\n    user = await ctx.guild.query_members(user_ids=[int(a)])\n    user = user[0]\n    await ctx.send(user.avatar_url)\n\n\n@client.command()\nasync def addclass(ctx, a, b, c):\n    if 
f.check_if_legit_name(name=a.upper()):\n        something = f.add_classes_to_profile(user_id=ctx.author.id, user_name=ctx.author.name, classname=a, classnum=b, classsec=c, master_user_list=f.return_master_list())\n        f.update_master_list(something)\n        await ctx.send(\"Class added!\")\n    else:\n        await ctx.send(\"Uh oh! Check your class name for errors because that didn't work or the class doesn't exist!\")\n        return\n    print(something)\n    \n\n@client.command()\nasync def removeclass(ctx, a, b, c):\n    if f.check_if_legit_name(name=a.upper()):\n        something = f.remove_classes_from_profile(user_id=ctx.author.id, user_name=ctx.author.name, classname=a, classnum=b, classsec=c, master_user_list=f.return_master_list())\n    else:\n        await ctx.send(\"Uh oh! The class name you just entered is not listed within your currently enrolled courses! Please type !profile to see your classes\")\n        return  # bail out here: 'something' is undefined on this branch\n    print(something)\n    f.update_master_list(something) \n\n@client.command()\nasync def getUser(ctx, a=None):\n    await ctx.send(f.getUser(ctx, a))\n\n@client.command()\nasync def checkclass(ctx, a):\n    if f.check_if_legit_name(name=a.upper()):\n        await ctx.send(\"That class is legit!\")\n    else:\n        await ctx.send(\"Uh oh! Check your class name for errors because that class doesn't exist!\")\n\n@client.command()\nasync def profile(ctx, a=None):\n    if a == None:\n        user = ctx.author\n    else:\n        disallowed_characters = \"\"\n        for character in disallowed_characters:\n\t        a = a.replace(character, \"\")\n        user = await ctx.guild.query_members(user_ids=[int(a)])\n        user = user[0]\n\n    \n\n    img = Image.open(\"honeycomb.jpg\")\n    draw = ImageDraw.Draw(img)\n    fnt = ImageFont.truetype(\"Pillow/Tests/fonts/FreeMono.ttf\", 75)\n    fnt2 = ImageFont.truetype(\"Pillow/Tests/fonts/FreeMono.ttf\", 100)\n\n    asset = user.avatar_url_as(size = 128)\n    data = BytesIO(await asset.read())\n    \n    #pfp\n    pfp = Image.open(data)\n    pfp = pfp.resize((750,750))\n    img.paste(pfp, (50,50))\n    \n\n    #card formatting\n    name = user.name #text is username\n    \n    classN = \"Classes:\"\n\n\n    leftclassadj = 850\n    rightclassadj = 1425\n    topclassadj = 225\n    vertclassmult = 160\n    description = \"Description: \"\n    \n    \n\n    draw.text((leftclassadj, 50), name, (0,0,0), font=fnt2)\n    draw.text((leftclassadj, 125), \"______________________\", (0,0,0), font=fnt)\n\n    userMatrix = f.getUser(ctx, a)\n    print(userMatrix)\n    for i in range(0,len(userMatrix),2):\n        draw.text((leftclassadj, topclassadj+vertclassmult*i/2), userMatrix[i], (0,0,0), font=fnt)\n        if i+1 in range(len(userMatrix)):\n            draw.text((rightclassadj, topclassadj+vertclassmult*i/2), userMatrix[i+1], (0,0,0), font=fnt)\n    \n    \n    \n    #draw.text((leftclassadj, topclassadj+vertclassmult*2), class5, (0,0,0), font=fnt)\n    #draw.text((rightclassadj, topclassadj+vertclassmult*2), class6, (0,0,0), font=fnt)\n    draw.text((50, 850), description, (0, 0, 0), font=fnt2)\n    img.save('sample-out.jpg')\n    await ctx.send(file=discord.File('sample-out.jpg'))\n\n@client.command()\nasync def infGet(ctx, a):\n    await ctx.send(f.getUser(a))\nclient.run(my_secret)","repo_name":"abbiecouvillon/HowdyHack2021Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"914884102","text":"import os\nfrom flask import Flask, request\nfrom flask_cors import CORS\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torchvision import transforms\nimport torch\nimport cv2\nimport numpy as np\nimport base64\n\napp = 
Flask(__name__)\nCORS(app)\nUPLOAD_FOLDER = \"static\"\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\nDEVICE = \"cpu\"\nMODEL = None\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3,16,kernel_size=3,padding=1)\n self.conv2 = nn.Conv2d(16,8,kernel_size=3,padding=1)\n self.fc1 = nn.Linear(8*8*8,32)\n self.fc2 = nn.Linear(32,2)\n def forward(self,x):\n out = F.max_pool2d(torch.tanh(self.conv1(x)),2)\n out = F.max_pool2d(torch.tanh(self.conv2(out)),2)\n out = out.view(-1,8*8*8)\n out = torch.tanh(self.fc1(out))\n out = self.fc2(out)\n return out\n\n\ndef predict(image, model):\n mean = (0.7369, 0.6360, 0.5318)\n std = (0.3281, 0.3417, 0.3704)\n transformations_test = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)])\n img = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n img = cv2.resize(img,(32,32))\n img_as_tensor = transformations_test(img)\n s = nn.Softmax(dim=1)\n batch = img_as_tensor.unsqueeze(0)\n out = model(batch)\n fresh_percent = s(out)\n\n return int(fresh_percent[0][0].item()*100)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef upload_predict():\n if request.method == \"POST\":\n print(request.files)\n image_file = request.files[\"image\"]\n if image_file:\n image = cv2.imdecode(np.frombuffer(image_file.read(), np.uint8), cv2.IMREAD_UNCHANGED)\n MODEL = Net()\n MODEL.load_state_dict(torch.load(\"trained-model.pt\", map_location=torch.device(DEVICE)))\n pred = predict(image, MODEL)\n image_content = cv2.imencode('.jpg', image)[1].tobytes()\n encoded_image = base64.encodebytes(image_content)\n to_send = 'data:image/jpg;base64, ' + str(encoded_image, 'utf-8')\n return {\n 'freshness':pred\n }\n return {\n 'status':'Site is Up 🚀'\n }\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"qualiphal/freshness-detector-backend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3225494577","text":"import io\nimport json\nimport sys \nimport string\nfrom recipe import Recipe\nfrom collection import Collection\nimport configure\n\nimport random\nrandom.seed(0)\n\ninput_file_name = sys.argv[1] if len(sys.argv) > 1 else \"./input/brunkhorst_recipes_formatted.txt\"\noutput_file_name = sys.argv[2] if len(sys.argv) > 2 else \"./output/brunkhorst_recipes_formatted_dynamo.json\"\n\ninput_file = io.open(input_file_name, \"r\", encoding='utf8')\n\n# List of tuples. The first element in each tuple is a list of all the raw lines in the recipe. \n# Second element is the Category object associated with the recipe\nraw_recipe_list = []\n# List of raw lines of the recipe\ncurrent_recipe_raw_lines = []\n\ncurrent_collection = None\ncollection_id = 100\ncollection_dict = dict()\n\n# Create the list of raw recipe details\nfor line in input_file:\n\tif \"@@@\" in line:\n\t\t# @@@ is the delimiter between recipes. 
When this is encountered, append the current list of lines\n\t\t# to recipe_list and create a new list of lines for the next recipe\n\t\traw_recipe_list.append((current_recipe_raw_lines, current_collection))\n\t\tcurrent_recipe_raw_lines = []\n\telif line[0] == \"-\":\n\t\t# These lines specify the collection that the recipe was stored in\n\t\t# Ex: -APPETIZERS AND BEVERAGES\n\t\tcollection_str = string.capwords(line.strip(\"-\"))\n\t\tcurrent_collection = Collection(collection_id, collection_str)\n\t\tcollection_dict[collection_str] = current_collection\n\t\tcollection_id += 1\n\telse: \n\t\t# Append all other lines\n\t\tcurrent_recipe_raw_lines.append(line.strip())\n\n\nrecipe_id = 100\nrecipe_class_list = list()\n\n# Create the list of Recipe objects that will become the recipe rows in the dynamo table\nfor raw_lines, collection in raw_recipe_list:\n\trecipe = Recipe(raw_lines, recipe_id, collection)\n\tif recipe is not None and hasattr(recipe, \"title\"):\n\t\trecipe_class_list.append(recipe)\n\t\trecipe_id += 1\n\n# Now that the recipes have been parsed and added to Collections, cycle through each collection\n# If a collection has less than 10 members, check the configure.py file to see what the backup collections\n# we should use to fill in the remaining recipe suggestions. If there are not enough recipes to satisfy the \n# requirement, request the user updates the configure.py file\n# Ex. \"Main Dish - Turkey\" collection has only one member, so \"Main Dish - Chicken\" recipes should be used for additional recipe suggestions\nprint(\"\\n\".join([f\"{collection.name} contains {len(collection.recipe_list)} recipes\" for collection in collection_dict.values()]))\n\n
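# Worked example (editor note; counts are illustrative, names taken from the comment above):\n# if \"Main Dish - Turkey\" holds 1 recipe and configure.BACKUP_COLLECTIONS maps it to\n# [\"Main Dish - Chicken\"] with 12 recipes, the loop below pops that one backup and stops once 1 + 12 >= 11.\n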
for collection_name, collection in collection_dict.items():\n\tnum_recipes = len(collection.recipe_list)\n\n\t# We want 10 suggestions per recipe, so need to ensure there are 11 available in the collections (because a recipe can't recommend itself - that would be narcissistic)\n\twhile num_recipes < 11:\n\t\tif collection_name not in configure.BACKUP_COLLECTIONS:\n\t\t\traise ValueError(f\"{collection_name} only has {num_recipes} recipes but has no backup collections specified in configure.py\")\n\t\telse:\n\t\t\ttry:\n\t\t\t\tbackup_name = configure.BACKUP_COLLECTIONS[collection_name].pop(0)\n\t\t\texcept IndexError:\n\t\t\t\traise ValueError(f\"Collection {collection_name} failed to specify more than 10 recipes. Gave {num_recipes} including those from {collection.backup_collections}\")\n\t\t\t\n\t\t\tif backup_name not in collection_dict:\n\t\t\t\traise ValueError(f\"{collection_name} in configure.py specified {backup_name} as a backup, but it does not exist\")\n\t\t\telse:\n\t\t\t\tbackup_collection = collection_dict[backup_name]\n\t\t\t\tcollection.backup_collections.append(backup_collection)\n\t\t\t\tnum_recipes += len(backup_collection.recipe_list)\n\t\t\n# Add the Dynamo JSON details to our running list of dynamo records to upload\ndynamo_records = list()\n\n# For each collection, add the records to the dynamo list\nfor collection in collection_dict.values():\n\tdynamo_records.append(collection.get_dynamo_dictionary())\n\n# For each recipe, randomly generate recipe suggestion from recipes in its category/backup categories\nfor recipe in recipe_class_list:\n\tdynamo_records.extend(recipe.get_dynamo_dictionaries())\n\tdynamo_records.extend(recipe.collection.get_recipe_suggestions(recipe))\n\n# Write lines to output file\noutput_file = io.open(output_file_name, \"w\", encoding='utf8')\nrecipe_dictionary = {\"Recipe\": dynamo_records}\n\noutput_file.write(json.dumps(recipe_dictionary, indent=4))\noutput_file.close()\ninput_file.close()\n\t\n","repo_name":"collinmcfadden/recipe-parser","sub_path":"recipe_parser/recipe_parser.py","file_name":"recipe_parser.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2014878839","text":"try:\n    import networkx\n    from matplotlib import pyplot\nexcept ImportError:\n    networkx = None\n    pyplot = None\n\n\ndef default_edge_weight(a, b):\n    \"\"\"Default edge weight function used in build().\"\"\"\n    return 1.0\n\n\ndef build(root, get_children, get_edge_weight=default_edge_weight):\n    \"\"\"Create a networkx DiGraph object equivalent to the tree rooted at 'root'. The children\n    of a node 'n' are obtained by 'get_children(n)'. Edge weights are obtained through\n    'get_edge_weight(a, b)'.\"\"\"\n    graph = networkx.DiGraph()\n    stack = [root]\n    while len(stack) > 0:\n        node = stack.pop()\n        for child in get_children(node):\n            graph.add_edge(node, child, weight=get_edge_weight(node, child))\n            stack.append(child)\n    return graph\n\n\ndef find_root(tree):\n    \"\"\"Given a DiGraph representing a tree, find its root node.\"\"\"\n    potential_roots = []\n    for node, in_degree in tree.in_degree():\n        if in_degree == 0:\n            potential_roots.append(node)\n    if len(potential_roots) != 1:\n        raise ValueError(\"argument graph is not a tree\")\n    return potential_roots[0]\n\n\ndef height(tree, root=None):\n    \"\"\"Computes the height of the tree rooted at 'root'. 
'tree' is a networkx DiGraph object.\"\"\"\n if root is None:\n root = find_root(tree)\n children = tree[root]\n if len(children) == 0:\n return 1\n else:\n return 1 + max(height(tree, child) for child in children)\n\n\ndef draw(tree, root=None, get_children=None, highlight_paths=(),\n colormap=\"jet\", axes=None, show=True):\n \"\"\"Display a tree using draw_networkx_edges().\"\"\"\n if root is None:\n root = find_root(tree)\n if axes is None:\n pyplot.figure(1)\n axes = pyplot.subplot(1, 1, 1)\n # compute the colors of the edges of the tree according to the argument 'colormap'\n if colormap is not None:\n weights = [d[\"weight\"] for _, _, d in tree.edges_iter(data=True)]\n num_weights = [w for w in weights if w is not None]\n max_weight = None\n delta_weight = 0.0\n if len(num_weights) > 0:\n max_weight = max(num_weights)\n min_weight = min(num_weights)\n delta_weight = float(max_weight - min_weight)\n if delta_weight == 0.0:\n edge_color = \"black\"\n else:\n black = (0.0, 0.0, 0.0, 1.0)\n cmap = pyplot.get_cmap(colormap)\n edge_color = [black if w is None else cmap((max_weight - w) / delta_weight)\n for w in weights]\n else:\n edge_color = \"black\"\n # compute the layout of the nodes in the tree and draw the tree's edges\n coords = layout(tree, root, get_children)\n networkx.draw_networkx_edges(tree, coords, ax=axes, arrows=False, edge_color=edge_color)\n # add highlights to the paths in 'highlight_paths'\n for path in highlight_paths:\n XYs = [coords[v] for v in path]\n Xs = [xy[0] for xy in XYs]\n Ys = [xy[1] for xy in XYs]\n axes.plot(Xs, Ys, color=\"black\", linewidth=5, dashes=[5, 15], marker=\"o\",\n markersize=5, markerfacecolor=\"black\", markeredgewidth=0, zorder=1)\n # configure the axes to show the [0, 1] x [0, 1] region and display no ticks\n axes.set_xlim(0, 1)\n axes.set_ylim(0, 1)\n axes.set_xticks([])\n axes.set_yticks([])\n if show:\n pyplot.interactive(True)\n pyplot.show()\n return axes\n\n\ndef layout(tree, root=None, get_children=None):\n \"\"\"Create a layout (a {node: (x, y)} dictionary) of the tree rooted at 'root' in the square\n region [0, 1]x[0, 1].\"\"\"\n if root is None:\n root = find_root(tree)\n coords = {}\n h = height(tree, root)\n dy = 1.0 / (h + 1.0)\n ys = [dy * i for i in xrange(h, 0, -1)]\n _layout(tree, root, get_children, depth=0, x_min=0.0, x_max=1.0, ys=ys, coords=coords)\n return coords\n\n\ndef _layout(tree, root, get_children, depth, x_min, x_max, ys, coords):\n \"\"\"Private function called by layout().\"\"\"\n # place the root of this subtree at the center of the argument x interval\n x = (x_min + x_max) * 0.5\n y = ys[depth]\n coords[root] = (x, y)\n # recursively lay out children (each with its own x interval)\n children = tree[root] if get_children is None else get_children(root)\n if len(children) > 0:\n depth += 1\n x_spacing = float(x_max - x_min) / len(children)\n x_max = x_min + x_spacing\n for child in children:\n _layout(tree, child, get_children, depth, x_min, x_max, ys, coords)\n x_min = x_max\n x_max += x_spacing\n","repo_name":"2xR/legacy","sub_path":"utils/graph/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32471066908","text":"#!/usr/bin/env python3\nimport re\nimport sys\nimport os.path\n\n\ndef parse_file(file_name):\n \"\"\"\n Take file name as input parameter.\n Parse that file for finding urls\n :return: List of urls\n \"\"\"\n with open(file_name) as file_obj:\n file_content = 
file_obj.read()\n\n urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', file_content)\n\n return urls\n\n\nif __name__ == '__main__':\n if os.path.exists(sys.argv[1]):\n urls = parse_file(sys.argv[1])\n if urls:\n for url in urls:\n print(url)\n else:\n print(\"No url found in log file\")\n else:\n print(\"File not exists\")\n","repo_name":"akshay196/log-beautify","sub_path":"urlfinder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70424136514","text":"import sys\nimport os\n#import datetime import datetime\nimport select\nfrom artiq.experiment import *\nfrom artiq.coredevice.ad9910 import AD9910\nfrom artiq.coredevice.ad53xx import AD53xx\nimport time\nimport numpy as np\n#import matplotlib.pyplot as plt\n\n#underflow errors happen when you are out of sync in time or trying to define a process in the past\ndef print_underflow():\n print('RTIO underflow occured')\n\n\n# Class which defines the pmt counting experiment --> shows up in the GUI as this name\nclass basic_live_scope_view2(EnvExperiment):\n def build(self):\n self.setattr_device('core') # need the core for everything\n self.setattr_device('sampler0') # where voltages being read in\n self.setattr_argument('step_size',NumberValue(default=100,unit='ms',scale=1,ndecimals=0,step=1)) #time scale on the scope,how zoomed you are in time\n self.setattr_argument('scope_count', NumberValue(default=400,unit='reads per shot',scale=1,ndecimals=0,step=1)) #how many indices you have in time axis\n self.setattr_argument('detection_time',NumberValue(default=100,unit='ms',scale=1,ndecimals=0,step=1)) #this makes detection time an attribute to change on GUI \n self.setattr_device('scheduler') # scheduler used\n \n def prepare(self):\n #function run before the experiment \n \n #each sampler takes 9us in addition to whatever the time delay is\n # scope count -1 for endpoint considerations\n # division converts it to ms\n self.time_interval = np.linspace(0,(self.step_size+9)*(self.scope_count-1)/1.0e3,self.scope_count)\n\n # turn np array into a dataset, time dataset for scope\n self.set_dataset('times', (self.time_interval),broadcast=True)\n \n def run(self):\n self.core.reset()\n while True:\n self.scheduler.pause() # allows for \"terminate instances\" functionality\n self.run_pmt()\n \n # run_pmt, this is directly counting pulses in FPGA and decorated with kernel so that artiq is listening/waiting for a pulse for 100ms \n @kernel\n def run_pmt(self):\n #while True:\n self.core.break_realtime()\n self.sampler0.init() #initializes sampler\n\n # sets the gain for each sampler\n for i in range(8):\n self.sampler0.set_gain_mu(i,0)\n\n delay(260*us)\n\n # list for redefines 0's with a value read in from sampler\n # data0 is from a single scope count\n data0 = [0]*self.scope_count\n\n # add to smp data by continuously overriding, list of 8 points of all zeros\n smp = [0]*8\n\n # smp has been overriden\n for j in range(self.scope_count):\n #acquire set of data (data)\n self.sampler0.sample_mu(smp) # reads in machine units from 8 channels\n # save the value of smp\n data0[j] = smp[0]\n delay(self.step_size*us)\n\n 
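# Note (editor addition): broadcast=True publishes 'volts' to the ARTIQ dataset database,\n        # so a live-plotting applet can pair it with the 'times' axis built in prepare().\n        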
self.set_dataset('volts',(data0),broadcast=True)\n\n\n","repo_name":"hemmerlinglab/Electrons_Artiq_Sequences","sub_path":"artiq-master/Example_Code/live_scope_viewV2.py","file_name":"live_scope_viewV2.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16119460746","text":"# N-body gravity simulation in 300 lines of Taichi, tree method, no multipole, O(N log N)\n# Author: archibate <1931127624@qq.com>, all left reserved\nimport taichi as ti\nimport taichi_glsl as tl\nimport hub\nif not hasattr(ti, 'jkl'):\n ti.jkl = ti.indices(1, 2, 3)\n\nkDisplay = 'pixels'\nkResolution = 512\nkShapeFactor = 1\nkMaxParticles = 8192\nkMaxDepth = kMaxParticles * 1\nkMaxNodes = kMaxParticles * 4\nkDim = 2\n\ndt = 0.00005\nLEAF = -1\nTREE = -2\n\nparticle_mass = ti.field(ti.f32)\nparticle_pos = ti.Vector.field(kDim, ti.f32)\nparticle_vel = ti.Vector.field(kDim, ti.f32)\nparticle_table = ti.root.dense(ti.i, kMaxParticles)\nparticle_table.place(particle_pos).place(particle_vel).place(particle_mass)\nparticle_table_len = ti.field(ti.i32, ())\n\ntrash_particle_id = ti.field(ti.i32)\ntrash_base_parent = ti.field(ti.i32)\ntrash_base_geo_center = ti.Vector.field(kDim, ti.f32)\ntrash_base_geo_size = ti.field(ti.f32)\ntrash_table = ti.root.dense(ti.i, kMaxDepth)\ntrash_table.place(trash_particle_id)\ntrash_table.place(trash_base_parent, trash_base_geo_size)\ntrash_table.place(trash_base_geo_center)\ntrash_table_len = ti.field(ti.i32, ())\n\nnode_mass = ti.field(ti.f32)\nnode_weighted_pos = ti.Vector.field(kDim, ti.f32)\nnode_particle_id = ti.field(ti.i32)\nnode_children = ti.field(ti.i32)\nnode_table = ti.root.dense(ti.i, kMaxNodes)\nnode_table.place(node_mass, node_particle_id, node_weighted_pos)\nnode_table.dense({2: ti.jk, 3: ti.jkl}[kDim], 2).place(node_children)\nnode_table_len = ti.field(ti.i32, ())\n\n\ndisplay_image = ti.field(ti.f32, (kResolution, kResolution))\n\n\n@ti.func\ndef alloc_node():\n ret = ti.atomic_add(node_table_len[None], 1)\n assert ret < kMaxNodes\n node_mass[ret] = 0\n node_weighted_pos[ret] = particle_pos[0] * 0\n node_particle_id[ret] = LEAF\n for which in ti.grouped(ti.ndrange(*([2] * kDim))):\n node_children[ret, which] = LEAF\n return ret\n\n\n@ti.func\ndef alloc_particle():\n ret = ti.atomic_add(particle_table_len[None], 1)\n assert ret < kMaxParticles\n particle_mass[ret] = 0\n particle_pos[ret] = particle_pos[0] * 0\n particle_vel[ret] = particle_pos[0] * 0\n return ret\n\n\n@ti.func\ndef alloc_trash():\n ret = ti.atomic_add(trash_table_len[None], 1)\n assert ret < kMaxDepth\n return ret\n\n\n@ti.func\ndef alloc_a_node_for_particle(particle_id, parent, parent_geo_center,\n parent_geo_size):\n position = particle_pos[particle_id]\n mass = particle_mass[particle_id]\n\n depth = 0\n while depth < kMaxDepth:\n already_particle_id = node_particle_id[parent]\n if already_particle_id == LEAF:\n break\n if already_particle_id != TREE:\n node_particle_id[parent] = TREE\n trash_id = alloc_trash()\n trash_particle_id[trash_id] = already_particle_id\n trash_base_parent[trash_id] = parent\n trash_base_geo_center[trash_id] = parent_geo_center\n trash_base_geo_size[trash_id] = parent_geo_size\n already_pos = particle_pos[already_particle_id]\n already_mass = particle_mass[already_particle_id]\n node_weighted_pos[parent] -= already_pos * already_mass\n node_mass[parent] -= already_mass\n\n node_weighted_pos[parent] += position * mass\n node_mass[parent] += mass\n\n which = abs(position > 
parent_geo_center)\n child = node_children[parent, which]\n if child == LEAF:\n child = alloc_node()\n node_children[parent, which] = child\n child_geo_size = parent_geo_size * 0.5\n child_geo_center = parent_geo_center + (which - 0.5) * child_geo_size\n\n parent_geo_center = child_geo_center\n parent_geo_size = child_geo_size\n parent = child\n\n depth = depth + 1\n\n node_particle_id[parent] = particle_id\n node_weighted_pos[parent] = position * mass\n node_mass[parent] = mass\n\n\n@ti.func\ndef add_random_particles(angular_velocity):\n num = ti.static(1)\n particle_id = alloc_particle()\n if ti.static(kDim == 2):\n particle_pos[particle_id] = tl.randSolid2D() * 0.2 + 0.5\n velocity = (particle_pos[particle_id] - 0.5) * angular_velocity * 250\n particle_vel[particle_id] = tl.vec(-velocity.y, velocity.x)\n else:\n particle_pos[particle_id] = tl.randUnit3D() * 0.2 + 0.5\n velocity = (particle_pos[particle_id].xy -\n 0.5) * angular_velocity * 180\n particle_vel[particle_id] = tl.vec(-velocity.y, velocity.x, 0.0)\n particle_mass[particle_id] = tl.randRange(0.0, 1.5)\n\n\n@hub.kernel\ndef reset():\n for i in range(512):\n add_random_particles(0.4)\n\n\n@ti.func\ndef build_tree():\n node_table_len[None] = 0\n trash_table_len[None] = 0\n alloc_node()\n\n particle_id = 0\n while particle_id < particle_table_len[None]:\n alloc_a_node_for_particle(particle_id, 0, particle_pos[0] * 0 + 0.5,\n 1.0)\n\n trash_id = 0\n while trash_id < trash_table_len[None]:\n alloc_a_node_for_particle(trash_particle_id[trash_id],\n trash_base_parent[trash_id],\n trash_base_geo_center[trash_id],\n trash_base_geo_size[trash_id])\n trash_id = trash_id + 1\n\n trash_table_len[None] = 0\n particle_id = particle_id + 1\n\n\n@ti.func\ndef gravity_func(distance):\n return tl.normalizePow(distance, -2, 1e-3)\n\n\n@ti.func\ndef get_tree_gravity_at(position):\n acc = particle_pos[0] * 0\n\n trash_table_len[None] = 0\n trash_id = alloc_trash()\n assert trash_id == 0\n trash_base_parent[trash_id] = 0\n trash_base_geo_size[trash_id] = 1.0\n\n trash_id = 0\n while trash_id < trash_table_len[None]:\n parent = trash_base_parent[trash_id]\n parent_geo_size = trash_base_geo_size[trash_id]\n\n particle_id = node_particle_id[parent]\n if particle_id >= 0:\n distance = particle_pos[particle_id] - position\n acc += particle_mass[particle_id] * gravity_func(distance)\n\n else: # TREE or LEAF\n for which in ti.grouped(ti.ndrange(*([2] * kDim))):\n child = node_children[parent, which]\n if child == LEAF:\n continue\n node_center = node_weighted_pos[child] / node_mass[child]\n distance = node_center - position\n if distance.norm_sqr() > kShapeFactor**2 * parent_geo_size**2:\n acc += node_mass[child] * gravity_func(distance)\n else:\n new_trash_id = alloc_trash()\n child_geo_size = parent_geo_size * 0.5\n trash_base_parent[new_trash_id] = child\n trash_base_geo_size[new_trash_id] = child_geo_size\n\n trash_id = trash_id + 1\n\n return acc\n\n\n@ti.func\ndef substep_tree():\n particle_id = 0\n while particle_id < particle_table_len[None]:\n acceleration = get_tree_gravity_at(particle_pos[particle_id])\n particle_vel[particle_id] += acceleration * dt\n # well... 
seems our tree inserter will break if particle out-of-bound:\n particle_vel[particle_id] = tl.boundReflect(particle_pos[particle_id],\n particle_vel[particle_id],\n 0, 1)\n particle_id = particle_id + 1\n for i in range(particle_table_len[None]):\n particle_pos[i] += particle_vel[i] * dt\n\n\n@hub.kernel\ndef substep():\n build_tree()\n substep_tree()\n\n\n@hub.kernel\ndef render():\n for i in range(particle_table_len[None]):\n position = particle_pos[i].xy\n pix = int(position * kResolution)\n display_image[tl.clamp(pix, 0, kResolution - 1)] += 0.3\n\n\nhub.substep_nr(2)\nhub.bind_particles(particle_pos, kMaxParticles)\n","repo_name":"taichi-dev/taichi.js","sub_path":"taichihub/gallery/tree_gravity.py","file_name":"tree_gravity.py","file_ext":"py","file_size_in_byte":7858,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"61"} +{"seq_id":"9211688704","text":"from models import Peer\n\ndef add_profile(profile_object):\n queryset = Peer.objects.filter(user=profile_object.user)\n if queryset.exists():\n peerinfo_dict = {'title': 'Active Torrents', 'headers': ('Info Hash', 'Last Announce', 'State'), 'rows': []}\n \n for info in queryset.order_by('-state')[:20]:\n peerinfo_dict['rows'].append( (info.info_hash, info.last_announce, info.get_state_display()) )\n \n profile_object.add_table(peerinfo_dict)\n\n","repo_name":"zeebo/rain","sub_path":"tracker/user_profile.py","file_name":"user_profile.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"27087612883","text":"import os\nimport json\nimport sys\nimport bencodepy\nimport hashlib\nimport base64\nimport time\n\n# Define the path to the uploads directory\nuploads_path = '/home/stablebay/uploads'\n\ndef make_magnet_from_file(torrent_file) :\n with open(torrent_file, 'rb') as f:\n metadata = bencodepy.decode(f.read())\n subj = metadata[b'info']\n hashcontents = bencodepy.encode(subj)\n digest = hashlib.sha1(hashcontents).digest()\n b32hash = base64.b32encode(digest).decode()\n magnet_link = 'magnet:?' 
\\\n + 'xt=urn:btih:' + b32hash \\\n + '&dn=' + metadata[b'info'][b'name'].decode() \\\n + '&tr=' + metadata[b'announce'].decode() \\\n + '&xl=' + str(metadata[b'info'].get(b'length', 0))\n return magnet_link\n\n# Run the loop every 1 second\nwhile True:\n # Iterate through all subdirectories in uploads directory\n for model_name in os.listdir(uploads_path):\n model_dir = os.path.join(uploads_path, model_name)\n if not os.path.isdir(model_dir):\n continue\n\n # Check if model has a torrent file\n torrent_file = None\n for file_name in os.listdir(model_dir):\n if file_name.endswith('.torrent'):\n torrent_file = os.path.join(model_dir, file_name)\n break\n if not torrent_file:\n continue\n\n # Load model info from JSON file\n json_path = os.path.join(model_dir, 'info.json')\n with open(json_path, 'r') as f:\n model_info = json.load(f)\n\n # Extract magnet link from torrent file\n magnet_link = make_magnet_from_file(torrent_file)\n\n # Update model info with magnet link and save to JSON file\n model_info['magnet_link'] = magnet_link\n with open(json_path, 'w') as f:\n json.dump(model_info, f)\n\n # Wait for 1 second before running the loop again\n time.sleep(1)\n","repo_name":"thiefyzheng/MagnetMaker","sub_path":"MagnetMaker.py","file_name":"MagnetMaker.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71013993153","text":"from telethon import TelegramClient, events\r\n\r\napi_id = 21767556\r\napi_hash = '80e2ef1f848ffdd325c0a87592dc33ca'\r\n\r\nclient = TelegramClient('anon', api_id, api_hash)\r\n\r\n\r\n@client.on(events.NewMessage)\r\nasync def my_event_handler(event):\r\n if 'hi' in event.raw_text:\r\n await event.reply('Hello, How Are You?')\r\n\r\n\r\nclient.start()\r\nclient.run_until_disconnected()\r\n","repo_name":"jetom7007/Projects","sub_path":"Telethon/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29189646138","text":"import sys\nimport warnings\nfrom os.path import dirname, exists, join\n\nimport numpy as np\nimport pinocchio as pin\nfrom pinocchio.robot_wrapper import RobotWrapper\n\npin.switchToNumpyArray()\n\n\ndef _depr_msg(deprecated, key):\n return \"`%s` is deprecated. 
Please use `load('%s')`\" % (deprecated, key)\n\n\ndef getModelPath(subpath, printmsg=False):\n source = dirname(dirname(dirname(__file__))) # top level source directory\n paths = [\n join(dirname(dirname(dirname(source))), 'robots'), # function called from \"make release\" in build/ dir\n join(dirname(source), 'robots'), # function called from a build/ dir inside top level source\n join(source, 'robots') # function called from top level source dir\n ]\n try:\n from .path import EXAMPLE_ROBOT_DATA_MODEL_DIR, EXAMPLE_ROBOT_DATA_SOURCE_DIR\n paths.append(EXAMPLE_ROBOT_DATA_MODEL_DIR) # function called from installed project\n paths.append(EXAMPLE_ROBOT_DATA_SOURCE_DIR) # function called from off-tree build dir\n except ImportError:\n pass\n paths += [join(p, '../../../share/example-robot-data/robots') for p in sys.path]\n for path in paths:\n if exists(join(path, subpath.strip('/'))):\n if printmsg:\n print(\"using %s as modelPath\" % path)\n return path\n raise IOError('%s not found' % subpath)\n\n\ndef readParamsFromSrdf(model, SRDF_PATH, verbose=False, has_rotor_parameters=True, referencePose='half_sitting'):\n if has_rotor_parameters:\n pin.loadRotorParameters(model, SRDF_PATH, verbose)\n model.armature = np.multiply(model.rotorInertia.flat, np.square(model.rotorGearRatio.flat))\n pin.loadReferenceConfigurations(model, SRDF_PATH, verbose)\n q0 = pin.neutral(model)\n if referencePose is not None:\n q0 = model.referenceConfigurations[referencePose].copy()\n return q0\n\n\nclass RobotLoader(object):\n path = ''\n urdf_filename = ''\n srdf_filename = ''\n sdf_filename = ''\n urdf_subpath = 'robots'\n srdf_subpath = 'srdf'\n sdf_subpath = ''\n ref_posture = 'half_sitting'\n has_rotor_parameters = False\n free_flyer = False\n verbose = False\n\n def __init__(self):\n if self.urdf_filename:\n if self.sdf_filename:\n raise AttributeError(\"Please choose between URDF *or* SDF\")\n df_path = join(self.path, self.urdf_subpath, self.urdf_filename)\n builder = RobotWrapper.BuildFromURDF\n else:\n df_path = join(self.path, self.sdf_subpath, self.sdf_filename)\n try:\n builder = RobotWrapper.BuildFromSDF\n except AttributeError:\n raise ImportError(\"Building SDF models require pinocchio >= 3.0.0\")\n self.model_path = getModelPath(df_path, self.verbose)\n self.df_path = join(self.model_path, df_path)\n self.robot = builder(self.df_path, [join(self.model_path, '../..')],\n pin.JointModelFreeFlyer() if self.free_flyer else None)\n\n if self.srdf_filename:\n self.srdf_path = join(self.model_path, self.path, self.srdf_subpath, self.srdf_filename)\n self.robot.q0 = readParamsFromSrdf(self.robot.model, self.srdf_path, self.verbose,\n self.has_rotor_parameters, self.ref_posture)\n\n if pin.WITH_HPP_FCL and pin.WITH_HPP_FCL_BINDINGS:\n # Add all collision pairs\n self.robot.collision_model.addAllCollisionPairs()\n\n # Remove collision pairs per SRDF\n pin.removeCollisionPairs(self.robot.model, self.robot.collision_model, self.srdf_path, False)\n\n # Recreate collision data since the collision pairs changed\n self.robot.collision_data = self.robot.collision_model.createData()\n else:\n self.srdf_path = None\n self.robot.q0 = pin.neutral(self.robot.model)\n\n if self.free_flyer:\n self.addFreeFlyerJointLimits()\n\n def addFreeFlyerJointLimits(self):\n ub = self.robot.model.upperPositionLimit\n ub[:7] = 1\n self.robot.model.upperPositionLimit = ub\n lb = self.robot.model.lowerPositionLimit\n lb[:7] = -1\n self.robot.model.lowerPositionLimit = lb\n\n @property\n def q0(self):\n warnings.warn(\"`q0` is deprecated. 
Please use `robot.q0`\", FutureWarning, 2)\n return self.robot.q0\n\n\nclass A1Loader(RobotLoader):\n path = 'a1_description'\n urdf_filename = \"a1.urdf\"\n urdf_subpath = \"urdf\"\n srdf_filename = \"a1.srdf\"\n ref_posture = \"standing\"\n free_flyer = True\n\n\nclass ANYmalLoader(RobotLoader):\n path = 'anymal_b_simple_description'\n urdf_filename = \"anymal.urdf\"\n srdf_filename = \"anymal.srdf\"\n ref_posture = \"standing\"\n free_flyer = True\n\n\nclass ANYmalKinovaLoader(ANYmalLoader):\n urdf_filename = \"anymal-kinova.urdf\"\n srdf_filename = \"anymal-kinova.srdf\"\n ref_posture = \"standing_with_arm_up\"\n\n\nclass BaxterLoader(RobotLoader):\n path = \"baxter_description\"\n urdf_filename = \"baxter.urdf\"\n urdf_subpath = \"urdf\"\n\n\ndef loadANYmal(withArm=None):\n if withArm:\n warnings.warn(_depr_msg('loadANYmal(kinova)', 'anymal_kinova'), FutureWarning, 2)\n loader = ANYmalKinovaLoader\n else:\n warnings.warn(_depr_msg('loadANYmal()', 'anymal'), FutureWarning, 2)\n loader = ANYmalLoader\n return loader().robot\n\n\nclass CassieLoader(RobotLoader):\n path = 'cassie_description'\n sdf_filename = \"cassie_v2.sdf\"\n sdf_subpath = 'robots'\n srdf_filename = \"cassie_v2.srdf\"\n ref_posture = \"standing\"\n free_flyer = True\n\n\nclass TalosLoader(RobotLoader):\n path = 'talos_data'\n urdf_filename = \"talos_reduced.urdf\"\n srdf_filename = \"talos.srdf\"\n free_flyer = True\n has_rotor_parameters = True\n\n\nclass TalosBoxLoader(TalosLoader):\n urdf_filename = \"talos_reduced_box.urdf\"\n\n\nclass TalosFullLoader(TalosLoader):\n urdf_filename = \"talos_full_v2.urdf\"\n\n\nclass TalosFullBoxLoader(TalosLoader):\n urdf_filename = \"talos_full_v2_box.urdf\"\n\n\nclass TalosArmLoader(TalosLoader):\n urdf_filename = \"talos_left_arm.urdf\"\n free_flyer = False\n\n\nclass TalosLegsLoader(TalosLoader):\n def __init__(self):\n super(TalosLegsLoader, self).__init__()\n legMaxId = 14\n m1 = self.robot.model\n m2 = pin.Model()\n for j, M, name, parent, Y in zip(m1.joints, m1.jointPlacements, m1.names, m1.parents, m1.inertias):\n if j.id < legMaxId:\n jid = m2.addJoint(parent, getattr(pin, j.shortname())(), M, name)\n idx_q, idx_v = m2.joints[jid].idx_q, m2.joints[jid].idx_v\n m2.upperPositionLimit[idx_q:idx_q + j.nq] = m1.upperPositionLimit[j.idx_q:j.idx_q + j.nq]\n m2.lowerPositionLimit[idx_q:idx_q + j.nq] = m1.lowerPositionLimit[j.idx_q:j.idx_q + j.nq]\n m2.velocityLimit[idx_v:idx_v + j.nv] = m1.velocityLimit[j.idx_v:j.idx_v + j.nv]\n m2.effortLimit[idx_v:idx_v + j.nv] = m1.effortLimit[j.idx_v:j.idx_v + j.nv]\n assert jid == j.id\n m2.appendBodyToJoint(jid, Y, pin.SE3.Identity())\n\n upperPos = m2.upperPositionLimit\n upperPos[:7] = 1\n m2.upperPositionLimit = upperPos\n lowerPos = m2.lowerPositionLimit\n lowerPos[:7] = -1\n m2.lowerPositionLimit = lowerPos\n effort = m2.effortLimit\n effort[:6] = np.inf\n m2.effortLimit = effort\n\n # q2 = self.robot.q0[:19]\n for f in m1.frames:\n if f.parent < legMaxId:\n m2.addFrame(f)\n\n g2 = pin.GeometryModel()\n for g in self.robot.visual_model.geometryObjects:\n if g.parentJoint < 14:\n g2.addGeometryObject(g)\n\n self.robot.model = m2\n self.robot.data = m2.createData()\n self.robot.visual_model = g2\n # self.robot.q0=q2\n self.robot.visual_data = pin.GeometryData(g2)\n\n # Load SRDF file\n self.robot.q0 = readParamsFromSrdf(self.robot.model, self.srdf_path, self.verbose, self.has_rotor_parameters,\n self.ref_posture)\n\n assert (m2.armature[:6] == 0.).all()\n # Add the free-flyer joint limits to the new model\n 
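# Note (editor addition): this mirrors addFreeFlyerJointLimits(), which widens the\n        # first 7 position bounds (free-flyer translation + quaternion) to [-1, 1].\n        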
self.addFreeFlyerJointLimits()\n\n\ndef loadTalos(legs=False, arm=False, full=False, box=False):\n if legs:\n warnings.warn(_depr_msg('loadTalos(legs)', 'talos_legs'), FutureWarning, 2)\n loader = TalosLegsLoader\n elif arm:\n warnings.warn(_depr_msg('loadTalos(arm)', 'talos_arm'), FutureWarning, 2)\n loader = TalosArmLoader\n elif full:\n if box:\n warnings.warn(_depr_msg('loadTalos(full, box)', 'talos_full_box'), FutureWarning, 2)\n loader = TalosFullBoxLoader\n else:\n warnings.warn(_depr_msg('loadTalos(full)', 'talos_full'), FutureWarning, 2)\n loader = TalosFullLoader\n else:\n if box:\n warnings.warn(_depr_msg('loadTalos(box)', 'talos_box'), FutureWarning, 2)\n loader = TalosBoxLoader\n else:\n warnings.warn(_depr_msg('loadTalos()', 'talos'), FutureWarning, 2)\n loader = TalosLoader\n return loader().robot\n\n\ndef loadTalosLegs():\n warnings.warn(_depr_msg('loadTalosLegs()', 'talos_legs'), FutureWarning, 2)\n return loadTalos(legs=True)\n\n\ndef loadTalosArm():\n warnings.warn(_depr_msg('loadTalosArm()', 'talos_arm'), FutureWarning, 2)\n return loadTalos(arm=True)\n\n\nclass HyQLoader(RobotLoader):\n path = \"hyq_description\"\n urdf_filename = \"hyq_no_sensors.urdf\"\n srdf_filename = \"hyq.srdf\"\n ref_posture = \"standing\"\n free_flyer = True\n\n\ndef loadHyQ():\n warnings.warn(_depr_msg('loadHyQ()', 'hyq'), FutureWarning, 2)\n return HyQLoader().robot\n\n\nclass BoltLoader(RobotLoader):\n path = 'bolt_description'\n urdf_filename = \"bolt.urdf\"\n srdf_filename = \"bolt.srdf\"\n ref_posture = \"standing\"\n free_flyer = True\n\n\nclass Solo8Loader(RobotLoader):\n path = 'solo_description'\n urdf_filename = \"solo.urdf\"\n srdf_filename = \"solo.srdf\"\n ref_posture = \"standing\"\n free_flyer = True\n\n\nclass SoloLoader(Solo8Loader):\n def __init__(self, *args, **kwargs):\n warnings.warn('\"solo\" is deprecated, please try to load \"solo8\"')\n return super(SoloLoader, self).__init__(*args, **kwargs)\n\n\nclass Solo12Loader(Solo8Loader):\n urdf_filename = \"solo12.urdf\"\n\n\ndef loadSolo(solo=True):\n warnings.warn(_depr_msg('loadSolo()', 'solo8'), FutureWarning, 2)\n loader = Solo8Loader if solo else Solo12Loader\n return loader().robot\n\n\nclass FingerEduLoader(RobotLoader):\n path = 'finger_edu_description'\n urdf_filename = \"finger_edu.urdf\"\n srdf_filename = \"finger_edu.srdf\"\n ref_posture = \"hanging\"\n free_flyer = False\n\n\nclass KinovaLoader(RobotLoader):\n path = \"kinova_description\"\n urdf_filename = \"kinova.urdf\"\n srdf_filename = \"kinova.srdf\"\n ref_posture = \"arm_up\"\n\n\ndef loadKinova():\n warnings.warn(_depr_msg('loadKinova()', 'kinova'), FutureWarning, 2)\n return KinovaLoader().robot\n\n\nclass TiagoLoader(RobotLoader):\n path = \"tiago_description\"\n urdf_filename = \"tiago.urdf\"\n\n\nclass TiagoDualLoader(TiagoLoader):\n urdf_filename = \"tiago_dual.urdf\"\n\n\nclass TiagoNoHandLoader(TiagoLoader):\n urdf_filename = \"tiago_no_hand.urdf\"\n\n\ndef loadTiago(hand=True):\n if hand:\n warnings.warn(_depr_msg('loadTiago()', 'tiago'), FutureWarning, 2)\n loader = TiagoLoader\n else:\n warnings.warn(_depr_msg('loadTiago(hand=False)', 'tiago_no_hand'), FutureWarning, 2)\n loader = TiagoNoHandLoader\n return loader().robot\n\n\ndef loadTiagoNoHand():\n warnings.warn(_depr_msg('loadTiagoNoHand()', 'tiago_no_hand'), FutureWarning, 2)\n return loadTiago(hand=False)\n\n\nclass ICubLoader(RobotLoader):\n path = \"icub_description\"\n urdf_filename = \"icub.urdf\"\n srdf_filename = \"icub.srdf\"\n free_flyer = True\n\n\nclass 
ICubReducedLoader(ICubLoader):\n urdf_filename = \"icub_reduced.urdf\"\n\n\ndef loadICub(reduced=True):\n if reduced:\n warnings.warn(_depr_msg('loadICub()', 'icub_reduced'), FutureWarning, 2)\n loader = ICubReducedLoader\n else:\n warnings.warn(_depr_msg('loadICub(reduced=False)', 'icub'), FutureWarning, 2)\n loader = ICubLoader\n return loader().robot\n\n\nclass PandaLoader(RobotLoader):\n path = \"panda_description\"\n urdf_filename = \"panda.urdf\"\n urdf_subpath = \"urdf\"\n\n\ndef loadPanda():\n warnings.warn(_depr_msg('loadPanda()', 'panda'), FutureWarning, 2)\n return PandaLoader().robot\n\n\nclass UR3Loader(RobotLoader):\n path = \"ur_description\"\n urdf_filename = \"ur3_robot.urdf\"\n urdf_subpath = \"urdf\"\n ref_posture = None\n\n\nclass UR3GripperLoader(UR3Loader):\n urdf_filename = \"ur3_gripper.urdf\"\n srdf_filename = \"ur3_gripper.srdf\"\n\n\nclass UR3LimitedLoader(UR3Loader):\n urdf_filename = \"ur3_joint_limited_robot.urdf\"\n\n\nclass UR5Loader(UR3Loader):\n urdf_filename = \"ur5_robot.urdf\"\n srdf_filename = \"ur5.srdf\"\n\n\nclass UR5GripperLoader(UR5Loader):\n urdf_filename = \"ur5_gripper.urdf\"\n srdf_filename = \"ur5_gripper.srdf\"\n\n\nclass UR5LimitedLoader(UR5Loader):\n urdf_filename = \"ur5_joint_limited_robot.urdf\"\n\n\nclass UR10Loader(UR3Loader):\n urdf_filename = \"ur10_robot.urdf\"\n\n\nclass UR10LimitedLoader(UR10Loader):\n urdf_filename = \"ur10_joint_limited_robot.urdf\"\n\n\ndef loadUR(robot=5, limited=False, gripper=False):\n if robot == 3:\n if limited:\n warnings.warn(_depr_msg('loadUr(3, limited)', 'ur3_limited'), FutureWarning, 2)\n loader = UR3LimitedLoader\n elif gripper:\n warnings.warn(_depr_msg('loadUr(3, gripper)', 'ur3_gripper'), FutureWarning, 2)\n loader = UR3GripperLoader\n else:\n warnings.warn(_depr_msg('loadUr(3)', 'ur3'), FutureWarning, 2)\n loader = UR3Loader\n elif robot == 5:\n if limited:\n warnings.warn(_depr_msg('loadUr(limited)', 'ur5_limited'), FutureWarning, 2)\n loader = UR5LimitedLoader\n elif gripper:\n warnings.warn(_depr_msg('loadUr(gripper)', 'ur5_gripper'), FutureWarning, 2)\n loader = UR5GripperLoader\n else:\n warnings.warn(_depr_msg('loadUr()', 'ur5'), FutureWarning, 2)\n loader = UR5Loader\n elif robot == 10:\n if limited:\n warnings.warn(_depr_msg('loadUr(10, limited)', 'ur10_limited'), FutureWarning, 2)\n loader = UR10LimitedLoader\n else:\n warnings.warn(_depr_msg('loadUr(10)', 'ur10'), FutureWarning, 2)\n loader = UR10Loader\n return loader().robot\n\n\nclass HectorLoader(RobotLoader):\n path = \"hector_description\"\n urdf_filename = \"quadrotor_base.urdf\"\n free_flyer = True\n\n\ndef loadHector():\n warnings.warn(_depr_msg('loadHector()', 'hector'), FutureWarning, 2)\n return HectorLoader().robot\n\n\nclass DoublePendulumLoader(RobotLoader):\n path = \"double_pendulum_description\"\n urdf_filename = \"double_pendulum.urdf\"\n urdf_subpath = \"urdf\"\n\n\ndef loadDoublePendulum():\n warnings.warn(_depr_msg('loadDoublePendulum()', 'double_pendulum'), FutureWarning, 2)\n return DoublePendulumLoader().robot\n\n\nclass RomeoLoader(RobotLoader):\n path = \"romeo_description\"\n urdf_filename = \"romeo.urdf\"\n urdf_subpath = \"urdf\"\n free_flyer = True\n\n\ndef loadRomeo():\n warnings.warn(_depr_msg('loadRomeo()', 'romeo'), FutureWarning, 2)\n return RomeoLoader().robot\n\n\nclass SimpleHumanoidLoader(RobotLoader):\n path = 'simple_humanoid_description'\n urdf_subpath = 'urdf'\n urdf_filename = 'simple_humanoid.urdf'\n srdf_filename = 'simple_humanoid.srdf'\n free_flyer = True\n\n\nclass 
SimpleHumanoidClassicalLoader(SimpleHumanoidLoader):\n    urdf_filename = 'simple_humanoid_classical.urdf'\n    srdf_filename = 'simple_humanoid_classical.srdf'\n\n\nclass IrisLoader(RobotLoader):\n    path = \"iris_description\"\n    urdf_filename = \"iris_simple.urdf\"\n    free_flyer = True\n\n\ndef loadIris():\n    warnings.warn(_depr_msg('loadIris()', 'iris'), FutureWarning, 2)\n    return IrisLoader().robot\n\n\nROBOTS = {\n    'a1': A1Loader,\n    'anymal': ANYmalLoader,\n    'anymal_kinova': ANYmalKinovaLoader,\n    'baxter': BaxterLoader,\n    'cassie': CassieLoader,\n    'double_pendulum': DoublePendulumLoader,\n    'hector': HectorLoader,\n    'hyq': HyQLoader,\n    'icub': ICubLoader,\n    'icub_reduced': ICubReducedLoader,\n    'iris': IrisLoader,\n    'kinova': KinovaLoader,\n    'panda': PandaLoader,\n    'romeo': RomeoLoader,\n    'simple_humanoid': SimpleHumanoidLoader,\n    'simple_humanoid_classical': SimpleHumanoidClassicalLoader,\n    'bolt': BoltLoader,\n    'solo': SoloLoader,\n    'solo8': Solo8Loader,\n    'solo12': Solo12Loader,\n    'finger_edu': FingerEduLoader,\n    'talos': TalosLoader,\n    'talos_box': TalosBoxLoader,\n    'talos_arm': TalosArmLoader,\n    'talos_legs': TalosLegsLoader,\n    'talos_full': TalosFullLoader,\n    'talos_full_box': TalosFullBoxLoader,\n    'tiago': TiagoLoader,\n    'tiago_dual': TiagoDualLoader,\n    'tiago_no_hand': TiagoNoHandLoader,\n    'ur3': UR3Loader,\n    'ur3_gripper': UR3GripperLoader,\n    'ur3_limited': UR3LimitedLoader,\n    'ur5': UR5Loader,\n    'ur5_gripper': UR5GripperLoader,\n    'ur5_limited': UR5LimitedLoader,\n    'ur10': UR10Loader,\n    'ur10_limited': UR10LimitedLoader,\n}\n\n\n
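# Usage sketch (editor addition): robot = load('talos_arm') returns the wrapped pinocchio\n# robot; load_full('talos_arm') additionally yields q0 and the urdf/srdf paths (see below).\n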
def loader(name, display=False, rootNodeName=''):\n    \"\"\"Load a robot by its name, and optionally display it in a viewer.\"\"\"\n    if name not in ROBOTS:\n        robots = \", \".join(sorted(ROBOTS.keys()))\n        raise ValueError(\"Robot '%s' not found. Possible values are %s\" % (name, robots))\n    inst = ROBOTS[name]()\n    if display:\n        if rootNodeName:\n            inst.robot.initViewer()\n            inst.robot.viz.loadViewerModel(rootNodeName=rootNodeName)\n        else:\n            inst.robot.initViewer(loadModel=True)\n        inst.robot.display(inst.robot.q0)\n    return inst\n\n\ndef load(name, display=False, rootNodeName=''):\n    \"\"\"Load a robot by its name, and optionally display it in a viewer.\"\"\"\n    return loader(name, display, rootNodeName).robot\n\n\ndef load_full(name, display=False, rootNodeName=''):\n    \"\"\"Load a robot by its name, optionally display it in a viewer, and provide its q0 and paths.\"\"\"\n    inst = loader(name, display, rootNodeName)\n    return inst.robot, inst.robot.q0, inst.df_path, inst.srdf_path\n","repo_name":"ola31/pinocchio_test_pkgs","sub_path":"models/example-robot-data/python/example_robot_data/robots_loader.py","file_name":"robots_loader.py","file_ext":"py","file_size_in_byte":18373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9376687859","text":"import datetime\r\n\r\nimport pandas as pd\r\nfrom dateutil.relativedelta import relativedelta\r\nfrom django.db.models import Q\r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import render\r\nfrom pyecharts import options as opts\r\nfrom pyecharts.charts import Bar, Pie, Timeline\r\nfrom pyecharts.globals import ThemeType\r\nfrom login.wrapper import login_required\r\nfrom .models import Debt\r\n\r\n\r\n# from pyecharts.globals import CurrentConfig\r\n# Didn't work? So we mimic how the template loads the js instead: inspect what the js in the template resolves to, then substitute it in the embedded output\r\n# CurrentConfig.ONLINE_HOST = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),'static\\echarts.min.js')\r\n\r\n# Create your views here.\r\n\r\n@login_required\r\ndef index(request):\r\n    debts = Debt.objects.filter(Q(clear=False) & Q(order__seller__id=1))\r\n    all_debts_sum = sum((i.order.get_sum() for i in debts))\r\n    return render(request, 'debt/table.html', {'debts': debts, 'all_debts_sum': all_debts_sum})\r\n\r\n@login_required\r\ndef overdue(request):\r\n    debts = Debt.objects.filter(Q(clear=False) & Q(order__seller__id=1) & Q(last_date__lt=datetime.datetime.now()))\r\n    all_debts_sum = sum((i.order.get_sum() for i in debts))\r\n    return render(request, 'debt/overdue.html', {'debts': debts, 'all_debts_sum': all_debts_sum})\r\n\r\n\r\ndef change_order_status(request):\r\n    # wrong:\r\n    # debt_id=request.GET('debt_id')\r\n    debt_id = request.GET['debt_id']\r\n    # id is not an ordinary attribute -- it takes only a single _\r\n    # debts=Debt.objects.get(order_id=order_id)\r\n    debt = Debt.objects.get(id=debt_id)\r\n    if not debt.clear:\r\n        debt.clear = True\r\n    else:\r\n        debt.clear = False\r\n    debt.save()\r\n    return HttpResponse(debt.clear)\r\n\r\n\r\ndef get_comps_sum(df):\r\n    \"\"\"Monthly owed totals per company, zero-padded to the month span of df.\"\"\"\r\n    # NB: assumes all order dates fall within a single calendar year\r\n    months = df['order_date'].max().month - df['order_date'].min().month + 1\r\n    comps = ['泰山集团股份有限公司', '泰安市金智达机器人科技有限责任公司', '普瑞特机械制造股份有限公司', '山东泰开高压开关有限公司', '泰安华鲁锻压机床有限公司']\r\n    by = df.groupby('comp_name')\r\n    d = {}\r\n    for comp in comps:\r\n        if comp not in by.groups:\r\n            d[comp]=[0]*months\r\n            continue\r\n        new_df = by.get_group(comp)\r\n        new_df.set_index('order_date', inplace=True)\r\n        need2pad = new_df.resample('M')['sum'].sum().to_list()\r\n        if len(need2pad) < months:\r\n            # extend() has no return value\r\n            need2pad.extend([0] * (months - len(need2pad)))\r\n        d[comp] = need2pad\r\n    return d\r\n\r\n\r\ndef gen_chart(df):\r\n    start = df['order_date'].min()\r\n    end = df['order_date'].max() + relativedelta(months=1)\r\n    r = pd.date_range(start, end, freq='M').to_list()\r\n    time_range = [i.strftime('%Y-%m') for i in r]\r\n\r\n
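    # Note (editor addition): get_comps_sum() yields one zero-padded monthly total per\r\n    # company, so each add_yaxis series below aligns index-for-index with time_range.\r\n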
    result = get_comps_sum(df)\r\n    c = (\r\n        Bar(init_opts=opts.InitOpts(width=\"500px\", height=\"600px\", theme=ThemeType.SHINE))\r\n        .add_xaxis(time_range)\r\n        .add_yaxis(\"泰山集团股份有限公司\", result['泰山集团股份有限公司'], stack='stack1')\r\n        .add_yaxis(\"泰安市金智达机器人科技有限责任公司\", result['泰安市金智达机器人科技有限责任公司'], stack='stack1')\r\n        .add_yaxis(\"普瑞特机械制造股份有限公司\", result['普瑞特机械制造股份有限公司'], stack='stack1')\r\n        .add_yaxis(\"山东泰开高压开关有限公司\", result['山东泰开高压开关有限公司'], stack='stack1')\r\n        .add_yaxis(\"泰安华鲁锻压机床有限公司\", result['泰安华鲁锻压机床有限公司'], stack='stack1')\r\n        # hide the value labels on top of the bars\r\n        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\r\n        .set_global_opts(\r\n            tooltip_opts=opts.TooltipOpts(formatter='{a}
在{b}月份欠款:
{c}元'),\r\n datazoom_opts=[opts.DataZoomOpts(), opts.DataZoomOpts(type_=\"inside\")],\r\n )\r\n )\r\n\r\n time_line = Timeline(init_opts=opts.InitOpts(width=\"1000px\", height=\"500px\", theme=ThemeType.SHINE))\r\n comps = ['泰山集团股份有限公司', '泰安市金智达机器人科技有限责任公司', '普瑞特机械制造股份有限公司', '山东泰开高压开关有限公司', '泰安华鲁锻压机床有限公司']\r\n for i, d in enumerate(time_range):\r\n pie = Pie(init_opts=opts.InitOpts(width=\"800px\", height=\"800px\", theme=ThemeType.SHINE))\r\n pie.add('', [[comp, result[comp][i]] for comp in comps])\r\n pie.set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}: {d}%\"))\r\n pie.set_global_opts(tooltip_opts=opts.TooltipOpts(formatter='{a}
在{b}月份欠款:
{c}元'), )\r\n        time_line.add(pie, d)\r\n\r\n    return c, time_line\r\n\r\n\r\ndef gen_chart_(df):\r\n    df0 = df.loc[df['clear'] == False]\r\n\r\n    start = df['order_date'].min()\r\n    end = df['order_date'].max() + relativedelta(months=1)\r\n    r = pd.date_range(start, end, freq='M').to_list()\r\n    time_range = [i.strftime('%Y-%m') for i in r]\r\n\r\n    result = get_comps_sum(df)\r\n    result0 = get_comps_sum(df0)\r\n\r\n    c = (\r\n        Bar(init_opts=opts.InitOpts(width=\"1000px\", height=\"600px\", theme=ThemeType.SHINE))\r\n        .add_xaxis(time_range)\r\n        .add_yaxis(\"泰山集团股份有限公司\", result['泰山集团股份有限公司'], stack='stack1')\r\n        .add_yaxis(\"泰山集团股份有限公司\", result0['泰山集团股份有限公司'], stack='stack0')\r\n        .add_yaxis(\"泰安市金智达机器人科技有限责任公司\", result['泰安市金智达机器人科技有限责任公司'], stack='stack1')\r\n        .add_yaxis(\"泰安市金智达机器人科技有限责任公司\", result0['泰安市金智达机器人科技有限责任公司'], stack='stack0')\r\n        .add_yaxis(\"普瑞特机械制造股份有限公司\", result['普瑞特机械制造股份有限公司'], stack='stack1')\r\n        .add_yaxis(\"普瑞特机械制造股份有限公司\", result0['普瑞特机械制造股份有限公司'], stack='stack0')\r\n        .add_yaxis(\"山东泰开高压开关有限公司\", result['山东泰开高压开关有限公司'], stack='stack1')\r\n        .add_yaxis(\"山东泰开高压开关有限公司\", result0['山东泰开高压开关有限公司'], stack='stack0')\r\n        .add_yaxis(\"泰安华鲁锻压机床有限公司\", result['泰安华鲁锻压机床有限公司'], stack='stack1')\r\n        .add_yaxis(\"泰安华鲁锻压机床有限公司\", result0['泰安华鲁锻压机床有限公司'], stack='stack0')\r\n        # hide the value labels on top of the bars\r\n        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\r\n        .set_global_opts(\r\n            tooltip_opts=opts.TooltipOpts(formatter='{a}
在{b}月份:
{c}元'),\r\n datazoom_opts=[opts.DataZoomOpts(), opts.DataZoomOpts(type_=\"inside\")],\r\n )\r\n )\r\n\r\n time_line = Timeline(init_opts=opts.InitOpts(width=\"1000px\", height=\"500px\", theme=ThemeType.SHINE))\r\n comps = ['泰山集团股份有限公司', '泰安市金智达机器人科技有限责任公司', '普瑞特机械制造股份有限公司', '山东泰开高压开关有限公司', '泰安华鲁锻压机床有限公司']\r\n for i, d in enumerate(time_range):\r\n pie = Pie(init_opts=opts.InitOpts(width=\"800px\", height=\"800px\", theme=ThemeType.SHINE))\r\n pie.add('', [[comp, result0[comp][i]] for comp in comps])\r\n pie.set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}: {d}%\"))\r\n pie.set_global_opts(tooltip_opts=opts.TooltipOpts(formatter='{b}
欠款:
{c}元'), )\r\n time_line.add(pie, d)\r\n\r\n pie = (\r\n Pie(init_opts=opts.InitOpts(width=\"1000px\", height=\"500px\", theme=ThemeType.SHINE))\r\n .add('', [[comp, sum(result0[comp])] for comp in comps])\r\n .set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}: {d}%\"))\r\n .set_global_opts(tooltip_opts=opts.TooltipOpts(formatter='{b}
欠款:
{c}元'), )\r\n    )\r\n\r\n    return c, time_line, pie\r\n\r\n@login_required\r\ndef charts(request):\r\n    # only consider orders where we are the seller (party B)\r\n    debts = Debt.objects.filter(order__seller__id=1)\r\n    df = pd.DataFrame({'comp_name': [debt.order.buyer.comp_name for debt in debts],\r\n                       'order_date': [debt.order.order_date for debt in debts],\r\n                       'sum': [debt.order.get_sum() for debt in debts],\r\n                       'last_date': [debt.last_date for debt in debts],\r\n                       'clear': [debt.clear for debt in debts]})\r\n    df['order_date'] = pd.to_datetime(df['order_date'])\r\n    # df0 = df.loc[df['clear'] == False]\r\n    # #\r\n    # # c=gen_chart(df)[0]\r\n    # # time_line=gen_chart(df)[1]\r\n    c0 = gen_chart_(df)[0]\r\n    time_line0 = gen_chart_(df)[1]\r\n    pie = gen_chart_(df)[2]\r\n\r\n    return render(request, 'debt/test.html', {\r\n        # 'bar': c.render_embed(),\r\n        'bar0': c0.render_embed().replace('https://assets.pyecharts.org/assets/echarts.min.js',\"/static/echarts.min.js\").replace('https://assets.pyecharts.org/assets/themes/shine.js',\"/static/shine.js\"),\r\n        'time_line': time_line0.render_embed().replace('https://assets.pyecharts.org/assets/echarts.min.js',\"/static/echarts.min.js\").replace('https://assets.pyecharts.org/assets/themes/shine.js',\"/static/shine.js\"),\r\n        'pie': pie.render_embed().replace('https://assets.pyecharts.org/assets/echarts.min.js',\"/static/echarts.min.js\").replace('https://assets.pyecharts.org/assets/themes/shine.js',\"/static/shine.js\"),\r\n    })\r\n","repo_name":"mqlabc/miniERP","sub_path":"debt/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72205025474","text":"# Safe Area\nimport sys\nfrom copy import deepcopy\n\ndx = [-1, 0, 1, 0]\ndy = [0, -1, 0, 1]\n\ndef dfs(y, x):\n    for i in range(4):\n        ny = y + dy[i]\n        nx = x + dx[i]\n\n        if 0 <= ny < n and 0 <= nx < n:\n            if board[ny][nx] > h:\n                tmp = board[ny][nx]\n                board[ny][nx] = 0\n                dfs(ny, nx)\n\nif __name__ == '__main__':\n    n = int(input())\n    init_board = [list(map(int, input().split())) for _ in range(n)]\n\n    min_h = sys.maxsize\n    max_h = 0\n    for i in range(n):\n        for j in range(n):\n            min_h = min(min_h, init_board[i][j])\n            max_h = max(max_h, init_board[i][j]) \n\n    max_safe = 0\n    board = []\n    for h in range(min_h, max_h+1):\n        board = deepcopy(init_board)\n        cnt = 0\n        for i in range(n):\n            for j in range(n):\n                if board[i][j] > h:\n                    tmp = board[i][j]\n                    board[i][j] = 0\n                    cnt += 1\n                    dfs(i, j)\n        max_safe = max(max_safe, cnt)\n\n    print(max_safe)\n","repo_name":"hooong/TIL","sub_path":"algorithm/6/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38867478093","text":"from django.db import models\nfrom django.utils import timezone\nfrom mdeditor.fields import MDTextField\nfrom login import models as login_models\nfrom django.contrib.auth.models import User\n\n\nclass MainCategory(models.Model):\n    # main category; e.g. \"Python basics\" would belong to the \"python\" main category\n    title = models.CharField(max_length=100, blank=True)\n    # creation time of the category\n    created = models.DateTimeField(default=timezone.now)  # pass the callable, not timezone.now(), so each row gets its own timestamp\n    # creator\n    author = models.ForeignKey(User, on_delete=models.CASCADE, default='admin')\n\n    # display value on the admin site, handy when inspecting objects\n    def __str__(self):\n        return self.title\n\n    class Meta:\n        ordering = [\"created\"]\n\n\nclass SubCategory(models.Model):\n    # article category\n    # category title\n    title = models.CharField(max_length=100, blank=True)\n    # creation time of the category\n    created = models.DateTimeField(default=timezone.now)\n    # main category\n    category = 
models.ForeignKey(\n MainCategory,\n null=False,\n blank=False,\n on_delete=models.CASCADE,\n related_name='category',\n default='1',\n )\n author = models.ForeignKey(User, on_delete=models.CASCADE, default='admin')\n\n # admin站点信息 调试查看对象\n def __str__(self):\n return self.title\n\n class Meta:\n ordering = [\"-created\"]\n # db_table = 'tb_category' # 修改表名\n # # verbose_name = '类别管理',\n # verbose_name_plural = verbose_name\n\n\n# 博客文章\nclass Article(models.Model):\n # 文章作者。参数on_delete 用于指定数据删除的方式,避免两个关联表数据不一致\n author = models.ForeignKey(User, on_delete=models.CASCADE, default='admin')\n # 文章标题\n title = models.CharField(max_length=100, blank=False)\n # 文章标题图\n avatar = models.ImageField(upload_to='article/%Y%m%d/', default='article/default.jpg',\n verbose_name='avatar', blank=True)\n # upload_to指定图片上传的路径,不存在则自动创建\n # blank,设置为True时,字段可以为空。设置为False时,字段是��须填写的\n # 文章标签\n tag = models.CharField(max_length=200, blank=False)\n # 概要\n summary = models.CharField(max_length=200, blank=False)\n # 浏览次数\n views = models.PositiveIntegerField('views', default=0)\n # 文章正文\n body = MDTextField()\n created = models.DateTimeField('created', auto_now_add=True) # 字段在实例第一次保存的时候会保存当前时间,不管你在这里是否对其赋值\n # 文章更新时间 参数 auto_now=True 指定每次数据更新时自动写入当前时间\n updated = models.DateTimeField(auto_now=True) # 字段保存时会自动保存当前时间,但要注意每次对其实例执行save()的时候都会将当前时间保存\n # 文章分类,使用外键\n category = models.ForeignKey(\n SubCategory,\n null=False,\n blank=False,\n on_delete=models.CASCADE,\n related_name='subcategory',\n default='1',\n )\n\n # 内部类class Meta用于给model定义元数据\n class Meta:\n # ’-created‘ 表明数据应该以倒叙排列\n ordering = [\"-created\"] # 指定模型返回的数据的排列顺序\n # db_table = 'tb_article'\n # verbose_name = ''\n # verbose_name_plural = verbose_name\n # 函数 __str__ 定义当调用对象的 str() 方法时的返回值内容\n # 它最常见的就是在Django管理后台中做为对象的显示值。因此应该总是为 __str__ 返回一个友好易读的字符串\n\n def __str__(self):\n # 将文章标题返回\n return self.title\n\n\n# 文件模型\nclass File(models.Model):\n # 所关联的文章名称,利用文章名称作为外键\n article = models.ForeignKey(Article, on_delete=models.CASCADE, default='2', blank=True)\n # 上传作者\n author = models.ForeignKey(User, on_delete=models.CASCADE, default='admin')\n # 文件名称\n name = models.CharField(max_length=200)\n # 文件保存路径\n path = models.CharField(max_length=100)\n # 介绍\n description = models.CharField(max_length=200, default='the author is lazy, didn\\'t give any information')\n # 上传时间\n upload_time = models.DateTimeField(default=timezone.now)\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = [\"-upload_time\"]\n\n\n# 链接模型\nclass Link(models.Model):\n # 链接名\n name = models.CharField(max_length=200)\n # 链接地址\n url = models.URLField(max_length=200, default=None)\n # 介绍\n description = models.CharField(max_length=200, default='the author is lazy, didn\\'t give any introduction')\n # 上传作者\n author = models.ForeignKey(User, on_delete=models.CASCADE, default='admin')\n # 上传时间\n upload_time = models.DateTimeField(default=timezone.now)\n\n def __str__(self):\n # 将文章标题返回\n return self.name\n\n class Meta:\n ordering = [\"-upload_time\"]\n","repo_name":"Layneliang24/alpha","sub_path":"campus/main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74068564674","text":"def melp_FMCQ(mag, Wf):\n \"\"\"\n Векторный кватователь амплитуд Фурье - спектра\n :param mag: амплитуды Фурье - спектра\n :param Wf: веса для Евклидова расстояния гармоник Фурье\n :return: \n f - индекс в КК амплиуд Фурье - спектра\n \"\"\"\n try:\n import numpy as 
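One detail worth flagging in the blog-models record above: MainCategory and SubCategory pass default=timezone.now(), which is evaluated once at import time, so every row created afterwards inherits that single stale timestamp; the File and Link models further down already pass the callable form. A minimal sketch of the callable-default pattern (it needs a configured Django project to import; the model name is reused purely for illustration):

from django.db import models
from django.utils import timezone


class MainCategory(models.Model):
    title = models.CharField(max_length=100, blank=True)
    # the callable is evaluated when each instance is created, not at import
    created = models.DateTimeField(default=timezone.now)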
np\n\n from Encoder.codebook_fmcq import FMCQ_CODEBOOK\n\n fmcq = np.asarray(FMCQ_CODEBOOK, dtype=np.float64)\n f = 0\n temp = 1000\n for i in np.arange(256):\n u = fmcq[i, :10] - mag\n rms = np.matmul(Wf, np.conj(np.transpose(u * u)))\n if rms < temp:\n temp = rms\n f = i+1\n\n return f\n except Exception as e:\n raise e\n","repo_name":"masonskii/melp","sub_path":"Encoder/melp_FMCQ.py","file_name":"melp_FMCQ.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5338731610","text":"import time\nimport networkx as nx\nfrom matplotlib import pyplot as plt\nimport json\nclass Cell(object):\n \"\"\"docstring for Cell\"\"\"\n def __init__(self, xposition, yposition, letter, board):\n super(Cell, self).__init__()\n self.xposition = xposition\n self.yposition = yposition\n self.letter = letter\n self.board = board\n self.next_letters = []\n self.neighbors = []\n\n def pos(self):\n return (self.yposition / 4,(self.board.height - self.xposition) / 4)\n\n def __str__(self):\n return self.letter \n\n def __repr__(self):\n return self.letter\n\n def get_neighbors(self):\n if self.neighbors:\n return self.neighbors\n xposition = self.xposition\n yposition = self.yposition\n neighbors = []\n if xposition > 0:\n neighbors.append(self.board.cells[xposition - 1][yposition])\n if xposition < self.board.width - 1:\n neighbors.append(self.board.cells[xposition + 1][yposition])\n if yposition > 0:\n neighbors.append(self.board.cells[xposition][yposition - 1])\n if yposition < self.board.height - 1:\n neighbors.append(self.board.cells[xposition][yposition + 1])\n if xposition > 0 and yposition > 0:\n neighbors.append(self.board.cells[xposition - 1][yposition - 1])\n if xposition < self.board.width - 1 and yposition < self.board.height - 1:\n neighbors.append(self.board.cells[xposition + 1][yposition + 1])\n if yposition > 0 and xposition < self.board.width - 1:\n neighbors.append(self.board.cells[xposition + 1][yposition - 1])\n if yposition < self.board.height - 1 and xposition > 0:\n neighbors.append(self.board.cells[xposition - 1][yposition + 1])\n self.neighbors = neighbors\n return neighbors\n\n def next_letters(self):\n if not self.next_letters:\n self.next_letters = [neighbor.letter for neighbor in self.get_neighbors()]\n return self.next_letters\n\nclass Board(object):\n \"\"\"docstring for Board\"\"\"\n def __init__(self, letters):\n super(Board, self).__init__()\n self.cells = []\n self.width = len(letters)\n self.height = len(letters[0])\n self.flat_cells = []\n for x in range(self.width):\n self.cells.append([])\n for y in range(self.height):\n self.cells[-1].append(Cell(x, y, letters[x][y], self))\n\n def get_cells(self):\n if not self.flat_cells:\n self.flat_cells = [cell for col in self.cells for cell in col]\n return self.flat_cells\n\n\ndef get_words(size=3):\n initwords = [word for word in json.loads(open(\"words_list.json\",\"r\").read()) if len(word) > 2]\n words = sorted(initwords, key=lambda k: k, reverse=True)\n return words\n\ndef check_words(words, cell, path=\"\", cellpath=[]):\n found_words = []\n raw_found_words = []\n path += cell.letter\n cellpath = cellpath.copy()\n cellpath.append(cell)\n for word in words:\n if word == path:\n raw_found_words.append(word)\n found_words.append({'word':word,'cellpath':cellpath,})\n break\n new_words = []\n for word in words:\n if word.startswith(path):\n new_words.append(word)\n if not len(new_words):\n return found_words\n for cell in 
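The melp_FMCQ search above loops over all 256 codewords in Python; the same weighted nearest-codeword lookup can be done with one broadcast. A sketch assuming a (256, 10) codebook and a length-10 weight vector, which is how the record's Wf appears to be used:

import numpy as np


def nearest_codeword(codebook, mag, weights):
    diff = codebook[:, :10] - mag              # (256, 10) via broadcasting
    rms = (diff * diff) @ weights              # weighted squared distance per codeword
    return int(np.argmin(rms)) + 1             # the record returns a 1-based index


rng = np.random.default_rng(0)
print(nearest_codeword(rng.random((256, 10)), rng.random(10), np.ones(10)))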
cell.get_neighbors():\n if cell in cellpath:\n continue\n for found_word in check_words(new_words, cell, path, cellpath):\n if found_word['word'] not in raw_found_words:\n found_words.append(found_word)\n raw_found_words.append(found_word['word'])\n return found_words\n\ndef check_all_words(board, words):\n my_words = []\n raw_found_words = []\n for cell in board.get_cells():\n for found_word in check_words(words, cell):\n if found_word['word'] not in raw_found_words:\n my_words.append(found_word)\n raw_found_words.append(found_word['word'])\n all_words = sorted(my_words, key=lambda k: len(k['word']), reverse=True)\n return all_words\n\ndef plot(cells):\n G = nx.DiGraph()\n G.add_nodes_from([c.pos() for c in cells])\n\n plt.figure(figsize=(6,6))\n pos = {c.pos():c.pos() for c in cells}\n labels = {c.pos():c.letter for c in cells}\n if len(cells) > 1:\n for i, cell in enumerate(cells[1:]):\n G.add_edge(cells[i].pos(),cell.pos())\n nx.draw_networkx_labels(G,\n pos=pos,\n labels=labels)\n nx.draw(G,\n pos=pos,\n node_color='lightgreen', \n node_size=600)\n return plt\n\ndef go_next(current_index=0):\n all_cells = board.get_cells()\n cells = all_words[current_index]['cellpath']\n G = nx.DiGraph()\n G.add_nodes_from([c.pos() for c in all_cells])\n plt.figure(figsize=(6,6))\n pos = {c.pos():c.pos() for c in all_cells}\n labels = {c.pos():c.letter for c in all_cells}\n if len(cells) > 1:\n for i, cell in enumerate(cells[1:]):\n G.add_edge(cells[i].pos(),cell.pos())\n print('word: ', all_words[current_index]['word'])\n nx.draw_networkx_labels(G,\n pos=pos,\n labels=labels)\n colors = []\n for cell in all_cells:\n try:\n colors.append(cells.index(cell) + 1)\n except ValueError:\n colors.append(0)\n nx.draw(G,\n pos=pos,\n node_color=colors, \n node_size=600,\n cmap=plt.cm.Blues,\n )\n plt.title(all_words[current_index]['word'])\n plt.draw()\n plt.waitforbuttonpress(0)\n plt.close()\n if current_index < len(all_words) - 1:\n go_next(current_index+1)\n\ndef convert(mystring):\n return [[letter for letter in mystring[x*4:x*4+4]] for x in range(4)]\n\na = convert(input('enter a game board:\\n'))\nboard = Board(a)\nwords = get_words()\nall_words = check_all_words(board, words)\ngo_next()\n","repo_name":"noahbuttner/wordhunt","sub_path":"wordhunt.py","file_name":"wordhunt.py","file_ext":"py","file_size_in_byte":5934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29110565517","text":"from django.shortcuts import render\nimport requests, json\nfrom django.http import HttpResponse\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\nfrom bs4 import BeautifulSoup\nfrom ssl import SSLError\nfrom requests.exceptions import Timeout\n\n\ndef check_website_view(request):\n website_speed = \"\"\n responsiveness = \"\"\n seo_score = \"\"\n error = \"\"\n url = \"\"\n if request.method == 'POST':\n # Get the URL from the submitted form data\n url = request.POST.get('website_url', '')\n \n API_KEY=\"AIzaSyCQBP6nmcrgxTou0uax1SYzQRQnDwC2kF0\"\n api_endpoint = f\"https://www.googleapis.com/pagespeedonline/v5/runPagespeed?url={url}&key={API_KEY}&category=performance&category=accessibility&category=seo&category=pwa&category=best-practices\"\n\n # api_params = {\n # 'url': url,\n # 'key': API_KEY,\n # 'category':'performance',\n # 'category':'accessibility',\n # 'category':'seo',\n # 'category':'pwa',\n # 
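check_words above narrows its candidate list with a linear startswith() scan at every cell, so each recursion step costs O(number of words). On a sorted list the surviving prefix range can be located with two binary searches instead; a small sketch assuming plain ASCII words:

from bisect import bisect_left


def prefix_slice(sorted_words, prefix):
    lo = bisect_left(sorted_words, prefix)
    hi = bisect_left(sorted_words, prefix + '\uffff')  # just past the last word with this prefix
    return sorted_words[lo:hi]


words = sorted(['ape', 'apple', 'apply', 'banana', 'band'])
print(prefix_slice(words, 'app'))  # ['apple', 'apply']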
'category':'best-practices'\n # }\n\n # Send the API request\n response = requests.get(api_endpoint)\n\n # Parse the response JSON and extract the relevant data\n response_json = response.json()\n\n performance = float(response_json['lighthouseResult']['categories']['performance']['score']) * 100\n accessibility = float(response_json['lighthouseResult']['categories']['accessibility']['score']) * 100\n bestpractices = float(response_json['lighthouseResult']['categories']['best-practices']['score']) * 100\n seo = float(response_json['lighthouseResult']['categories']['seo']['score']) * 100\n pwa = float(response_json['lighthouseResult']['categories']['pwa']['score']) * 100\n \n fullPageScreenshot = response_json['lighthouseResult']['fullPageScreenshot']['screenshot']['data']\n context = {\n \"title\": \"RK Digital | Website Status Checker\",\n \"url\": url,\n 'performance': int(performance),\n 'accessibility': int(accessibility),\n 'bestpractices': int(bestpractices),\n 'seo': int(seo),\n 'pwa': int(pwa),\n 'fullPageScreenshot': fullPageScreenshot,\n 'success': \"#28A745\",\n 'warning': \"#FFC517\",\n 'danger': \"#DF4857\"\n }\n return render(request, \"results.html\", context)\n\n\n \n context = {'url': url,\n 'website_speed': website_speed,\n 'responsiveness': responsiveness,\n 'seo_score': seo_score,\n 'error': error,\n 'title': \"RK Digital | Website Status Checker\",\n # special \n }\n\n return render(request, 'index.html', context)\n\n\n\n# def get_page_speed(request):\n# speed_score = \"\"\n# responsive_score = \"\"\n# seo_score = \"\"\n# render_time = \"\"\n# error = \"\"\n# url = \"\"\n# if request.method == \"POST\":\n# # Replace with your own API key\n# api_key = \"AIzaSyBjHe0ZOucTsqHPptsFTIZYbBLMKZ9ApHA\"\n \n# url = request.POST['website_url']\n# # Set up the API endpoint\n# lighthouse = Lighthouse(url)\n# report = lighthouse.generate_report()\n \n# speed_score = report[\"categories\"][\"performance\"][\"score\"] * 100\n# responsive_score = report[\"categories\"][\"accessibility\"][\"score\"] * 100\n# seo_score = report[\"categories\"][\"seo\"][\"score\"] * 100\n# else:\n# error = \"Oops! 
something went wrong\"\n \n# context = {\n# \"website_speed\": speed_score,\n# \"responsiveness\": responsive_score,\n# \"seo_score\": seo_score,\n# 'error': error,\n# \"url\": url\n# }\n# return render(request, \"index.html\", context)\n \n# import requests\n\n# def get_page_speed(request):\n# speed_score = \"\"\n# responsive_score = \"\"\n# seo_score = \"\"\n# error = \"\"\n# url = \"\"\n \n# if request.method == \"POST\":\n# url = request.method == \"POST\"\n# endpoint = \"https://www.webpagetest.org/runtest.php\"\n# api_key = \"YOUR_API_KEY\"\n\n# params = {\n# \"url\": url,\n# \"f\": \"json\",\n# \"k\": api_key,\n# \"runs\": 1,\n# \"mobile\": 1,\n# \"location\": \"Dulles:Chrome\"\n# }\n\n# try:\n# response = requests.get(endpoint, params=params)\n# data = response.json()\n\n# # Extract relevant data from the API response\n# speed_score = data[\"data\"][\"average\"][\"firstView\"][\"score_gzip\"]\n# responsive_score = data[\"data\"][\"average\"][\"firstView\"][\"score_usecdn\"]\n# seo_score = data[\"data\"][\"average\"][\"firstView\"][\"score_cdn\"]\n\n# except requests.exceptions.RequestException as e:\n# print(\"An error occurred while fetching data from the WebPageTest API:\", e)\n# return None\n \n# context = {\n# \"website_speed\": speed_score,\n# \"responsiveness\": responsive_score,\n# \"seo_score\": seo_score,\n# 'error': error,\n# \"url\": url\n# }\n","repo_name":"uhuru-rawlings/webgrader-django","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29017908465","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns('',\n url('^json/get/$', 'django_notify.views.get_notifications', name='json_get', kwargs={}), \n url('^json/mark-read/$', 'django_notify.views.mark_read', name='json_mark_read_base', kwargs={}), \n url('^json/mark-read/(\\d+)/$', 'django_notify.views.mark_read', name='json_mark_read', kwargs={}), \n url('^goto/(?P\\d+)/$', 'django_notify.views.goto', name='goto', kwargs={}), \n url('^goto/$', 'django_notify.views.goto', name='goto_base', kwargs={}), \n)\n\ndef get_pattern(app_name=\"notify\", namespace=\"notify\"):\n \"\"\"Every url resolution takes place as \"notify:view_name\".\n https://docs.djangoproject.com/en/dev/topics/http/urls/#topics-http-reversing-url-namespaces\n \"\"\"\n return urlpatterns, app_name, namespace","repo_name":"luckyjd/lms_edx","sub_path":"edx-ficus.3-3/apps/edx/venvs/edxapp/lib/python2.7/site-packages/django_notify/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"71793266754","text":"import PySimpleGUI as sg\n\nfrom .scrapper import KayakScrapper\n\nsg.theme('SandyBeach')\n\n\nclass TrackerApp:\n \"\"\"GUI for the Kayak scrapper\"\"\"\n\n def __init__(self, timeout=20):\n self.timeout = timeout\n\n def _get_input_layout(self):\n \"\"\"\n Layout of the GUI containing all input data.\n In the combo box, you can add any city you want.\n \"\"\"\n input_layout = [\n [sg.Text('Departure Airport', size=(20, 1)),\n sg.Combo([\"Strasbourg\", \"Paris\", \"Baden-Baden\", \"Mulhouse\"], default_value=\"Strasbourg\", key='from_city')],\n\n [sg.Text('Departure date', size=(20, 1)),\n sg.Input(key='departure_date', size=(20, 1)),\n sg.CalendarButton('Calendar', close_when_date_chosen=True, format='%d/%m/%Y', 
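The PageSpeed view above builds its query string by hand because its commented-out api_params dict repeats the 'category' key, and a Python dict literal silently keeps only the last duplicate. requests encodes a list value as repeated query parameters, which removes the need for the f-string; a sketch with a placeholder key and target URL:

import requests

params = {
    'url': 'https://example.com',
    'key': 'YOUR_API_KEY',
    'category': ['performance', 'accessibility', 'best-practices', 'seo', 'pwa'],
}
resp = requests.get('https://www.googleapis.com/pagespeedonline/v5/runPagespeed', params=params)
print(resp.url)  # ...&category=performance&category=accessibility&...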
target='departure_date',\n no_titlebar=False)],\n\n [sg.Text('Arrival date', size=(20, 1)),\n sg.Input(key='arrival_date', size=(20, 1)),\n sg.CalendarButton('Calendar', close_when_date_chosen=True, format='%d/%m/%Y', target='arrival_date',\n no_titlebar=False)],\n\n [sg.Text('Maximum price', size=(20, 1)),\n sg.Slider((0, 500), key=\"max_price\", orientation=\"h\")],\n\n [sg.Submit(), sg.Cancel()]\n ]\n\n return input_layout\n\n def _get_output_layout(self):\n \"\"\"Layout of the GUI containing all input data\"\"\"\n output_layout = [\n [sg.Text(\"You can go to :\")],\n [sg.Text(sg.InputText(\"\"), key='results', visible=False)]\n ]\n return output_layout\n\n def _get_layout(self, input_layout, output_layout):\n \"\"\"All layout of the GUI\"\"\"\n layout = [\n [sg.Column(input_layout),\n sg.VSeperator(),\n sg.Column(output_layout)]\n ]\n return layout\n\n def run(self):\n \"\"\"Run the application until the user quit the interface\"\"\"\n input_layout = self._get_input_layout()\n output_layout = self._get_output_layout()\n layout = self._get_layout(input_layout, output_layout)\n window = sg.Window('Need fresh air ?!', layout)\n\n while True:\n event, cfg = window.read()\n if event in [\"Cancel\", sg.WIN_CLOSED]:\n break\n else:\n scrapper = KayakScrapper(cfg, self.timeout)\n possible_trips = scrapper.scrape()\n possible_trips = {k: possible_trips[k]\n for k in list(possible_trips.keys())[:10]}\n trips_string = \"\\n\".join(\n f\"- {k} for {v} euros\" for k, v in possible_trips.items())\n window['results'].Update(trips_string, visible=True)\n\n window.close()\n","repo_name":"Arnautt/kayak-scrapper","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23482536881","text":"\"SOlution for a CodeJam 2016 poblem\"\nimport sys\n\nLINES_PER_CASE = 1\n\n\ndef _get_ciphers(number):\n ciphers = []\n while number is not 0:\n ciphers.append(number % 10)\n number = number // 10\n return set(ciphers)\n\n\ndef run_problem(test):\n test = int(test[0])\n\n if test == 0:\n return \"INSOMNIA\"\n\n numbers = list(range(10))\n index = 1\n while numbers:\n mult = index * test\n ciphers = _get_ciphers(mult)\n for num in ciphers:\n try:\n numbers.remove(num)\n except Exception:\n continue\n index = index + 1\n return mult\n\n\ndef _output(results):\n with open(\"output.txt\", \"w\") as f_out:\n for index, result in enumerate(results):\n f_out.write(\"Case #{}: {}\\n\".format(index + 1, result))\n\n\ndef _read_args():\n \"Gets the test cases for the current problem\"\n number_of_cases = int(sys.stdin.readline().strip())\n test_cases = []\n for _ in range(number_of_cases):\n test_cases.append([])\n for _ in range(LINES_PER_CASE):\n test_cases[-1].append(sys.stdin.readline()[:-1])\n return test_cases\n\n\ndef main():\n \"Entry point for the problem solver\"\n results = []\n test_cases = _read_args()\n for test in test_cases:\n results.append(run_problem(test))\n _output(results)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_177/3227.py","file_name":"3227.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15177853219","text":"def operate(s,commands):\r\n for i in commands:\r\n if i[0] == 'pop':\r\n s.pop()\r\n elif i[0] == 'remove':\r\n s.remove(int(i[1]))\r\n else:\r\n 
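The counting-sheep solver above removes seen digits from a list inside a try/except; accumulating a set of digit characters expresses the same loop more directly. A standalone sketch:

def last_number_before_sleep(n):
    if n == 0:
        return 'INSOMNIA'
    seen, mult = set(), 0
    while len(seen) < 10:
        mult += n
        seen |= set(str(mult))     # union in the digits of the latest multiple
    return mult


print(last_number_before_sleep(1692))  # 5076: digits 0-9 have all appeared by 3 * 1692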
s.discard(int(i[1]))\r\n print(sum(s))\r\n\r\nn = int(input())\r\ns = set(map(int, input().split()))\r\ncommands = []\r\nfor i in range(int(input())):\r\n commands.append(input().split())\r\noperate(s,commands)\r\n\r\n ","repo_name":"NikilReddyM/python-codes","sub_path":"set_discard_remove_pop.py","file_name":"set_discard_remove_pop.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27528918147","text":"def bot8(pbot, p8_bot, p8_human):\n phuman = 1 - pbot\n p8 = p8_bot * pbot + p8_human * phuman\n pbot_8 = (p8_bot * pbot) / (p8)\n print(pbot_8)\n\n# you can change these values to test your program with different values\npbot = 0.1\np8_bot = 0.8\np8_human = 0.05\n\nbot8(pbot, p8_bot, p8_human)\n","repo_name":"CasperKristiansson/Elements-of-AI-Building-Ai","sub_path":"Chapter 2 - Deailing With Uncertainty/Exercise 9 - Block or not.py","file_name":"Exercise 9 - Block or not.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"28597254403","text":"from input import *\nfrom place_paranthases import *\nfrom give_truth import *\nimport os\nclear = lambda: os.system('clear')\n\ndef press_0():\n\tprint(\"Introduce 'exit' if you want to go back to the menu\")\n\tst = input(\"Please introduce your proposition: \")\n\tclear()\n\tif(st == 'exit'):\n\t\treturn '-1'\n\tinput_f(st)\n\tprint(\"\")\t\n\treturn '0'\n\t\ndef press_1():\n\tprint(\"Introduce 'exit' if you want to go back to the menu\")\n\tprint(\"Introduce 'ls cl' if you want to see all cleaned propositions and their index\")\n\tprint(\"Introduce 'rm cl' followed by the index of the propositions you want to delete from the cleaned propositions\")\n\tprint(\"Introduce 'ls pr' if you want to see all well formed formulas and their index\")\n\tprint(\"Introduce 'rm pr' followed by the index of the propositions you want to delete from the well formed formulas file\")\n\tprint(\"Introduce 'rm dp cl' if you want to delete all duplicates of propositions in the cleaned proposition file\")\n\tprint(\"Introduce 'rm dp pr' if you want to delete all duplicates of propositions in the proposition file\")\n\tst = input(\"Please introduce: \")\n\tclear()\n\tif st == 'exit':\n\t\treturn '-1'\n\t\t\n\tif st == 'ls cl':\n\t\tclean_props = clean_waiting_propositions_values()\n\t\tfor i in range(len(clean_props)):\n\t\t\tprint(\"[\" + str(i) + \"]: \" + clean_props[i],end='')\n\t\treturn '1'\n\t\t\n\tif st == 'ls pr':\n\t\tabstruct_structures = propositions_values()\n\t\tfor i in range(len(abstruct_structures)):\n\t\t\tprint('[' + str(i) + ']: ' + abstruct_structures[i],end='')\n\t\treturn '1'\n\t\t\n\tif st[0:5] == 'rm cl':\n\t\tif len(st) == 5 or len(st) == 6:\n\t\t\treturn '1'\n\t\tclean_waiting_propositions_remove(int(st[6:]))\n\t\tprint(\"Proposition deleted succesfuly!\")\n\t\treturn '1'\n\t\t\n\tif st[0:5] == 'rm pr':\n\t\tif len(st) == 5 or len(st) == 6:\n\t\t\treturn '1'\n\t\tpropositions_remove(int(st[6:]))\n\t\tprint(\"Proposition deleted succesfuly!\")\n\t\treturn '1'\n\tif st == 'rm dp cl':\n\t\tprops = clean_waiting_propositions_values()\n\t\tprops = set(props)\n\t\tclean_props = open(\"clean_waiting_propositions.txt\",mode='w',encoding=\"utf-8\")\n\t\tclean_props.close()\n\t\tfor i in props:\n\t\t\tclean_waiting_propositions_write(i)\n\t\tprint(\"Duplicates removed from the clean_waiting_proposition.txt file\")\n\t\treturn '1'\n\tif st == 'rm dp 
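bot8 above is Bayes' rule for two hypotheses with specific numbers plugged in; written once in general form, with the record's example values reproducing its output:

def posterior(prior, likelihood_if_true, likelihood_if_false):
    evidence = likelihood_if_true * prior + likelihood_if_false * (1 - prior)
    return likelihood_if_true * prior / evidence


print(posterior(0.1, 0.8, 0.05))  # 0.64, matching bot8(0.1, 0.8, 0.05)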
pr':\n\t\tprops = propositions_values()\n\t\tprops = set(props)\n\t\ta_props = open(\"propositions.txt\",mode='w',encoding=\"utf-8\")\n\t\ta_props.close()\n\t\tfor i in props:\n\t\t\tpropositions_write(i)\n\t\tprint(\"Duplicates removed from the proposition.txt file\")\n\t\treturn '1'\n\t\ndef press_2():\n\tprint(\"Introduce 'ls' if yout want to see all cleaned propositions and their index\")\n\tprint(\"Introduce 'exit' if yout want to go back to the menu\")\n\tprint(\"The commands you can use after you introduce index of string are -l and -g\")\n\tprint(\"-l for listing of steps\")\n\tprint(\"-g for showing graphical construcion of abstract structure\")\n\tst = input(\"Plese introduce the index of the proposition you want to check: \")\n\tclear()\n\tif st == 'exit':\n\t\treturn '-1';\n\tif st == 'ls':\n\t\tclean_props = clean_waiting_propositions_values()\n\t\tclear()\n\t\tfor i in range(len(clean_props)):\n\t\t\tprint(\"[\" + str(i) + \"]: \" + clean_props[i],end='')\n\t\treturn press_2()\n\tst = st.split(\" \")\n\tnr = int(st[0])\n\td = 0\n\tif len(st) == 3 and ((st[1] == '-g' and st[2] == '-l') or (st[1] == '-l' and st[2] == '-g')):\t\n\t\td = 3\n\telif len(st) == 2:\n\t\tif st[1] == '-g':\n\t\t\td = 2\n\t\telif st[1] == '-l':\n\t\t\td = 1\n\tclean_props = clean_waiting_propositions_values()\n\trez = check_propositional_format(clean_props[nr][:len(clean_props[nr])-1],d)\n\tif type(rez) is tuple:\n\t\tif d == 1 or d == 3:\n\t\t\tprint(rez[0])\n\t\telse:\n\t\t\tprint(\"The string is not a well formed formula\")\n\t\tdlt = input('Do you want to delete the string from the \"clean_waiting_propositions.txt\" file ?[Y/N]')\n\t\tclear()\n\t\tif dlt == 'y' or dlt == 'Y':\n\t\t\tclean_waiting_propositions_remove(nr)\n\t\t\tprint(\"String was deleted succesfully\")\n\telse:\n\t\tprint(\"String \" + clean_props[nr][:len(clean_props[nr])-1] + \" is a well formed formula.\")\n\t\tprint(\"This is the abstruct structure of the formula: \" + str(rez))\n\t\tpropositions_write(str(rez) + '\\n')\n\t\tprint('Abstract structure of proposition was written to \"propositions.txt\"')\n\t\tif(d == 3 or d == 2):\n\t\t\tfi = open(\"send_cpp.txt\", mode='w')\n\t\t\tfi.write(str(rez))\n\t\t\tfi.close()\n\t\t\tos.popen('./run_here.sh')\n\treturn '2'\n\t\ndef press_3():\n\tprint(\"Introduce 'ls' if yout want to see all cleaned propositions and their index\")\n\tprint(\"Introduce 'exit' if yout want to go back to the menu\")\n\tprint(\"The command you can use after you introduce index of string is -l \")\n\tprint(\"-l for listing of steps\")\n\tst = input(\"Plese introduce the index of the proposition you want to check: \")\n\tif st == 'exit':\n\t\treturn '-1';\n\tclear()\n\tclean_props = clean_waiting_propositions_values()\n\tif st == 'ls':\n\t\tfor i in range(len(clean_props)):\n\t\t\tprint(\"[\" + str(i) + \"]: \" + clean_props[i],end='')\n\t\treturn press_3()\n\tst = st.split(\" \")\n\tnr = int(st[0])\n\td = 0\n\tif len(st) > 1 and st[1] == '-l':\n\t\td = 1\n\twff = place_paranthases(clean_props[nr][:len(clean_props[nr])-1],d)\n\tif not wff:\n\t\tprint(\"The string could not be parsed, please review your input...\")\n\t\tinn = input(\"Do you want to delete this string from the cleaned propositions file ?[Y/N]\")\n\t\tif inn == 'y' or st == 'Y':\n\t\t\tclean_waiting_propositions_remove(nr)\n\t\t\tprint('String was deleted from \"clean_waiting_propositions.txt\".')\n\telse:\n\t\tprint(\"This is the parsed ,now well formed formula: \" + str(wff))\n\t\tabstruct_structure = 
check_propositional_format(wff,0)\n\t\tprint(\"This is the abstruct structure: \" + str(abstruct_structure))\n\t\tinn = input(\"Do you want to write the well formed formula to clean_waiting_propositions.txt ?[Y/N]\")\n\t\tif inn == 'y' or st == 'Y':\n\t\t\tclean_waiting_propositions_write(wff + '\\n')\n\t\t\tprint('Proposition was written to \"clean_waiting_propositions.txt\".')\n\t\tinn = input(\"Do you want to write the abstruct structure to propositions.txt ?[Y/N]\")\n\t\tif inn == 'y' or st == 'Y':\n\t\t\tpropositions_write(str(abstruct_structure) + '\\n')\n\t\t\tprint('Abstruct structures was written to \"propositions.txt\".')\n\treturn '3'\n\t\t\ndef press_4():\n\tprint(\"Introduce 'ls' if yout want to see all cleaned propositions and their index\")\n\tprint(\"Introduce 'exit' if yout want to go back to the menu\")\n\tst = input(\"Plese introduce the index of the proposition you want to create the truth table for: \")\n\tif st == 'exit':\n\t\treturn '-1';\n\tclear()\n\tprops = propositions_values()\n\tif st == 'ls':\n\t\tfor i in range(len(props)):\n\t\t\tprint(\"[\" + str(i) + \"]: \" + props[i],end='')\n\t\treturn press_4()\n\tnr = int(st)\n\ttr = ast.literal_eval(props[nr][:len(props[nr])-1])\n\tmake_table(tr)\n\tos.system(\"python3 table_format.py\")\n\tos.system(\"less -S table.txt\")\n\treturn '4'\n\t\n\t\n\t\n\t\n\t\t\n","repo_name":"solnoc/Program_Propozitii","sub_path":"entrances.py","file_name":"entrances.py","file_ext":"py","file_size_in_byte":6638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30295553688","text":"from flask import Flask\n# pip3 install flask-wtf\n# pip install email_validator -i https://pypi.douban.com/simple/\nfrom flask_wtf import CSRFProtect\nimport settings\nfrom extends import *\nimport logging\n\nfrom apps.article.views import article_bp\nfrom apps.user.views import user_bp\nfrom apps.auth.views import auth_bp\nfrom apps.nlp.views import nlp_bp\n\n\"\"\"\n__init__.py 有两个作用:\n一是包含应用工厂;\n二是 告诉 Python flaskr 文件夹应当视作为一个包。\n\"\"\"\n\ndef create_app():\n # 初始化falsk框架\n # WSGI server:Web服务器网关接口,是为Python语言定义的 “Web服务器” 和 “Web应用程序或框架” 之间的一种简单而通用的 “接口”\n app = Flask(__name__, template_folder=\"../templates\", static_folder=\"../static\")\n\n # csrf 需要 setting.py 配置 SECRET_KEY(session也要)\n # csrf = CSRFProtect(app)\n\n # 加载配置信息\n app.config.from_object(settings.DevelopmentConfig)\n\n # initialize logging class\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # 关联ORM\n db.init_app(app)\n\n # 初始化bootstrap\n bootstrap.init_app(app)\n\n # 浏览器转化时间\n moment.init_app(app)\n\n # 登陆验证\n login_manager.session_protection = \"strong\"\n login_manager.login_view = \"user.user_login\"\n login_manager.login_message = \"必须登陆后才能访问\"\n login_manager.login_message_category = \"warning\"\n login_manager.init_app(app)\n\n # 邮箱\n mail.init_app(app)\n\n # Markdown\n pagedown.init_app(app)\n\n # 初始化restful\n # api.init_app(app)\n\n # 跨域问题\n # cors.init_app(app=app, supports_credentials=True)\n\n # 初始化缓存Cache\n redis_config = {\n 'CACHE_TYPE': 'redis', # 缓存类型用redis\n 'CACHE_REDIS_HOST': '172.31.8.80', # redis 主机地址\n 'CACHE_REDIS_PORT': 6379, # redis 端口号默认6379\n # 'CACHE_REDIS_PASSWORD': 'root',\n }\n cache.init_app(app, config=redis_config) # flask缓存到redis数据库\n\n # 注册蓝图\n\n app.register_blueprint(article_bp)\n app.register_blueprint(user_bp)\n app.register_blueprint(auth_bp)\n app.register_blueprint(nlp_bp)\n\n # 调试用\n from apps.temp.views import temp_bp\n app.register_blueprint(temp_bp)\n # 
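The 'rm dp cl' and 'rm dp pr' branches above deduplicate with set(), which also discards the file's ordering. dict.fromkeys() drops duplicates while keeping first-seen order, so the rewritten file would stay in its original sequence; a one-line sketch:

lines = ['p->q\n', 'r\n', 'p->q\n']
print(list(dict.fromkeys(lines)))  # ['p->q\n', 'r\n']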
print(app.url_map)\n return app\n\n\n\n","repo_name":"cobbai/AICenter","sub_path":"apps/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42101613988","text":"import os\nimport random\nimport sys\n\nrandom.seed(90210)\n\ndiagnoses = ['MEL', 'NV', 'BCC', 'AK', 'BKL', 'DF', 'VASC', 'SCC', 'UNK']\n\n\ndef my_algorithm(image):\n return [random.random() for _ in range(len(diagnoses))]\n\n\nif not os.path.exists('/images'):\n print('The directory /images does not exist, did you forget to mount it?', file=sys.stderr)\n sys.exit(1)\n\n# print headers\nprint(','.join(['image'] + diagnoses))\n\n# print scores for each image\nfor input_file in os.listdir('/images'):\n if input_file.endswith('.jpg'):\n scores = my_algorithm(input_file)\n print(','.join([input_file.replace('.jpg', '')] + [str(x) for x in scores]))\n","repo_name":"ImageMarkup/isic-algorithm-example","sub_path":"algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"10156088521","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport socket\nimport struct\nimport time\nimport collections\nfrom checksum import internet_checksum\n\n\nassert 3 <= sys.version_info[0], 'Requires Python 3'\n\n\nMILLISEC_PER_SEC = 1000.0 # For readability in time conversions\nRIGHT_HEXTET = 0xffff # Selects the right-most 16 bits\nBUFFER_SIZE = 2 << 5 # Size in bits of buffer in which socket data is received\nICMP_PORT_PLACEHOLDER = 1 # Port number required for socket.socket, though unused by ICMP\nICMP_HEADER_LENGTH = 8\nICMP_STRUCT_FIELDS = \"BBHHH\" # for use with struct.pack/unpack\nIP_HEADER_LENGTH = 20\n\nclass TimeoutError(Exception):\n pass\n\n\nclass ChecksumError(Exception):\n pass\n\n\n\nICMPMessage = collections.namedtuple('ICMPMessage', ['type', 'code', 'checksum',\n 'identifier', 'sequence_number']) # Named tuple for ICMP Messages\nICMPTypeCode = collections.namedtuple('ICMPTypeCode', ['type', 'code']) # For ICMP type field\nECHO_REQUEST = ICMPTypeCode(8, 0)\nECHO_REPLY = ICMPTypeCode(0, 0)\n\n\n\ndef this_instant():\n return time.perf_counter()\n\n\n\ndef ping(client_socket, dest_host, client_id, seq_no=0):\n \"\"\"\n Sends echo request, receives response, and returns RTT.\n \"\"\"\n\n def icmp_header(host_checksum):\n message = ICMPMessage(\n type=ECHO_REQUEST.type,\n code=ECHO_REQUEST.code,\n checksum=host_checksum,\n identifier=client_id,\n sequence_number=seq_no)\n return struct.pack(ICMP_STRUCT_FIELDS, *message)\n\n\n icmp_payload = struct.pack('d', this_instant()) # double-precision float\n icmp_packet_without_checksum = icmp_header(0) + icmp_payload\n checksum = internet_checksum(icmp_packet_without_checksum)\n # Convert the checksum to network byte order before packing it into the header\n checksum = socket.htons(checksum)\n \n icmp_packet = icmp_header(checksum) + icmp_payload\n\n\n # Get the host name (unchanged if already in IPv4 address format)\n dest_host = socket.gethostbyname(dest_host)\n\n\t# Send packet to destination host\n client_socket.sendto(icmp_packet, (dest_host, ICMP_PORT_PLACEHOLDER))\n \n # Try to get response\n datagram, addr = client_socket.recvfrom(BUFFER_SIZE)\n time_recv = this_instant()\n \n\t# Extract ICMP packet from datagram (drop IP Header)\n icmp_packet_recv = 
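A minimal driver for the application factory above, assuming the apps package from the record is importable and its database and Redis hosts are reachable; this is the usual way such a create_app() is consumed:

from apps import create_app

app = create_app()

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)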
datagram[IP_HEADER_LENGTH:]\n \n\t# Compute checksum on ICMP response packet (header and payload)\n checksum_recv = internet_checksum(icmp_packet_recv)\n if checksum_recv != 0:\n raise ChecksumError()\n \n\t# Extract ICMP response header from ICMP packet (8 bytes) and unpack\n icmp_recv_header = icmp_packet_recv[0:ICMP_HEADER_LENGTH]\n recv_header = struct.unpack(ICMP_STRUCT_FIELDS, icmp_recv_header)\n \n\t# Extract ICMP response payload (remaining bytes) and unpack\n icmp_recv_payload = icmp_packet_recv[ICMP_HEADER_LENGTH:]\n time_sent = struct.unpack('d', icmp_recv_payload)[0]\n \n\t# Compute round-trip time from \"time sent\"\n rtt = (time_recv - time_sent) * MILLISEC_PER_SEC\n \n return (rtt, recv_header)\n\t\n\n # If things go wrong\n # ==================\n # You might like to check (\"assert\") that:\n # 1. Type field of ICMP response header is ICMP echo reply type\n # 2. Code field of ICMP response header is ICMP echo reply code\n # 3. Identifier field of ICMP response header is client_id\n # 4. len() of ICMP response payload is struct.calcsize('d')\n\n\n\n\ndef verbose_ping(host, timeout, count, log=print):\n \"\"\"\n Send ping and print session details to command prompt.\n \"\"\"\n try:\n host_ip = socket.gethostbyname(host)\n except OSError as error:\n log(error)\n log('Could not find host {}.'.format(host))\n log('Please check name and try again.')\n return\n\n \n log(\"Pinging {} [{}] with {} bytes of data \"\n .format(host, host_ip, IP_HEADER_LENGTH + ICMP_HEADER_LENGTH + struct.calcsize('d')))\n \n round_trip_times = []\n\n for seq_no in range(count):\n try:\n with socket.socket(family=socket.AF_INET, type=socket.SOCK_RAW, \n proto=socket.getprotobyname(\"icmp\")) as sock:\n sock.settimeout(timeout/MILLISEC_PER_SEC)\n client_id = os.getpid() & RIGHT_HEXTET\n delay, response = ping(sock, host, client_id=client_id, seq_no=seq_no)\n\n log(\"Reply from {:s} in {}ms: {}\".format(host_ip, delay, response))\n\n round_trip_times.append(delay)\n \n \n except socket.timeout:\n log(\"Request timed out after {}ms\".format(timeout))\n \n except ChecksumError:\n log(\"Message has been corrupted (checksum incorrect)\")\n\n except OSError as error:\n log(\"OS error: {}. 
Please check name.\".format(error.strerror))\n if isinstance(error, PermissionError):\n # Display the likely explanation for\n # TCP Socket Error Code \"1 = Operation not permitted\":\n log(\"NB: On some systems, ICMP messages can\"\n \" only be sent from processes running as root.\")\n break\n\n\n num_sent = count\n num_recv = len(round_trip_times)\n num_lost = num_sent - num_recv\n rtts = round_trip_times\n \n \n\t# TODO: Compute & print packet statistics (number of packets received/lost) \n log(\"Ping statistics for {}\".format(host_ip))\n log(\"\\tPackets: Sent = {}, Received = {}, Lost = {} ({}% loss)\"\n .format(num_sent, num_recv, num_lost, (num_lost/num_sent)*100))\n\n\n\t# Compute & print statistics on round-trip times (Minimum, Maximum, Average)\n if num_recv > 0:\n log(\"Approximate round trip times in milli-seconds:\")\n log(\"\\tMinimum = {}, Maximum = {}, Average = {}\"\n .format(min(rtts), max(rtts), sum(rtts)/len(rtts)))\n \n\n\n\n\n\nif __name__ == '__main__':\n\n import argparse\n parser = argparse.ArgumentParser(description='Test a host.')\n parser.add_argument('-w', '--timeout',\n metavar='timeout',\n type=int,\n default=1000,\n help='Timeout to wait for each reply (milliseconds).')\n parser.add_argument('-c', '--count',\n metavar='num',\n type=int,\n default=4,\n help='Number of echo requests to send')\n parser.add_argument('hosts',\n metavar='host',\n type=str,\n nargs='+',\n help='URL or IPv4 address of target host(s)')\n args = parser.parse_args()\n\n for host in args.hosts:\n verbose_ping(host, timeout=args.timeout, count=args.count)\n","repo_name":"Dinith1/SE364.a02","sub_path":"ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":6879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42974599685","text":"#!/usr/bin/env python3\n\nprint(\"Program to add user to the data file\")\n\nusername = str(input(\"User: \"))\ncardUUID = str(input(\"CardUUID: \"))\nbtaddr = str(input(\"BtAddr: \"))\n\nwith open(\"data.csv\", \"a+\") as f:\n f.write(username + \",\" + cardUUID + \",\" + btaddr + \",\")\n\n","repo_name":"mememan0/2faAuth","sub_path":"addUser.py","file_name":"addUser.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22774996947","text":"\n#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\" This module contains functions which do not affect objects (self). Nevertheless\nthey will be needed for doing some basic jobs, e.g. making a dict from a file, making a\nlist from a file. Find below a list of functions within this module. 
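ping.py above imports internet_checksum from a local module that the record does not include. A common RFC 1071-style implementation that such a module might contain, shown here only as an assumed sketch (ones' complement of the 16-bit ones' complement sum, zero-padding odd-length input):

import struct


def internet_checksum(data: bytes) -> int:
    if len(data) % 2:
        data += b'\x00'
    total = sum(struct.unpack('!%dH' % (len(data) // 2), data))
    total = (total & 0xffff) + (total >> 16)   # fold the carry back in
    total = (total & 0xffff) + (total >> 16)   # one more fold covers a second carry
    return ~total & 0xffff


print(hex(internet_checksum(b'\x08\x00\x00\x00\x12\x34\x00\x01')))  # 0xe5ca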
They will be\nused for the dashboard app to not overload the dasboard app files.\"\"\"\n\n\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\n\n\ndef get_list_from_df(df, column_name):\n \"\"\"getting a list from column with unique values\n\n Returns\n -------\n list:\n a list with unique names of a column.\n \"\"\"\n\n return df[column_name].unique()\n\n\ndef create_dropdown_list(lst):\n \"\"\"Returns a list of dictionaries.\n\n Parameters\n ----------\n lst :\n with values\n\n Returns\n -------\n list:\n of dictionaries\n \"\"\"\n dropdown_list = [{'label': label, 'value': label}\n for label in sorted(lst)]\n\n return dropdown_list\n\n\ndef generate_card_content(card_header, card_value):\n \"\"\"[summary]\n\n Parameters\n ----------\n card_header : str, int...\n title for the card.\n card_value : number (float, int) can also be a str like '5'\n which will be cast into an int\n\n Returns\n -------\n list:\n with the elements card_header card_body\n \"\"\"\n\n card_head_style = {'textAlign': 'center', 'fontSize': '100%'}\n card_body_style = {'textAlign': 'center', 'fontSize': '150%'}\n card_header = dbc.CardHeader(card_header, style=card_head_style)\n card_body = dbc.CardBody(\n [\n html.H5(f'{int(card_value):,} EUR'.format(card_value).replace(',', '.'),\n className=\"card-title\", style=card_body_style),\n ]\n )\n card = [card_header, card_body]\n return card\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"pbretern/library-dashboard-system","sub_path":"src/utils_dash.py","file_name":"utils_dash.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32649481226","text":"import heapq\nclass Node:\n def __init__(self, val):\n self.value = val\n def __lt__(self, other):\n return self.value > other.value \nclass Solution:\n def fillCups(self, amount: List[int]) -> int:\n nodes = [Node(node) for node in amount if node > 0]\n heapq.heapify(nodes)\n ans = 0\n while len(nodes) > 1:\n x = heapq.heappop(nodes)\n y = heapq.heappop(nodes)\n x.value -= 1\n y.value -= 1\n if x.value > 0:\n heapq.heappush(nodes, x)\n if y.value > 0:\n heapq.heappush(nodes, y)\n ans += 1\n if len(nodes) == 1:\n ans += nodes[0].value\n return ans","repo_name":"parasv24/grind","sub_path":"2335-minimum-amount-of-time-to-fill-cups/2335-minimum-amount-of-time-to-fill-cups.py","file_name":"2335-minimum-amount-of-time-to-fill-cups.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44259846590","text":"\r\nfrom PIL import Image\r\nfrom numpy import asarray\r\nimport numpy as np\r\nimport math\r\nimport cv2\r\n\r\ndef save_func(m_image,name):\r\n Image.fromarray(m_image).save(name + '.png')\r\n\r\n\r\ndef sonuc(k,l,toplam,newimage):\r\n newimage[k][l]=int(toplam)\r\n\r\n\r\ndef filtre(i,j,image_,new_image,filter_matrix):\r\n toplam=0\r\n for f in range(0,3):\r\n for f1 in range(0,3):\r\n\r\n toplam=toplam+image_[i+f][j+f1]*filter_matrix[f][f1]\r\n\r\n sonuc(i,j,toplam,new_image)\r\n\r\ndef image_coloumn_row(zero_add_image,newimage,filter_matrix):\r\n for i in range(0, 512):\r\n for j in range(0, 512):\r\n filtre(i, j, zero_add_image, newimage,filter_matrix)\r\n\r\n\r\nimagelist=[]\r\nimage = Image.open('lunapark.jpg').convert('L')\r\nimagearray= 
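The fillCups record above builds a max-heap by wrapping every value in a Node whose __lt__ is inverted; pushing negated integers onto heapq achieves the same with no wrapper class. An equivalent sketch:

import heapq


def fill_cups(amount):
    heap = [-a for a in amount if a > 0]
    heapq.heapify(heap)
    seconds = 0
    while len(heap) > 1:
        x = heapq.heappop(heap)                # the two fullest cups (most negative)
        y = heapq.heappop(heap)
        for v in (x + 1, y + 1):               # pour one unit from each
            if v < 0:
                heapq.heappush(heap, v)
        seconds += 1
    return seconds + (-heap[0] if heap else 0)


print(fill_cups([1, 4, 2]))  # 4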
np.asarray(image)\r\n\r\nnewimage=np.zeros([512,512])\r\nnewimage1=np.zeros([512,512])\r\nnewimage2=np.zeros([512,512])\r\n\r\n\r\nLaplace=np.asarray([[0,1,0],[1,-4,1],[0,1,0]])\r\n\r\nmatrix__90=np.asarray([[0, -1, 0],[-1, 5, -1],[0, -1, 0]])\r\nmatrix__45=np.asarray([[-1, -1, -1],[-1, 9, -1],[-1, -1, -1]])\r\n\r\n\r\nzero_add_image = np.pad(imagearray, ((1, 1), (1, 1)), 'constant')\r\n\r\nimage_coloumn_row(zero_add_image,newimage,Laplace)\r\nimage_coloumn_row(zero_add_image,newimage1,matrix__90)\r\nimage_coloumn_row(zero_add_image,newimage2,matrix__45)\r\n\r\nan_array = np.where(newimage < 0, 0, newimage)\r\nLaplace_ = np.asarray(an_array, dtype=np.uint8)\r\n\r\nan_array1 = np.where(newimage1 < 0, 0, newimage1)\r\nan_array1 = np.where(an_array1 > 255, 255, an_array1)\r\nprint(an_array1)\r\n__90 = np.asarray(an_array1, dtype=np.uint8)\r\n\r\n\r\nan_array2 = np.where(newimage2 < 0, 0, newimage2)\r\nan_array2 = np.where(an_array2 > 255, 255, an_array2)\r\n__45 = np.asarray(an_array2, dtype=np.uint8)\r\n\r\n\r\nsave_func(Laplace_,'laplace')\r\nsave_func(__45,'keskinlestirme_45')\r\nsave_func(__90,'keskinlestirme_90')\r\nsave_func(img,'dene')\r\n#\r\n#\r\n","repo_name":"melekturan/Image-Processing","sub_path":"Laplacian_Filter.py","file_name":"Laplacian_Filter.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25168599235","text":"from tests.integration_tests.test_app import app\nfrom tests.integration_tests.base_tests import SupersetTestCase\nfrom superset.db_engine_specs.base import BaseEngineSpec\nfrom superset.models.core import Database\n\n\nclass TestDbEngineSpec(SupersetTestCase):\n def sql_limit_regex(\n self,\n sql,\n expected_sql,\n engine_spec_class=BaseEngineSpec,\n limit=1000,\n force=False,\n ):\n main = Database(database_name=\"test_database\", sqlalchemy_uri=\"sqlite://\")\n limited = engine_spec_class.apply_limit_to_sql(sql, limit, main, force)\n self.assertEqual(expected_sql, limited)\n","repo_name":"apache/superset","sub_path":"tests/integration_tests/db_engine_specs/base_tests.py","file_name":"base_tests.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"11857707200","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-04-27 17:57:00\n# @Auth : ${guoshengkang} (${kangguosheng1@huokeyi.com})\n\nimport os\nimport pandas as pd\nfrom peewee import Model,PostgresqlDatabase\nfrom playhouse.pool import PooledPostgresqlDatabase\nimport datetime\n\ndb = PooledPostgresqlDatabase(database='ios2',\n host='172.12.78.217',\n port=5432,\n user='postgres',\n password='Password123',\n max_connections=20, # 可省略\n stale_timeout=300, # 可省略\n )\n\nclass BaseModel(Model):\n class Meta:\n database = db\n\n\n# sql=\"\"\"\n# \tSELECT\n# \t商品编号 AS id,\n# \tCAST(销售日期 AS date) AS date,\n# \tMAX(\"售价\") \t\t AS fact_price,\n# \tSUM(销售数量) AS quantity,\n# \t0 AS label,\n# \tt1.商品类别 AS class\n# \tFROM tmp_bgy_data_from_20160101 t1\n# \tINNER JOIN \n# \t\t(SELECT\n# \t\t商品类别,\n# \t\tcount(DISTINCT 商品编号) AS p_num\n# \t\tFROM tmp_bgy_data_from_20160101\n# \t\tGROUP BY 商品类别 HAVING count(DISTINCT 商品编号)>=20\n# \t\tORDER BY p_num\n# \t\t) t2\n# \tON t1.商品类别=t2.商品类别\n# \tINNER JOIN \n# \t\t(SELECT\n# \t\tid,\n# \t\tcount(1) AS days\n# \t\tFROM tmp_bgy_order_data\n# \t\tGROUP BY id HAVING count(1)>=20\n# \t\t) t3\n# \tON t1.商品编号=t3.id\t\n# \tWHERE \"功能号\"='1' AND 销售数量>0 AND 
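The filtre()/image_coloumn_row() loops above implement a 3x3 correlation cell by cell; cv2.filter2D, which the record already imports, performs the same zero-padded pass in one call (identical to convolution here because the kernels are symmetric). A sketch with random data standing in for the lunapark.jpg grayscale image:

import numpy as np
import cv2

image = np.random.randint(0, 256, (512, 512), dtype=np.uint8)
laplace = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=np.float64)
out = cv2.filter2D(image.astype(np.float64), -1, laplace, borderType=cv2.BORDER_CONSTANT)
out = np.clip(out, 0, 255).astype(np.uint8)   # the same clamp the record applies with np.where
print(out.shape)  # (512, 512)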
t1.商品类别='10102'\n# \tGROUP BY t1.商品类别,商品编号,CAST(销售日期 AS date)\n# \tORDER BY t1.商品类别,商品编号,CAST(销售日期 AS date)\n# \t\"\"\"\n\n# sql=\"\"\"\n# \tSELECT\n# \tt1.*,\n# \tt2.classfication_level AS class,\n# \t0 AS label\n# \tFROM tmp_kgs_all_data t1\n# \tINNER JOIN \n# \t\t(\n# \t\tSELECT\n# \t\tDISTINCT product_id AS id,\n# \t\tclassfication_level\n# \t\tFROM ios_optimization_classificaition\n# \t\tWHERE warehouse_id=1\n# \t\t) t2\n# \tON t1.\"id\"=t2.\"id\"\n# \tWHERE t1.\"date\">='2018-01-01' \n# \tAND t2.classfication_level<=3 \n# \tAND t2.classfication_level>0\n# \tAND t1.\"id\"=43791\n# \t-- AND t1.\"id\"=44156\n# \t-- AND (t1.\"id\"=41427 OR t1.\"id\"=41841 OR t1.\"id\"=44156)\n# \tORDER BY class,id,date\n# \"\"\"\n\n# sql=\"\"\"\n# SELECT \n# t1.*\n# FROM tmp_kgs_abc_6sku t1\n# WHERE \n# -- t1.\"id\"=14118 \n# -- t1.id=17185 \n# t1.id=14270 \n# -- t1.id=12391 \n# -- t1.id=13420 \n# -- t1.id=13859\n# \"\"\"\nsql=\"\"\"\n\tSELECT \n\tt1.\"id\",\n\tt1.\"date\",\n\tt1.quantity,\n\tt1.tag_price,\n\tt1.fact_price,\n\tt1.discount/t1.quantity AS discount,\n\tt4.promotion,\n\t0 AS label\n\tFROM tmp_kgs_all_data t1\n\t-- INNER JOIN tmp_kgs_selected_class6_skus t2\n\t-- ON t1.\"id\"=t2.id\n\tINNER JOIN ios_base_product t3\n\tON t1.\"id\"=t3.product_id\n\tLEFT JOIN\n\t\t(\n\t\tSELECT\n\t\tDISTINCT bar_code,promotion_date, 1 AS promotion\n\t\tFROM tmp_kgs_promotion\n\t\t) t4\n\tON t3.bar_code=t4.bar_code AND CONCAT(EXTRACT(YEAR from t1.\"date\"),'-',EXTRACT(MONTH from t1.\"date\"))=t4.promotion_date\n\tWHERE t1.\"date\">='2018-01-01'\n\tAND t1.\"id\"=44117\n\t-- AND (t1.\"id\"=44564 OR t1.\"id\"=44655 OR t1.\"id\"=46873 OR t1.\"id\"=46876)\n\tORDER BY \"id\",\"date\"\n\t\"\"\"\n\ninput_data_df=pd.read_sql(sql, db)\n# 将date列转成日期类型\ninput_data_df['date']=input_data_df['date'].map(lambda x: datetime.datetime.strptime(str(x),'%Y-%m-%d'))\nprint(input_data_df.head())\n# print(input_data_df.dtypes) #datetime64\n\nconfig_sql='''\n\tSELECT\n\twork_day AS date,\n\tholiday_type AS holiday,\n\tmonth AS season,\n\tis_weekend AS weekend\n\tFROM \n\tios_base_work_calendar\n\t'''\nconfig_data_df=pd.read_sql(config_sql, db)\n# 将date列转成日期类型\nconfig_data_df['date']=config_data_df['date'].map(lambda x: datetime.datetime.strptime(str(x),'%Y-%m-%d'))\n# config_data_df['weekend_5']=config_data_df['weekend'].map(lambda x: x+5)\n\nprint(config_data_df.head())\nprint(config_data_df.dtypes) #datetime64","repo_name":"guoshengkang/demand-forcast","sub_path":"cdf_v1/algorithm/demand_forecast/get_data_from_db.py","file_name":"get_data_from_db.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"25579733388","text":"from ChessGame import *\r\n\r\ndef readCommand( argv ):\r\n\r\n from optparse import OptionParser\r\n\r\n usage = \" \"\r\n parser = OptionParser()\r\n\r\n parser.add_option(\"-m\", \"--mode\", dest=\"Mode\", type=\"int\", action=\"store\", default=1)\r\n parser.add_option(\"-o\", \"--opening\", dest=\"Distribution\", type=\"int\", action=\"store\", default=0)\r\n parser.add_option(\"-i\", \"--moveInfo\", dest=\"ShowMove\", action=\"store_true\", default=False)\r\n parser.add_option(\"-I\", \"--searchInfo\", dest=\"ShowSearch\", action=\"store_true\", default=False)\r\n parser.add_option(\"-a\", \"--auto\", dest=\"auto\", action=\"store_true\", default=False)\r\n parser.add_option(\"-d\", \"--depth_0\", dest=\"depth_0\", type=\"int\", action=\"store\", default=3)\r\n parser.add_option(\"-D\", \"--depth_1\", 
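The script above converts its date columns with map(lambda x: datetime.strptime(...)); pandas vectorizes that directly, and an explicit format avoids per-row parsing guesses. A tiny sketch:

import pandas as pd

df = pd.DataFrame({'date': ['2018-01-01', '2018-01-02']})
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
print(df.dtypes)  # date    datetime64[ns]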
dest=\"depth_1\", type=\"int\", action=\"store\", default=3)\r\n parser.add_option(\"-p\", \"--pos_eva_0\", dest=\"use_pos_0\", action=\"store_true\", default=False)\r\n parser.add_option(\"-P\", \"--pos_eva_1\", dest=\"use_pos_1\", action=\"store_true\", default=False)\r\n parser.add_option(\"-s\", \"--save\", dest=\"SaveInfo\", action=\"store_true\", default=False)\r\n\r\n options, otherjunk = parser.parse_args(argv)\r\n if len(otherjunk) != 0:\r\n raise Exception(\"Command line input not understood: \" + str(otherjunk))\r\n args = dict()\r\n\r\n args[\"mode\"] = options.Mode\r\n args[\"dis\"] = options.Distribution\r\n args[\"depth_0\"] = options.depth_0\r\n args[\"depth_1\"] = options.depth_1\r\n\r\n if options.ShowMove == True:\r\n args[\"showMove\"] = True\r\n else:\r\n args[\"showMove\"] = False\r\n if options.ShowSearch != True:\r\n args[\"showSearch\"] = False\r\n else:\r\n args[\"showSearch\"] = True\r\n\r\n if options.auto == True:\r\n args[\"showGUI\"] = False\r\n args[\"mode\"] = 2\r\n else:\r\n args[\"showGUI\"] = True\r\n\r\n if options.use_pos_0 == True:\r\n args[\"use_pos_0\"] = True\r\n\r\n if options.use_pos_1 == True:\r\n args[\"use_pos_1\"] = True\r\n\r\n if options.SaveInfo == True:\r\n args[\"saveInfo\"] = True\r\n\r\n return args\r\n\r\nif __name__ == '__main__':\r\n\r\n import sys\r\n args = readCommand(sys.argv[1:]) # Get game components based on input\r\n game = ChessGame()\r\n game.Set(**args)\r\n game.start()","repo_name":"IngInx747/Chess","sub_path":"Chess.py","file_name":"Chess.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41046738494","text":"from django.db import models\nfrom twilio.rest import Client\nimport uuid\n\n# Create your models here.\nclass person(models.Model):\n name = models.CharField(max_length=50)\n number = models.IntegerField()\n emailid = models.EmailField(max_length=80)\n qwertypass = models.CharField(max_length=20)\n unique = models.TextField(primary_key=True, help_text='Unique ID for this person')\n\n def __str__(self):\n return str(self.number)\n\n def save(self, *args, **kwargs):\n account_sid = \"AC13925746a3d2d7a3993719c04e00ec5d\"\n auth_token = 'ed090b2d2dcf91ca19d010b88f2dbfa8'\n client = Client(account_sid, auth_token)\n\n message = client.messages.create(\n body=f'This is your OTP for {self.name} : {self.unique}',\n from_='+18646060883',\n to=f'+91{self.number}'\n )\n print(message.sid)\n\n\n return super().save(*args, **kwargs)\n","repo_name":"parthasarthi17/Django_projects","sub_path":"twillio_app/messanger/messager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7669543914","text":"# -*- coding: utf_8 -*-\r\nu\"\"\"\r\nMake Index Plug-in\r\n\r\nLicensed under the MIT License.\r\nCopyright (c) 2007-2012 Kota Saito\r\n\r\nReflec の中継状態を表す XML ファイルを作成します.\r\nReflec を複数起動している場合, 複数の中継状態が\r\n1つの XML ファイルに書かれる事になります.\r\n\"\"\"\r\n\r\nimport os\r\nimport os.path\r\nimport time\r\nimport logging\r\nimport threading\r\nfrom xml.etree.ElementTree import ElementTree, Element, SubElement\r\n\r\nfrom reflec.plugin import ReflecBasePlugin\r\n\r\n__all__ = [\"MakeIndexPlugin\"]\r\n\r\n__load__ = [\"MakeIndexPlugin\"]\r\n\r\n#-------------------------------------------------------------------------------\r\n# 
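The person model above embeds its Twilio SID and auth token in source and sends an SMS on every save(), including ordinary updates. Reading the credentials from the environment keeps them out of the repository; a sketch (the variable names are conventional, not taken from the record):

import os
from twilio.rest import Client

client = Client(os.environ['TWILIO_ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])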
MakeIndexPlugin\r\n#-------------------------------------------------------------------------------\r\n\r\nclass MakeIndexPlugin(ReflecBasePlugin):\r\n u\"\"\"\r\n Reflec でのミラー状況のインデックスファイルを作成するプラグイン.\r\n \"\"\"\r\n\r\n # XML ファイルのエンコーディング\r\n encoding = \"utf-8\"\r\n\r\n # ロックを試す回数\r\n lock_try = 3\r\n\r\n def app_start(self, app):\r\n self.lock = threading.Lock()\r\n self.filename = self.app.abspath(\r\n self.option.get(\"makeindex\", \"filename\", \"index.dat\") )\r\n self.server_address = \"%s:%d\" % self.server.server_address\r\n self.start_time = time.strftime(\"%Y/%m/%d %H:%M:%S\")\r\n self.on_list = False\r\n\r\n def client_start_streaming(self, client):\r\n if not self.on_list:\r\n self.on_list = True\r\n self.emit()\r\n\r\n def server_client_num(self, server):\r\n if self.on_list:\r\n self.emit()\r\n\r\n def client_finish_streaming(self, app):\r\n if self.on_list:\r\n self.on_list = False\r\n self.emit()\r\n\r\n def emit(self, removal = False):\r\n u\"\"\"\r\n インデックスファイルを更新する.\r\n \"\"\"\r\n self.lock.acquire()\r\n try:\r\n doc = None\r\n if os.path.isfile(self.filename):\r\n doc = ElementTree(file = self.filename)\r\n else:\r\n root = Element(\"index\")\r\n doc = ElementTree(root)\r\n\r\n index = -1\r\n for i, e in enumerate(doc.findall(\"live\")):\r\n if e.get(\"server\", \"\") == self.server_address:\r\n index = i\r\n doc.getroot().remove(e)\r\n\r\n if self.on_list:\r\n doc.getroot().insert(index, self.live_element())\r\n\r\n f = open(self.filename, \"w\")\r\n f.write('\\n' % self.encoding)\r\n doc.write(f, self.encoding)\r\n f.close()\r\n finally:\r\n self.lock.release()\r\n\r\n def live_element(self):\r\n u\"\"\"\r\n 現在の状態を表す Element オブジェクトを作成して返す.\r\n \"\"\"\r\n el = Element(\"live\", server = self.server_address)\r\n SubElement(el, \"start\").text = self.start_time\r\n SubElement(el, \"num\").text = str(self.server.client_num)\r\n SubElement(el, \"max\").text = str(self.server.client_max)\r\n info = SubElement(el, \"media\")\r\n for k, v in self.client.media_info.items():\r\n SubElement(info, k).text = v\r\n return el\r\n","repo_name":"kotas/reflec","sub_path":"reflec-plugins/makeindex.py","file_name":"makeindex.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"40846718156","text":"from pygame import Surface, Vector2, Color, Rect\nfrom pygame.font import Font\n\nfrom src.config import Config, Colors\nfrom src.game.Card import Card\nfrom src.utils import text_utils\n\n\ndef draw_key_value(screen: Surface, font: Font, starting_point: Vector2, shift: Vector2, color: Color, key: str, value: any):\n text = font.render(\"{}: {}\".format(key, value), True, color)\n screen.blit(text, starting_point + shift)\n\n\ndef draw_key_multi_value(screen: Surface, font: Font, starting_point: Vector2, shift: Vector2, gap: int, color: Color, key: str, values: [str]):\n key_text = font.render(\"{}:\".format(key), True, color)\n screen.blit(key_text, starting_point + shift)\n\n for index, value in enumerate(values):\n value_text = font.render(\"{}\".format(value), True, color)\n screen.blit(value_text, starting_point + shift + index * gap)\n\n\ndef draw_cards(screen: Surface, starting_point: Vector2, color: Color, text: str, cards: list[Card]):\n # Text\n title_text = Config.FONT_SM_BOLD.render(text, True, color)\n shift: Vector2 = Vector2(10, 10)\n\n screen.blit(title_text, starting_point + shift)\n\n block_size: Vector2 = Vector2(20, 20)\n\n ind = 0\n\n for key in cards:\n row = ind // 8\n 
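MakeIndexPlugin.emit() above writes its own <?xml?> declaration before calling doc.write(f, encoding); ElementTree.write() can emit the declaration itself. A standalone sketch mirroring the plugin's root element:

from xml.etree.ElementTree import Element, ElementTree, SubElement

root = Element('index')
SubElement(root, 'live', server='127.0.0.1:8000')
ElementTree(root).write('index.dat', encoding='utf-8', xml_declaration=True)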
col = ind % 8\n\n card_ind = Config.FONT_SM_BOLD.render('{0:02d}'.format(key.card_id), True, (206, 215, 132))\n card_ind = text_utils.add_outline(card_ind, 2, Colors.GREY_DARK_2)\n screen.blit(card_ind, (starting_point.x + (block_size.x + 10) * col + 10 + 150, starting_point.y + (block_size.x + 5) * row))\n ind = ind + 1\n\n\n# This one has text-wrapping\ndef draw_text_in_rect(surface: Surface, text: str, color: Color, rect: Rect, font: Font, aa: bool = False, bkg: Color | None = None) -> str:\n rect = Rect(rect)\n y = rect.top\n line_spacing = -2\n\n # get the height of the font\n font_height = font.size(\"Tg\")[1]\n\n while text:\n i = 1\n\n # determine if the row of text will be outside our area\n if y + font_height > rect.bottom:\n break\n\n # determine maximum width of line\n while font.size(text[:i])[0] < rect.width and i < len(text):\n i += 1\n\n # if we've wrapped the text, then adjust the wrap to the last word\n if i < len(text):\n i = text.rfind(\" \", 0, i) + 1\n\n # render the line and blit it to the surface\n if bkg:\n image = font.render(text[:i], 1, color, bkg)\n image.set_colorkey(bkg)\n else:\n image = font.render(text[:i], aa, color)\n\n surface.blit(image, (rect.left, y))\n y += font_height + line_spacing\n\n # remove the text we just blitted\n text = text[i:]\n\n return text\n","repo_name":"iambaangkok/RootAI","sub_path":"src/utils/draw_utils.py","file_name":"draw_utils.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20435003608","text":"import os\nimport streamlit as st\nfrom pytube import YouTube\n\n# Función para descargar el video\n@st.cache(allow_output_mutation=True)\ndef download_video(video_url, selected_stream):\n try:\n yt = YouTube(video_url)\n video_stream = yt.streams.get_by_itag(selected_stream.itag)\n\n download_path = os.path.expanduser(\"~\") + os.path.sep + \"Downloads\"\n video_stream.download(output_path=download_path)\n return download_path, yt.thumbnail_url\n except Exception as e:\n return None, None\n\ndef main():\n st.title(\"YouTube Video Downloader\")\n\n # Crear un input para ingresar la URL del video\n video_url = st.text_input(\"Ingresa la URL del video de YouTube\")\n\n if video_url:\n selected_stream = None\n\n try:\n yt = YouTube(video_url)\n st.image(yt.thumbnail_url, caption=\"Miniatura del video\")\n\n # Obtener las resoluciones disponibles\n video_streams = yt.streams.filter(file_extension=\"mp4\").order_by(\"resolution\").desc()\n\n # Crear un select para elegir la calidad del video\n video_qualities = [f\"{stream.resolution}\" for stream in video_streams]\n selected_quality = st.selectbox(\"Selecciona la calidad del video:\", video_qualities)\n\n selected_stream = video_streams[video_qualities.index(selected_quality)]\n\n except Exception as e:\n st.error(\"Ocurrió un error: \" + str(e))\n\n if selected_stream and st.button(\"Descargar\"):\n download_path, thumbnail_url = download_video(video_url, selected_stream)\n\n if download_path:\n st.success(\"Descarga completada. 
El video se encuentra en: \" + download_path)\n st.markdown(f\"**[Descargar aquí]({download_path})**\")\n else:\n st.error(\"Ocurrió un error durante la descarga.\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DevLaCruz/yt-downloader","sub_path":"youtube_downloader_app.py","file_name":"youtube_downloader_app.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19524427847","text":"#!/usr/bin/python3\n\"define a class rectangle\"\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"define some attributes and Getter/Setter\"\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"initializing the attributes required\"\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)\n\n @property\n def width(self):\n return self.__width\n\n @width.setter\n def width(self, value):\n \" Getter / Setter method for the property\"\n\n if not isinstance(value, int):\n raise TypeError(\"width must be an integer\")\n elif value <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = value\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, value):\n \" Getter / Setter method for the property\"\n\n if not isinstance(value, int):\n raise TypeError(\"height must be an integer\")\n elif value <= 0:\n raise ValueError(\"height must be > 0\")\n self.__height = value\n\n @property\n def x(self):\n return self.__x\n\n @x.setter\n def x(self, value):\n \" Getter / Setter method for the property\"\n\n if not isinstance(value, int):\n raise TypeError(\"x must be an integer\")\n elif value < 0:\n raise ValueError(\"x must be >= 0\")\n self.__x = value\n\n @property\n def y(self):\n return self.__y\n\n @y.setter\n def y(self, value):\n \" Getter / Setter method for the property\"\n\n if not isinstance(value, int):\n raise TypeError(\"y must be an integer\")\n elif value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value\n\n def area(self):\n \"calculate the area of the rectangle\"\n return (self.__width * self.__height)\n\n def display(self):\n \"display the figure of the rectangle\"\n for j in range(self.__y):\n print(\"\")\n if (self.__width == 0) or (self.__height == 0):\n print(\" \" * self.__x, end='')\n return \"\"\n for i in range(self.__height):\n if i == (self.__height - 1):\n print(\" \" * self.__x, end='')\n print('#' * self.__width)\n return \"\"\n print(\" \" * self.__x, end='')\n print('#' * self.__width)\n\n def __str__(self):\n return \"[Rectangle] ({0}) {1}/{2} - {3}/{4}\".format(self.id, self.__x, self.__y, self.__width, self.__height)\n\n @classmethod\n def rebase(cls, *args):\n cls(*args)\n\n def update(self, *args, **kwargs):\n \"Update method for the instance\"\n if args and len(args) != 0:\n for i in range(len(args)):\n if i == 0:\n self.id = args[i]\n elif i == 1:\n self.width = args[i]\n elif i == 2:\n self.height = args[i]\n elif i == 3:\n self.x = args[i]\n elif i == 4:\n self.y = args[i]\n elif kwargs and len(kwargs) != 0:\n for i , j in kwargs.items():\n if i == 'id':\n self.id = j\n elif i == 'width':\n self.width = j\n elif i == 'height':\n self.height = j\n elif i == 'x':\n self.x = j\n elif i == 'y':\n self.y = 
j\n","repo_name":"CharlesIro1125/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36322807970","text":"from django.urls import reverse\nfrom django.contrib import admin\nfrom django.db.models import Count\nfrom django.forms.widgets import Select\nfrom django.db.models import BooleanField\nfrom django.utils.html import format_html\nfrom django.core.validators import validate_email\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom catalogue.forms import FixTinyMCEHasTooWideUIForm\nfrom catalogue.models import (\n CeceLabel,\n Certificate,\n Category,\n Subcategory,\n PaymentOption,\n Store,\n Brand,\n Size,\n Color,\n Material,\n Product,\n FavoriteProduct,\n)\nfrom catalogue.filters import ChoiceDropdownFilter\nfrom catalogue.filters import RelatedDropdownFilter\n\n\n@admin.register(CeceLabel)\nclass CeceLabelAdmin(admin.ModelAdmin):\n list_display = (\n \"name\",\n \"get_count\",\n )\n search_fields = (\"name\", \"info\")\n ordering = (\"name\",)\n readonly_fields = (\n \"cece_api_url\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n form = FixTinyMCEHasTooWideUIForm\n\n fieldsets = (\n (None, {\"fields\": (\"name\", \"info\")}),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\n \"cece_api_url\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n ),\n },\n ),\n )\n\n def get_queryset(self, request):\n # TODO: aggregate/annotate `count` instead of `get_count` to reduce # queries.\n return super().get_queryset(request)\n\n def get_count(self, obj):\n return Product.objects.filter(brand__in=obj.brands.all()).distinct().count()\n\n get_count.short_description = _(\"# Products\")\n # get_count.admin_order_field = \"count\"\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n\n@admin.register(Certificate)\nclass CertificateAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"get_count\")\n search_fields = (\"name\", \"info\")\n ordering = (\"name\",)\n readonly_fields = (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n form = FixTinyMCEHasTooWideUIForm\n\n fieldsets = (\n (None, {\"fields\": (\"name\", \"info\")}),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n ),\n },\n ),\n )\n\n def get_queryset(self, request):\n # TODO: aggregate/annotate `count` instead of `get_count` to reduce # queries.\n return super().get_queryset(request)\n\n def get_count(self, obj):\n return Product.objects.filter(brand__in=obj.brands.all()).distinct().count()\n\n get_count.short_description = _(\"# Products\")\n # get_count.admin_order_field = \"count\"\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n\n@admin.register(Subcategory)\nclass SubcategoryAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"_active\", \"get_count\", \"get_category\", \"get_section\")\n search_fields = (\"name\", \"category\")\n ordering = (\n \"category__section\",\n \"category\",\n \"name\",\n )\n readonly_fields = (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n formfield_overrides = {\n BooleanField: 
{\"widget\": Select},\n }\n actions = (\n \"activate\",\n \"deactivate\",\n )\n\n fieldsets = (\n (\n None,\n {\n \"fields\": (\n \"name\",\n \"category\",\n \"active\",\n )\n },\n ),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n ),\n },\n ),\n )\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .select_related(\"category\")\n .annotate(count=Count(\"products\"))\n )\n\n def _active(self, obj):\n return obj.active\n\n _active.boolean = True\n\n def get_count(self, obj):\n return obj.count\n\n get_count.short_description = _(\"# Products\")\n get_count.admin_order_field = \"count\"\n\n def get_category(self, obj):\n return format_html(\n \"{1}\".format(\n reverse(\"admin:catalogue_category_change\", args=[obj.category.pk]),\n obj.category.name,\n )\n )\n\n get_category.short_description = _(\"Category\")\n get_category.admin_order_field = \"category__name\"\n\n def get_section(self, obj):\n return obj.category.get_section_display()\n\n get_section.short_description = _(\"Section\")\n\n def activate(self, request, queryset):\n for instance in queryset:\n instance.active = True\n instance.save()\n\n activate.short_description = _(\"Activate selected Subcategories\")\n\n def deactivate(self, request, queryset):\n for instance in queryset:\n instance.active = False\n instance.save()\n\n deactivate.short_description = _(\"Deactivate selected Subcategories\")\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n\nclass SubcategoryAdminInline(admin.StackedInline):\n model = Subcategory\n fields = (\"name\",)\n extra = 0\n\n\n@admin.register(Category)\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"_active\", \"get_count\", \"section\", \"get_subcategories\")\n list_filter = (\n (\"active\", ChoiceDropdownFilter),\n \"section\",\n )\n search_fields = (\"name\",)\n ordering = (\n \"section\",\n \"name\",\n )\n readonly_fields = (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n inlines = (SubcategoryAdminInline,)\n formfield_overrides = {\n BooleanField: {\"widget\": Select},\n }\n actions = (\n \"activate\",\n \"deactivate\",\n )\n\n fieldsets = (\n (None, {\"fields\": (\"name\", \"section\", \"active\")}),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n ),\n },\n ),\n )\n\n def get_queryset(self, request):\n return super().get_queryset(request).annotate(count=Count(\"products\"))\n\n def _active(self, obj):\n return obj.active\n\n _active.boolean = True\n\n def get_count(self, obj):\n return obj.count\n\n get_count.short_description = _(\"# Products\")\n get_count.admin_order_field = \"count\"\n\n def get_subcategories(self, obj):\n return format_html(\n \"
\".join(\n \"{1}\".format(\n reverse(\"admin:catalogue_subcategory_change\", args=[c.pk]), c.name\n )\n for c in obj.subcategories.iterator()\n )\n )\n\n get_subcategories.short_description = _(\"Subcategories\")\n\n def activate(self, request, queryset):\n for instance in queryset:\n instance.active = True\n instance.save()\n\n activate.short_description = _(\"Activate selected Categories\")\n\n def deactivate(self, request, queryset):\n for instance in queryset:\n instance.active = False\n instance.save()\n\n deactivate.short_description = _(\"Deactivate selected Categories\")\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n\n@admin.register(PaymentOption)\nclass PaymentOptionAdmin(admin.ModelAdmin):\n list_display = (\n \"name\",\n \"show_logo\",\n )\n search_fields = (\"name\",)\n ordering = (\"name\",)\n readonly_fields = (\n \"cece_api_url\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n\n fieldsets = (\n (None, {\"fields\": (\"name\", \"logo\")}),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\n \"cece_api_url\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n ),\n },\n ),\n )\n\n def show_logo(self, obj):\n return format_html(\n \"{1}\".format(obj.logo, obj.name)\n )\n\n show_logo.short_description = _(\"Logo\")\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n\n@admin.register(Store)\nclass StoreAdmin(admin.ModelAdmin):\n list_display = (\n \"name\",\n \"_active\",\n \"show_logo\",\n \"get_count\",\n )\n list_filter = (\n (\"active\", ChoiceDropdownFilter),\n (\"payment_options\", RelatedDropdownFilter),\n )\n search_fields = (\"name\", \"info\")\n ordering = (\"name\",)\n readonly_fields = (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n filter_horizontal = (\"payment_options\",)\n form = FixTinyMCEHasTooWideUIForm\n formfield_overrides = {\n BooleanField: {\"widget\": Select},\n }\n actions = (\n \"activate\",\n \"deactivate\",\n )\n\n fieldsets = (\n (\n None,\n {\"fields\": (\"name\", \"active\", \"info\", \"url\", \"logo\", \"payment_options\")},\n ),\n (\n _(\"Address (administrative)\"),\n {\"fields\": (\"address\", \"zip_code\", \"city\", \"country\")},\n ),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n ),\n },\n ),\n )\n\n def get_queryset(self, request):\n return super().get_queryset(request).annotate(count=Count(\"products\"))\n\n def _active(self, obj):\n return obj.active\n\n _active.boolean = True\n\n def get_count(self, obj):\n return obj.count\n\n get_count.short_description = _(\"# Products\")\n get_count.admin_order_field = \"count\"\n\n def show_logo(self, obj):\n return format_html(\n \"{1}\".format(obj.logo, obj.name)\n )\n\n show_logo.short_description = _(\"Logo\")\n\n def activate(self, request, queryset):\n for instance in queryset:\n instance.active = True\n instance.save()\n\n activate.short_description = _(\"Activate selected Stores\")\n\n def deactivate(self, request, queryset):\n for instance in queryset:\n instance.active = False\n instance.save()\n\n deactivate.short_description = _(\"Deactivate selected Stores\")\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n\n@admin.register(Brand)\nclass BrandAdmin(admin.ModelAdmin):\n list_display = 
(\"name\", \"_active\", \"get_count\", \"get_certificates\", \"get_labels\")\n list_filter = (\n (\"active\", ChoiceDropdownFilter),\n (\"labels\", RelatedDropdownFilter),\n (\"certificates\", RelatedDropdownFilter),\n )\n search_fields = (\"name\", \"info\")\n ordering = (\"name\",)\n readonly_fields = (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n filter_horizontal = (\"labels\", \"certificates\")\n form = FixTinyMCEHasTooWideUIForm\n formfield_overrides = {\n BooleanField: {\"widget\": Select},\n }\n actions = (\n \"activate\",\n \"deactivate\",\n )\n\n fieldsets = (\n (None, {\"fields\": (\"name\", \"active\", \"info\", \"url\", \"logo\")}),\n (_(\"Sustainability criteria\"), {\"fields\": (\"labels\", \"certificates\")}),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n ),\n },\n ),\n )\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .prefetch_related(\n \"labels\",\n \"certificates\",\n )\n .annotate(\n count=Count(\"products\"),\n )\n )\n\n def _active(self, obj):\n return obj.active\n\n _active.boolean = True\n\n def get_count(self, obj):\n return obj.count\n\n get_count.short_description = _(\"# Products\")\n get_count.admin_order_field = \"count\"\n\n def get_labels(self, obj):\n return format_html(\n \"
\".join(\n \"{1}\".format(\n reverse(\"admin:catalogue_cecelabel_change\", args=[c.pk]), c.name\n )\n for c in obj.labels.all()\n )\n )\n\n get_labels.short_description = _(\"Labels\")\n\n def get_certificates(self, obj):\n return format_html(\n \"
\".join(\n \"{1}\".format(\n reverse(\"admin:catalogue_certificate_change\", args=[c.pk]), c.name\n )\n for c in obj.certificates.all()\n )\n )\n\n get_certificates.short_description = _(\"Certificates\")\n\n def activate(self, request, queryset):\n for instance in queryset:\n instance.active = True\n instance.save()\n\n activate.short_description = _(\"Activate selected Brands\")\n\n def deactivate(self, request, queryset):\n for instance in queryset:\n instance.active = False\n instance.save()\n\n deactivate.short_description = _(\"Deactivate selected Brands\")\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n\n@admin.register(Size)\nclass SizeAdmin(admin.ModelAdmin):\n list_display = (\n \"name\",\n \"get_count\",\n )\n search_fields = (\"name\",)\n ordering = (\"date_created\",)\n readonly_fields = (\n \"cece_api_url\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n\n fieldsets = (\n (None, {\"fields\": (\"name\",)}),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\"date_created\", \"date_updated\", \"last_updated_by\"),\n },\n ),\n )\n\n def get_queryset(self, request):\n return super().get_queryset(request).annotate(count=Count(\"products\"))\n\n def get_count(self, obj):\n return obj.count\n\n get_count.short_description = _(\"# Products\")\n get_count.admin_order_field = \"count\"\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n\n@admin.register(Color)\nclass ColorAdmin(admin.ModelAdmin):\n list_display = (\n \"name\",\n \"get_count\",\n )\n search_fields = (\"name\",)\n ordering = (\"date_created\",)\n readonly_fields = (\n \"cece_api_url\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n\n fieldsets = (\n (None, {\"fields\": (\"name\",)}),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\"date_created\", \"date_updated\", \"last_updated_by\"),\n },\n ),\n )\n\n def get_queryset(self, request):\n return super().get_queryset(request).annotate(count=Count(\"products\"))\n\n def get_count(self, obj):\n return obj.count\n\n get_count.short_description = _(\"# Products\")\n get_count.admin_order_field = \"count\"\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n\n@admin.register(Material)\nclass MaterialAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"get_count\")\n search_fields = (\"name\", \"info\")\n ordering = (\"name\",)\n readonly_fields = (\n \"cece_api_url\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n form = FixTinyMCEHasTooWideUIForm\n\n fieldsets = (\n (None, {\"fields\": (\"name\", \"info\")}),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\"date_created\", \"date_updated\", \"last_updated_by\"),\n },\n ),\n )\n\n def get_queryset(self, request):\n return super().get_queryset(request).annotate(count=Count(\"products\"))\n\n def get_count(self, obj):\n return obj.count\n\n get_count.short_description = _(\"# Products\")\n get_count.admin_order_field = \"count\"\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n\n# TODO: check once ProductFactory has support for from_price\nclass ProductIsOnSaleFilter(admin.SimpleListFilter):\n title = _(\"Sale\")\n parameter_name = \"sale\"\n\n def lookups(self, request, model_admin):\n return ((False, _(\"Regular\")), (True, _(\"Sale\")))\n\n def queryset(self, request, 
queryset):\n if self.value() == \"True\":\n return queryset.filter(from_price__isnull=False).distinct()\n elif self.value() == \"False\":\n return queryset.filter(from_price__isnull=True).distinct()\n else:\n return queryset\n\n\nclass FavoriteProductAdminInlineForProductAdmin(admin.StackedInline):\n fk_name = \"product\"\n model = FavoriteProduct\n fields = (\n \"product\",\n \"user\",\n \"size\",\n \"quantity\",\n )\n extra = 0\n\n\n@admin.register(Product)\nclass ProductAdmin(admin.ModelAdmin):\n list_display = (\n \"name\",\n \"_active\",\n \"get_brand\",\n \"get_brand_active\",\n \"get_store\",\n \"get_store_active\",\n \"get_categories\",\n \"get_sections\",\n \"cece_id\",\n \"get_favorites_count\",\n \"date_created\",\n \"date_updated\",\n )\n list_filter = (\n (\"active\", ChoiceDropdownFilter),\n (\"store__active\", ChoiceDropdownFilter),\n (\"brand__active\", ChoiceDropdownFilter),\n (\"categories__active\", ChoiceDropdownFilter),\n (\"subcategories__active\", ChoiceDropdownFilter),\n (\"categories\", RelatedDropdownFilter),\n (\"brand\", RelatedDropdownFilter),\n (\"store\", RelatedDropdownFilter),\n (\"materials\", RelatedDropdownFilter),\n (\"sizes\", RelatedDropdownFilter),\n (\"colors\", RelatedDropdownFilter),\n ProductIsOnSaleFilter,\n )\n search_fields = (\n \"name\",\n \"info\",\n \"extra_info\",\n \"cece_id\",\n \"brand__name\",\n \"store__name\",\n )\n ordering = (\"name\",)\n readonly_fields = (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n filter_horizontal = (\"categories\", \"subcategories\", \"materials\", \"sizes\", \"colors\")\n form = FixTinyMCEHasTooWideUIForm\n actions = (\n \"activate\",\n \"deactivate\",\n )\n inlines = (FavoriteProductAdminInlineForProductAdmin,)\n\n fieldsets = (\n (\n None,\n {\n \"fields\": (\n \"name\",\n \"cece_id\",\n \"url\",\n \"price\",\n \"from_price\",\n \"main_image\",\n \"thumbnail\",\n \"extra_images\",\n \"chosen_image\",\n \"info\",\n \"extra_info\",\n \"brand\",\n \"store\",\n \"categories\",\n \"colors\",\n \"sizes\",\n \"materials\",\n )\n },\n ),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\n \"cece_api_url\",\n \"slug\",\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n ),\n },\n ),\n )\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .select_related(\n \"brand\",\n \"store\",\n )\n .prefetch_related(\n \"categories\",\n \"subcategories\",\n \"colors\",\n \"sizes\",\n )\n .annotate(favorites_count=Count(\"favorites\"))\n )\n\n def _active(self, obj):\n return obj.active\n\n _active.boolean = True\n\n def get_brand(self, obj):\n return format_html(\n \"{1}\".format(\n reverse(\"admin:catalogue_brand_change\", args=[obj.brand.pk]),\n obj.brand.name,\n )\n )\n\n get_brand.short_description = _(\"Brand\")\n get_brand.admin_order_field = \"brand\"\n\n def get_brand_active(self, obj):\n return obj.brand.active\n\n get_brand_active.short_description = _(\"Brand active\")\n get_brand_active.boolean = True\n\n def get_store(self, obj):\n return format_html(\n \"{1}\".format(\n reverse(\"admin:catalogue_store_change\", args=[obj.store.pk]),\n obj.store.name,\n )\n )\n\n get_store.short_description = _(\"Store\")\n get_store.admin_order_field = \"store\"\n\n def get_store_active(self, obj):\n return obj.store.active\n\n get_store_active.short_description = _(\"Store active\")\n get_store_active.boolean = True\n\n def get_labels(self, obj):\n return format_html(\n \"
\".join(\n \"{1}\".format(\n reverse(\"admin:catalogue_label_change\", args=[c.pk]), c.name\n )\n for c in obj.brand.labels.iterator()\n )\n )\n\n get_labels.short_description = _(\"Labels\")\n get_labels.admin_order_field = \"brand__labels__name\"\n\n def get_certificates(self, obj):\n return format_html(\n \"
\".join(\n \"{1}\".format(\n reverse(\"admin:catalogue_certificates_change\", args=[c.pk]), c.name\n )\n # .iterator here seemed 300 ms faster than .all while the\n # number of queries did not increase. Curious.\n for c in obj.brand.certificates.iterator()\n )\n )\n\n get_certificates.short_description = _(\"Certificates\")\n get_certificates.admin_order_field = \"brand__certificates__name\"\n\n def get_categories(self, obj):\n return format_html(\n \"
\".join(\n \"{1}\".format(\n reverse(\"admin:catalogue_category_change\", args=[c.pk]), c.name\n )\n # for some reason iterator starts querying the db, whereas\n # all does not (b/c prefetched). Curious.\n for c in obj.categories.all()\n )\n )\n\n get_categories.short_description = _(\"Categories\")\n get_categories.admin_order_field = \"categories__name\"\n\n def get_sections(self, obj):\n return format_html(\n \"
\".join(c.get_section_display() for c in obj.categories.all())\n )\n\n get_sections.short_description = _(\"Sections\")\n get_sections.admin_order_field = \"categories__section\"\n\n def get_favorites_count(self, obj):\n return obj.favorites_count\n\n get_favorites_count.short_description = _(\"<3\")\n get_favorites_count.admin_order_field = \"favorites_count\"\n\n def activate(self, request, queryset):\n for instance in queryset:\n instance.active = True\n instance.save()\n\n activate.short_description = _(\"Activate selected Products\")\n\n def deactivate(self, request, queryset):\n for instance in queryset:\n instance.active = False\n instance.save()\n\n deactivate.short_description = _(\"Deactivate selected Products\")\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n\n def history_view(self, request, object_id, extra_context=None):\n \"\"\" Hack the history view such that it renders html \"\"\"\n s = super().history_view(request, object_id, extra_context=None)\n action_list = s.context_data[\"action_list\"]\n for log_entry in action_list:\n try:\n log_entry.change_message = format_html(log_entry.change_message)\n except KeyError:\n pass\n return s\n\n\n@admin.register(FavoriteProduct)\nclass FavoriteProductAdmin(admin.ModelAdmin):\n list_display = (\"product\", \"user\", \"size\", \"quantity\")\n search_fields = (\n \"product__cece_id\",\n \"product__name\",\n \"product__brand__name\",\n \"product__store__name\",\n \"user__email\",\n \"user__full_name\",\n )\n ordering = (\"user__id\", \"product__name\")\n readonly_fields = (\n \"date_created\",\n \"date_updated\",\n \"last_updated_by\",\n )\n\n fieldsets = (\n (None, {\"fields\": (\"product\", \"user\", \"size\", \"quantity\")}),\n (\n _(\"Meta\"),\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\"date_created\", \"date_updated\", \"last_updated_by\"),\n },\n ),\n )\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .select_related(\n \"user\",\n \"product\",\n )\n .prefetch_related(\n \"product__brand\",\n \"product__store\",\n )\n )\n\n def save_model(self, request, obj, form, change):\n obj.last_updated_by = request.user\n obj.save()\n","repo_name":"tlrh314/mancelot","sub_path":"backend/apps/catalogue/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":26711,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"25165766665","text":"import json\nimport logging\nfrom typing import Any, Optional\n\nfrom flask_appbuilder.models.sqla import Model\nfrom marshmallow import ValidationError\n\nfrom superset import security_manager\nfrom superset.commands.base import BaseCommand, UpdateMixin\nfrom superset.commands.dashboard.exceptions import (\n DashboardForbiddenError,\n DashboardInvalidError,\n DashboardNotFoundError,\n DashboardSlugExistsValidationError,\n DashboardUpdateFailedError,\n)\nfrom superset.commands.utils import populate_roles\nfrom superset.daos.dashboard import DashboardDAO\nfrom superset.daos.exceptions import DAOUpdateFailedError\nfrom superset.exceptions import SupersetSecurityException\nfrom superset.extensions import db\nfrom superset.models.dashboard import Dashboard\n\nlogger = logging.getLogger(__name__)\n\n\nclass UpdateDashboardCommand(UpdateMixin, BaseCommand):\n def __init__(self, model_id: int, data: dict[str, Any]):\n self._model_id = model_id\n self._properties = data.copy()\n self._model: Optional[Dashboard] = None\n\n def run(self) -> Model:\n 
self.validate()\n assert self._model\n\n try:\n dashboard = DashboardDAO.update(self._model, self._properties, commit=False)\n if self._properties.get(\"json_metadata\"):\n dashboard = DashboardDAO.set_dash_metadata(\n dashboard,\n data=json.loads(self._properties.get(\"json_metadata\", \"{}\")),\n commit=False,\n )\n db.session.commit()\n except DAOUpdateFailedError as ex:\n logger.exception(ex.exception)\n raise DashboardUpdateFailedError() from ex\n return dashboard\n\n def validate(self) -> None:\n exceptions: list[ValidationError] = []\n owners_ids: Optional[list[int]] = self._properties.get(\"owners\")\n roles_ids: Optional[list[int]] = self._properties.get(\"roles\")\n slug: Optional[str] = self._properties.get(\"slug\")\n\n # Validate/populate model exists\n self._model = DashboardDAO.find_by_id(self._model_id)\n if not self._model:\n raise DashboardNotFoundError()\n # Check ownership\n try:\n security_manager.raise_for_ownership(self._model)\n except SupersetSecurityException as ex:\n raise DashboardForbiddenError() from ex\n\n # Validate slug uniqueness\n if not DashboardDAO.validate_update_slug_uniqueness(self._model_id, slug):\n exceptions.append(DashboardSlugExistsValidationError())\n\n # Validate/Populate owner\n if owners_ids is None:\n owners_ids = [owner.id for owner in self._model.owners]\n try:\n owners = self.populate_owners(owners_ids)\n self._properties[\"owners\"] = owners\n except ValidationError as ex:\n exceptions.append(ex)\n if exceptions:\n raise DashboardInvalidError(exceptions=exceptions)\n\n # Validate/Populate role\n if roles_ids is None:\n roles_ids = [role.id for role in self._model.roles]\n try:\n roles = populate_roles(roles_ids)\n self._properties[\"roles\"] = roles\n except ValidationError as ex:\n exceptions.append(ex)\n if exceptions:\n raise DashboardInvalidError(exceptions=exceptions)\n","repo_name":"apache/superset","sub_path":"superset/commands/dashboard/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"5855445580","text":"import os\n\nfrom telemetry.internal.actions import page_action\n\n\ndef read_js():\n with open(os.path.join(os.path.dirname(__file__), 'mouse_click.js')) as f:\n return f.read()\n\n\nclass MouseClickAction(page_action.PageAction):\n _MOUSE_CLICK_JAVASCRIPT = read_js()\n\n def __init__(self, selector=None):\n super(MouseClickAction, self).__init__()\n self._selector = selector\n\n def WillRunAction(self, tab):\n \"\"\"Load the mouse click JS code prior to running the action.\"\"\"\n super(MouseClickAction, self).WillRunAction(tab)\n tab.ExecuteJavaScript(MouseClickAction._MOUSE_CLICK_JAVASCRIPT)\n done_callback = 'function() { window.__mouseClickActionDone = true; }'\n tab.ExecuteJavaScript(\"\"\"\n window.__mouseClickActionDone = false;\n window.__mouseClickAction = new __MouseClickAction(%s);\"\"\"\n % (done_callback))\n\n def RunAction(self, tab):\n code = '''\n function(element, info) {\n if (!element) {\n throw Error('Cannot find element: ' + info);\n }\n window.__mouseClickAction.start({\n element: element\n });\n }'''\n page_action.EvaluateCallbackWithElement(\n tab, code, selector=self._selector)\n tab.WaitForJavaScriptExpression('window.__mouseClickActionDone', 
60)\n","repo_name":"googlearchive/big-rig","sub_path":"app/src/thirdparty/telemetry/internal/actions/mouse_click.py","file_name":"mouse_click.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":857,"dataset":"github-code","pt":"61"}
{"seq_id":"35537887490","text":"from itertools import accumulate\n\ndef solution(stones):\n\n    psum = [0] + list(accumulate(stones))\n    n = len(stones)\n    table = [[None] * n for _ in range(n)]\n\n    def solve(i, j):\n\n        if i == j:\n            return 0\n\n        if table[i][j] is not None: return table[i][j]\n\n        case1 = psum[j+1] - psum[i+1] - solve(i+1, j)\n        case2 = psum[j] - psum[i] - solve(i, j-1)\n        table[i][j] = max(case1, case2)\n\n        return table[i][j]\n\n    return solve(0, n-1)\n\nif __name__ == \"__main__\":\n\n    stones = [5,3,1,4,2]\n    print(solution(stones))\n\n    stones = [1,1,1,1,1,1,1,7,7,1,1,1,1,1,1,1]\n    print(solution(stones))\n\n","repo_name":"kenchan0824/coding-challenges","sub_path":"Stone Game VII/topdown.py","file_name":"topdown.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"75242081474","text":"import argparse\nimport io_utils as io\nimport numpy as np\nimport vector_cache\nimport ppr\nimport vector_utils as vu\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\n\n\ndef plot_cache_size(network_filepath, output_file, query_size, alpha, network_size_divisor):\n    cache_filepath = io.get_cache_filepath(network_filepath)\n\n    weight_matrix = io.load_csr_matrix(network_filepath)\n    dimension = weight_matrix.shape[0]\n    dividand = int(dimension / network_size_divisor)\n    ind = list(np.arange(dividand, dimension, dividand))\n    ind.append(dimension)\n\n    cache = vector_cache.vector_cache()\n    cache.load_from_file(cache_filepath)\n\n    query_nodes = vu.get_query_sets(1, query_size, range(dimension))[0]\n\n    results = OrderedDict()\n    results[\"Standard\"] = [ppr.standard_ppr(weight_matrix, query_nodes, alpha).num_iterations] * len(ind)\n    results[\"Total Sum\"] = [ppr.cached_ppr(weight_matrix, query_nodes, cache, c_size, alpha, norm_method=vu.total_sum).num_iterations for c_size in ind]\n    results[\"Twice Normalized\"] = [ppr.cached_ppr(weight_matrix, query_nodes, cache, c_size, alpha, norm_method=vu.twice_normalized).num_iterations for c_size in ind]\n    results[\"Chebyshev Standard\"] = [ppr.standard_ppr(weight_matrix, query_nodes, alpha, ppr_method=ppr.chebyshev_ppr).num_iterations] * len(ind)\n    results[\"Chebyshev Total Sum\"] = [ppr.cached_ppr(weight_matrix, query_nodes, cache, c_size, alpha, ppr_method=ppr.chebyshev_ppr, norm_method=vu.total_sum).num_iterations for c_size in ind]\n    results[\"Chebyshev Twice Normalized\"] = [ppr.cached_ppr(weight_matrix, query_nodes, cache, c_size, alpha, ppr_method=ppr.chebyshev_ppr, norm_method=vu.twice_normalized).num_iterations for c_size in ind]\n\n    for key, value in results.items():\n        plt.plot(ind, value, label=key)\n    plt.title(r'Cache Size vs. Number of Iterations ($\\alpha$=%.2f, |Q|=%d)' % (alpha, query_size))\n    plt.xlabel(\"Cache Size\")\n    plt.ylabel(\"Number of Iterations\")\n    plt.ylim([0, max(value[0] for value in results.values()) * 2])\n    plt.legend()\n    plt.savefig(output_file)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Plots Cache Size vs. 
Number of Iterations for PPR experiments\")\n parser.add_argument('--network_filepath', type=str)\n parser.add_argument('--output_file', type=str)\n parser.add_argument('--query_size', type=int)\n parser.add_argument('--alpha', type=float)\n parser.add_argument('--network_size_divisor', type=int)\n parser.set_defaults(network_filepath=\"Data/Email-Enron.mat\", output_file=\"Plots/cache_size_plot.png\", query_size=200,\n alpha=.01, network_size_divisor=100)\n\n args = parser.parse_args()\n plot_cache_size(args.network_filepath, args.output_file, args.query_size, args.alpha, args.network_size_divisor)\n","repo_name":"joewledger/Indexed_PPR","sub_path":"Src/plot_cache_size.py","file_name":"plot_cache_size.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74348436675","text":"from keras.models import Sequential\nfrom keras.layers.recurrent import GRU\nfrom keras.layers import Dense, Conv1D, LeakyReLU, Dropout, BatchNormalization, TimeDistributed, ZeroPadding1D, CuDNNGRU\nfrom keras.optimizers import Adam\nfrom keras.callbacks import TensorBoard, ModelCheckpoint\nimport datetime\nfrom matplotlib import pyplot\n\nfrom utils import ensure_dirs\nfrom VADSequence import VADSequence\nfrom dataset_loader import vad_voice_train, vad_noise_train, vad_voice_test, vad_noise_test\n\nmodel_name = \"vad2\"\nrun = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")\ngpu = True\n\nensure_dirs([\"./models\", \"./models/\" + run])\n\nopt = Adam(lr=0.0001, decay=0.00005)\n\nbatch_size = 100\ntimeseries_length = 100\nnb_epochs = 60\n\nvoice_train = vad_voice_train()\nnoise_train = vad_noise_train()\nprint(\"Voice train set shape\", voice_train.shape)\nprint(\"Noise train set shape\", noise_train.shape)\ntrain_generator = VADSequence(voice_train,\n noise_train,\n timeseries_length=timeseries_length,\n batch_size=batch_size,\n name=\"train\")\ndel voice_train\ndel noise_train\n\nvoice_test = vad_voice_test()\nnoise_test = vad_noise_test()\nprint(\"Voice test set shape\", voice_test.shape)\nprint(\"Noise test set shape\", noise_test.shape)\ntest_generator = VADSequence(voice_test, noise_test,\n timeseries_length=timeseries_length,\n batch_size=batch_size,\n name=\"test\")\ndel voice_test\ndel noise_test\n\n\ntrain_sample = train_generator[0]\ntest_sample = test_generator[0]\nprint(\"Training X shape: \" + str(train_sample[0].shape))\nprint(\"Training Y shape: \" + str(train_sample[1].shape))\nprint(\"Test X shape: \" + str(test_sample[0].shape))\nprint(\"Test Y shape: \" + str(test_sample[1].shape))\n\nprint('Building CONV LSTM RNN model ...')\ninput_shape = train_sample[0].shape\n\nprint(input_shape)\n\nrecurrent_layer = CuDNNGRU if gpu else GRU\n\nmodel = Sequential()\n\nmodel.add(ZeroPadding1D(1,\n input_shape=input_shape[1:]))\n\nmodel.add(Conv1D(128, 3))\n\nmodel.add(LeakyReLU())\n\nmodel.add(BatchNormalization())\n\nmodel.add(Dropout(0.4))\n\nmodel.add(ZeroPadding1D(1))\n\nmodel.add(Conv1D(64, 3))\n\nmodel.add(LeakyReLU())\n\nmodel.add(BatchNormalization())\n\nmodel.add(Dropout(0.4))\n\nmodel.add(recurrent_layer(64, return_sequences=True))\n\nmodel.add(LeakyReLU())\n\nmodel.add(Dropout(0.4))\n\nmodel.add(BatchNormalization())\n\nmodel.add(recurrent_layer(32, return_sequences=True))\n\nmodel.add(LeakyReLU())\n\nmodel.add(Dropout(0.4))\n\nmodel.add(TimeDistributed(Dense(10)))\n\nmodel.add(LeakyReLU())\n\nmodel.add(BatchNormalization())\n\nmodel.add(Dropout(0.4))\n\nmodel.add(TimeDistributed(Dense(1, 
activation=\"sigmoid\")))\n\nprint(\"Compiling ...\")\nmodel.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\nmodel.summary()\n\ncallbacks = [\n TensorBoard(log_dir='./tensorboard_logs/' + model_name + '-' + run, histogram_freq=0, batch_size=batch_size),\n ModelCheckpoint(\"./models/\" + model_name + \"_\" + run + \"/model_vad2.{epoch:02d}.hdf5\", monitor='val_loss',\n verbose=0,\n save_best_only=False,\n save_weights_only=False,\n mode='auto', period=1)\n]\n\nprint(\"Training ...\")\nensure_dirs([\"./models\", \"./models/\" + model_name + \"_\" + run])\n\nhistory = model.fit_generator(train_generator,\n epochs=nb_epochs,\n validation_data=test_generator,\n callbacks=callbacks)\n\npyplot.plot(history.history['loss'])\npyplot.plot(history.history['val_loss'])\npyplot.title('model train vs validation loss')\npyplot.ylabel('loss')\npyplot.xlabel('epoch')\npyplot.legend(['train', 'validation'], loc='upper right')\npyplot.show()\n\n\ndef model(input_shape, optimizer):\n print('Building CONV LSTM RNN model ...')\n\n recurrent_layer = CuDNNGRU if gpu else GRU\n\n model = Sequential()\n\n model.add(ZeroPadding1D(1,\n input_shape=input_shape))\n\n model.add(Conv1D(128, 3))\n\n model.add(LeakyReLU())\n\n model.add(BatchNormalization())\n\n model.add(Dropout(0.4))\n\n model.add(ZeroPadding1D(1))\n\n model.add(Conv1D(64, 3))\n\n model.add(LeakyReLU())\n\n model.add(BatchNormalization())\n\n model.add(Dropout(0.4))\n\n model.add(recurrent_layer(64, return_sequences=True))\n\n model.add(LeakyReLU())\n\n model.add(Dropout(0.4))\n\n model.add(BatchNormalization())\n\n model.add(recurrent_layer(32, return_sequences=True))\n\n model.add(LeakyReLU())\n\n model.add(Dropout(0.4))\n\n model.add(TimeDistributed(Dense(10)))\n\n model.add(LeakyReLU())\n\n model.add(BatchNormalization())\n\n model.add(Dropout(0.4))\n\n model.add(TimeDistributed(Dense(1, activation=\"sigmoid\")))\n\n print(\"Compiling ...\")\n model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n model.summary()\n\n return model\n","repo_name":"dmednis/speaker-segmenter","sub_path":"vad-net.py","file_name":"vad-net.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15331237016","text":"print('Задание 2. 
Криптовалюта\\n')\n\ndata = {\n \"address\": \"0x544444444444\",\n \"ETH\": {\n \"balance\": 444,\n \"totalIn\": 444,\n \"totalOut\": 4\n },\n \"count_txs\": 2,\n \"tokens\": [\n {\n \"fst_token_info\": {\n \"address\": \"0x44444\",\n \"name\": \"fdf\",\n \"decimals\": 0,\n \"symbol\": \"dsfdsf\",\n \"total_supply\": \"3228562189\",\n \"owner\": \"0x44444\",\n \"last_updated\": 1519022607901,\n \"issuances_count\": 0,\n \"holders_count\": 137528,\n \"price\": False\n },\n \"balance\": 5000,\n \"totalIn\": 0,\n \"total_out\": 0\n },\n {\n \"sec_token_info\": {\n \"address\": \"0x44444\",\n \"name\": \"ggg\",\n \"decimals\": \"2\",\n \"symbol\": \"fff\",\n \"total_supply\": \"250000000000\",\n \"owner\": \"0x44444\",\n \"last_updated\": 1520452201,\n \"issuances_count\": 0,\n \"holders_count\": 20707,\n \"price\": False\n },\n \"balance\": 500,\n \"totalIn\": 0,\n \"total_out\": 0\n }\n ]\n}\n\nprint(f'Список ключей: {data.keys()}')\nprint(f'Список значений: {data.values()}')\ndata['ETH']['total_diff'] = 100\ndata['tokens'][0]['fst_token_info']['name'] = 'doge'\ndata['ETH']['totalOut'] = data['tokens'][0].pop('total_out') + data['tokens'][1].pop('total_out')\ndata['tokens'][1]['sec_token_info']['total_price'] = data['tokens'][1]['sec_token_info'].pop('price')","repo_name":"QuenbKing/Python_Homework","sub_path":"Module 6/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43384897188","text":"# !/usr/local/biotools/python/3.4.3/bin/python3\n__author__ = \"...\"\n__email__ = \"...\"\n__status__ = \"Dev\"\n\nimport openslide\nimport tensorflow as tf\nimport os\nimport argparse\nimport sys\nimport pwd\nimport time\nimport subprocess\nimport re\nimport shutil\nimport glob\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport tempfile\nimport math\nimport io\nimport re\nimport matplotlib\n#from skimage.filters import threshold_otsu\n#from skimage.color import rgb2lab,rgb2hed\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n#from dataset_utils import *\nfrom shapely.geometry import Polygon, Point, MultiPoint\nfrom shapely.geometry import geo\nfrom descartes.patch import PolygonPatch\nimport xml.etree.ElementTree as ET\nfrom xml.dom import minidom\n\n'''function to check if input files exists and valid'''\n\ndef concat_images(imga, imgb):\n \"\"\"\n Combines two color image ndarrays side-by-side.\n \"\"\"\n \n ha,wa = imga.shape[:2]\n hb,wb = imgb.shape[:2]\n max_height = np.max([ha, hb])\n total_width = wa+wb\n new_img = np.zeros(shape=(max_height, total_width, 3))\n new_img[:ha,:wa]=imga\n new_img[:hb,wa:wa+wb]=imgb\n return new_img\n\n\n\n\n\ndef create_binary_mask_new(input_label_file,svs_file,patch_dir,patch_level):\n\n fn=os.path.basename(svs_file)\n OSobj = openslide.OpenSlide(svs_file)\n divisor = int(OSobj.level_dimensions[0][0]/3000)\n \n patch_sub_size_x=int(OSobj.level_dimensions[0][0]/divisor)\n patch_sub_size_y=int(OSobj.level_dimensions[0][1]/divisor)\n img = OSobj.get_thumbnail((patch_sub_size_x, patch_sub_size_y))\n img = img.convert('RGB')\n img.save(os.path.join(patch_dir , fn + \"_1.png\"), \"png\")\n np_img = np.array(img)\n\n patch_sub_size_y = np_img.shape[0]\n patch_sub_size_x = np_img.shape[1]\n f, ax = plt.subplots(frameon=False)\n f.tight_layout(pad=0, h_pad=0, w_pad=0)\n ax.set_xlim(0, patch_sub_size_x)\n ax.set_ylim(patch_sub_size_y, 0)\n ax.imshow(img)\n \n poly_included_0 = []\n poly_included_1 = []\n 
fobj=open(input_label_file)\n header = fobj.readline()\n for i in fobj:\n i = i.strip()\n arr1 = i.split(\"\\t\")\n pca1 = int(arr1[2])/255\n arr1[1]=arr1[1].replace(\".png\",\"\")\n arr = arr1[1].split(\"_\")\n x1 = int(arr[len(arr)-5])/divisor\n x2 = int(arr[len(arr)-4])/divisor\n y1 = int(arr[len(arr)-2])/divisor\n y2 = int(arr[len(arr)-1])/divisor\n\n poly_included_0.append(Polygon([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)]))\n poly_included_1.append(pca1)\n fobj.close() \n \n for j in range(0, len(poly_included_1)):\n #print(poly_included_1[j])\n #print(poly_included_0[j])\n #sys.exit(0)\n patch1 = PolygonPatch(poly_included_0[j], facecolor=[0, 0, 0], edgecolor=\"green\", alpha=poly_included_1[j], linewidth=1,zorder=2)\n ax.add_patch(patch1)\n ax.set_axis_off()\n DPI = f.get_dpi()\n plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)\n f.set_size_inches(patch_sub_size_x / DPI, patch_sub_size_y / DPI)\n f.savefig(os.path.join(patch_dir , fn + \"_2.png\"), pad_inches='tight')\n\n images = [Image.open(x) for x in [os.path.join(patch_dir , fn + \"_1.png\"), os.path.join(patch_dir , fn + \"_2.png\")]]\n widths, heights = zip(*(i.size for i in images))\n\n total_width = sum(widths)\n max_height = max(heights)\n\n new_im = Image.new('RGB', (total_width, max_height))\n\n x_offset = 0\n for im in images:\n new_im.paste(im, (x_offset,0))\n x_offset += im.size[0]\n\n new_im.save(os.path.join(patch_dir , fn + \".png\"),\"png\")\n #os.remove(os.path.join(patch_dir , fn + \"_1.png\"))\n #os.remove(os.path.join(patch_dir , fn + \"_2.png\"))\n \ndef main():\n\n input_label_file=\"normalized_pca.txt\"\n svs_file=\"183533.svs\"\n patch_dir=\"...\"\n patch_level=0\n '''creating binary mask to inspect areas with tissue and performance of threshold''' \n create_binary_mask_new(input_label_file,svs_file,patch_dir,patch_level)\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Yunongzzz/AnoGAN_Preprocessing","sub_path":"summary_plots.py","file_name":"summary_plots.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12339636775","text":"\"\"\"Django settings file to get basic Django instance running.\"\"\"\nimport environ\n\n\n# SETTINGS FILE\n# ----------------------------------------------------------------------------\n# Add all secret setting variables to a config.env file in the\n# test directory\nROOT_DIR = environ.Path(__file__) - 1\nPACKAGE_DIR = environ.Path(__file__) - 2\nENV = environ.Env()\nENV.read_env(env_file=ROOT_DIR.file('config.env'))\n\n\n# DEBUG SETTINGS\n# ----------------------------------------------------------------------------\n# Used for sandbox - DO NOT USE IN PRODUCTION\nDEBUG = True\nTEMPLATE_DEBUG = True\nSQL_DEBUG = True\n\n\n# BASE DJANGO SETTINGS\n# ----------------------------------------------------------------------------\nSECRET_KEY = ENV('DJANGO_SECRET_KEY', default='214dfsdf7ughfgdasd3446@FDF46#')\nSITE_ID = 1\nINTERNAL_IPS = ('127.0.0.1',)\nROOT_URLCONF = 'urls'\nAPPEND_SLASH = True\n\n\n# ADMIN SETTINGS\n# ----------------------------------------------------------------------------\nADMINS = (\n # ('Your Name', 'your_email@domain.com'),\n)\nMANAGERS = ADMINS\n\n\n# EMAIL SETTINGS\n# ----------------------------------------------------------------------------\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# LOCALIZATION SETTINGS\n# 
----------------------------------------------------------------------------\nUSE_TZ = True\nTIME_ZONE = 'UTC'\nLANGUAGE_CODE = 'en-ca'\nLANGUAGES = (\n ('en-ca', 'English'),\n)\nUSE_I18N = True\nUSE_L10N = True\n\n\n# DJANGO APPLICATIONS\n# ----------------------------------------------------------------------------\nINSTALLED_APPS = (\n # Django Apps\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.flatpages',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n # External Apps\n 'helcim',\n 'debug_toolbar',\n # Project Apps\n 'example_app.apps.ExampleAppConfig',\n)\n\n# DJANGO MIDDLEWARE\n# ----------------------------------------------------------------------------\nMIDDLEWARE = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\n\n# DATABASE SETTINGS\n# ----------------------------------------------------------------------------\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': str(ROOT_DIR.path('db.sqlite3')),\n }\n}\nATOMIC_REQUESTS = True\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',\n },\n}\n\n\n# TEMPLATE SETTINGS\n# ----------------------------------------------------------------------------\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n ROOT_DIR.path('templates'),\n ],\n 'OPTIONS': {\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ],\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.request',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.contrib.messages.context_processors.messages',\n ],\n }\n }\n]\n\n\n# MEDIA SETTINGS\n# ----------------------------------------------------------------------------\nMEDIA_ROOT = ROOT_DIR.path('media')\nMEDIA_URL = '/media/'\n\n\n# STATIC SETTINGS\n# ----------------------------------------------------------------------------\nSTATIC_URL = '/static/'\nSTATIC_ROOT = ROOT_DIR.path('static')\n\n\n# AUTHENTICATION SETTINGS\n# ----------------------------------------------------------------------------\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n)\nLOGIN_REDIRECT_URL = '/accounts/'\n\n\n# DJANGO-HELCIM SETTINGS\n# ----------------------------------------------------------------------------\nHELCIM_API_TEST = ENV('HELCIM_API_TEST', default=True)\nHELCIM_API_URL = ENV('HELCIM_API_URL', default='')\nHELCIM_ACCOUNT_ID = ENV('HELCIM_ACCOUNT_ID', default='')\nHELCIM_API_TOKEN = ENV('HELCIM_API_TOKEN', default='')\nHELCIM_TERMINAL_ID = ENV('HELCIM_TERMINAL_ID', default='')\nHELCIM_JS_CONFIG = {\n 'purchase': {\n 'url': ENV('HELCIM_JS_PURCHASE_URL', default=''),\n 'token': ENV('HELCIM_JS_PURCHASE_TOKEN', default='1234567890'),\n },\n}\nHELCIM_ENABLE_TRANSACTION_CAPTURE = ENV(\n 'HELCIM_ENABLE_TRANSACTION_CAPTURE', 
default=True\n)\nHELCIM_ENABLE_TRANSACTION_REFUND = ENV(\n 'HELCIM_ENABLE_TRANSACTION_REFUND', default=True\n)\nHELCIM_ENABLE_TOKEN_VAULT = ENV('HELCIM_ENABLE_TOKEN_VAULT', default=True)\nHELCIM_REDACT_ALL = ENV('HELCIM_REDACT_ALL', default=True)\n","repo_name":"studybuffalo/django-helcim","sub_path":"sandbox/scratch/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"8354240375","text":"from django.db import models\nfrom django.conf import settings\nfrom django.shortcuts import reverse\n # for forms\nfrom django import forms\n\nCATEGORY_CHOICES = (\n ('R','Running'),\n ('T','Trainers'),\n ('HT','High-tops')\n)\n\nLABEL_CHOICES = (\n ('P','primary'),\n ('S','secondary'),\n ('D','danger')\n)\n\n# Create your models here.\n\n#Item(s) in shop\nclass Item(models.Model): \n title = models.CharField(max_length = 100)\n price = models.FloatField()\n discount_price = models.FloatField(blank=True, null=True)\n category = models.CharField(choices=CATEGORY_CHOICES, max_length=2)\n label = models.CharField(choices=LABEL_CHOICES, max_length=1)\n slug = models.SlugField()\n description = models.TextField()\n out_of_stock = models.BooleanField(default=False)\n #image = models.ImageField()\n \n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse(\"core:product\", kwargs={'slug':self.slug})\n\n def get_add_to_cart_url(self):\n return reverse(\"core:add-to-cart\", kwargs={'slug':self.slug})\n \n def get_remove_from_cart_url(self):\n return reverse(\"core:remove-from-cart\", kwargs={'slug':self.slug})\n\n \n\n#Item(s) in cart\nclass OrderItem(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete = models.CASCADE)\n ordered = models.BooleanField(default=False)\n savings = False\n item = models.ForeignKey(Item, on_delete = models.CASCADE)\n quantity = models.IntegerField(default=1)\n\n def __str__(self):\n return f\"{self.quantity} of {self.item.title}\"\n \n def get_total_item_price(self):\n if (self.item.discount_price):\n total_item_price = self.quantity * self.item.discount_price\n savings = True\n return total_item_price\n else:\n total_item_price = self.quantity * self.item.price \n return total_item_price\n \n def get_item_savings(self):\n amount_saved = (self.item.price * self.quantity) - self.get_total_item_price()\n return amount_saved\n\n#orders containing item(s)\nclass Order(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete = models.CASCADE)\n\n ordered = models.BooleanField(default=False)\n start_date = models.DateTimeField(auto_now_add=True)\n items = models.ManyToManyField(OrderItem)\n ordered_date = models.DateTimeField()\n voucher = models.ForeignKey('Voucher',on_delete=models.SET_NULL, blank=True, null=True)\n #ordered_date = models.DateTimeField(auto_now_add = True)\n\n def __str__(self):\n return self.user.username\n\n def get_order_total(self):\n total = 0 #assignment\n for order_item in self.items.all():\n total += order_item.get_total_item_price()\n if self.voucher:\n total -= self.voucher.amount\n return total\n\nclass Voucher(models.Model):\n code = models.CharField(max_length=15)\n amount = models.FloatField()\n \n def __str__(self):\n return self.code\n\n\n\nclass VoucherForm(forms.Form):\n code = forms.CharField(widget=forms.TextInput(\n attrs=\n { \n 'class': 'form-control',\n 'placeholder': 'Apply voucher code',\n 'aria-label': 'Recipient\\'s username',\n 
'aria-describedby': 'basic-addon2' \n }))","repo_name":"alvaryn/Ecommerce-Website","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38392074393","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('',views.moreInfo, name='class'),\n path('search',views.search, name='search'),\n path('enrol',views.enrol, name='enrol'),\n path('notenrol',views.notenrol, name='notenrol'),\n]","repo_name":"SainaDaneshmandjahromi/OnlineEducationWebApp","sub_path":"proje/classes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18769086543","text":"import os\nimport subprocess\nimport traceback\n\nimport dxpy\n\n@dxpy.entry_point(\"main\")\ndef main(hisat2_index_targz, mate1_fastq, mate2_fastq):\n\n # First, download all the input files to local storage\n inputs_dict = dxpy.download_all_inputs()\n\n # Second, unzip the inputs fastqs and update paths and names\n subprocess.check_call([\"gunzip\", inputs_dict[\"mate1_fastqgz_path\"]])\n inputs_dict[\"mate1_fastq_path\"] = inputs_dict[\"mate1_fastqgz_path\"].replace(\"*.gz\", \"\")\n inputs_dict[\"mate1_fastq_name\"] = inputs_dict[\"mate1_fastqgz_name\"].replace(\"*.gz\", \"\")\n\n subprocess.check_call([\"gunzip\", inputs_dict[\"mate2_fastqgz_path\"]])\n inputs_dict[\"mate2_fastq_path\"] = inputs_dict[\"mate2_fastqgz_path\"].replace(\"*.gz\", \"\")\n inputs_dict[\"mate2_fastq_name\"] = inputs_dict[\"mate2_fastqgz_name\"].replace(\"*.gz\", \"\")\n\n # Third, extract the index tarball\n subprocess.check_call([\"tar\", \"xf\", inputs_dict[\"hisat2_index_targz_path\"]])\n\n # Fourth, figure out what the basename of the hisat2 reference index is\n # This depends on the basename following the pattern used in the indexes\n # distributed by the authors of HISAT2, that is the index in grch37.tar.gz\n # will extract to grch37/genome*\n index_basename = os.path.join(inputs_dict[\"hisat2_index_targz_name\"][:-len(\".tar.gz\")],\n \"genome\")\n\n # Prepare the hisat2 command and run it.\n output_sam_name = inputs_dict[\"mate1_fastqgz_prefix\"] + \".sam\"\n output_bam_name = inputs_dict[\"mate1_fastqgz_prefix\"] + \".bam\"\n hisat2_cmd_template = (\"hisat2 --dta -x {index_basename} -1 {mate1_fastq} \"\n \"-2 {mate2_fastq} -S {hisat2_output_sam}\")\n hisat2_cmd = hisat2_cmd_template.format(\n index_basename=index_basename,\n mate1_fastq=inputs_dict[\"mate1_fastq_path\"],\n mate2_fastq=inputs_dict[\"mate2_fastq_path\"],\n hisat2_output_sam=output_sam_name)\n try:\n subprocess.check_call(hisat2_cmd, shell=True)\n except subprocess.CalledProcessError as exc:\n traceback.print_exc()\n raise dxpy.AppError(\n \"Error while running HISAT2: {e}. 
Consult the log for more information\".format(\n e=exc.message))\n \n # Convert the SAM to a BAM\n subprocess.check_call([\"samtools\", \"view\", \"-b\", output_sam_name, \"-o\", output_bam_name])\n\n # Upload the output SAM file.\n uploaded_dxfile = dxpy.upload_local_file(\"hisat2_output.sam\")\n\n # Return the ID of the uploaded SAM file associated with the \"aligned_sam\"\n # field in the outputSpec in dxapp.json.\n return {\"aligned_sam\": dxpy.dxlink(uploaded_dxfile.get_id())}\n","repo_name":"mckinsel/NEXUSWORLD-Developer-Workshop","sub_path":"HISAT2/intermediate/options_hisat2_applet_python/src/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70871306434","text":"import decimal\nimport os\nimport threading\nimport math\nimport numpy as np\nfrom collections import defaultdict\nfrom nltk.corpus import wordnet as wn\nfrom copy import deepcopy\nfrom mushroom.config import Precision\nfrom mushroom.training_nball import training_one_family_multi_relations\nfrom mushroom.util import qsr_P, load_train_dataset\nfrom mushroom.util import load_file_to_list, load_entity_vector, create_testing_ball, update_ball_margins\n\ndecimal.getcontext().prec = Precision\nDEBUG = False\n\nrecordEnhencedStem = False\nenhencedStems = []\n\nfn_synsets = getattr(wn, 'synsets')\nMaxStemLength = 10\n\n\ndef get_logfile_name(h, srel, crel):\n txt = ' '.join([h, srel, crel])\n return ''.join([str(ord(i)) if ord(i) < 97 else i for i in txt])\n\n\ndef create_ball_files(balldic, mushroomFile, outputPath=None):\n lines = []\n for ballname, values in balldic.items():\n lines.append(' '.join([ballname] + [str(ele) for ele in balldic[ballname]]))\n\n with open(os.path.join(outputPath, mushroomFile), 'w') as bfh:\n bfh.write('\\n'.join(lines) + \"\\n\")\n\n\ndef has_e2v(word, e2vec):\n if word in e2vec or word.split('.')[0] in e2vec:\n return True\n else:\n return False\n\n\ndef make_cat_members_from_multi_space_dic(trainingTree):\n catMembers = defaultdict(list)\n for root, space_mlst in trainingTree.items():\n for space, lst in space_mlst:\n spaceName ='-'.join([root, space])\n catMembers[spaceName] = lst\n catMembers[root].append(spaceName)\n return catMembers\n\n\ndef create_training_tree(stem, trueTails, e2vec, knownDataDict=dict(),\n mushroomStemRel=\"\", embedSpace = \"\", enrichLevel=-1, enlargeMR = -1):\n tree = defaultdict(list)\n tree0 = defaultdict(list)\n vstem = ['*root*'] + stem\n for p, h in zip(vstem[:-1], vstem[1:]):\n tree[p] = [(mushroomStemRel, [h])]\n tree0[p] = [h]\n tree[vstem[-1]] = [(embedSpace, trueTails)]\n tree0[vstem[-1]] = trueTails\n if enrichLevel > 0:\n cstem = deepcopy(stem[:-1])\n for hi in cstem[-enrichLevel:]:\n canlst = tree[hi][0][1]\n for ele in knownDataDict[mushroomStemRel][hi]:\n if ele not in canlst:\n canlst.append(ele)\n if enlargeMR > 0:\n eLst = [ele for ele in canlst[:enlargeMR]\n if ele in e2vec and ele not in cstem[:-enrichLevel] and ele != hi]\n else:\n eLst = [ele for ele in canlst if ele in e2vec\n and ele not in cstem[:-enrichLevel] and ele != hi]\n tree[hi] = [(mushroomStemRel, eLst)]\n tree0[hi] = eLst\n return tree, tree0\n\n\ndef create_PLC(stem, trainingTree, posDic=None, width = 30, maxPLC = 300):\n\n fpDic = dict()\n if posDic:\n stem0 =['*root*'] + stem\n posDic.update({'*root*':1})\n for i, stemEle in enumerate(stem0):\n for space, leaves in trainingTree[stemEle]:\n for leaf in leaves:\n fpDic[leaf] = [str(posDic.get(ele, 
1)) for ele in stem0[:i+1]]\n fpDic[leaf] += ['0'] * (width - len(fpDic[leaf]))\n assert len(fpDic[leaf]) == width, leaf\n else:\n fpDic[stem[0]] = ['1'] + ['0'] * (width - 1)\n for i, e in enumerate(stem):\n for space, leaves in trainingTree[e]:\n for leaf in leaves:\n fpDic[leaf] = ['1'] * (i+1+1) + ['0'] * (width - i -1 -1)\n return fpDic\n\n\ndef add_stem_hight_to_results(iResultFile='', iMushroomPath=''):\n newRlt = []\n if not os.path.isfile(iResultFile):\n print(iResultFile, ' not exist')\n return\n for ln in load_file_to_list(iResultFile):\n head, tail, rel, x, stemRel, y, mrV, v = ln.split()\n mushroomName = get_logfile_name(head, stemRel, rel)\n num = -1\n if not os.path.isfile(os.path.join(iMushroomPath, mushroomName)):\n print(os.path.join(iMushroomPath, mushroomName), ' not exist')\n continue\n for mLn in load_file_to_list(os.path.join(iMushroomPath, mushroomName)):\n if \"-tr_contain\" in mLn or \"-ntr_contain\" in mLn:\n num += 1\n newRlt.append(\" \".join([ln, \"SHeight=\", str(num)]))\n with open(iResultFile+\".STH\", 'w') as ofh:\n ofh.write('\\n'.join(newRlt)+'\\n')\n ofh.flush()\n\n\ndef multi_parents(h):\n lst = h.split('-')\n if len(lst) == 1:\n return False\n for ele in lst:\n if '.' not in ele:\n return False\n return True\n\n\ndef get_truth_value_dynamic(h, t, r, relWNDic, ofile = \"\"):\n def get_values(sl, head, fun):\n pk = getattr(wn, sl)\n entity = pk(head)\n # print(head, fun)\n return [x.name() for x in getattr(entity, fun)()]\n\n A, B, rel = h, t, r\n synlem, func, inv = relWNDic[rel]\n\n if inv == \"inv\":\n flag = True\n h, t = B, A\n else:\n flag = False\n h, t = A, B\n if synlem == \"lemma\":\n h += \".\" + h.split('.')[0]\n\n vlst = get_values(synlem, h, func)\n if inv == \"bi\":\n vlst += get_values(synlem, t, func)\n\n if t in vlst and inv!=\"bi\":\n value = \"True\"\n if flag:\n h, t = t, h\n elif inv == \"bi\" and h in vlst:\n value = \"True\"\n elif len(vlst) == 0:\n value = \"unknown\"\n else:\n value = \"False\"\n newline = ' '.join([h, t, rel, value])\n with open(ofile, 'a+') as ofh:\n ofh.write(\"\\n\"+newline)\n return value\n\n\ndef get_common_hypernyms(A, B, sl=\"synset\", fun=\"common_hypernyms\"):\n ws = getattr(wn, sl)\n entity = ws(A)\n # print(head, fun)\n return [x.name() for x in getattr(entity, fun)(ws(B))]\n\n\ndef get_hypernym_paths(A):\n ws = getattr(wn, \"synset\")\n entity = ws(A)\n llst = []\n for lst in getattr(entity, 'hypernym_paths')():\n llst.append([x.name() for x in lst])\n return llst\n\n\ndef enrich_stems_with_wordnet(A, cstem=[], fn_synsets=None):\n def wn_to_wn11_name(xname):\n if A.startswith('__'):\n nlst = xname.split('.')\n n0, num = nlst[0], nlst[2].lstrip('0')\n return \"__\"+n0+\"_\"+num\n\n def get_llst(B):\n llst0 = []\n if B.startswith('__'):\n word = '_'.join(B.split('_')[2:-1])\n else:\n return cstem\n\n for ws in fn_synsets(word):\n for lst in getattr(ws, 'hypernym_paths')():\n if wn_to_wn11_name(lst[-1].name()) == B:\n llst0.append([wn_to_wn11_name(x.name()) for x in lst])\n return llst0\n\n llst = get_llst(A)\n if len(llst) == 0 and len(cstem)>1:\n A = cstem[-2]\n llst1 = get_llst(A)\n llst = [lx+[cstem[-1]] for lx in llst1]\n\n mstem, mlen = [], 0\n for alst in llst:\n sset = set(cstem).intersection(alst)\n if len(sset) > mlen:\n mlen = len(sset)\n mstem = alst\n if mstem:\n return mstem\n else:\n return cstem\n\n\ndef get_multi_parents_stem(h):\n\n cstem = []\n wslst = h.split('-')\n i, N = 0, len(wslst)\n for i in range(N):\n j = i + 1\n if j < N:\n cstem0 = get_common_hypernyms(wslst[i], wslst[j], 
sl=\"synset\", fun=\"common_hypernyms\")\n if len(cstem) == 0:\n cstem = cstem0\n else:\n cstem = [ele for ele in cstem if ele in cstem0]\n llst = get_hypernym_paths(wslst[0])\n for lst in llst:\n if set(lst) > set(cstem):\n return [e for e in lst if e in cstem]\n return cstem\n\n\ndef compute_vec_of_multi_parents(h, e2vec=dict()):\n v = [e2vec[ele] for ele in h.split('-')]\n return np.sum(np.array(v), axis=0)/len(v)\n\n\ndef mushroom_triple_classification(testTriples=[], knownDataDict=dict(), mushroomStemRel='',\n maxTrueTails=-1, ballMarginLst=[], widthOfPLC=30, plcDic=dict(), e2vec=dict(),\n eflag=\"\", subspaceDim=20, addDim=[512] * 100, mPHypernymPathDic=dict(),\n enrichLevel=1, enlargeMR=-1, L0=decimal.Decimal(1e+100), R0=decimal.Decimal(1e-200),\n groundTruthDict=dict(), relNballWNFile=\"\", mushroomPath=\"\", logPath=\"\",\n mainLog=\"\", mushroomResultFile=\"\", mPInterSec=False, enhanceStem=False,\n semaphorMr=threading.Semaphore(), semaphorRlt=threading.Semaphore()):\n \"\"\"\n test triple (h, r, t) true or false\n trueTails are all true tails from the training+valid datasets, which form the cap of the mushroom\n trueAncestors of h are ancestors of h in training+valid datasets, which form the stem of the mushroom\n\n load entity2vec dictionary\n some components of the mushroom do not vectors, just remove them, and log them\n\n training nball of the mushroom, and save the transformation history into file, trans_history\n\n apply trans_history for construct the nball of t\n\n if nball of t is located inside of nball of h, return true, otherwise false\n\n :param testTriples:\n :param trainTripeFile:\n :param validTripleFile:\n :param mushroomStemRels:\n :param similarWordFile:\n :param flagOfUsingSimilarWord:\n :param widthOfPLC:\n :param entity2vecFile:\n :param ballMargin:\n :param addDim:\n :param logPath:\n :param resultPath:\n :return:\n \"\"\"\n global entity2vecFile, word2vecFile, fn_synsets, MaxStemLength, DEBUG, recordEnhencedStem, enhencedStems\n\n def get_true_tails(h, r, edic=dict()):\n return [e for e in knownDataDict[r][h] if e in edic or e.split('.')[0] in edic]\n\n def get_mushroom_stem(hx, rel='', enhanceStem=False, edic=dict()):\n global fn_synsets\n\n def get_parent_pos(h0, parent):\n if len(h0.split('.')) == 2:\n pos = h0.split('.')[1]\n parentPos = list(filter(lambda ele: ele.split('.')[1] == pos, parent))\n return parentPos\n else:\n return []\n\n lst = [hx]\n i_rel = rel + \"-1\"\n if i_rel in knownDataDict:\n while True:\n parent = knownDataDict[i_rel].get(hx, False)\n if parent:\n parentPos = get_parent_pos(h, parent)\n if parentPos:\n hx = parentPos[0]\n else:\n hx = parent[0]\n if hx not in lst:\n lst.append(hx)\n else:\n break\n else:\n break\n lst.reverse()\n lst = [e for e in lst if e in edic or e.split('.')[0] in edic]\n if enhanceStem and len(lst) < MaxStemLength:\n lst = enrich_stems_with_wordnet(h, cstem=lst, fn_synsets=fn_synsets)\n return [e for e in lst if e in edic or e.split('.')[0] in edic]\n\n def create_cat_path(stem, leaves, tree=None):\n cpDic = dict()\n if tree:\n nodeLst = ['*root*']\n cpDic['*root*'] = []\n while nodeLst:\n nd = nodeLst.pop()\n children = tree.get(nd, [])\n if children:\n nodeLst += children\n for child in children:\n cpDic[child] = cpDic[nd] + [nd]\n else:\n for leaf in leaves:\n cpDic[leaf] = stem\n for i in range(len(stem)):\n cpDic[stem[i]] = stem[:i]\n return cpDic\n\n def load_nball_wn_relation(relfile):\n funDic = dict()\n for relMapping in load_file_to_list(relfile):\n wlst = relMapping.split()\n if len(wlst) > 
2:\n nballrel, wnrel = wlst[:2]\n funDic[wlst[0]] = wnrel.split('.')\n return funDic\n\n def exist_mushroom(logFileName, outputPath=\"\"):\n return os.path.exists(os.path.join(outputPath, logFileName))\n\n def get_result_from_one_mushroom(h, ballMargin, mushroomStemRel, r):\n posixMgEvec = str(ballMargin) + eflag\n if enlargeMR > 0:\n posixMgEvec += \"_enlarge\" + str(enlargeMR)\n mushroomPath0 = mushroomPath + posixMgEvec\n semaphorMr.acquire()\n if not os.path.exists(mushroomPath0):\n os.makedirs(mushroomPath0)\n semaphorMr.release()\n\n logPath0 = logPath + posixMgEvec\n semaphorMr.acquire()\n if not os.path.exists(logPath0):\n os.makedirs(logPath0)\n semaphorMr.release()\n\n mushroomName = get_logfile_name(h, mushroomStemRel, r)\n logFile0 = os.path.join(logPath0, mushroomName)\n mushroomResultFile0 = mushroomResultFile + posixMgEvec\n\n posixEvecMargin1 = '1.0' + eflag\n logPathMargin1 = logPath + posixEvecMargin1\n mushroomPathMargin1 = mushroomPath + posixEvecMargin1\n mushroomNameMargin1 = os.path.join(logPathMargin1, mushroomName)\n if ballMargin != 1:\n ballDict = load_entity_vector(os.path.join(mushroomPathMargin1, mushroomName))\n ballDict = update_ball_margins(ballDict, ballMargin)\n elif not DEBUG and exist_mushroom(mushroomName, outputPath=mushroomPath0):\n ballDict = load_entity_vector(os.path.join(mushroomPath0, mushroomName))\n else:\n ballDict = training_one_family_multi_relations(treeStruc=trainingTree,\n root=trainingTree['*root*'][0][1][0],\n w2vDic=e2vec, catMemDic=catMemDic,\n catFPDic=catFPDic, ballDict=dict(),\n subspaceDim=subspaceDim,\n L0=L0, R0=R0, addDim=addDim, logFile=logFile0)\n\n lh = open(logFile0, 'a')\n lh.flush()\n lh.close()\n semaphorMr.acquire()\n create_ball_files(ballDict, mushroomName, outputPath=mushroomPath0)\n semaphorMr.release()\n\n tball = create_testing_ball(t, h, subspace=r, subspaceDim=subspaceDim, code=tPLC, w2vDic=e2vec,\n catPathDic=cpDic, catFPDic=catFPDic, addDim=addDim, L0=L0, R0=R0,\n logFile=mushroomNameMargin1)\n return ballDict['-'.join([h, r])], ballDict[h], tball, mushroomResultFile0\n\n mushroomResults = defaultdict(list)\n mainLog += eflag\n open(mainLog, 'a+').close()\n\n # relWNDic = load_nball_wn_relation(relNballWNFile)\n\n testLst = testTriples\n\n for ln in testLst:\n print('TC processing ', ln)\n h, t, r, v = ln.split()\n\n if not has_e2v(t, e2vec):\n # print('False:', ln)\n semaphorMr.acquire()\n with open(mainLog, 'a') as ofh:\n ofh.write(' '.join([t, 'is not in entity vec\\n']))\n semaphorMr.release()\n continue\n allTrueTails = get_true_tails(h, r, edic=e2vec)\n if t in allTrueTails:\n allTrueTails.remove(t)\n\n if len(allTrueTails) == 0:\n with open(mainLog, 'a') as ofh:\n semaphorMr.acquire()\n ofh.write(' '.join([h, r, 'has no tails in KG\\n']))\n semaphorMr.release()\n continue\n elif maxTrueTails > 0:\n allTrueTails.sort()\n trueTails = allTrueTails[:maxTrueTails]\n else:\n trueTails = allTrueTails\n\n if not multi_parents(h):\n stem = get_mushroom_stem(h, rel=mushroomStemRel, enhanceStem=enhanceStem, edic=e2vec)\n if recordEnhencedStem:\n for tail in trueTails:\n enhencedStems.append(' '.join(stem + [tail]))\n continue\n\n elif mPInterSec:\n stems = []\n for h0 in h.split('-'):\n # get from WN\n stem = mPHypernymPathDic.get(h0, False)\n if not stem:\n stem = [ele for ele in get_hypernym_paths(h0)[0] if ele in e2vec] # to improve\n mPHypernymPathDic[h0] = stem\n stems.append(stem)\n else:\n stem = get_multi_parents_stem(h)\n stem += [h]\n e2vec[h] = compute_vec_of_multi_parents(h, e2vec=e2vec)\n\n if not 
mPInterSec:\n stemHeight = len(stem) - 1\n if len(stem) == 0 or t in stem or len(stem + trueTails) != len(set(stem + trueTails)):\n # print('False:', ln)\n with open(mainLog, 'a') as ofh:\n semaphorMr.acquire()\n ofh.write(' '.join(stem + trueTails + ['has no tails in KG\\n']))\n semaphorMr.release()\n continue\n h = stem[-1]\n\n trainingTree, trainingTree0 = create_training_tree(stem, trueTails, e2vec, knownDataDict=knownDataDict,\n mushroomStemRel=mushroomStemRel, embedSpace=r,\n enrichLevel=enrichLevel, enlargeMR=enlargeMR)\n\n catMemDic = make_cat_members_from_multi_space_dic(trainingTree)\n catFPDic = create_PLC(stem, trainingTree, posDic=plcDic, width=widthOfPLC)\n\n tPLC = catFPDic[trueTails[0]]\n cpDic = create_cat_path(stem, trueTails, tree=trainingTree0)\n # print(ln, cpDic)\n if not cpDic:\n # print('False:', ln)\n semaphorMr.acquire()\n with open(mainLog, 'a') as ofh:\n ofh.write(' '.join([h] + ['has no stems in KG\\n']))\n semaphorMr.release()\n continue\n if DEBUG: ballMarginLst = ballMarginLst[:1]\n for ballMargin in ballMarginLst:\n hballsub, hball, tball, mushroomResultF0 = get_result_from_one_mushroom(h, ballMargin, mushroomStemRel,\n r)\n mrValueSubspace = qsr_P(tball, hballsub)\n mrValue = qsr_P(tball, hball)\n if mrValueSubspace: # qsr_P(tball, ballDict['-'.join([h, r])]):\n if v in [\"True\", \"1\"]:\n recordLine = ' '.join([h, t, r, \"stem=\", mushroomStemRel, str(len(stem)), \"mrT\", \"1\"])\n elif v in [\"False\", \"-1\"]:\n recordLine = ' '.join([h, t, r, \"stem=\", mushroomStemRel, str(len(stem)), \"mrT\", \"-1\"])\n else:\n recordLine = ' '.join([h, t, r, \"stem=\", mushroomStemRel, str(len(stem)), \"mrT\", \"0\"])\n elif mrValue: # qsr_P(tball, ballDict[h]) and not multi_parents(h):\n if v in [\"True\", \"1\"]:\n recordLine = ' '.join([h, t, r, \"stem=\", mushroomStemRel, str(len(stem)), \"mrTx\", \"1\"])\n elif v in [\"False\", \"-1\"]:\n recordLine = ' '.join([h, t, r, \"stem=\", mushroomStemRel, str(len(stem)), \"mrTx\", \"-1\"])\n else:\n recordLine = ' '.join([h, t, r, \"stem=\", mushroomStemRel, str(len(stem)), \"mrTx\", \"0\"])\n else:\n if v in [\"True\", \"1\"]:\n recordLine = ' '.join([h, t, r, \"stem=\", mushroomStemRel, str(len(stem)), \"mrF\", \"1\"])\n elif v in [\"False\", \"-1\"]:\n recordLine = ' '.join([h, t, r, \"stem=\", mushroomStemRel, str(len(stem)), \"mrF\", \"-1\"])\n else:\n recordLine = ' '.join([h, t, r, \"stem=\", mushroomStemRel, str(len(stem)), \"mrF\", \"0\"])\n if ballMargin == 1.0:\n print(recordLine)\n mushroomResults[mushroomResultF0].append(\" \".join([recordLine, \"SHeight=\", str(stemHeight)]))\n\n\n semaphorRlt.acquire()\n for rfile, vlst in mushroomResults.items():\n with open(rfile, 'w') as ofh:\n ofh.write('\\n'.join(vlst) + '\\n')\n semaphorRlt.release()\n\ndef pos_match(ele, lst):\n pos1 = ele.split('.')[1]\n if pos1 in [ele.split('.')[1] for ele in lst]:\n return True\n else:\n return False\n\n\ndef mushroom_triple_classification_with_different_pars(threadNum = 3,\n testTripleFile=\"\" , trainTripeFile=\"\", validTripleFile=\"\",\n mushroomStemRel=\"\", maxTrueTails=-1, enhanceStem=False,\n enrichLevel=-1, enlargeMR = -1, multiParentDicFile ='',\n widthOfPLC=0, plcFile=None, e2vecFile=\"\", ballMarginLst=[],\n subspaceDim = 20, addDim=[], mPInterSec = False,\n L0=0, R0=0, mushroomGroundTruth=\"\", relNballWNFile=\"\",\n mushroomPath=\"\", logPath=\"\", mainLog = \"\",\n mushroomResultFile=\"\"):\n\n class myThread(threading.Thread):\n\n def __init__(self, i, semaphorMr, semaphorRlt, e2vecFile):\n 
threading.Thread.__init__(self)\n self.semMr = semaphorMr\n self.semRlt = semaphorRlt\n self.startIndex = i * unit\n self.endIndex = min((i+1)*unit, len(allTestTriples))\n\n def run(self):\n mushroom_triple_classification(testTriples=allTestTriples[self.startIndex:self.endIndex],\n knownDataDict=knownDataDict, mushroomStemRel=mushroomStemRel,\n maxTrueTails=maxTrueTails, enrichLevel=enrichLevel, enlargeMR=enlargeMR,\n widthOfPLC=widthOfPLC, plcDic=plcDic, e2vec=e2vec, eflag = eflag,\n ballMarginLst=ballMarginLst,\n subspaceDim=subspaceDim, enhanceStem=enhanceStem,\n mPHypernymPathDic = mPHypernymPathDic,\n addDim=addDim, L0=L0, R0=R0, groundTruthDict=groundTruthDict,\n relNballWNFile=relNballWNFile, mPInterSec = mPInterSec,\n mushroomPath=mushroomPath, logPath=logPath, mainLog=mainLog,\n mushroomResultFile=mushroomResultFile,\n semaphorMr=self.semMr, semaphorRlt=self.semRlt)\n\n allTestTriples = load_file_to_list(testTripleFile)\n unit = math.ceil(len(allTestTriples) / threadNum)\n semaphorMr = threading.Semaphore()\n semaphorRlt = threading.Semaphore()\n\n print('e2vecFile', e2vecFile)\n e2vec = load_entity_vector(e2vecFile)\n eflag = get_symbol_dic_for_vec_file(e2vecFile)\n knownDataDict = load_train_dataset([trainTripeFile, validTripleFile])\n\n mPHypernymPathDic = defaultdict()\n if os.path.isfile(multiParentDicFile):\n for lst in load_file_to_list(multiParentDicFile):\n elst = lst.split()\n mPHypernymPathDic[elst[0]] = elst[1:]\n\n groundTruthDict = dict()\n for lst in load_file_to_list(mushroomGroundTruth):\n elst = lst.split()\n groundTruthDict[' '.join(elst[:-1])] = elst[-1]\n\n plcDic = dict()\n if plcFile:\n for lst in load_file_to_list(plcFile):\n pos = lst.split()\n plcDic[pos[0]] = int(pos[1]) + 1\n\n for i in range(threadNum):\n thread = myThread(i, semaphorMr, semaphorRlt, e2vecFile)\n thread.start()\n\n\ndef get_symbol_dic_for_vec_file(vfile):\n if \"TEKEE\" in vfile:\n return \"TE\"\n if \"TEKEH\" in vfile:\n return \"TH\"\n if \"TransE\" in vfile:\n return \"TransE\"\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"gnodisnait/mushroom","sub_path":"mushroom/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":24029,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"37129678911","text":"import time\nimport socket\n\n\ndef socket_udp_server():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n server_address = ('0.0.0.0', 1812)\n print('Run UDP server on {}:{}'.format(*server_address))\n sock.bind(server_address)\n\n while True:\n start = time.time()\n data, address = sock.recvfrom(4096)\n print(data)\n print(address)\n # message = process(data, address)\n message = b'OK'\n sock.sendto(message, address)\n print('socket take => ', time.time() - start)\n\n\nif __name__ == \"__main__\":\n socket_udp_server()\n","repo_name":"wongxinjie/python-snippets","sub_path":"udpserver/rawudp.py","file_name":"rawudp.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74204428673","text":"from functools import reduce\nfrom os import path\n\nimport os\nimport shelve\nimport pprint\nimport re\n\n\ndef main():\n # testa_busca_diretorio_atual()\n # muda_diretorio_de_trabalho()\n # cria_diretorio()\n # busca_caminho_absoluto()\n # verifica_se_e_caminho_absoluto()\n # lista_arquivos_diretorios_e_soma_tamanho_total()\n # verifica_se_arquivo_ou_diretorio_existe()\n # le_linhas_arquivo()\n # le_arquivo()\n # 
escreva_em_um_arquivo_existente_ou_cria()\n    # testa_modulo_shelve()\n    # escreve_em_um_arquivo()\n    busca_palavras_em_arquivos_py()\n\n\ndef busca_palavras_em_arquivos_py():\n    palavra_a_busca = input(\"digite uma palavra: \")\n    for nome_arquivo in os.listdir(os.getcwd()):\n        if re.compile(\"\\w\\.py\").search(nome_arquivo) is not None:\n            with open(os.path.join(os.getcwd(), nome_arquivo)) as arquivo:\n                busca = re.compile(palavra_a_busca).findall(arquivo.read())\n                if len(busca):\n                    print(busca)\n\n\ndef escreve_em_um_arquivo():\n    gatos = [{'nome': 'nina', 'idade': 6}]\n    dado_formatado = pprint.pformat(gatos)\n    with open(\"gatos.py\", 'w') as arquivo:\n        arquivo.write(f'gatos = {dado_formatado} \\n')\n    from gatos import gatos\n    print(gatos)\n\n\ndef testa_modulo_shelve():\n    with shelve.open(\"chaves\") as arquivo:\n        arquivo['teste'] = 'teste'\n        arquivo['gatos'] = ['gato1', 'gato2']\n    with shelve.open('chaves') as arquivo:\n        for chave, valor in arquivo.items():\n            print(chave, valor)\n\n\ndef muda_diretorio():\n    diretorio = path.join(\"C:\\\\\", \"Users\", \"Flavio Garcia\", \"Downloads\")\n    nome_arquivo = \"teste.txt\"\n    os.chdir(diretorio)\n    return diretorio, nome_arquivo\n\n\ndef escreva_em_um_arquivo_existente_ou_cria():\n    _, nome_arquivo = muda_diretorio()\n    with open(nome_arquivo, \"w\") as arquivo:\n        arquivo.write(\"TESTE 4\\n\")\n        arquivo.write(\"TESTE 5\\n\")\n        arquivo.write(\"TESTE 6\\n\")\n\n\ndef le_linhas_arquivo():\n    diretorio, nome_arquivo = muda_diretorio()\n    if path.exists(path.join(diretorio, nome_arquivo)):\n        with open(nome_arquivo) as arquivo:\n            for indice, linha in enumerate(arquivo.readlines()):\n                print(f\"{indice} - {linha}\")\n\n\ndef le_arquivo():\n    diretorio, nome_arquivo = muda_diretorio()\n    if path.exists(path.join(diretorio, nome_arquivo)):\n        with open(nome_arquivo) as arquivo:\n            conteudo = arquivo.read()\n            print(conteudo)\n\n\ndef verifica_se_arquivo_ou_diretorio_existe():\n    print(path.exists(\"C:\\\\Windows\\\\System32\"))\n    print(path.exists(\"C:\\\\System32\"))\n    print(path.isdir(\"C:\\\\Windows\\\\System32\"))\n    print(path.isdir(\"C:\\\\Windows\\\\System32\\\\calc.exe\"))\n\n\ndef lista_arquivos_diretorios_e_soma_tamanho_total():\n    diretorio_windows = \"C:\\\\Windows\\\\System32\"\n    arquivos_windows = os.listdir(diretorio_windows)\n    for arquivo in arquivos_windows:\n        print(f\"{arquivo} {path.getsize(path.join(diretorio_windows, arquivo))}\")\n    tamanho_arquivos = map(lambda x: path.getsize(path.join(diretorio_windows, x)), arquivos_windows)\n    tamanho_total = reduce(lambda total, valor: total + valor, tamanho_arquivos, 0)\n    print(f\"{tamanho_total / 1_000_000_000:.2f} GB\")\n\n\ndef testa_nome_diretorios():\n    print(path.relpath(\"E:\\\\Cme\\\\Cme\"))\n    print(path.relpath(\"E:\\\\Cme\", \"E:\\\\\"))\n    print(path.dirname(\"E:\\\\Cme\\\\Cme\"))\n    print(path.basename(\"E:\\\\Cme\\\\Cme\"))\n    caminho_calculadora = \"C:\\\\Windows\\\\System32\\\\calc.exe\"\n    print(path.basename(caminho_calculadora)) # returns the file name\n    print(path.dirname(caminho_calculadora)) # returns the directory\n    print(path.split(caminho_calculadora)) # returns (directory, file name)\n    print(tuple(caminho_calculadora.split(path.sep)))\n\n\ndef verifica_se_e_caminho_absoluto():\n    print(path.isabs(\"./\"))\n    print(path.isabs(\"c:\\\\\"))\n\n\ndef busca_caminho_absoluto():\n    print(path.abspath(\"../\"))\n\n\ndef cria_diretorio():\n    diretorio = \"c:\\\\TestePython\\\\Teste1\\\\Teste2\"\n    if not path.exists(diretorio):\n        os.makedirs(diretorio)\n\n\ndef muda_diretorio_de_trabalho():\n    os.chdir(path.join(\"c:\\\\\"))\n    
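# os.getcwd() below confirms the directory change took effect\n    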
print(os.getcwd())\n\n\ndef testa_busca_diretorio_atual():\n    print(os.getcwd())\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"flaviogf/courses","sub_path":"geral/livro_automatize_tarefas_com_python/capitulo_8_arquivos.py","file_name":"capitulo_8_arquivos.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"27853133153","text":"import os\nimport yaml\nimport traceback\nimport argparse\nimport logging\nimport pickle\nimport torch\nimport numpy as np\nfrom utils import data_directory, experiment_directory, output_directory, seeds, pp\nfrom train import ModelController\n\ndef get_args():\n    parser = argparse.ArgumentParser(description=\"cpi prediction cli args\")\n    parser.add_argument(\"config_yaml_file\",\n                        type=str,\n                        choices=[\"no_conv.yml\",\n                                 \"node_average.yml\",\n                                 \"node_edge_average.yml\"],\n                        help=\"yaml configuration file for current experiment\")\n    args = parser.parse_args()\n    return args\n# end\n\ndef main():\n    args = get_args()\n\n    # Load experiment specified in system args\n    config_yaml_file = args.config_yaml_file\n    print(\"Running Experiment File: {}\".format(config_yaml_file))\n    config_filename = config_yaml_file.split(\".\")[0] if \".\" in config_yaml_file else config_yaml_file\n    exp_specs = yaml.safe_load(open(os.path.join(experiment_directory, config_yaml_file), \"r\").read())\n\n    # setup output directory\n    outdir = os.path.join(output_directory, config_filename)\n    if not os.path.exists(output_directory):\n        os.mkdir(output_directory)\n    if not os.path.exists(outdir):\n        os.mkdir(outdir)\n\n    logging.basicConfig(filename=os.path.join(outdir, \"train_experiments.log\"),\n                        filemode=\"w\",\n                        format=\"%(name)s - %(levelname)s - %(message)s\",\n                        level=logging.DEBUG)\n\n    # write experiment specifications to file\n    with open(os.path.join(outdir, \"experiment.yml\"), \"w\") as f:\n        f.write(\"{}\\n\".format(yaml.dump(exp_specs)))\n\n    # perform each experiment\n    prev_train_data_file = \"\"\n    prev_val_data_file = \"\"\n    prev_test_data_file = \"\"\n    first_experiment = True\n\n    # run through all experiments in yaml file\n    for experiment_name, experiment_config in exp_specs[\"experiments\"]:\n        train_data_file = os.path.join(data_directory, experiment_config[\"train_data_file\"])\n        val_data_file = os.path.join(data_directory, experiment_config[\"val_data_file\"])\n        test_data_file = os.path.join(data_directory, experiment_config[\"test_data_file\"])\n\n        try:\n            # Reuse train data if possible without having to reload\n            if train_data_file != prev_train_data_file:\n                print(\"Loading train data\")\n                train_list, train_data = pickle.load(open(train_data_file, \"rb\"), encoding=\"latin-1\")\n                prev_train_data_file = train_data_file\n\n            if val_data_file != prev_val_data_file:\n                print(\"Loading val data\")\n                val_list, val_data = pickle.load(open(val_data_file, \"rb\"), encoding=\"latin-1\")\n                prev_val_data_file = val_data_file\n\n            if test_data_file != prev_test_data_file:\n                print(\"Loading test data\")\n                test_list, test_data = pickle.load(open(test_data_file, \"rb\"), encoding=\"latin-1\")\n                prev_test_data_file = test_data_file\n\n            # create data dictionary\n            data = {\"train\": train_data, \"val\": val_data, \"test\": test_data}\n            # perform experiment for each random seed\n            for replica_number, seed_pair in enumerate(seeds):\n\n                print(\"running experiment: {} replica num: {}\".format(experiment_name, replica_number))\n\n                if not os.path.exists(os.path.join(outdir, 
\"chkpts_{}_{}\".format(experiment_name, replica_number))):\n os.mkdir(os.path.join(outdir, \"chkpts_{}_{}\".format(experiment_name, replica_number)))\n if not os.path.exists(os.path.join(outdir, \"persist_{}_{}\".format(experiment_name, replica_number))):\n os.mkdir(os.path.join(outdir, \"persist_{}_{}\".format(experiment_name, replica_number)))\n\n # set torch and numpy seeds\n if torch.cuda.is_available():\n experiment_config[\"cuda\"]=True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.cuda.manual_seed_all(seed_pair[\"torch_seed\"])\n else:\n experiment_config[\"cuda\"]=False\n torch.manual_seed_all(seed_pair[\"torch_seed\"])\n np.random.seed(int(seed_pair[\"np_seed\"]))\n\n trainer = ModelController(replica_number=replica_number,\n experiment_name=experiment_name,\n experiment_config=experiment_config,\n data=data[\"train\"],\n outdir=outdir)\n trainer.fit_model(exp_specs=exp_specs, data=data)\n results = trainer.inference(exp_specs=exp_specs,\n data=data[\"test\"])\n #pp.pprint(results)\n\n except Exception as er:\n if er is KeyboardInterrupt:\n raise er\n ex_str = traceback.format_exc()\n logging.error(ex_str)\n logging.error(\"Experiment failed: {}\".format(exp_specs))\n\nif __name__==\"__main__\":\n print(\"torch version: {}\".format(torch.__version__))\n print(\"cuda version: {}\".format(torch.version.cuda))\n main()\n","repo_name":"mguarin0/test-transfer","sub_path":"fout_protein_interface_prediction_using_graph_convolutional_networks/train_experiments.py","file_name":"train_experiments.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5467262437","text":"# Trueとは何か\n\n'''\nFalseとみなされるもの\nブール値\tFalse\nnull\tNone\n整数のゼロ\t0\nfloatのゼル\t0.0\n空文字\t''\n空リスト\t[]\n空タプル\t()\n空辞書\t{}\n空集合\tset()\n\n上記以外はすべてTrueとみなされる\n'''\n\nsome_list = []\nif some_list:\n\tprint(\"There's something in here\")\nelse:\n\tprint(\"Hey, it's empty!\")\n","repo_name":"0gravity000/IntroducingPython","sub_path":"04/040301.py","file_name":"040301.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2541669548","text":"import torch\nfrom torch import nn, optim\n\nfrom utils.models_defined import MNIST_LogisticRegression, MLP_Net, CNN_Net, CNN_Text, ResNet18_torch, CNNCifar_TF\n\n\nmnist_args = {\n\t# setting parameters\n\t'dataset': 'mnist',\n\t'sample_size_cap': 6000,\n\t'n_participants': 5,\n\t'split': 'powerlaw', #or 'classimbalance'\n\n\t'batch_size' : 32, \n\t'train_val_split_ratio': 0.9,\n\t'alpha': 0.95,\n\t'Gamma': 0.5,\n\n\t# model parameters\n\t'model_fn': CNN_Net, #MLP_Net, MNIST_LogisticRegression\n\t'optimizer_fn': optim.SGD,\n\t'loss_fn': nn.NLLLoss(), \n\t'lr': 0.15,\n\t'lr_decay':0.977, #0.977**100 ~= 0.1\n\n\t# fairness/training parameters\n\t'rounds': 60,\n\t'E': 1,\n}\n\n\n\nsst_args = {\n\t# setting parameters\n\t'dataset': 'sst',\n\t'n_participants': 5,\n\t'split': 'powerlaw', #or 'powerlaw' classimbalance\n\t'batch_size' : 256, \n\n\t'train_val_split_ratio': 0.9,\n\t'alpha': 0.95,\n\t'Gamma': 1,\n\t'lambda': 1, # coefficient between sign_cossim and modu_cossim\n\n\t# model parameters\n\t'model_fn': CNN_Text,\n\t'embed_num': 20000,\n\t'embed_dim': 300,\n\t'class_num': 5,\n\t'kernel_num': 128,\n\t'kernel_sizes': [3,3,3],\n\t'static':False,\n\n\t'optimizer_fn': optim.Adam,\n\t'loss_fn': nn.NLLLoss(), \n\t'lr': 1e-4,\n\t'lr_decay':0.977, 
#0.977**100 ~= 0.1\n\n\t# training parameters\n\t'rounds': 100,\n\t'E': 2,\n}\n\n\nmr_args = {\n\t# setting parameters\n\t'dataset': 'mr',\n\t'n_participants': 5,\n\t'split': 'powerlaw', #or 'powerlaw' classimbalance\n\n\t'batch_size' : 128, \n\t'train_val_split_ratio': 0.9,\n\t'alpha': 0.95,\n\t'lambda': 0.5, # coefficient between sign_cossim and modu_cossim\n\t'Gamma':1,\n\n\t# model parameters\n\t'model_fn': CNN_Text,\n\t'embed_num': 20000,\n\t'embed_dim': 300,\n\t'class_num': 2,\n\t'kernel_num': 128,\n\t'kernel_sizes': [3,3,3],\n\t'static':False,\n\n\t'optimizer_fn': optim.Adam,\n\t'loss_fn': nn.NLLLoss(), \n\t'lr': 5e-5,\n\t'lr_decay':0.977, #0.977**100 ~= 0.1\n\n\t# training parameters\n\t'rounds': 100,\n\t'E': 2,\n}\n\n\ncifar_cnn_args = {\n\t# setting parameters\n\t'dataset': 'cifar10',\n\t'sample_size_cap': 20000,\n\t'n_participants': 10,\n\t'split': 'powerlaw', #or 'classimbalance'\n\n\t'batch_size' : 128, \n\t'train_val_split_ratio': 0.8,\n\t'alpha': 0.95,\n\t'Gamma': 0.15,\n\t'lambda': 0.5, # coefficient between sign_cossim and modu_cossim\n\n\t# model parameters\n\t'model_fn': CNNCifar_TF, #ResNet18_torch, CNNCifar_TF\n\t'optimizer_fn': optim.SGD,\n\t'loss_fn': nn.NLLLoss(),# nn.CrossEntropyLoss(), \n\t'lr': 0.015,\n\t'lr_decay':0.977, #0.977**100 ~= 0.1\n\n\t# training parameters\n\t'rounds': 200,\n\t'E': 1,\n}\n","repo_name":"XinyiYS/Robust-and-Fair-Federated-Learning","sub_path":"utils/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"61"} +{"seq_id":"7975515338","text":"#!/usr/bin/python\n# egyptian opentype generator data\n\npres = [\n # Lookup - extensions outer dbl extensions begin outer\n {'name' : 'extensionsouterB', 'bases':'SKIP','marks' : 'extensioncontrols',\n 'contexts' : [\n {'left':[],'right':['esb']},\n {'left':[],'right':['ewb']}],\n 'details' : [{'sub':['esb'],'target':['eob']}]},\n # Lookup - extensions dbl extensions begin inner\n {'name' : 'extensionsinnerB', 'bases':'SKIP','marks' : 'extensioncontrols',\n 'contexts' : [{'left':['eob'],'right':[]}],\n 'details' : [\n {'sub':['esb'],'target':['edb']},\n {'sub':['ewb'],'target':['efb']}\n ]},\n # Lookup - extensions dbl extensions end inner\n {'name' : 'extensionsdblE', 'bases':'SKIP','marks' : 'extensioncontrols',\n 'contexts' : [\n {'left':['edb'],'right':[]},\n {'left':['efb'],'right':[]}],\n 'details' : [\n {'sub':['ese'],'target':['ede']},\n {'sub':['ewe'],'target':['efe']}\n ]},\n # Lookup - extensions dbl extensions end outer\n {'name' : 'extensionsouterE', 'bases':'SKIP','marks' : 'extensioncontrols',\n 'contexts' : [\n {'left':['ede'],'right':[]},\n {'left':['efe'],'right':[]}\n ],\n 'details' : [{'sub':['ese'],'target':['eoe']}]},\n # Lookup - convert embedded joiners to level 2 joiners\n {'name' : 'mdcLevel2_joiners', 'marks' : 'controls_a',\n 'contexts' : [\n {'left':['ss','ss'],'right':[]},\n {'left':['ss','controls_joiners','ss'],'right':[]},\n {'left':['ss','controls_joiners','controls_joiners','ss'],'right':[]},\n {'left':['ss','controls_joiners','controls_joiners','controls_joiners','ss'],'right':[]}],\n 'details' : [\n {'sub':['hj'],'target':['hj2A']},\n {'sub':['vj'],'target':['vj2A']}]},\n # Lookup - convert embedded tcb to level 2\n {'name' : 'Level2_tc', 'marks' : '*tcb12',\n 'contexts' : [\n {'left':['ss','ss'],'right':['se','se']},\n {'left':['ss','corners'],'right':['se']},\n {'left':['ss','om'],'right':['se']},\n {'left':['ss','ti'],'right':['se']},\n 
{'left':['ss','mi'],'right':['se']},\n {'left':['ss','bi'],'right':['se']},\n ],\n 'details' : [\n {'sub':['tcab'],'target':['tcab2']},\n {'sub':['tcbb'],'target':['tcbb2']},\n {'sub':['tcpb'],'target':['tcpb2']},\n {'sub':['tcrb'],'target':['tcrb2']},\n {'sub':['tcub'],'target':['tcub2']},\n {'sub':['tclb'],'target':['tclb2']}]},\n # Lookup - convert embedded tce to level 2\n {'name' : 'Level2_tce', 'marks' : '*tce12',\n 'contexts' : [{'left':['ss','ss'],'right':['se','se']}],\n 'details' : [\n {'sub':['tcae'],'target':['tcae2']},\n {'sub':['tcbe'],'target':['tcbe2']},\n {'sub':['tcpe'],'target':['tcpe2']},\n {'sub':['tcre'],'target':['tcre2']},\n {'sub':['tcue'],'target':['tcue2']},\n {'sub':['tcle'],'target':['tcle2']}]},\n # Lookup - convert corner embedded tcb to level 1\n {'name' : 'CornerLevel1_tcb', 'marks' : '',\n 'contexts' : [{'left':['ss'],'right':[]}],\n 'details' : [\n {'sub':['tcab'],'target':['tcab1']},\n {'sub':['tcbb'],'target':['tcbb1']},\n {'sub':['tcpb'],'target':['tcpb1']},\n {'sub':['tcrb'],'target':['tcrb1']},\n {'sub':['tcub'],'target':['tcub1']},\n {'sub':['tclb'],'target':['tclb1']}]},\n # Lookup - convert corner embedded tcbe to level 1\n {'name' : 'CornerLevel1_tce', 'marks' : '',\n 'contexts' : [{'left':[],'right':['se']}],\n 'details' : [\n {'sub':['tcae'],'target':['tcae1']},\n {'sub':['tcbe'],'target':['tcbe1']},\n {'sub':['tcpe'],'target':['tcpe1']},\n {'sub':['tcre'],'target':['tcre1']},\n {'sub':['tcue'],'target':['tcue1']},\n {'sub':['tcle'],'target':['tcle1']}]},\n # Lookup - merge level 2 embedding controls\n {'name' : 'mdcBE_merge2', 'marks' : 'controls_a',\n 'contexts' : [{'left':[],'right':[]}],\n 'details' : [\n {'sub':['ss','se'],'target':['cleanup']}]},\n # Lookup - convert embedded joiners to level 1 joiners\n {'name' : 'mdcLevel1_joiners', 'marks' : 'controls_a',\n 'contexts' : [\n {'left':['ss'],'right':[]},\n {'left':['ss','controls_joiners'],'right':[]},\n {'left':['ss','controls_joiners','controls_joiners'],'right':[]}],\n 'details' : [\n {'sub':['hj'],'target':['hj1A']},\n {'sub':['vj'],'target':['vj1A']}]},\n # Lookup - convert corner insertions to level 1\n {'name' : 'mdcLevel1_corners', 'marks' : 'controls_a',\n 'contexts' : [{'left':['ss'],'right':[]}],\n 'details' : [\n {'sub':['ts'],'target':['its1A']},\n {'sub':['bs'],'target':['ibs1A']},\n {'sub':['te'],'target':['ite1A']},\n {'sub':['be'],'target':['ibe1A']},\n {'sub':['om'],'target':['om1A']},\n {'sub':['ti'],'target':['ti1A']},\n {'sub':['mi'],'target':['mi1A']},\n {'sub':['bi'],'target':['bi1A']}]},\n # Lookup - convert embedded tcb to level 1\n {'name' : 'Level1_tcb', 'marks' : '*tcb12',\n 'contexts' : [{'left':['ss'],'right':['se']}],\n 'details' : [\n {'sub':['tcab'],'target':['tcab1']},\n {'sub':['tcbb'],'target':['tcbb1']},\n {'sub':['tcpb'],'target':['tcpb1']},\n {'sub':['tcrb'],'target':['tcrb1']},\n {'sub':['tcub'],'target':['tcub1']},\n {'sub':['tclb'],'target':['tclb1']}]},\n # Lookup - convert embedded tcbe to level 1\n {'name' : 'Level1_tce', 'marks' : '*tce12',\n 'contexts' : [{'left':['ss'],'right':['se']}],\n 'details' : [\n {'sub':['tcae'],'target':['tcae1']},\n {'sub':['tcbe'],'target':['tcbe1']},\n {'sub':['tcpe'],'target':['tcpe1']},\n {'sub':['tcre'],'target':['tcre1']},\n {'sub':['tcue'],'target':['tcue1']},\n {'sub':['tcle'],'target':['tcle1']}]},\n # Lookup - merge level 1 embedding controls\n {'name' : 'mdcBE_merge1', 'marks' : 'controls_a',\n 'contexts' : [{'left':[],'right':[]}],\n 'details' : [{'sub':['ss','se'],'target':['cleanup']}]},\n # Lookup - 
convert remaining joiners and tcbs to level 0 joiners\n {'name' : 'mdcLevel0', 'marks' : '',\n 'contexts' : [{'left':[],'right':[]}],\n 'details' : [\n {'sub':['hj'],'target':['hj0A']},\n {'sub':['vj'],'target':['vj0A']},\n {'sub':['ts'],'target':['its0A']},\n {'sub':['bs'],'target':['ibs0A']},\n {'sub':['te'],'target':['ite0A']},\n {'sub':['be'],'target':['ibe0A']},\n {'sub':['om'],'target':['om0A']},\n {'sub':['ti'],'target':['ti0A']},\n {'sub':['mi'],'target':['mi0A']},\n {'sub':['bi'],'target':['bi0A']}]},\n # Lookup - corner0 tcbb1\n {'name' : 'corner0_tcbb1', 'marks' : '',\n 'contexts' : [{'left':['corners0a'],'right':[]}],\n 'details' : [\n {'sub':['tcab'],'target':['tcab1']},\n {'sub':['tcbb'],'target':['tcbb1']},\n {'sub':['tcpb'],'target':['tcpb1']},\n {'sub':['tcrb'],'target':['tcrb1']},\n {'sub':['tcub'],'target':['tcub1']},\n {'sub':['tclb'],'target':['tclb1']}]},\n # Lookup - corner1 tcbb2\n {'name' : 'corner1_tcbb2', 'marks' : '',\n 'contexts' : [{'left':['corners1a'],'right':[]}],\n 'details' : [\n {'sub':['tcab'],'target':['tcab2']},\n {'sub':['tcbb'],'target':['tcbb2']},\n {'sub':['tcpb'],'target':['tcpb2']},\n {'sub':['tcrb'],'target':['tcrb2']},\n {'sub':['tcub'],'target':['tcub2']},\n {'sub':['tclb'],'target':['tclb2']}]},\n # # Lookup - corner tcbe1\n # This and the next lookup promote the closing tc to the next level\n # in order to balance with an opening tc at the same level.\n # syntacticaly this is impure so the lookup is disabled\n # {'name' : 'corner_tcbe1', 'marks' : '*tcb01',\n # 'contexts' : [{'left':['tcb1s'],'right':[]}],\n # 'details' : [\n # {'sub':['tcae'],'target':['tcae1']},\n # {'sub':['tcbe'],'target':['tcbe1']},\n # {'sub':['tcpe'],'target':['tcpe1']},\n # {'sub':['tcre'],'target':['tcre1']},\n # {'sub':['tcue'],'target':['tcue1']},\n # {'sub':['tcle'],'target':['tcle1']}]},\n # # Lookup - corner tcbe2\n # {'name' : 'corner_tcbe2', 'marks' : '*tcb02',\n # 'contexts' : [{'left':['tcb2s'],'right':[]}],\n # 'details' : [\n # {'sub':['tcae'],'target':['tcae2']},\n # {'sub':['tcbe'],'target':['tcbe2']},\n # {'sub':['tcpe'],'target':['tcpe2']},\n # {'sub':['tcre'],'target':['tcre2']},\n # {'sub':['tcue'],'target':['tcue2']},\n # {'sub':['tcle'],'target':['tcle2']}]},\n # Lookup - tcb0\n {'name' : 'tcb0', 'marks' : '',\n 'contexts' : [{'left':[],'right':[]}],\n 'details' : [\n {'sub':['tcab'],'target':['tcab0']},\n {'sub':['tcbb'],'target':['tcbb0']},\n {'sub':['tcpb'],'target':['tcpb0']},\n {'sub':['tcrb'],'target':['tcrb0']},\n {'sub':['tcub'],'target':['tcub0']},\n {'sub':['tclb'],'target':['tclb0']},\n {'sub':['tcae'],'target':['tcae0']},\n {'sub':['tcbe'],'target':['tcbe0']},\n {'sub':['tcpe'],'target':['tcpe0']},\n {'sub':['tcre'],'target':['tcre0']},\n {'sub':['tcue'],'target':['tcue0']},\n {'sub':['tcle'],'target':['tcle0']}]},\n # DYNAMIC Lookup - populated with tsg values from group data\n {'name' : 'tsg', 'marks' : '',\n 'contexts' : [{'left':[],'right':[]}],\n 'details' : []},\n # Lookup - expand bases with insertions TODO: auto expand based on glyph name trigger\n {'name' : 'expansion', 'marks' : '',\n 'contexts' : [{'left':[],'right':['tsh665655544544332211','D32','Qf','bi0A']}],\n 'details' : [{'sub':['et56'],'target':['et66']}]},\n # Lookup - accommodate base modifiers\n {'name' : 'Qf_insert', 'marks' : '',\n 'contexts' : [{'left':['Qf'],'right':[]}],\n 'details' : [{'sub':['modifiers'],'target':['modifiers','Qf']}]},\n # Lookup - accommodate base modifiers\n {'name' : 'Qf_cleanup', 'marks' : '',\n 'contexts' : [{'left':[],'right':[]}],\n 
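# remove the original Qf now that a copy follows the modifier\n    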
'details' : [{'sub':['Qf','modifiers'],'target':['modifiers']}]},\n # Lookup - inserts quadat initial before all ETs\n {'name' : 'Qi_insert', 'marks' : '',\n 'contexts' : [],\n 'exceptcontexts' : [\n {'left':['tcb_all'],'right':[]}],\n 'details' : [\n {'sub':['et_all'],'target':['Qi','et_all']},\n {'sub':['tcb_all'],'target':['Qi','tcb_all']},\n ]},\n # Lookup - clean up cleanup glyphs\n {'name' : 'mdcB_cleanup', 'marks' : '',\n 'contexts' : [{'left':[],'right':[]}],\n 'details' : [\n {'sub':['cleanup','cleanup','Qi'],'target':['Qi']},\n {'sub':['cleanup','Qi'],'target':['Qi']},\n ]},\n # DYNAMIC Lookup - insert GB1 after tbb\n {'name' : 'tcb_gb1', 'marks' : '',\n 'exceptcontexts' : [\n {'left':[],'right':['et_all']},\n ],\n 'contexts' : [{'left':['Qi'],'right':[]}],\n 'details' : []},\n # DYNAMIC Lookup - insert GB1 after incomplete controls\n {'name' : 'gb1', 'marks' : '',\n 'exceptcontexts' : [\n {'left':[],'right':['Qi']},\n {'left':[],'right':['ss']}\n ],\n 'contexts' : [\n {'left':['Qf'],'right':[]},\n {'left':['Qf','controls_b'],'right':[]},\n {'left':['Qf','controls_b','ss'],'right':[]}\n ],\n 'details' : []},\n # Lookup - clean up Qf glyphs\n {'name' : 'Qf_cleanup', 'marks' : '',\n 'contexts' : [\n {'left':[],'right':['Qi']},\n {'left':[],'right':['ss','Qi']},\n {'left':[],'right':['ss','ss','Qi']},\n ],\n 'details' : [{'sub':['Qf','controls_b'],'target':['controls_b']}]},\n # Lookup - clean up Qi glyphs\n {'name' : 'Qi_cleanup', 'marks' : '',\n 'contexts' : [{'left':[],'right':[]}],\n 'details' : [{'sub':['controls_b','Qi'],'target':['controls_b']}]},\n # Lookup - clean up Qi glyphs\n {'name' : 'Qi_cleanup_tcb', 'marks' : '',\n 'contexts' : [{'left':['controls_b'],'right':[]}],\n 'details' : [{'sub':['tcb_all','Qi'],'target':['tcb_all']}]},\n # Lookup - clean up embedded Qi glyphs\n {'name' : 'Qi_dblss', 'marks' : '',\n 'contexts' : [{'left':['controls_b'],'right':['ss','Qi']}],\n 'details' : [{'sub':['ss'],'target':['su']}]},\n # Lookup - clean up embedded Qi glyphs\n {'name' : 'Qi_cleanup_su', 'marks' : '',\n 'contexts' : [\n {'left':['controls_b'],'right':[]},\n {'left':['su'],'right':[]}\n ],\n 'details' : [{'sub':['ss','Qi'],'target':['su']}]},\n # # Lookup - Insert level 1 row begin marker for dbl unbalanced begin\n # {'name' : 'Qi_dblss_begin', 'marks' : '',\n # 'contexts' : [{'left':['ss','ss'],'right':[]}],\n # 'details' : [{'sub':['Qi'],'target':['Qi','ub','r1bA']}]},\n # # Lookup - Double to single unbalanced begin\n # {'name' : 'Qi_dblss_cleanup', 'marks' : '',\n # 'contexts' : [{'left':['ss'],'right':['ub']}],\n # 'details' : [{'sub':['ss','Qi'],'target':['Qi']}]},\n # # Lookup - Insert level 1 row begin marker for dbl unbalanced begin\n # {'name' : 'Qi_ss_begin', 'marks' : '',\n # 'contexts' : [{'left':['ss'],'right':[]}],\n # 'details' : [{'sub':['Qi'],'target':['Qi','ub','r0bA']}]},\n # # Lookup - Double to single unbalanced begin\n # {'name' : 'Qi_ss_cleanup', 'marks' : '',\n # 'contexts' : [{'left':[],'right':['ub']}],\n # 'details' : [{'sub':['ss','Qi'],'target':['Qi']}]},\n # Lookup - Insert level 0 row begin marker\n {'name' : 'Qi_rbegin', 'marks' : '',\n 'exceptcontexts' : [{'left':[],'right':['ub','r0bA']}],\n 'details' : [{'sub':['Qi'],'target':['Qi','r0bA']}]},\n # Lookup - clean up embedded Qi glyphs\n {'name' : 'Qi_cleanup3', 'marks' : '',\n 'contexts' : [{'left':['ss'],'right':[]}],\n 'details' : [{'sub':['Qi'],'target':['Qi','ub']}]},\n # Lookup - clean up embedded Qi glyphs\n {'name' : 'Qi_cleanup4', 'marks' : '',\n 'contexts' : 
[{'left':[],'right':[]}],\n 'details' : [{'sub':['ss','Qi'],'target':['Qi']}]},\n # Lookup - Insert level 0 row end marker\n {'name' : 'Qf_rend', 'marks' : '',\n 'contexts' : [{'left':[],'right':[]}],\n 'details' : [{'sub':['Qf'],'target':['r0eA','Qf']}]},\n # Lookup - level 2 row begin and end\n {'name' : 'r2', 'marks' : '',\n 'contexts' : [{'left':[],'right':['et_all']},\n {'left':[],'right':['tcb_all','et_all']},\n ],\n 'details' : [\n {'sub':['vj2A'],'target':['r2eA','vj2A','r2bA']},\n {'sub':['hj2A'],'target':['r2eA','hj2A','r2bA']},\n {'sub':['corners1a'],'target':['corners1a','r2bA']},\n ]},\n # Lookup - level 2 row end before corner after 2 begin\n {'name' : 'r2e_corner', 'marks' : 'parens',\n 'contexts' : [{'left':['r2bA'],'right':[]}],\n 'details' : [{'sub':['corners1a'],'target':['r2eA','corners1a']},]},\n # Lookup - level 1 row begin and end\n # moved above r2b_corner to fix over inclusion of level 2, e.g., F20 bs ss Z11 hj ss X1 vj D21 se vj N35 se\n {'name' : 'r1', 'marks' : '',\n 'contexts' : [{'left':[],'right':['et_all']},\n {'left':[],'right':['tcb_all','et_all']},\n ],\n 'details' : [\n {'sub':['vj1A'],'target':['r1eA','vj1A','r1bA']},\n {'sub':['hj1A'],'target':['r1eA','hj1A','r1bA']},\n {'sub':['corners0a'],'target':['corners0a','r1bA']},\n ]},\n # Lookup - level 1 row begin\n {'name' : 'r1_su', 'marks' : '',\n 'contexts' : [\n {'left':['corners0a'],'right':['et_all']},\n {'left':['corners0a'],'right':['su','et_all']}\n ],\n 'details' : [\n {'sub':['su'],'target':['su','r1bA']},\n ]},\n # Lookup - level 2 row begin\n {'name' : 'r2_su', 'marks' : '',\n 'contexts' : [\n {'left':['corners1a'],'right':['et_all']},\n {'left':['corners1a','su','r1bA'],'right':['et_all']}\n ],\n 'details' : [\n {'sub':['su'],'target':['su','r2bA']},\n ]},\n # Lookup - level 2 row begin after corner before 2 end\n {'name' : 'r2b_corner', 'marks' : '*parensub',\n 'contexts' : [{'left':[],'right':['r2eA']}],\n 'details' : [{'sub':['corners0a'],'target':['corners0a','r1bA','r2bA']},]},\n # Lookup - level 2 row end before corner 1 between two r2 begins\n {'name' : 'r2_om2', 'marks' : 'parens',\n 'contexts' : [{'left':['r2bA'],'right':['r2bA']}],\n 'details' : [{'sub':['corners1a'],'target':['r2eA','corners1a']},]},\n # Lookup - level 2 row end before om 1 after r1 begins\n {'name' : 'r2_om1', 'marks' : 'parens',\n 'contexts' : [{'left':['r2bA'],'right':['r1bA']}],\n 'details' : [{'sub':['om0A'],'target':['r2eA','r1eA','om0A']},]},\n # Lookup - level 1 row end before corner 1 between two r2 begins\n {'name' : 'r1_om1', 'marks' : 'parens',\n 'contexts' : [{'left':['r1bA'],'right':['r1bA']}],\n 'details' : [{'sub':['corners0a'],'target':['r1eA','corners0a']},]},\n # Lookup - level 1 row begin after corner before 1 end\n {'name' : 'corner_swapandsize', 'marks' : '',\n 'exceptcontexts' : [{'left':['Qf'],'right':[]}],\n 'details' : [\n {'sub':['its0A'],'target':['its0B']},\n {'sub':['ibs0A'],'target':['ibs0B']},\n {'sub':['ite0A'],'target':['ite0B']},\n {'sub':['ibe0A'],'target':['ibe0B']},\n {'sub':['om0A'], 'target':['om0B' ]},\n {'sub':['ti0A'], 'target':['ti0B' ]},\n {'sub':['mi0A'], 'target':['mi0B' ]},\n {'sub':['bi0A'], 'target':['bi0B' ]},\n {'sub':['its1A'],'target':['its1B']},\n {'sub':['ibs1A'],'target':['ibs1B']},\n {'sub':['ite1A'],'target':['ite1B']},\n {'sub':['ibe1A'],'target':['ibe1B']},\n {'sub':['om1A'], 'target':['om1B' ]},\n {'sub':['ti1A'], 'target':['ti1B' ]},\n {'sub':['mi1A'], 'target':['mi1B' ]},\n {'sub':['bi1A'], 'target':['bi1B' ]},\n ]},\n # Lookup - unbalanced embedding \n 
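# (a joiner immediately before an unbalanced segment start 'su' closes the\n    # current row and opens rows at the current and next embedding levels)\n    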
{'name' : 'unbal-embedding', 'marks' : '',\n 'contexts' : [{'left':[],'right':['su']}],\n 'details' : [\n {'sub':['vj0A'],'target':['r0eA','vj0A','r0bA','r1bA']},\n {'sub':['hj0A'],'target':['r0eA','hj0A','r0bA','r1bA']},\n {'sub':['vj1A'],'target':['r1eA','vj1A','r1bA','r2bA']},\n {'sub':['hj1A'],'target':['r1eA','hj1A','r1bA','r2bA']}\n ]},\n # Lookup - insert level 0 row boundaries around joiners\n {'name' : 'r0', 'marks' : '',\n 'contexts' : [\n {'left':[],'right':['et_all']},\n {'left':[],'right':['tcb_all','et_all']},\n ],\n 'details' : [\n {'sub':['vj0A'],'target':['r0eA','vj0A','r0bA']},\n {'sub':['hj0A'],'target':['r0eA','hj0A','r0bA']},\n ]},\n # Lookup - insert level 1 row begin after unbalanced corner\n {'name' : 'unbal-corner', 'marks' : '',\n 'contexts' : [\n {'left':['corners0b'],'right':['r1bA']},\n {'left':['corners1b'],'right':['r2bA']}],\n 'details' : [{'sub':['su'],'target':['ub']}]},\n # # Lookup - insert level 1 row begin after unbalanced corner\n # {'name' : 'unbal-corner1', 'marks' : '',\n # 'contexts' : [{'left':['corners0b'],'right':[]}],\n # 'details' : [{'sub':['su'],'target':['ub','r1bA']}]},\n # # Lookup - insert level 1 row begin after unbalanced corner\n # {'name' : 'unbal-corner2', 'marks' : '',\n # 'contexts' : [{'left':['corners1b'],'right':[]}],\n # 'details' : [{'sub':['su'],'target':['ub','r2bA']}]},\n # Lookup - insert level 1 row begin after unbalanced corner\n {'name' : 'unbal-dblcorner', 'marks' : '',\n 'contexts' : [{'left':['ub','r1bA'],'right':[]}],\n 'details' : [{'sub':['su'],'target':['ub','r2bA']}]},\n # Lookup - insert level 1 row begin before level 2 begin\n {'name' : 'r2begin1', 'marks' : 'parens',\n 'contexts' : [{'left':[],'right':['r2bA']}],\n 'details' : [{'sub':['r0bA'],'target':['r0bA','r1bA']}]},\n # Lookup - insert up-level row ends after level 2 begin\n {'name' : 'r2begin2', 'marks' : '*parenstce1', #needs to block tcbe1\n 'contexts' : [{'left':['r2bA'],'right':[]}],\n 'details' : [\n {'sub':['r0eA'],'target':['r2eA','r1eA','r0eA']},\n {'sub':['r1eA'],'target':['r2eA','r1eA']},\n ]},\n # Lookup - insert up-level row end after level 1 begin\n {'name' : 'r2begin3', 'marks' : '*parenstce1',\n 'contexts' : [{'left':['r2bA'],'right':['r0eA']}],\n 'details' : [{'sub':['tce1s'],'target':['r2eA','tce1s','r1eA']}]},\n # Lookup - insert up-level row end after level 2 begin\n {'name' : 'r2begin_tce0', 'marks' : '*parenstce0',\n 'contexts' : [{'left':['r2bA'],'right':['r0eA']}],\n 'details' : [{'sub':['tce0s'],'target':['r2eA','r1eA','tce0s']}]},\n # Lookup - insert up-level row end after level 2 begin\n {'name' : 'r2begin_tce1', 'marks' : '*parenstce1',\n 'contexts' : [{'left':['r2bA'],'right':['r0eA']}],\n 'details' : [{'sub':['tce1s'],'target':['r2eA','tce1s']}]},\n # Lookup - insert up-level row begins before level 2 end \n {'name' : 'r2end', 'marks' : 'parens',\n 'contexts' : [{'left':[],'right':['r2eA']}],\n 'details' : [\n {'sub':['r0bA'],'target':['r0bA','r1bA','r2bA']},\n {'sub':['r1bA'],'target':['r1bA','r2bA']}\n ]},\n # Lookup - insert up-level row end after level 1 begin\n {'name' : 'r1begin', 'marks' : '*parenstce0', #needs to block tcbe0\n 'contexts' : [{'left':['r1bA'],'right':[]}],\n 'details' : [{'sub':['r0eA'],'target':['r1eA','r0eA']}]},\n # Lookup - insert up-level row end after level 1 begin\n {'name' : 'r1begin_tce0', 'marks' : '*parenstce0',\n 'contexts' : [{'left':['r1bA'],'right':['r0eA']}],\n 'details' : [{'sub':['tce0s'],'target':['r1eA','tce0s']}]},\n # Lookup - insert up-level row begin before level 1 end \n 
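# (a level-0 tcb sitting between a level-0 row begin and a level-1 row or\n    # column end gets a level-1 row begin inserted after it)\n    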
{'name' : 'r1end_tcb0', 'marks' : '*parenstcb0',\n 'contexts' : [\n {'left':['r0bA'],'right':['r1eA']},\n {'left':['r0bA'],'right':['c1eA']}\n ],\n 'details' : [{'sub':['tcb0s'],'target':['tcb0s','r1bA']}]},\n # Lookup - insert up-level row begin before level 1 end \n {'name' : 'r1end', 'marks' : 'parens',\n 'contexts' : [\n {'left':[],'right':['r1eA']},\n {'left':[],'right':['c1eA']}\n ],\n 'details' : [{'sub':['r0bA'],'target':['r0bA','r1bA']}]},\n # Lookup - insert level 1 row end before level 0 col end \n {'name' : 'r1end', 'marks' : 'parens',\n 'contexts' : [{'left':['r1bA'],'right':[]}],\n 'details' : [{'sub':['c0eA'],'target':['r1eA','c0eA']}]},\n # Lookup - insert level 1 row begin after level 0 col begin \n {'name' : 'r1end', 'marks' : 'parens',\n 'contexts' : [\n {'left':[],'right':['r1eA']},\n {'left':[],'right':['r2bA']}\n ],\n 'details' : [{'sub':['c0bA'],'target':['c0bA','r1bA']}]},\n # Lookup - insert column begin and end \n {'name' : 'c012', 'marks' : '',\n 'contexts' : [{'left':[],'right':[]}],\n 'details' : [\n {'sub':['r0bA'],'target':['r0bA','c0bA']},\n {'sub':['r1bA'],'target':['r1bA','c1bA']},\n {'sub':['r2bA'],'target':['r2bA','c2bA']},\n {'sub':['r0eA'],'target':['c0eA','r0eA']},\n {'sub':['r1eA'],'target':['c1eA','r1eA']},\n {'sub':['r2eA'],'target':['c2eA','r2eA']},\n ]},\n # Lookup - row 0 begin cleanup \n {'name' : 'r0b_cleanup', 'marks' : '',\n 'contexts' : [{'left':['hj0A'],'right':[]}],\n 'details' : [{'sub':['r0bA','c0bA'],'target':['c0bA']}]},\n # Lookup - row 0 end cleanup \n {'name' : 'r0e_cleanup', 'marks' : '',\n 'contexts' : [{'left':[],'right':['hj0A']}],\n 'details' : [{'sub':['c0eA','r0eA'],'target':['c0eA']}]},\n # Lookup - row 1 begin cleanup \n {'name' : 'r1b_cleanup', 'marks' : '',\n 'contexts' : [{'left':['hj1A'],'right':[]}],\n 'details' : [{'sub':['r1bA','c1bA'],'target':['c1bA']}]},\n # Lookup - row 1 end cleanup \n {'name' : 'r1e_cleanup', 'marks' : '',\n 'contexts' : [{'left':[],'right':['hj1A']}],\n 'details' : [{'sub':['c1eA','r1eA'],'target':['c1eA']}]},\n # Lookup - row 2 begin cleanup \n {'name' : 'r2b_cleanup', 'marks' : '',\n 'contexts' : [{'left':['hj2A'],'right':[]}],\n 'details' : [{'sub':['r2bA','c2bA'],'target':['c2bA']}]},\n # Lookup - row 2 end cleanup \n {'name' : 'r2e_cleanup', 'marks' : '',\n 'contexts' : [{'left':[],'right':['hj2A']}],\n 'details' : [{'sub':['c2eA','r2eA'],'target':['c2eA']}]},\n # Lookup - insert level 2 min default size\n {'name' : 'default_size_2', 'marks' : '',\n 'contexts' : [\n {'left':[],'right':['r2bA']},\n {'left':[],'right':['ub','r2bA']}\n ],\n 'details' : [{'sub':['c1bA'],'target':['c1bA','mt22']}]},\n # Lookup - insert level 1 min default size \n {'name' : 'default_size_level1', 'marks' : '',\n 'contexts' : [\n {'left':[],'right':['r1bA']},\n ],\n 'details' : [{'sub':['c0bA'],'target':['c0bA','mt43']}]},\n # Lookup - insert level 1 min default size tcbb \n {'name' : 'default_size_level1_tcb', 'marks' : '',\n 'contexts' : [\n {'left':['c0bA'],'right':['r1bA']},\n ],\n 'details' : [{'sub':['tcb0s'],'target':['tcb0s','mt43']}]},\n # Lookup - move unbalanced ss (i.e., su) to mark mt43,mt22\n {'name' : 'insertunbalancedtoken', 'marks' : 'rowmaxes',\n 'contexts' : [{'left':[],'right':['su']}],\n 'details' : [\n {'sub':['mt43'],'target':['mt43','ub']},\n {'sub':['mt22'],'target':['mt22','ub']}\n ]},\n # Lookup - clean up unbalanced su\n {'name' : 'cleanupunbalanced', 'marks' : '',\n 'details' : [\n {'sub':['c1bA','su'],'target':['c1bA']},\n {'sub':['c2bA','su'],'target':['c2bA']}\n ]},\n # Lookup - 
revert unbalanced tbbe\n {'name' : 'visibleunbalancedtcbe', 'marks' : '',\n 'exceptcontexts' : [\n {'left':[],'right':['c0eA']}],\n 'details' : [\n {'sub':['tcae0'],'target':['tcae']},\n {'sub':['tcbe0'],'target':['tcbe']},\n {'sub':['tcpe0'],'target':['tcpe']},\n {'sub':['tcre0'],'target':['tcre']},\n {'sub':['tcue0'],'target':['tcue']},\n {'sub':['tcle0'],'target':['tcle']},\n ]},\n # Lookup - set min default size to reducible default size\n {'name' : 'default_size_level1', 'marks' : 'rowmaxes',\n 'exceptcontexts' : [{'left':[],'right':['mt22']}],\n 'details' : [{'sub':['mt43'],'target':['et66']}]},\n # Lookup - isolated dbl segment 1\n # {'name' : 'isolated_dblss1', 'marks' : '',\n # 'contexts' : [{'left':[],'right':['ss']}],\n # 'details' : [{'sub':['ss'],'target':['Qi','ub','r0bA','c0bA']}]},\n # Lookup - isolated dbl segment 2\n # {'name' : 'isolated_dblss2', 'marks' : '',\n # 'contexts' : [{'left':['Qi','ub','r0bA','c0bA'],'right':[]}],\n # 'details' : [{'sub':['ss'],'target':['ub','r1bA','c1bA','et66','tsh666564636261565554535251464544434241363534333231262524232221161514131211','GB1','c1eA','r1eA','c0eA','r0eA','Qf']}]},\n # Lookup - isolated segment start\n # {'name' : 'isolated_ss', 'marks' : '',\n # 'details' : [{'sub':['ss'],'target':['Qi','ub','r0bA','c0bA','et66','tsh666564636261565554535251464544434241363534333231262524232221161514131211','GB1','c0eA','r0eA','Qf']}]},\n]","repo_name":"microsoft/font-tools","sub_path":"EgyptianOpenType/pres.py","file_name":"pres.py","file_ext":"py","file_size_in_byte":26641,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"61"} +{"seq_id":"35576149984","text":"import os\n\nimport cv2\nimport numpy as np\n\ncv2.setNumThreads(0)\ncv2.ocl.setUseOpenCL(False)\n\ndef load_video(filepath: str, gray_out=False, dtype=np.uint8, max_frame_count=None) -> np.ndarray:\n \"\"\"\n RGB 24bits video only?\n if gray_out:\n return np.ndarray [C(gray)=1, frame_length, H, W] uint8\n else:\n return np.ndarray [C(RGB)=3, frame_length, H, W] uint8\n \"\"\"\n if not os.path.exists(filepath):\n raise FileNotFoundError(filepath)\n capture = cv2.VideoCapture(filepath)\n\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n if max_frame_count is not None:\n assert max_frame_count > 0\n frame_count = min(frame_count, max_frame_count)\n frame_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n frame_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n C = 1 if gray_out else 3\n video_array = np.zeros((frame_count, frame_height, frame_width, C), dtype=dtype) # [frame_length, H, W, C]\n\n for count in range(frame_count):\n ret, frame = capture.read()\n if not ret:\n capture.release()\n raise ValueError(f'Failed to load frame #{count} of {filepath}.')\n if gray_out:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)[..., None]\n else:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n video_array[count] = frame\n capture.release()\n video_array = video_array.transpose((3, 0, 1, 2))\n\n return video_array\n\n\ndef save_video(video_array: np.ndarray, filepath: str, fps):\n \"\"\"\n RGB 24bits video only?\n video_array: np.ndarray [C(gray, RGB)=1 or 3, frame_length, H, W] uint8\n filepath ends with:\n .avi: lossy\n .mp4: lossy\n .mkv: lossless\n \"\"\"\n video_array = video_array.transpose((1, 2, 3, 0))\n \n gray_in = video_array.shape[-1] == 1\n container_format = filepath.split('.')[-1]\n \n if container_format == 'avi':\n fourcc = cv2.VideoWriter_fourcc(*'XVID') # lossy\n elif container_format == 'mp4':\n fourcc = 
cv2.VideoWriter_fourcc(*'mp4v')  # lossy\n    elif container_format == 'mkv':\n        fourcc = cv2.VideoWriter_fourcc(*'FFV1')  # lossless\n    else:\n        raise ValueError(f'Unsupported file format: {container_format}')\n    \n    out = cv2.VideoWriter(filepath, fourcc, fps, (video_array.shape[2], video_array.shape[1]))\n    for frame_array in video_array:  # [frame_length, H, W, C]\n        if gray_in:\n            frame_array = cv2.cvtColor(frame_array, cv2.COLOR_GRAY2BGR)\n        else:\n            frame_array = cv2.cvtColor(frame_array, cv2.COLOR_RGB2BGR)\n        out.write(frame_array)\n    out.release()\n\n\ndef _convert_cv2_image_array(image_array, gray_out) -> np.ndarray:\n    raw_gray = len(image_array.shape) == 2\n    if gray_out:\n        if raw_gray:\n            image_array = image_array[..., None]\n        else:\n            image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2GRAY)[..., None]\n    else:\n        if raw_gray:\n            image_array = cv2.cvtColor(image_array, cv2.COLOR_GRAY2RGB)\n        else:\n            image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)\n    return image_array\n\n\ndef load_image(filepath: str, gray_out=False) -> np.ndarray:\n    \"\"\"\n    if gray_out or raw_gray:\n        return np.ndarray [C(gray)=1, H, W] uint8, 16, ...\n    else:\n        return np.ndarray [C(RGB)=3, H, W] uint8, 16, ...\n    \"\"\"\n    if not os.path.exists(filepath):\n        raise FileNotFoundError(filepath)\n    image_array = _convert_cv2_image_array(cv2.imread(filepath, cv2.IMREAD_UNCHANGED), gray_out)\n    image_array = image_array.transpose((2, 0, 1))\n\n    return image_array\n\n\ndef load_tif_images(filepath, gray_out=False) -> np.ndarray:\n    \"\"\"\n    if gray_out:\n        return np.ndarray [C(gray)=1, frame_length, H, W] uint8\n    else:\n        return np.ndarray [C(RGB)=3, frame_length, H, W] uint8\n    \"\"\"\n    assert filepath.endswith('tif') or filepath.endswith('tiff')\n    _, array_tuple = cv2.imreadmulti(filepath, flags=cv2.IMREAD_UNCHANGED)\n    array_list = []\n    for image_array in array_tuple:\n        array_list.append(_convert_cv2_image_array(image_array, gray_out))\n    images_array = np.stack(array_list).transpose((3, 0, 1, 2))\n\n    return images_array\n","repo_name":"HenryZhou19/Pytorch-Template","sub_path":"src/datasets/modules/media_rw.py","file_name":"media_rw.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"2338975420","text":"import os\nfrom pathlib import Path\nimport json\nimport pandas as pd\nimport numpy as np\nimport nibabel as nb\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt\nfrom src.spike import group_peaks, read_smr\n\nhome = str(Path.home())\np = Path(home + \"/projects/critchley_depersonalisation/data\")\nparticipants = pd.read_csv(p / \"participants.tsv\", sep=\"\\t\")\npass_qa = participants.query(\"task_heartbeat_physio > 0 \").participant_id.tolist()\n\n# correct for 50 msec flag in spike file\n# The correction value was inspected through the raw spike file;\n# it's not quite the same as 50\ncorrection = 44\n# correct for the average error per TR,\n# calculated from files with healthy trigger recording\ntr_error = -2e-05\n\nn = 0\nfor subject in pass_qa:\n    n += 1\n    print(f\"{n} / {len(pass_qa)}, {subject}\")\n    # save the file with binarised heart beat and stimulus onset tag\n    path = (\n        p\n        / \"derivatives\"\n        / \"physio_spike2\"\n        / f\"{subject}_task-heartbeat_run-1_physio.smr\"\n    )\n    bn = path.name.split(\"physio.smr\")[0]\n    vol_path = p / subject / \"func\" / f\"{subject}_task-heartbeat_run-1_bold.json\"\n\n    with open(vol_path) as f:\n        data = json.load(f)\n    tr = 
data[\"RepetitionTime\"]\n\n try:\n n_vol = data[\"dcmmeta_shape\"][-1]\n except KeyError:\n vol_path = p / subject / \"func\" / f\"{subject}_task-heartbeat_run-1_bold.nii.gz\"\n n_vol = nb.load(str(vol_path)).shape[-1]\n\n # load the four channels we need\n segment = read_smr(str(path))\n cardiac_event = segment.analogsignals[0]\n stim = segment.analogsignals[1]\n cardiac = segment.analogsignals[2]\n trigger = segment.events[0]\n spike_fs = np.round(cardiac.sampling_rate, 2)\n\n # create a data frame to store the physio data\n df = pd.DataFrame(\n cardiac.squeeze(), index=np.array(cardiac.times), columns=[\"cardiac\"]\n )\n\n # binarise the cardiac event and stim channels\n for name, ch in zip([\"cardiac_event\", \"stim\"], [cardiac_event, stim]):\n # peak detection on the spike created detection channel resample to match the cardiac signal\n f = interpolate.interp1d(\n ch.times, ch.squeeze(), \"nearest\", fill_value=\"extrapolate\"\n )\n signal = f(cardiac.times)\n df[name] = group_peaks(signal.squeeze())\n\n time = df.index.tolist()\n time_trigger = np.array(trigger.times.simplified)\n\n # estimate starting of the first volume by the 6th vol trigger log\n vol6_time = df[df.stim == 1].index[0]\n vol6_time -= correction / 1000 # correct for the square wave\n est_start = vol6_time - tr * 5\n\n df[\"trigger\"] = 0\n spike_n_trigger = len(time_trigger)\n\n if (spike_n_trigger - 1) != n_vol:\n print(f\"bad trigger(spike/nifti): {spike_n_trigger}/{n_vol}\")\n # estmate scan end time\n est_end = est_start + tr * n_vol + n_vol * tr_error\n\n # find the proximate time on the cardiac recording channel\n idx_trigger_first = np.where(time >= est_start)[0][0]\n idx_trigger_last = np.where(time <= est_end)[0][-1]\n start = time[idx_trigger_first]\n end = time[idx_trigger_last]\n\n # estimate all triggers\n time_trigger = np.linspace(start, end, n_vol + 1)\n\n # find the closest time on the cardiac recording channel\n for t in time_trigger:\n idx = np.where((time - t) <= 0)[0][-1]\n df.loc[time[idx], \"trigger\"] = 1\n # save physio\n bn = path.name.split(\"physio.smr\")[0]\n out_path = p / subject / \"func\" / f\"{bn}physio.tsv\"\n df.to_csv(out_path, sep=\"\\t\", index=False)\n os.system(f\"gzip {out_path}\")\n\n # json\n json_paths = str(p / subject / \"func\" / f\"{bn}physio.json\")\n vol1_time = df[df.trigger == 1].index[0]\n new_start_time = df.index[0] - vol1_time\n\n # edit the start time in json; align to the first volume\n with open(json_paths) as json_file:\n data = json.load(json_file)\n data[\"StartTime\"] = str(new_start_time)\n data[\"SamplingFrequency\"] = str(spike_fs)[:-3]\n\n # save json\n with open(json_paths, \"w\") as outfile:\n json.dump(data, outfile, indent=4)\n","repo_name":"htwangtw/depersonalisation","sub_path":"data/code/batch_physio_BIDS.py","file_name":"batch_physio_BIDS.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74651585154","text":"# Given an array of integers\n# find the pair of adjacent elements that has the largest product\n# return that product.\n\n# ex: inputArray = [3, 6, -2, -5, 7, 3]\n# output: solution(inputArray) = 21\n\ndef solution(inputArray):\n\n stack = []\n ans = []\n\n if len(inputArray) == 2:\n return inputArray[0] * inputArray[1]\n\n for num in inputArray:\n if not stack:\n stack.append(num)\n continue\n\n ans.append(num * stack[-1])\n stack.pop(-1)\n stack.append(num)\n\n return max(ans)\n\n\nprint(solution([5, 1, 2, 3, 1, 
4]))","repo_name":"Billyhrussell/needcode","sub_path":"bad_at_cdsg/adjacent_elems.py","file_name":"adjacent_elems.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5856693740","text":"import os\nimport sys\nimport traceback\nimport unittest\n\nfrom telemetry import story\nfrom telemetry import page as page_module\nfrom telemetry import value\nfrom telemetry.value import failure\n\n\nclass TestBase(unittest.TestCase):\n def setUp(self):\n self.story_set = story.StorySet(base_dir=os.path.dirname(__file__))\n self.story_set.AddStory(page_module.Page(\n 'http://www.bar.com/', self.story_set, self.story_set.base_dir))\n\n @property\n def pages(self):\n return self.story_set.stories\n\nclass ValueTest(TestBase):\n def testName(self):\n v0 = failure.FailureValue.FromMessage(self.pages[0], 'Failure')\n self.assertEquals('Exception', v0.name)\n try:\n raise NotImplementedError()\n except Exception:\n v1 = failure.FailureValue(self.pages[0], sys.exc_info())\n self.assertEquals('NotImplementedError', v1.name)\n\n def testBuildbotAndRepresentativeValue(self):\n v = failure.FailureValue.FromMessage(self.pages[0], 'Failure')\n self.assertIsNone(v.GetBuildbotValue())\n self.assertIsNone(v.GetBuildbotDataType(\n value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))\n self.assertIsNone(v.GetChartAndTraceNameForPerPageResult())\n self.assertIsNone(v.GetRepresentativeNumber())\n self.assertIsNone(v.GetRepresentativeString())\n\n def testAsDict(self):\n v = failure.FailureValue.FromMessage(self.pages[0], 'Failure')\n d = v.AsDictWithoutBaseClassEntries()\n self.assertTrue(d['value'].find('Exception: Failure') > -1)\n\n def testFromDict(self):\n try:\n raise Exception('test')\n except Exception:\n exc_info = sys.exc_info()\n d = {\n 'type': 'failure',\n 'name': exc_info[0].__name__,\n 'units': '',\n 'value': ''.join(traceback.format_exception(*exc_info))\n }\n v = value.Value.FromDict(d, {})\n\n self.assertTrue(isinstance(v, failure.FailureValue))\n self.assertEquals(v.name, 'Exception')\n","repo_name":"googlearchive/big-rig","sub_path":"app/src/thirdparty/telemetry/value/failure_unittest.py","file_name":"failure_unittest.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":857,"dataset":"github-code","pt":"61"} +{"seq_id":"74277598593","text":"import random\n\nfrom functions.character_class_functions import Character\n\nclass Creature:\n def __init__(\n self,\n # --------------\n name: str,\n moveset: list,\n health: int,\n damage_min: int,\n damage_max: int,\n armor: int,\n loot: list,\n location: str,\n gold: int,\n xp: int,\n awareness: int,\n speed: int\n ):\n \n self.name = name\n self.moveset = moveset\n self.health = health\n self.damage_min = damage_min \n self.damage_max = damage_max\n self.armor = armor\n self.loot = loot\n self.location = location\n self.gold = gold\n self.xp = xp\n self.awareness = awareness \n self.speed = speed \n\n def compute_damage(self):\n return random.randint(self.damage_min, self.damage_max) \n\n def deal_damage_to_player(self, target: Character):\n damage_dealt = max(self.compute_damage() - target.armor['defence'], 0) \n output_text = self.name + ' ' + random.choice(self.moveset) + ' '\n misses = [\"which fails to break through your defences.\", \"missing its attack.\",\n \"failing to connect the attack.\", \"unable to land the attack.\",\n \"but you dodge away in time.\", \"barely missing you.\"]\n if damage_dealt > 
0:\n            output_text += 'dealing ' + str(damage_dealt) + ' damage.'\n            target.health -= damage_dealt\n        else:\n            output_text += random.choice(misses)\n        print(output_text)\n\n    def spot_player(self, target: Character):\n        if self.awareness >= target.dexterity:\n            print('You are spotted!')\n            target.stealth = False\n\n","repo_name":"EngineNoir/text_based_rpg","sub_path":"functions/creature_class_functions.py","file_name":"creature_class_functions.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"74025706755","text":"import re\nimport os\n\n\ndef getCookie(header):\n    cookie = \"NONE\"\n    match = re.search('Set-Cookie: (.+); Domain', header)\n    try:\n        if match:\n            cookie = match.group(1)\n    except AttributeError:\n        return \"NONE\"\n    return cookie \n\ndef getContentType(header):\n    content = \"NONE\"\n    match = re.search('Content-Type: (.+)', header)\n    try:\n        if match:\n            content = match.group(1)\n    except AttributeError:\n        return \"NONE\"\n    return content \n\ndef returnCode(header):\n    code = header.split(\" \")[1]\n    return int(code)\n\n\ndef create_dir (localDirectoryName):\n    dirName = localDirectoryName\n    originalDirName = dirName\n    identifier = \"/\"\n    i = 0\n    while True:\n        try:\n            dirName = originalDirName + identifier\n            os.mkdir(dirName)\n            break\n        except FileExistsError:\n            i += 1\n            identifier = str(i) + \"/\"\n            continue\n    return dirName\n\n","repo_name":"biniona/SampleCourseworkSolutions","sub_path":"Network Programming/HTTPImageScraper/parseLinks.py","file_name":"parseLinks.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"20080636327","text":"from ncml.datasets.loaders import load_dataset\nfrom ncml.impl.gist import GISTClassifier\nimport pytest\n\nret = load_dataset('w5a')\nX_tr, y_tr, X_te, y_te = ret\n\n\ndef test_gist_mcp_clf():\n    clf = GISTClassifier(loss='logistic', penalty='mcp')\n    clf.fit(X_tr, y_tr)\n    assert clf.score(X_tr, y_tr) > 0.95\n\n\ndef test_gist_scad_clf():\n    clf = GISTClassifier(loss='logistic', penalty='scad', theta=2)\n    clf.fit(X_tr, y_tr)\n    assert clf.score(X_tr, y_tr) > 0.95\n\n\ndef test_gist_lasso_clf():\n    clf = GISTClassifier(loss='logistic', penalty='lasso')\n    clf.fit(X_tr, y_tr)\n    assert clf.score(X_tr, y_tr) > 0.95\n\n\nif __name__ == \"__main__\":\n    pytest.main([__file__])","repo_name":"Noilyn/ncml","sub_path":"tests/test_penalty.py","file_name":"test_penalty.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
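A quick demonstration of the header helpers from parseLinks.py above; the HTTP response header is a made-up sample, not data from the repository:

sample_header = (
    "HTTP/1.1 200 OK\r\n"
    "Content-Type: image/png\r\n"
    "Set-Cookie: session=abc123; Domain=example.com\r\n"
)
print(returnCode(sample_header))      # 200
print(getCookie(sample_header))       # session=abc123
print(getContentType(sample_header))  # image/png (plus the trailing \r of the raw line)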
\"))\nlista =[nomes.append(input(\"Diga o nome: \")) for i in range(v)]\nprint(nomes)\n\nfor i in range(v):\n nome = input(\"Diga o nome: \")\n nomes.append(nome)\n\n#exemplo 3\nlista_1 = [1,2,4,5]\nlista_2 = [2,4,5,6]\nx = [print(f\"o índice {i} da lista 1 == ao índice {x} da lista 2.\") for i in range(len(lista_1)) for x in range(len(lista_2)) if lista_1[i] == lista_2[x]]\n\n\nfor i in range(len(lista_1)):\n for x in range(len(lista_2)):\n if lista_1[i] == lista_2[x]:\n print(\"Há elemento igual\")\n print(f\"o índice {i} da lista 1 == ao índice {x} da lista 2.\")\n \n\n","repo_name":"ramonbrito1995/desafio30diasdecodigo","sub_path":"semana1/compreensaodelista.py","file_name":"compreensaodelista.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28182042204","text":"# Starting code for HW 2. Add to this!\nimport numpy as np\nimport scipy.linalg as linalg\n\ntol = 1e-12 # tolerance\n\n# Function definitions:\nf1 = lambda x: np.cos(x[0] + x[1]) - x[1]**2\nf2 = lambda x: np.sin(x[0] - x[1]) - x[0]*x[1]\nf = lambda x: (f1(x), f2(x))\n\nf1d1 = lambda x : -np.sin(x[0] + x[1])\nf1d2 = lambda x : -np.sin(x[0] + x[1]) - 2*x[1]\nf2d1 = lambda x: np.cos(x[0] - x[1]) - x[1]\nf2d2 = lambda x: -np.cos(x[0] - x[1]) - x[0]\n \nxk = np.array([1,1]) # Initial guess x_k\n\nk = [0]\nresiduals = [np.abs(f(xk))]\n\nitnum = 0 # iteration counter\nsuccess = True # Assume success unless we fail!\n\nwhile linalg.norm(f(xk))>tol:\n A = np.array([[f1d1(xk), f1d2(xk)],[f2d1(xk), f2d2(xk)]])\n xk = xk - np.matmul(linalg.inv(A), f(xk))\n \t\n print(f\"x_{itnum} = {xk}\")\n itnum += 1\n k.append(itnum)\n residuals.append(linalg.norm(f(xk)))\n\n if itnum>10000:\n print('Something is wrong, too many iterations, let\\'s break out of this loop...!')\n success = False\n break\n\n\nif success:\n # Plotting using matplotlib, imported at the top of the code.\n print()\n print(f\"Done in {itnum} iterations\")\n print(f\"x1 = {xk[0]}\")\n print(f\"x2 = {xk[1]}\")\n","repo_name":"DirkyJerky/Uni","sub_path":"514/hwk3/HW03.py","file_name":"HW03.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26401528511","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 16 18:32:19 2020\n\n@author: AsteriskAmpersand\n\"\"\"\n\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 28 23:04:20 2019\n\n@author: AsteriskAmpersand\n\"\"\"\nimport bpy\n\ndef execute_operator(self, context):\n if self.add_rig != \"pass\":\n eval('bpy.ops.' 
+ self.add_rig + '()')\n \nclass ImportPremade(bpy.types.PropertyGroup):\n mode_options = [\n (\"pass\", \"Select Rig\", '', 'EMPTY_DATA', 0),\n (\"mod_tools.add_fplayer_rig\", \"Player Female Rig\", \"Statyk's Female Player Rig\", 'POSE_DATA', 1),\n (\"mod_tools.add_mplayer_rig\", \"Player Male Rig\", \"Statyk's Male Player Rig\", 'POSE_DATA', 2),\n #(\"mesh.primitive_cube_add\", \"Cube\", '', 'MESH_CUBE', 1),\n ]\n\n add_rig = bpy.props.EnumProperty(\n name = \"Add Rig\",\n items=mode_options,\n description=\"Imports the selected rigging setup\",\n default=\"pass\",\n update=execute_operator\n )\n\nclass ModTools(bpy.types.Panel):\n bl_category = \"MHW Tools\"\n bl_idname = \"panel.mhw_mod\"\n bl_label = \"MOD3 Tools\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"TOOLS\"\n # bl_category = \"Tools\"\n\n addon_key = __package__.split('.')[0]\n\n def draw(self, context):\n addon = context.user_preferences.addons[self.addon_key]\n self.addon_props = addon.preferences\n \n layout = self.layout\n #self.layout.label(\"CCL Capsule Tools\")\n #self.layout.operator(\"ctc_tools.mesh_from_capsule\", icon='MESH_CUBE', text=\"Mesh from Capsule\")\n self.draw_mod_tools(context, layout)\n layout.separator()\n\n \n def draw_mod_tools(self, context, layout):\n #addon_props = self.addon_props\n col = layout.column(align = True)\n col.label(\"Custom Properties\")\n row = col.row(align = True)\n row.operator(\"mod_tools.copy_prop\", icon='MESH_DATA', text=\"Copy\")\n row.operator(\"mod_tools.paste_prop\", icon='MESH_DATA', text=\"Paste\")\n #col.separator()\n \n col.separator()\n col.prop(context.scene.import_premade, \"add_rig\", text = \"Add Rig\")\n \n col.label(\"Rename Vertex Groups\")\n row = col.row(align = True)\n row.operator(\"mod_tools.target_armature\", icon='ARMATURE_DATA', text=\"To Armature\")\n row.operator(\"mod_tools.target_weights\", icon='EMPTY_DATA', text=\"To Empty\")\n col.separator()\n \n col.operator(\"mod_tools.bone_to_id\", icon='CONSTRAINT_BONE', text=\"Rename Bones to ID\")\n col.operator(\"mod_tools.bone_to_ix\", icon='CONSTRAINT_BONE', text=\"Rename Bones to Index\")\n col.operator(\"mod_tools.reindex_bones\", icon='CONSTRAINT_BONE', text=\"Reindex Bones\")\n col.operator(\"mod_tools.bone_rename\", icon='CONSTRAINT_BONE', text=\"Rename Bones\")\n col.operator(\"mod_tools.bone_merge\", icon='CONSTRAINT_BONE', text=\"Merge Skeletons\")\n \n #col.prop(addon_props, 'limit_application', text = 'Limit to Selection')\n col.operator(\"mod_tools.mark_uv_rep\", icon='EDGESEL', text=\"Mark Repeated UVs\")\n col.operator(\"mod_tools.solve_uv_rep\", icon='SNAP_EDGE', text=\"Solve Repeated UVs\")\n col.operator(\"mod_tools.solve_sharp_rep\", icon='SNAP_EDGE', text=\"Split Sharp and Repeated UVs\")\n col.operator(\"mod_tools.clean_uvs\", icon='GROUP_UVS', text=\"Clean UV List\")\n col.operator(\"mod_tools.clean_color\", icon='COLOR', text=\"Clean Vertex Colors\")\n col.operator(\"mod_tools.generate_color\", icon='COLOR', text=\"Generate Vertex Colors\")\n col.operator(\"mod_tools.set_color\", icon='COLOR', text=\"Set Vertex Colors\")\n col.operator(\"mod_tools.clean_materials\", icon='GROUP_UVS', text=\"Clean Materials List\") \n col.operator(\"mod_tools.clean_weights\", icon='GROUP_VERTEX', text=\"Remove Unweighted Groups\")\n col.operator(\"mod_tools.limit_normalize\", icon='GROUP_VERTEX', text=\"Limit Weights to Label\")\n col.operator(\"mod_tools.mass_weight\", icon='GROUP_VERTEX', text=\"Mass Weight to Bone\")\n col.operator(\"mod_tools.nuke_weights\", icon='GROUP_VERTEX', 
text=\"Delete Weights\")\n col.operator(\"mod_tools.componentwise_discretization\", icon='GROUP_VERTEX', text=\"Average Weight Islands\") \n col.operator(\"mod_tools.collapse_weights\", icon='GROUP_VERTEX', text=\"Collapse Weights\")\n col.operator('mod_tools.reindex_meshes', icon='GROUP_VERTEX', text=\"Reindex Meshes\")\n col.operator(\"mod_tools.mass_triangulate\", icon='GROUP_VERTEX', text=\"Triangulate\")","repo_name":"AsteriskAmpersand/MHW-Mod3-Toolbox","sub_path":"operators/modtoolspanel.py","file_name":"modtoolspanel.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"42960165145","text":"class Solution:\n #let's create a dictionary {int:freq}, sort by the freq and return the first k\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n myDict ={}\n for num in nums:\n myDict[num] = myDict.get(num,1)+1\n\n myTuple = sorted(myDict.items(),key=lambda freq: freq[1],reverse=True)\n res = [x[0] for x in myTuple[:k]]\n return res","repo_name":"GarrettCrippen/leetcode","sub_path":"medium/topkfreq.py","file_name":"topkfreq.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10075060408","text":"import sys\n\nsys.path.append(\".\")\nimport shutil\nimport os\nimport logging\nimport traceback\nimport random\nimport pickle\nimport copy\nfrom pathlib import Path\nimport numpy as np\nimport ipdb\n\n# import ipdb\nimport hydra\nimport time\nimport multiprocessing as mp\nfrom omegaconf import DictConfig, OmegaConf\nfrom dataloader.dataloader_v2 import AgentTypeDataset\nfrom dataloader import dataloader_v2 as dataloader_v2\nfrom models import agent_pref_policy\nfrom hydra.utils import get_original_cwd, to_absolute_path\nfrom utils import utils_models_wb, utils_rl_agent\n\nfrom envs.unity_environment import UnityEnvironment\nfrom agents import MCTS_agent, MCTS_agent_particle_v2, MCTS_agent_particle\n\n# from arguments import get_args\nfrom algos.arena_mp2 import ArenaMP\nfrom utils import utils_goals\nfrom utils import utils_exception\nimport torch\n\n\ndef get_class_mode(agent_args):\n mode_str = \"{}_opencost{}_closecost{}_walkcost{}_forgetrate{}\".format(\n agent_args[\"obs_type\"],\n agent_args[\"open_cost\"],\n agent_args[\"should_close\"],\n agent_args[\"walk_cost\"],\n agent_args[\"belief\"][\"forget_rate\"],\n )\n return mode_str\n\n\ndef get_edge_class0(pred, t, source=\"pred\"):\n # pred_edge_prob = pred['edge_prob']\n edge_pred = pred[\"edge_pred\"][t] if source == \"pred\" else pred[\"edge_input\"][t]\n pred_edge_names = pred[\"edge_names\"]\n pred_nodes = pred[\"nodes\"]\n pred_from_ids = pred[\"from_id\"] # if source == 'pred' else pred['from_id_input']\n pred_to_ids = pred[\"to_id\"] # if source == 'pred' else pred['to_id_input']\n\n # edge_prob = pred_edge_prob[t]\n # edge_pred = np.argmax(edge_prob, 1)\n\n edge_pred_class = {}\n\n num_edges = len(edge_pred)\n for edge_id in range(num_edges):\n from_id = pred_from_ids[t][edge_id]\n to_id = pred_to_ids[t][edge_id]\n from_node_name = pred_nodes[from_id]\n to_node_name = pred_nodes[to_id]\n # if object_name in from_node_name or object_name in to_node_name:\n edge_name = pred_edge_names[edge_pred[edge_id]]\n if edge_name in [\"inside\", \"on\"]: # disregard room locations + plate\n if to_node_name.split(\".\")[0] in [\n \"kitchen\",\n \"livingroom\",\n \"bedroom\",\n \"bathroom\",\n \"plate\",\n ]:\n continue\n # if 
from_node_name.split('.')[0]\n edge_class = \"{}_{}_{}\".format(\n edge_name, from_node_name.split(\".\")[0], to_node_name.split(\".\")[1]\n )\n # print(from_node_name, to_node_name, edge_name)\n if edge_class not in edge_pred_class:\n edge_pred_class[edge_class] = 1\n else:\n edge_pred_class[edge_class] += 1\n return edge_pred_class\n\n\ndef get_edge_class(pred, t, source=\"pred\"):\n # pred_edge_prob = pred['edge_prob']\n # print(len(pred['edge_input'][t]), len(pred['edge_pred'][t]))\n edge_pred = pred[\"edge_pred\"][t] if source == \"pred\" else pred[\"edge_input\"][t]\n pred_edge_names = pred[\"edge_names\"]\n pred_nodes = pred[\"nodes\"]\n pred_from_ids = pred[\"from_id\"] if source == \"pred\" else pred[\"from_id_input\"]\n pred_to_ids = pred[\"to_id\"] if source == \"pred\" else pred[\"to_id_input\"]\n\n # edge_prob = pred_edge_prob[t]\n # edge_pred = np.argmax(edge_prob, 1)\n\n edge_pred_class = {}\n\n num_edges = len(edge_pred)\n # print(pred_from_ids[t], num_edges)\n for edge_id in range(num_edges):\n from_id = pred_from_ids[t][edge_id]\n to_id = pred_to_ids[t][edge_id]\n from_node_name = pred_nodes[from_id]\n to_node_name = pred_nodes[to_id]\n # if object_name in from_node_name or object_name in to_node_name:\n edge_name = pred_edge_names[edge_pred[edge_id]]\n if to_node_name.split(\".\")[1] == \"-1\":\n continue\n if edge_name in [\"inside\", \"on\"]: # disregard room locations + plate\n if to_node_name.split(\".\")[0] in [\n \"kitchen\",\n \"livingroom\",\n \"bedroom\",\n \"bathroom\",\n \"plate\",\n ]:\n continue\n else:\n continue\n if from_node_name.split(\".\")[0] not in [\n \"apple\",\n \"cupcake\",\n \"plate\",\n \"waterglass\",\n ]:\n continue\n\n # if from_node_name.split('.')[0]\n\n # # TODO: need to infer the correct edge class\n # if 'table' in to_node_name.split('.')[0]:\n # ipdb.set_trace()\n # edge_name = 'on'\n\n edge_class = \"{}_{}_{}\".format(\n edge_name, from_node_name.split(\".\")[0], to_node_name.split(\".\")[1]\n )\n # print(from_node_name, to_node_name, edge_name)\n if edge_class not in edge_pred_class:\n edge_pred_class[edge_class] = 1\n else:\n edge_pred_class[edge_class] += 1\n return edge_pred_class\n\n\ndef aggregate_multiple_pred(preds, t, change=False):\n edge_classes = []\n edge_pred_class_all = {}\n N_preds = len(preds)\n for pred in preds:\n edge_pred_class = get_edge_class(pred, t)\n edge_classes += list(edge_pred_class.keys())\n for edge_class, count in edge_pred_class.items():\n if edge_class not in edge_pred_class_all:\n edge_pred_class_all[edge_class] = [count]\n else:\n edge_pred_class_all[edge_class] += [count]\n if change:\n edge_input_class = get_edge_class(preds[0], t, \"input\")\n edge_classes += list(edge_input_class.keys())\n\n edge_classes = sorted(list(set(edge_classes)))\n edge_pred_class_estimated = {}\n for edge_class in edge_classes:\n if edge_class not in edge_pred_class_all:\n edge_pred_class_estimated[edge_class] = (-edge_input_class[edge_class], 0)\n continue\n curr_len = len(edge_pred_class_all[edge_class])\n if curr_len < N_preds:\n edge_pred_class_all[edge_class] += [0] * (N_preds - curr_len)\n if change:\n c = (\n np.mean(edge_pred_class_all[edge_class]) - edge_input_class[edge_class]\n if edge_class in edge_input_class\n else np.mean(edge_pred_class_all[edge_class])\n )\n else:\n c = np.mean(edge_pred_class_all[edge_class])\n edge_pred_class_estimated[edge_class] = (\n c,\n np.std(edge_pred_class_all[edge_class]),\n )\n # print(edge_class, edge_pred_class_estimated[edge_class])\n return 
edge_pred_class_estimated\n\n\ndef get_metrics_reward(\n alice_results, test_results, episode_ids, num_tries, time_limit=30\n):\n mS = []\n mL = []\n mSP = []\n mSwS = []\n # ipdb.set_trace()\n for seed in range(num_tries):\n alice_S = []\n alice_L = []\n normalized_by_suc = False\n for episode_id in episode_ids:\n Ls = []\n Ss = []\n SWSs = []\n L_A_seeds = []\n for seed_alice in range(num_tries):\n if episode_id not in alice_results:\n S_A, L_A = 0, time_limit\n ipdb.set_trace()\n # continue\n else:\n if alice_results[episode_id][\"S\"][seed_alice] == \"\":\n print(episode_id, seed)\n continue\n\n S_A = alice_results[episode_id][\"S\"][seed_alice]\n L_A = alice_results[episode_id][\"L\"][seed_alice]\n L_A_seeds.append(L_A)\n if episode_id not in test_results:\n # print(episode_id, seed)\n continue\n L_A_seeds = [t for t in L_A_seeds if t is not None]\n # if normalized_by_suc:\n L_A_seeds = [t for t in L_A_seeds if t < time_limit]\n\n Ls = []\n Ss = []\n for seed_bob in range(num_tries):\n if seed_bob >= len(test_results[episode_id][\"S\"]):\n continue\n try:\n if test_results[episode_id][\"S\"][seed_bob] == \"\":\n print(episode_id, seed)\n continue\n if test_results[episode_id][\"S\"][seed_bob] is None:\n print(episode_id, seed)\n continue\n S_B = test_results[episode_id][\"S\"][seed_bob]\n L_B = test_results[episode_id][\"L\"][seed_bob]\n if L_B == time_limit:\n S_B = 0.0\n except:\n ipdb.set_trace()\n\n Ls.append(L_B)\n Ss.append(S_B)\n\n Ls = [t for t in Ls if t is not None]\n if normalized_by_suc:\n Ls = [t for t in Ls if t < time_limit]\n if len(Ls) > 0:\n # if len([t for t in Ss if t == 0.]) > 0:\n # ipdb.set_trace()\n SWSs.append(\n np.mean([-ls * 1.0 / time_limit + sb for ls, sb in zip(Ls, Ss)])\n )\n # mSwS.append(SWSs)\n # if SWSs > 0:\n # cont_better += 1\n #\n\n # ipdb.set_trace()\n # print(episode_id)\n if len(L_A_seeds) > 0: # and np.mean(Ls) > 30:\n mSP.append(np.mean(L_A_seeds) / np.mean(Ls) - 1.0)\n else:\n Ls = [time_limit] * len(Ss)\n SWSs.append(0)\n\n # print(episode_id, Ss, Ls)\n\n mS.append(np.mean(Ss))\n mL.append(np.mean(Ls))\n mSwS.append(np.mean(SWSs))\n\n # print('Alice:', np.mean(alice_S), np.mean(alice_L))\n # print('Alice:', np.mean(alice_S), '({})'.format(np.std(alice_S)), np.mean(alice_L), '({})'.format(np.std(alice_L)))\n # print('Bob:', np.mean(Ss), '({})'.format(np.std(Ss)), np.mean(Ls), '({})'.format(np.std(Ls)), np.mean(SWSs), '({})'.format(np.std(SWSs)))\n\n ns = np.sqrt(len(mS))\n nsp = np.sqrt(len(mSP))\n nw = np.sqrt(len(mSwS))\n # Success, Length, SpeedUp, Reward\n return (\n np.mean(mS),\n np.mean(mL),\n np.mean(mSP),\n np.mean(mSwS),\n np.std(mS) / ns,\n np.std(mL) / ns,\n np.std(mSP) / nsp,\n np.std(mSwS) / nw,\n )\n\n\ndef get_class_from_state(state):\n id2node = {node[\"id\"]: node[\"class_name\"] for node in state[\"nodes\"]}\n edges = state[\"edges\"]\n nodes = state[\"nodes\"]\n\n edge_class_count = {}\n\n num_edges = len(edges)\n # print(pred_from_ids[t], num_edges)\n for edge_id in range(num_edges):\n from_id = edges[edge_id][\"from_id\"]\n to_id = edges[edge_id][\"to_id\"]\n from_node_name = id2node[from_id]\n to_node_name = id2node[to_id]\n # if object_name in from_node_name or object_name in to_node_name:\n edge_name = edges[edge_id][\"relation_type\"].lower()\n if edge_name in [\"inside\", \"on\"]: # disregard room locations + plate\n if to_node_name in [\n \"kitchen\",\n \"livingroom\",\n \"bedroom\",\n \"bathroom\",\n \"plate\",\n ]:\n continue\n if from_node_name in [\n \"kitchen\",\n \"livingroom\",\n \"bedroom\",\n 
\"bathroom\",\n \"character\",\n ]:\n continue\n if from_node_name not in [\n \"plate\",\n \"cutleryfork\",\n \"waterglass\",\n \"cupcake\",\n \"salmon\",\n \"apple\",\n \"remotecontrol\",\n \"chips\",\n \"condimentbottle\",\n \"condimentshaker\",\n \"wineglass\",\n \"pudding\",\n ]:\n continue\n else:\n continue\n\n edge_class = \"{}_{}_{}\".format(edge_name, from_node_name, to_node_name)\n if edge_class not in edge_class_count:\n edge_class_count[edge_class] = 1\n else:\n edge_class_count[edge_class] += 1\n return edge_class_count\n\n\ndef compute_dist(edge_class_1, edge_class_2):\n edge_classes = set(list(edge_class_1.keys()) + list(edge_class_2.keys()))\n dist = 0\n for edge in edge_classes:\n if edge not in edge_class_1:\n cnt1 = 0\n else:\n cnt1 = edge_class_1[edge]\n if edge not in edge_class_2:\n cnt2 = 0\n else:\n cnt2 = edge_class_2[edge]\n dist += abs(cnt1 - cnt2)\n return dist\n\n\n@hydra.main(config_path=\"../config/\", config_name=\"config_default_toy_excl_plan\")\ndef main(cfg: DictConfig):\n config = cfg\n print(\"Config\")\n print(OmegaConf.to_yaml(cfg))\n args = config\n args_pred = args.agent_pred_graph\n num_proc = 0\n\n num_tries = 5\n args.executable_file = \"/data/vision/torralba/frames/data_acquisition/SyntheticStories/website/release/simulator/v2.0/v2.2.5_beta/linux_exec.v2.2.5_beta.x86_64\"\n args.max_episode_length = 250\n args.num_per_apartment = 20\n curr_dir = os.path.dirname(os.path.abspath(__file__))\n # home_path = '../'\n rootdir = \"\"\n\n # args.dataset_path = f'{rootdir}/dataset/train_env_task_set_100_full.pik'\n args.dataset_path = f\"/data/vision/torralba/frames/data_acquisition/SyntheticStories/online_wah/agent_preferences/dataset/test_env_task_set_60_full_task.all.pik\"\n # args.dataset_path = './dataset/train_env_task_set_20_full_reduced_tasks_single.pik'\n\n # cachedir = \"/data/vision/torralba/frames/data_acquisition/SyntheticStories/agent_preferences/tshu/agent_preferences/outputs/helping_states_1_3_ip1_detfull_encoder_task_graph_20_1.0_1.0_5.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_nohold_20_1.0_1.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_20_1.0_1.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_newvaefull_encoder_task_graph_10_1.0_1.0_5.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_newvaefull_encoder_task_graph.kl0.001_10_1.0_1.0_5.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_ip1_newvaefull_encoder_task_graph.kl0.001_20_1.0_1.0_5.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_fastwalk_1_3_ip1_newvaefull_encoder_task_graph.kl0.001_20_1.0_1.0_5.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_ip1_detfull_encoder_task_graph_20_1.0_1.0_5.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_fastwalk_1_3_ip1_detfull_encoder_task_graph_20_1.0_1.0_5.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_detfull_encoder_task_graph_20_1.0_1.0_5.0\"\n\n # cachedir = f'{get_original_cwd()}/outputs/helping_action_freq_v2_20'\n # cachedir = f'{get_original_cwd()}/outputs/helping_action_freq_1'\n\n cachedir_main = f\"{get_original_cwd()}/outputs/main_agent_only_large\"\n cachedir_main = \"/data/vision/torralba/frames/data_acquisition/SyntheticStories/agent_preferences/tshu/agent_preferences/outputs/main_agent_only_large\"\n\n # # =======================\n # # oracle\n # # =======================\n # cachedir = f\"{get_original_cwd()}/outputs/helping_gt_goal\"\n\n # # # =======================\n 
# # ours\n # # =======================\n # cachedir = f\"/data/vision/torralba/frames/data_acquisition/SyntheticStories/online_wah/agent_preferences//results/results_smallset_help/helping_states_fastwalk_r15_0_5_ip1_detfull_alldata_20_1.0_1.0_5.0\"\n # # cachedir = f\"{get_original_cwd()}/outputs/helping_states_fastwalk_r15_0_3_ip1_detfull_encoder_task_graph_20_1.0_1.0_5.0\"\n # # # 20%\n # # cachedir = f\"{get_original_cwd()}/outputs/helping_states_fastwalk_r15_0_3_ip1_detfull_r0.2_20_1.0_1.0_5.0\"\n\n # # =======================\n # # single particle\n # # =======================\n # # cachedir = f\"/data/vision/torralba/frames/data_acquisition/SyntheticStories/online_wah/agent_preferences//results/results_smallset_help/helping_states_fastwalk_r15_0_5_ip0_detfull_alldata_1_1.0_1.0_5.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_action_freq_fastwalk_r15_0_3_ip1_detfull_encoder_task_graph_1_1.0_1.0_5.0\"\n # # 5%\n # cachedir = f\"{get_original_cwd()}/outputs/helping_action_freq_fastwalk_r15_0_3_ip1_detfull_r0.05_1_1.0_1.0_5.0\"\n\n # # 20%\n # cachedir = f\"/data/vision/torralba/frames/data_acquisition/SyntheticStories/online_wah/agent_preferences/results/results_smallset_help/helping_states_fastwalk_r15_0_5_ip0_detfull_r0.20_1_1.0_1.0_5.0\"\n\n # =======================\n # w/o inv plan\n # =======================\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_fastwalk_r15_0_3_ip0_detfull_encoder_task_graph_20_1.0_1.0_5.0\"\n # %5\n cachedir = f\"{get_original_cwd()}/outputs/helping_states_fastwalk_r15_0_3_ip0_detfull_r0.05_20_1.0_1.0_5.0\"\n\n # # # =======================\n # # ours w/ uniform proposals\n # # # =======================\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_fastwalk_r_1_3_ip1_uniform_20_1.0_1.0_5.0\"\n\n # # # =======================\n # # empowerment\n # # # =======================\n # cachedir = f\"{get_original_cwd()}/outputs/helping_empowerment_fastwalk_r15_1_3_ip0_uniform_20_1.0_1.0_5.0\"\n\n # # =======================\n # # action frequency\n # # =======================\n # # cachedir = f\"{get_original_cwd()}/outputs/helping_action_freq_fastwalk_r15_1_3_ip1_detfull_encoder_task_graph_20_1.0_1.0_5.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_action_freq_fastwalk_r15_0_3_ip1_detfull_encoder_task_graph_20_1.0_1.0_5.0\"\n # # cachedir = f\"{get_original_cwd()}/outputs/helping_action_freq_fastwalk_r15_0_3_ip1_detfull_r0.05_20_1.0_1.0_5.0\"\n\n # # =======================\n # # ours w/o returning\n # # =======================\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_fastwalk_r15_0_3_ip1_detfull_encoder_task_graph_20_1.0_1.0_0.0\"\n # cachedir = f\"{get_original_cwd()}/outputs/helping_states_fastwalk_r15_0_3_ip1_detfull_r0.05_20_1.0_1.0_0.0\"\n\n agent_types = [\n [\"full\", 0, 0.05, False, 0, \"uniform\"], # 0\n [\"full\", 0.5, 0.01, False, 0, \"uniform\"], # 1\n [\"full\", -5, 0.05, False, 0, \"uniform\"], # 2\n [\"partial\", 0, 0.05, False, 0, \"uniform\"], # 3\n [\"partial\", 0, 0.05, False, 0, \"spiked\"], # 4\n [\"partial\", 0, 0.05, False, 0.2, \"uniform\"], # 5\n [\"partial\", 0, 0.01, False, 0.01, \"spiked\"], # 6\n [\"partial\", -5, 0.05, False, 0.2, \"uniform\"], # 7\n [\"partial\", 0.5, 0.05, False, 0.2, \"uniform\"], # 8\n ]\n random_start = random.Random()\n agent_id = 0\n (\n args.obs_type,\n open_cost,\n walk_cost,\n should_close,\n forget_rate,\n belief_type,\n ) = agent_types[0]\n datafile = args.dataset_path.split(\"/\")[-1].replace(\".pik\", \"\")\n agent_args = 
{\n \"obs_type\": args.obs_type,\n \"open_cost\": open_cost,\n \"should_close\": should_close,\n \"walk_cost\": walk_cost,\n \"belief\": {\"forget_rate\": forget_rate, \"belief_type\": belief_type},\n }\n # TODO: add num_samples to the argument\n num_samples = args.num_samples\n num_processes = args.num_processes\n args.mode = \"{}_\".format(agent_id + 1) + \"action_freq_{}\".format(num_samples)\n # args.mode += 'v9_particles_v2'\n\n # env_task_set = pickle.load(open(args.dataset_path, 'rb'))\n # # print(env_task_set)\n # print(len(env_task_set))\n\n args.record_dir = \"{}/{}\".format(cachedir, datafile)\n record_dir_main = \"{}/{}\".format(cachedir_main, datafile)\n error_dir = \"{}/logging/{}\".format(cachedir, datafile)\n # if not os.path.exists(args.record_dir):\n # os.makedirs(args.record_dir)\n\n # if not os.path.exists(error_dir):\n # os.makedirs(error_dir)\n\n executable_args = {\n \"file_name\": args.executable_file,\n \"x_display\": 0,\n \"no_graphics\": True,\n }\n\n id_run = 0\n # random.seed(id_run)\n # episode_ids = list(range(len(env_task_set)))\n # episode_ids = sorted(episode_ids)\n # random_start.shuffle(episode_ids)\n # episode_ids = episode_ids[10:]\n\n valid_set_path = \"/data/vision/torralba/frames/data_acquisition/SyntheticStories/online_wah/agent_preferences/analysis/test_set_reduced.txt\"\n f = open(valid_set_path, \"r\")\n episode_ids = []\n for filename in f:\n episode_ids.append(int(filename.split(\"episode.\")[-1].split(\"_\")[0]))\n episode_ids = sorted(episode_ids)\n print(len(episode_ids))\n f.close()\n\n # episode_ids = [3]\n\n # # episode_ids = [20] #episode_ids\n # # num_tries = 1\n # episode_ids = [1]\n # ndict = {'on_book_329': 1}\n # env_task_set[91]['init_rooms'] = ['bedroom', 'bedroom']\n # env_task_set[91]['task_goal'] = {0: ndict, 1: ndict}\n\n # test_results_\n\n main_results, help_results = {}, {}\n num_tries = 3\n\n for iter_id in range(0, num_tries):\n # if iter_id > 0:\n # iter_id = 1\n\n steps_list, failed_tasks = [], []\n current_tried = iter_id\n\n # test_results = {}\n print(args.record_dir)\n\n if not os.path.isfile(args.record_dir + \"/results_{}.pik\".format(iter_id)):\n test_results = {}\n else:\n test_results = pickle.load(\n open(args.record_dir + \"/results_{}.pik\".format(iter_id), \"rb\")\n )\n help_results = dict(test_results)\n\n print(iter_id, len(test_results))\n\n if not os.path.isfile(record_dir_main + \"/results_{}.pik\".format(iter_id)):\n test_results = {}\n else:\n test_results = pickle.load(\n open(record_dir_main + \"/results_{}.pik\".format(iter_id), \"rb\")\n )\n main_results = dict(test_results)\n\n # print(test_results)\n\n print(len(help_results))\n\n main_dist = {}\n helper_dist = {}\n all_dist = []\n\n for episode_id in help_results:\n try:\n print(episode_id, main_results[episode_id], help_results[episode_id])\n except:\n ipdb.set_trace()\n\n if (\n np.mean(help_results[episode_id][\"S\"])\n > 0\n # and 250 in help_results[episode_id][\"L\"]\n ):\n # if episode_id in [139]:\n # log_file_name = args.record_dir + \"/logs_episode.{}_iter.{}.pik\".format(\n # episode_id, 2\n # )\n # log_res = pickle.load(open(log_file_name, \"rb\"))\n # ipdb.set_trace()\n tmp_list = list(help_results[episode_id][\"S\"])\n help_results[episode_id][\"S\"] = [\n x\n for x, y in zip(tmp_list, help_results[episode_id][\"L\"])\n if x > 0 and y < args.max_episode_length\n ]\n tmp_list = list(help_results[episode_id][\"L\"])\n help_results[episode_id][\"L\"] = [\n x for x in tmp_list if x < args.max_episode_length\n ]\n\n # 
main_dist[episode_id] = []\n # helper_dist[episode_id] = []\n\n # for iter_id in range(0, num_tries):\n # main_log_file_name = (\n # record_dir_main\n # + \"/logs_episode.{}_iter.{}.pik\".format(episode_id, iter_id)\n # )\n\n # if os.path.isfile(main_log_file_name):\n # main_log = pickle.load(open(main_log_file_name, \"rb\"))\n\n # if len(main_log[\"graph\"]) < 250 - 2:\n # init_state = main_log[\"graph\"][0]\n # final_state = main_log[\"graph\"][-1]\n # init_edge_class, final_edge_class = get_class_from_state(\n # init_state\n # ), get_class_from_state(final_state)\n # # print(\"init\")\n # # print(init_edge_class)\n # # print(\"final\")\n # # print(final_edge_class)\n # print(\"main dist\")\n # dist = compute_dist(init_edge_class, final_edge_class)\n # print(dist)\n # main_dist[episode_id].append(dist)\n\n # log_file_name = args.record_dir + \"/logs_episode.{}_iter.{}.pik\".format(\n # episode_id, iter_id\n # )\n\n # if os.path.isfile(log_file_name):\n # helper_log = pickle.load(open(log_file_name, \"rb\"))\n # if len(helper_log[\"graph\"]) < 250 - 2:\n # init_state = helper_log[\"graph\"][0]\n # final_state = helper_log[\"graph\"][-1]\n # init_edge_class, final_edge_class = get_class_from_state(\n # init_state\n # ), get_class_from_state(final_state)\n # # print(\"init\")\n # # print(init_edge_class)\n # # print(\"final\")\n # # print(final_edge_class)\n # print(\"helper dist\")\n # dist = compute_dist(init_edge_class, final_edge_class)\n # print(dist)\n # helper_dist[episode_id].append(dist)\n\n # if len(helper_dist[episode_id]) > 0 and len(main_dist[episode_id]) > 0:\n # all_dist.append(\n # np.mean(helper_dist[episode_id]) - np.mean(main_dist[episode_id])\n # )\n\n # DIST = np.mean(all_dist)\n # stDIST = np.std(all_dist) / len(all_dist)\n\n # DIST2 = np.mean([x >= 1 for x in all_dist])\n # stDIST2 = np.std([x >= 1 for x in all_dist]) / len(all_dist)\n\n SR, AL, SP, SWS, stdSR, stdL, stdSP, stdR = get_metrics_reward(\n main_results,\n help_results,\n episode_ids,\n num_tries,\n time_limit=args.max_episode_length,\n )\n\n # # Success, Length, SpeedUp, Reward\n # print(\"SR\", \"AL\", \"SP\", \"Reward\", \"DIST\", \"DIST2\")\n # print(SR, AL, SP, SWS, DIST, DIST2)\n\n # print(\"stdSR\", \"stdL\", \"stdSP\", \"stdR\", \"stDIST\", \"stDIST2\")\n # print(stdSR, stdL, stdSP, stdR, stDIST, stDIST2)\n\n print(\"SR\", \"AL\", \"SP\", \"Reward\")\n print(SR, AL, SP, SWS)\n\n print(\"stdSR\", \"stdL\", \"stdSP\", \"stdR\")\n print(stdSR, stdL, stdSP, stdR)\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n main()\n print(\"running time: %s sec\" % (time.time() - start_time))\n","repo_name":"xavierpuigf/online_watch_and_help","sub_path":"analysis/evaluate_helping.py","file_name":"evaluate_helping.py","file_ext":"py","file_size_in_byte":25952,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"35135444597","text":"# 1. Write a program that prints ‘Hello World’ to the screen\r\nprint('Hello World')\r\n\r\n# 2. Write a program that asks the user for their name and greets them with their name\r\nname = input('Enter your name: ')\r\nif name.isalpha() == True:\r\n print('Welcome', name)\r\nelse:\r\n print('Put your real name')\r\n\r\n# 3. 
Modify the previous program such that only the users Alice and Bob are greeted with their names\r\nnewName = input('Enter your name: ')\r\nif newName.isdigit() == True:\r\n    print('Put your real name!')\r\nelif newName == 'Bob':\r\n    print('Welcome Bob')\r\nelif newName == 'Alice':\r\n    print('Welcome Alice')\r\nelse:\r\n    print('Hello Stranger')\r\n\r\n# 4. Write a program that asks the user for a number n and prints the sum of the numbers 1 to n\r\nnumber = int(input('Give me a number: '))\r\ndef recursive(n):\r\n    if n == 1:\r\n        return n\r\n    else:\r\n        return n + recursive(n - 1)\r\nprint(recursive(number))\r\n\r\n# 5. Modify the previous program such that only multiples of three or five are considered in the\r\n# sum, e.g. 3, 5, 6, 9, 10, 12, 15 for n=17\r\nnumber = int(input('Give me a number: '))\r\ndef recursive(n):\r\n    if n == 0:\r\n        return n\r\n    else:\r\n        if (n) % 3 == 0 or (n) % 5 == 0:\r\n            return n + recursive(n - 1)\r\n        else:\r\n            return recursive(n-1)\r\nprint(recursive(number))\r\n\r\n# 6. Write a program that asks the user for a number n and gives them the possibility to choose\r\n#between computing the sum and computing the product of 1,…,n.\r\nnumber2 = int(input('Give me a number: '))\r\nchoice = int(input('Pick either - 1 for sum or 2 for factorial: '))\r\nif choice != 1 and choice != 2:\r\n    print('Pick one or two I said')\r\nelif choice == 1:\r\n    def sum1(n):\r\n        if n == 1:\r\n            return n\r\n        else:\r\n            return n + sum1(n - 1)\r\n    print(sum1(number2 ))\r\nelif choice == 2:\r\n    def factorial(n):\r\n        if n == 1:\r\n            return n\r\n        else:\r\n            return n * factorial(n - 1)\r\n    print(factorial(number2 ))\r\n\r\n# 7. Write a program that prints a multiplication table for numbers up to 12.\r\nmultiplication = int(input('Give me a number to multiply: '))\r\nfactor = 1\r\nwhile factor <= 12:\r\n    print(multiplication, 'x', factor, '=', multiplication * factor)\r\n    factor += 1\r\n\r\n# 8. Write a program that prints all prime numbers between 1 and 100\r\n# 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97\r\nfor num in range(2, 101):\r\n    if all(num % d != 0 for d in range(2, int(num ** 0.5) + 1)):\r\n        print(num)\r\n
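An alternative sketch for exercise 8 using a sieve instead of trial division; this is my own addition for comparison, not part of the original worksheet:

limit = 100
is_prime = [True] * (limit + 1)
is_prime[0] = is_prime[1] = False
for p in range(2, int(limit ** 0.5) + 1):
    if is_prime[p]:
        # every multiple of a known prime is composite
        for multiple in range(p * p, limit + 1, p):
            is_prime[multiple] = False
print([n for n in range(2, limit + 1) if is_prime[n]])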
\r\n# 9. Write a guessing game where the user has to guess a secret number. After every guess the\r\n# program tells the user whether their number was too large or too small. At the end the number of tries\r\n# needed should be printed. It counts only as one try if they input the same number multiple times consecutively.\r\n\r\nimport math\r\nimport random\r\nintegerA = int(input('Enter a number: '))\r\nintegerB = int(input('Enter another number: '))\r\nwhile (integerA == integerB):\r\n    print('The two integers cannot be equal!')\r\n    integerA = int(input('Enter a lower number: '))\r\n    integerB = int(input('Enter a higher number: '))\r\nif (integerA > integerB):\r\n    oldB = integerB\r\n    integerB = integerA\r\n    integerA = oldB\r\nrandomChoice = random.randint(integerA, integerB)\r\n\r\nminNumberGuesses = (round(math.log(integerB-integerA + 1, 2)))\r\nprint('You have ',minNumberGuesses,'guesses')\r\nanswer = int(input('Guess the number the system generated: '))\r\nminNumberGuesses -= 1\r\nwhile answer != randomChoice and minNumberGuesses > 0:\r\n    if answer > randomChoice:\r\n        answer = int(input('Too high, try again'))\r\n    elif answer < randomChoice:\r\n        answer = int(input('Too low, try again'))\r\n    minNumberGuesses -=1\r\n\r\nif answer == randomChoice:\r\n    print('Correct, your number was ' + str(randomChoice))\r\nelse:\r\n    print('You are out of guesses loser AHAHAHAHA!')\r\n    print('The number the computer picked was',randomChoice)\r\n\r\n# 10. Write a program that prints the next 20 leap years\r\nyear = 2022\r\ncount = 0\r\nwhile count < 20:\r\n    year += 1\r\n    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):\r\n        print(year)\r\n        count += 1\r\n","repo_name":"ABellassai/ACC-AlessandroB2","sub_path":"CS General Coding Worksheet 17.py","file_name":"CS General Coding Worksheet 17.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"9414633063","text":"import json\nimport os\n\ndef log_message(event, context):\n    # Extract event detail from event object\n    detail = event[\"detail\"]\n\n    # Extract messages from event detail\n    messages = detail['messages']\n\n    # Log Message (View in CloudWatch Log)\n    for message in messages:\n        print(message)\n","repo_name":"teer823/sam-eventbridge-demo","sub_path":"functions/log_message/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71354183874","text":"n = int(input())\r\nsigma = 0\r\ni = 0\r\nwhile i < n:\r\n    i += 1\r\n    sigma += float(input())\r\nprint(\"%d %.5f\" % (sigma, sigma / n))\r\n\r\n\"\"\"\r\nWe have already spent three problems getting familiar with the for loop; now let's look at the while loop.\r\nGenerally the while loop is used far less often than the for loop, but there are still places where its own strengths shine. You can also think about (or look up) where each of them excels.\r\nMy advice is not to force yourself to memorise the difference between them; just know that this other way of writing loops exists, so that when you find something hard to express with a for loop, you can remember that the while loop is still available.\r\n\"\"\"","repo_name":"Aluminum13/Python-NOI-OJ","sub_path":"1.5/1.5.4.py","file_name":"1.5.4.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
{"seq_id":"25167033655","text":"import logging\n\nimport sqlalchemy as sa\nfrom alembic import op\n\nfrom superset.utils.core import (\n    generic_find_fk_constraint_name,\n    generic_find_fk_constraint_names,\n    generic_find_uq_constraint_name,\n)\n\n# revision identifiers, used by Alembic.\nrevision = \"4736ec66ce19\"\ndown_revision = \"f959a6652acd\"\n\nconv = {\n    \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n    \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n}\n\n# Helper table for database migrations using minimal schema.\ndatasources = sa.Table(\n    \"datasources\",\n    sa.MetaData(),\n    sa.Column(\"id\", sa.Integer, primary_key=True),\n    sa.Column(\"datasource_name\", sa.String(255)),\n)\n\n\ndef upgrade():\n    
bind = op.get_bind()\n insp = sa.engine.reflection.Inspector.from_engine(bind)\n\n # Add the new less restrictive uniqueness constraint.\n with op.batch_alter_table(\"datasources\", naming_convention=conv) as batch_op:\n batch_op.create_unique_constraint(\n \"uq_datasources_cluster_name\", [\"cluster_name\", \"datasource_name\"]\n )\n\n # Augment the tables which have a foreign key constraint related to the\n # datasources.datasource_name column.\n for foreign in [\"columns\", \"metrics\"]:\n with op.batch_alter_table(foreign, naming_convention=conv) as batch_op:\n # Add the datasource_id column with the relevant constraints.\n batch_op.add_column(sa.Column(\"datasource_id\", sa.Integer))\n\n batch_op.create_foreign_key(\n f\"fk_{foreign}_datasource_id_datasources\",\n \"datasources\",\n [\"datasource_id\"],\n [\"id\"],\n )\n\n # Helper table for database migration using minimal schema.\n table = sa.Table(\n foreign,\n sa.MetaData(),\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"datasource_name\", sa.String(255)),\n sa.Column(\"datasource_id\", sa.Integer),\n )\n\n # Migrate the existing data.\n for datasource in bind.execute(datasources.select()):\n bind.execute(\n table.update()\n .where(table.c.datasource_name == datasource.datasource_name)\n .values(datasource_id=datasource.id)\n )\n\n with op.batch_alter_table(foreign, naming_convention=conv) as batch_op:\n # Drop the datasource_name column and associated constraints. Note\n # due to prior revisions (1226819ee0e3, 3b626e2a6783) there may\n # incorrectly be multiple duplicate constraints.\n names = generic_find_fk_constraint_names(\n foreign, {\"datasource_name\"}, \"datasources\", insp\n )\n\n for name in names:\n batch_op.drop_constraint(\n name or f\"fk_{foreign}_datasource_name_datasources\",\n type_=\"foreignkey\",\n )\n\n batch_op.drop_column(\"datasource_name\")\n\n try:\n # Drop the old more restrictive uniqueness constraint.\n with op.batch_alter_table(\"datasources\", naming_convention=conv) as batch_op:\n batch_op.drop_constraint(\n generic_find_uq_constraint_name(\n \"datasources\", {\"datasource_name\"}, insp\n )\n or \"uq_datasources_datasource_name\",\n type_=\"unique\",\n )\n except Exception as ex:\n logging.warning(\n \"Constraint drop failed, you may want to do this \"\n \"manually on your database. For context, this is a known \"\n \"issue around nondeterministic constraint names on Postgres \"\n \"and perhaps more databases through SQLAlchemy.\"\n )\n logging.exception(ex)\n\n\ndef downgrade():\n bind = op.get_bind()\n insp = sa.engine.reflection.Inspector.from_engine(bind)\n\n # Add the new more restrictive uniqueness constraint which is required by\n # the foreign key constraints. 
Note this operation will fail if the\n # datasources.datasource_name column is no longer unique.\n with op.batch_alter_table(\"datasources\", naming_convention=conv) as batch_op:\n batch_op.create_unique_constraint(\n \"uq_datasources_datasource_name\", [\"datasource_name\"]\n )\n\n # Augment the tables which have a foreign key constraint related to the\n # datasources.datasource_id column.\n for foreign in [\"columns\", \"metrics\"]:\n with op.batch_alter_table(foreign, naming_convention=conv) as batch_op:\n # Add the datasource_name column with the relevant constraints.\n batch_op.add_column(sa.Column(\"datasource_name\", sa.String(255)))\n\n batch_op.create_foreign_key(\n f\"fk_{foreign}_datasource_name_datasources\",\n \"datasources\",\n [\"datasource_name\"],\n [\"datasource_name\"],\n )\n\n # Helper table for database migration using minimal schema.\n table = sa.Table(\n foreign,\n sa.MetaData(),\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"datasource_name\", sa.String(255)),\n sa.Column(\"datasource_id\", sa.Integer),\n )\n\n # Migrate the existing data.\n for datasource in bind.execute(datasources.select()):\n bind.execute(\n table.update()\n .where(table.c.datasource_id == datasource.id)\n .values(datasource_name=datasource.datasource_name)\n )\n\n with op.batch_alter_table(foreign, naming_convention=conv) as batch_op:\n # Drop the datasource_id column and associated constraint.\n batch_op.drop_constraint(\n f\"fk_{foreign}_datasource_id_datasources\", type_=\"foreignkey\"\n )\n\n batch_op.drop_column(\"datasource_id\")\n\n with op.batch_alter_table(\"datasources\", naming_convention=conv) as batch_op:\n # Prior to dropping the uniqueness constraint, the foreign key\n # associated with the cluster_name column needs to be dropped.\n batch_op.drop_constraint(\n generic_find_fk_constraint_name(\n \"datasources\", {\"cluster_name\"}, \"clusters\", insp\n )\n or \"fk_datasources_cluster_name_clusters\",\n type_=\"foreignkey\",\n )\n\n # Drop the old less restrictive uniqueness constraint.\n batch_op.drop_constraint(\n generic_find_uq_constraint_name(\n \"datasources\", {\"cluster_name\", \"datasource_name\"}, insp\n )\n or \"uq_datasources_cluster_name\",\n type_=\"unique\",\n )\n\n # Re-create the foreign key associated with the cluster_name column.\n batch_op.create_foreign_key(\n f\"fk_{foreign}_datasource_id_datasources\",\n \"clusters\",\n [\"cluster_name\"],\n [\"cluster_name\"],\n )\n","repo_name":"apache/superset","sub_path":"superset/migrations/versions/2017-10-03_14-37_4736ec66ce19_.py","file_name":"2017-10-03_14-37_4736ec66ce19_.py","file_ext":"py","file_size_in_byte":6889,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"30113176997","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport datetime\nimport utils.ocp_utils as ocp_utils\n\n# basic cluster health check prior to replacing nodes\nocp_utils.cluster_health_check()\n\n# total arguments\nn = len(sys.argv)\nprint(\"Total arguments passed:\", n)\n\ncloud_type=sys.argv[1]\nreplicas=sys.argv[2]\nnew_worker_instance_type=sys.argv[3]\n\nocp_utils.run(\"envsubst < ./replace_nodes/clouds/worker-node-machineset-%s.yaml | oc apply -f -\" % cloud_type)\n\nfinal_machine_set=\"machinesets/\"+os.environ['CLUSTER_NAME'] + \"-worker-new\"\n\nprint('Start time: Scaling up new machineset {} at '.format(final_machine_set) + str(datetime.datetime.now()))\nocp_utils.scale_machine_replicas(final_machine_set, 
replicas)\nocp_utils.wait_for_node_creation(replicas, new_worker_instance_type)\nprint('End time: Finished scaling up {} at '.format(final_machine_set) + str(datetime.datetime.now()))\n\nmachines_sets=ocp_utils.run(\"oc get machinesets -A -o name --no-headers\").split('\\n')\nprint('machine sets \\n' + str(machines_sets))\n\n\nfor machineset in machines_sets:\n final_machine_set_name = final_machine_set.split('/')[-1]\n if final_machine_set_name in machineset: \n print (\"new machine set in use \\n\")\n continue\n else:\n replicas = ocp_utils.get_machine_replicas(machineset)\n #print (replicas)\n if \"No resources found\" in replicas:\n continue\n # delete old machines \n print('Start time: Deleting nodes from {} at '.format(machineset) + str(datetime.datetime.now()))\n while int(replicas) >= 1: \n replicas = int(replicas) - 1\n ocp_utils.scale_machine_replicas(machineset, replicas) \n ocp_utils.wait_for_node_deletion(machineset, replicas)\n print('End time: All nodes deleted from {} at '.format(machineset) + str(datetime.datetime.now()))\n print()\n ocp_utils.cluster_health_check()\n #delete_machineset(machineset) #After scaling down do not delete machinesets for this scenario\n ","repo_name":"openshift/svt","sub_path":"perfscale_regression_ci/scripts/scalability/replacenodes.py","file_name":"replacenodes.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"61"} +{"seq_id":"73982624194","text":"\"\"\"\nddpg_simple_type1\n- it follows DDPG_updated2.py from 'https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow'\n- However, Batch Normalization cannot be applied.\n\"\"\"\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\ntf.set_random_seed(1)\nimport numpy as np\n\n\nclass DDPG(object):\n def __init__(self, sess, nb_observations, nb_actions, actor_lr, critic_lr, tau, gamma, batch_size, memory_size,\n scale_obs, scale_obs_by_const, scale_reward, ddpg_simple_scaler, hl_a, hl_c, var):\n # given attributes\n self.sess = sess\n self.nb_observations = nb_observations\n self.nb_actions = nb_actions\n self.actor_lr = actor_lr\n self.critic_lr = critic_lr\n self.tau = tau\n self.gamma = gamma\n self.batch_size = batch_size\n self.memory_size = memory_size\n self.scale_obs = scale_obs\n self.scale_obs_by_const = scale_obs_by_const\n self.scale_reward = scale_reward\n self.ddpg_simple_scaler = ddpg_simple_scaler\n self.var = var\n self.hl_a = hl_a\n self.hl_c = hl_c\n\n # fixed attributes\n self.training_bn = tf.placeholder(tf.bool)\n\n # memory\n self.memory = np.zeros((self.memory_size, self.nb_observations * 2 + self.nb_actions + 1), dtype=np.float32)\n self.pointer = 0\n self.len_buffer = 0\n self.n = 0\n\n self.S = tf.placeholder(tf.float32, [None, self.nb_observations], 's')\n self.S_ = tf.placeholder(tf.float32, [None, self.nb_observations], 's_')\n self.R = tf.placeholder(tf.float32, [None, 1], 'r')\n\n self.a = self._build_a(self.S,)\n self.q = self._build_c(self.S, self.a, )\n a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Actor')\n c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Critic')\n ema = tf.train.ExponentialMovingAverage(decay=1 - self.tau) # soft replacement\n\n def ema_getter(getter, name, *args, **kwargs):\n return ema.average(getter(name, *args, **kwargs))\n\n target_update = [ema.apply(a_params), ema.apply(c_params)] # soft update operation\n a_ = self._build_a(self.S_, reuse=True, custom_getter=ema_getter) # replaced 
target parameters\n        q_ = self._build_c(self.S_, a_, reuse=True, custom_getter=ema_getter)\n\n        self.a_loss = - tf.reduce_mean(self.q)  # maximize the q\n        self.atrain = tf.train.AdamOptimizer(self.actor_lr).minimize(self.a_loss, var_list=a_params)\n\n        with tf.control_dependencies(target_update):    # soft replacement happened at here\n            q_target = self.R + self.gamma * q_\n            self.td_error = tf.losses.mean_squared_error(labels=q_target, predictions=self.q)\n            #self.td_error = tf.losses.huber_loss(labels=q_target, predictions=self.q)\n            self.ctrain = tf.train.AdamOptimizer(self.critic_lr).minimize(self.td_error, var_list=c_params)\n\n        self.sess.run(tf.global_variables_initializer())\n\n    def choose_action(self, s, apply_noise):\n        if self.scale_obs:\n            s = (s - self.ddpg_simple_scaler.mu_obs) / self.ddpg_simple_scaler.sigma_obs\n        elif self.scale_obs_by_const[0]:\n            s = s / self.scale_obs_by_const[1]\n\n        a = self.sess.run(self.a, {self.S: s[np.newaxis, :], self.training_bn: False})[0]\n        q_val = self.sess.run(self.q, {self.S: s[np.newaxis, :], self.a: a[np.newaxis, :],\n                                       self.training_bn: False})[0][0]\n\n        if apply_noise:\n            a = np.clip(np.random.normal(a, self.var), 0, 1)\n\n        return a, q_val\n\n    def learn(self):\n        # sample only from slots that have actually been filled so far\n        indices = np.random.choice(min(self.len_buffer, self.memory_size), size=self.batch_size)\n        bt = self.memory[indices, :]\n        bs = bt[:, :self.nb_observations]\n        ba = bt[:, self.nb_observations: self.nb_observations + self.nb_actions]\n        br = bt[:, -self.nb_observations - 1: -self.nb_observations]\n        bs_ = bt[:, -self.nb_observations:]\n\n        # scale\n        if self.scale_obs:\n            bs = (bs - self.ddpg_simple_scaler.mu_obs) / self.ddpg_simple_scaler.sigma_obs\n            bs_ = (bs_ - self.ddpg_simple_scaler.mu_obs) / self.ddpg_simple_scaler.sigma_obs\n        elif self.scale_obs_by_const[0]:\n            bs = bs / self.scale_obs_by_const[1]\n            bs_ = bs_ / self.scale_obs_by_const[1]\n\n        if self.scale_reward:\n            br = (br - self.ddpg_simple_scaler.mu_r) / self.ddpg_simple_scaler.sigma_r\n\n        _, loss_actor = self.sess.run([self.atrain, self.a_loss], {self.S: bs, self.training_bn: True})\n        _, loss_critic = self.sess.run([self.ctrain, self.td_error], {self.S: bs, self.a: ba, self.R: br, self.S_: bs_,\n                                                                      self.training_bn: True})\n        return loss_actor, loss_critic\n\n    def store_transition(self, s, a, r, s_, update_mu_sigma):\n        if update_mu_sigma:\n            self.ddpg_simple_scaler.update_mu_sigma_obs(self.len_buffer+1, s)\n            self.ddpg_simple_scaler.update_mu_sigma_r(self.len_buffer+1, r)\n\n        transition = np.hstack((s, a, [r], s_))\n        index = self.pointer % self.memory_size  # replace the old memory with new memory\n        self.memory[index, :] = transition\n        self.pointer += 1\n        self.len_buffer = self.len_buffer + 1 if self.pointer < self.memory_size else self.memory_size\n
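A hedged training-loop sketch for the DDPG class defined here; the observation size, hyperparameters, and the stubbed environment step are illustrative assumptions, not taken from the repository:

sess = tf.Session()
agent = DDPG(sess, nb_observations=3, nb_actions=1, actor_lr=1e-4, critic_lr=1e-3,
             tau=0.01, gamma=0.99, batch_size=32, memory_size=10000,
             scale_obs=False, scale_obs_by_const=(False, 1.0), scale_reward=False,
             ddpg_simple_scaler=None, hl_a=64, hl_c=64, var=0.1)
s = np.zeros(3)
for step in range(1000):
    a, _ = agent.choose_action(s, apply_noise=True)
    s_, r = np.zeros(3), 0.0                  # a real environment step would go here
    agent.store_transition(s, a, r, s_, update_mu_sigma=False)
    if agent.len_buffer >= agent.batch_size:  # wait until the buffer can fill a batch
        agent.learn()
    s = s_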
trainable=trainable)\n b1 = tf.get_variable('b1', [1, self.hl_c], trainable=trainable)\n net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)\n net = tf.layers.dense(net, self.hl_c, activation=tf.nn.relu, trainable=trainable)\n return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)\n","repo_name":"danelee2601/RL-based-adaptive-PID-for-DPS","sub_path":"DP/baselines/ddpg/ddpg_simple.py","file_name":"ddpg_simple.py","file_ext":"py","file_size_in_byte":6659,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"32610446566","text":"from django import forms\nfrom .models import tickersymbol\n\nclass searchform(forms.ModelForm):\n class Meta:\n model = tickersymbol\n fields = ('ticker',)\n def __init__(self, *args, **kwargs):\n super(searchform, self).__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class': 'form-control'\n })\n","repo_name":"khanidrees7972/TrackStocks","sub_path":"search/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35497825385","text":"import json, re\n\nclass Parser:\n\n \"\"\"Représente le Parser, c'est à dire le système qui analyse le message envoyé à l'utilisateur, en extrait les mots clés et les reconnait ceux qui font \"reagir\" GrandPy\"\"\"\n\n def __init__(self) -> None:\n self.HELLO = r\"^b(on)?j(ou)?r$|^slt$|^salut(ations?)?$|^yo$|^hi$|^👋$\"\n self.OC = r\"^o(pen)?c(las{1,2}rooms?)?$\"\n self.ADDRESS = r\"^ad{1,2}res{1,2}e?$\"\n self.KNOW = r\"^con{1,2}ai(tre|[ts]?|sai[ts]?)(-tu)?$\"\n self.HOW = r\"^com{1,2}ent$\"\n self.GO = r\"^vas?(-tu)?$|^al{1,2}ez(-vous)?$\"\n self.AT = r\"^[àa]$\" # [aA] = dangereux\n self.QUESTION = r\"^\\?{1,3}\\!{0,2}$\"\n self.WHAT = r\"^quel(le)?$\"\n self.TIME = r\"^heure$\"\n self.WEATHER = r\"^temps$\"\n self.PLAY = r\"^jou(ons|e[zr])$\"\n self.HEADS = r\"^pile$\"\n self.TAILS = r\"^face$\"\n self.INFO = r\"^info(rmation)?s?$\"\n self.WEBSITE = r\"^(site|app)(\\s(web|internet))?$\"\n self.PLACEOFINTEREST = r\"ad{1,2}res{1,2}e? 
((\\w+\\s?){1,8})(\\?|$)\"\n\n @property\n def __stopwords(self) -> list:\n\n \"\"\"Renvoie une liste python de stopwords (minuscules).\"\"\"\n\n stopwords_filepath = \"app/ressources/stopwords.js\"\n\n with open(stopwords_filepath) as stopwords_file:\n stopwords_list = json.load(stopwords_file)\n\n return stopwords_list\n \n def __remove_punctuation(self, user_input: str) -> str:\n\n \"\"\"Retire la ponctuation et les whitespaces en trop de l'input utilisateur (str) \n et renvoie un str de cet input\"\"\"\n\n gp1 = [\"\\'\", \"\\\"\"] #\"-\",\n gp2 = [\"!?\", \"!\", \"?\", \"?!\"] #\"-\", \n gp3 = [\",\", \".\", \";\", \":\", \"[\", \"]\", \"(\", \")\", \"{\", \"}\", \">\", \"<\"]\n\n for punctuation in gp1 + gp2 + gp3:\n\n if punctuation in gp1: \n user_input = user_input.replace(punctuation, f\" \")\n elif punctuation in gp2: \n user_input = user_input.replace(punctuation, f\" {punctuation}\")\n elif punctuation in gp3:\n user_input = user_input.replace(punctuation, \"\")\n\n return re.sub(r\"\\s+\", \" \", user_input).strip()\n \n def __extract_keywords_from_user_input(self, user_input: str, multiline = True) -> str:\n \n \"\"\"Retire les stopwords et les mots répétés de l'input utilisateur sans ponctuation \n et renvoie une chaine de charactère contenant les mots \"clés\" restants (minuscules uniquement)\"\"\"\n\n words_in_user_input = self.__remove_punctuation(user_input).split()\n sep = \"\\n\" if multiline else \" \"\n keywords = []\n\n for word in words_in_user_input:\n word = word.lower()\n if word not in self.__stopwords and word not in keywords:\n keywords.append(word) \n\n\n return f\"{sep}\".join(keywords) #Renvoyer Keywords in array à la place. Pourquoi ? Cela permet d'utiliser la liste des \n # keywords de façon positionnelle, pour pouvoir repérer l'adresse du lieu recherché qui devrait se trouver\n # à une certaine position.\n\n def __find_matches_from_keywords(self, keywords: str) -> list:\n\n \"\"\" Analyse la chaine de keywords à la recherche de patterns qui font réagir grandpy\n et retourne la liste des keywords déclencheurs \"\"\"\n\n matches = [] \n patterns_combi = [\n (self.HELLO, \"hello\"), (self.PLAY, \"play\"), (self.HEADS, \"heads\"),\n (self.TAILS, \"tails\"), (self.OC, \"oc\"), (self.KNOW, \"know\"), \n (self.ADDRESS, \"address\"), (self.HOW, \"how\"), (self.AT, \"at\"), \n (self.GO, \"go\"), (self.QUESTION, \"question\"), (self.WHAT, \"what\"),\n (self.TIME, \"time\"), (self.WEATHER, \"weather\"),\n (self.INFO, \"info\"), (self.WEBSITE, \"website\")\n ]\n \n for pattern, equiv in patterns_combi:\n\n if re.search(pattern, keywords, re.I|re.M):\n matches += [equiv]\n\n return matches\n\n def extract_place_from_user_message(self, user_input: str) -> str:\n\n \"\"\" Analyse la chaine utilisateur et renvoie l'adresse qui y figure. L'implémentation actuelle \n nécessite que l'utilisateur utilise un '?' pour terminer la sous chaine qui constitue l'adresse. 
\"\"\"\n\n keywords = self.__extract_keywords_from_user_input(user_input, False)\n extract_poi = re.search(self.PLACEOFINTEREST, keywords)\n\n return extract_poi.group(1).strip()\n\n def find_matches(self, user_input: str) -> list: \n\n \"\"\" Extraie les mots-clés de la chaine utilisateur et renvoie les codes des mots-clés \n reconnus par le parser \"\"\"\n\n keywords_str = self.__extract_keywords_from_user_input(user_input)\n matches = self.__find_matches_from_keywords(keywords_str)\n\n return matches\n","repo_name":"Ludophilia/P7v2","sub_path":"app/grandpy/skills/parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44403662121","text":"\"\"\"\r\nAll Environment variables\r\n\"\"\"\r\nimport click\r\nfrom questionary import Separator\r\nfrom questionary import Style\r\n\r\nNOT_VALID_JWT = click.style(\"Enter a valid JWT!!!\", fg=\"red\", bold=True)\r\nCHECK_DOCS = click.style(\r\n \"Check Docs!!Your jwt have not a HS alg.\",\r\n fg=\"red\",\r\n bold=True,\r\n)\r\nNOT_CRAKED = click.style(\"JWT not cracked sorry. :'(\", fg=\"red\", bold=False)\r\nCRACKED = click.style(\"JWT cracked, key is: \", fg=\"green\", bold=False)\r\nVALID_PAYLOAD = click.style(\r\n \"Enter a Valid payload, Format: username=admin\",\r\n fg=\"red\",\r\n bold=False,\r\n)\r\nVALID_COOKIES = click.style(\r\n \"Enter a Valid cookie, Format: username=admin\",\r\n fg=\"red\",\r\n bold=False,\r\n)\r\nVALID_DATA = click.style(\r\n \"Enter a Valid data, Format: username=admin\",\r\n fg=\"red\",\r\n bold=False,\r\n)\r\nVALID_HEADER = click.style(\r\n \"Enter a Valid header, Format: username=admin\",\r\n fg=\"red\",\r\n bold=False,\r\n)\r\nVALID_PAYLOAD_JSON = click.style(\r\n \"Not a valid format for payload, send a json.\",\r\n fg=\"red\",\r\n bold=False,\r\n)\r\nNEW_JWT = click.style(\"new JWT: \", fg=\"green\", bold=True)\r\nVALID_SIGNATURE = click.style(\"Valid Signature!!\", fg=\"green\", bold=True)\r\nINVALID_SIGNATURE = click.style(\"Incorrect signature!!\", fg=\"red\", bold=True)\r\nCLIPBOARD = click.style(\"New jwt Copied to clipboard\", fg=\"blue\", bold=False)\r\n# User interface\r\ncustom_style_fancy = Style(\r\n [\r\n (\"qmark\", \"fg:#673ab7 bold\"), # token in front of the question\r\n (\"question\", \"bold\"), # question text\r\n (\r\n \"answer\",\r\n \"fg:#f44336 bold\",\r\n ), # submitted answer text behind the question\r\n (\r\n \"pointer\",\r\n \"fg:#673ab7 bold\",\r\n ), # pointer used in select and checkbox prompts\r\n (\r\n \"highlighted\",\r\n \"fg:#673ab7 bold\",\r\n ), # pointed-at choice in select and checkbox prompts\r\n (\"selected\", \"fg:#cc5454\"), # style for a selected item of a checkbox\r\n (\"separator\", \"fg:#cc5454\"), # separator in lists\r\n (\r\n \"instruction\",\r\n \"\",\r\n ), # user instructions for select, rawselect, checkbox\r\n (\"text\", \"\"), # plain text\r\n (\r\n \"disabled\",\r\n \"fg:#858585 italic\",\r\n ), # disabled choices for select and checkbox prompts\r\n ],\r\n)\r\n# Summary\r\nMAIN_SUMMARY_QUESTION = \"What do you want to do?\"\r\nMAIN_SUMMARY_CHOICES_MODIFY = \"Modify your jwt\"\r\nMAIN_SUMMARY_CHOICES_NONE_ALG = \"Check None algorithm\"\r\nMAIN_SUMMARY_CHOICES_RSA_CONFUSION = \"Check Rsa/Hmac confusion\"\r\nMAIN_SUMMARY_CHOICES_BRUTE_FORCE = (\r\n \"Brute-force your jwt to guess key(wordlist needed)\"\r\n)\r\nMAIN_SUMMARY_CHOICES_SIGN = \"Sign your jwt\"\r\nMAIN_SUMMARY_CHOICES_VERIFY = \"Verify your 
key\"\r\nMAIN_SUMMARY_CHOICES_KID = \"Kid injection\"\r\nMAIN_SUMMARY_CHOICES_JKU = \"Jku bypass\"\r\nMAIN_SUMMARY_CHOICES_X5U = \"X5u bypass\"\r\nMAIN_SUMMARY_CHOICES_QUIT = \"Quit\"\r\n\r\n# Separator\r\nSEPARATOR_MODIFY_JWT = Separator(\r\n \"--------------------------- Modify your jwt ---------------------------\",\r\n)\r\nSEPARATOR_VULNERABILITIES = Separator(\r\n \"---------------------------- Vulnerabilities ----------------------------\",\r\n)\r\nSEPARATOR_GUESS_KEY = Separator(\r\n \"------------------------------ Guess key ------------------------------\",\r\n)\r\nSEPARATOR_ADVANCED = Separator(\r\n \"------------------------------- Advanced -------------------------------\",\r\n)\r\nSEPARATOR_QUIT = Separator(\r\n \"--------------------------------- Quit ---------------------------------\",\r\n)\r\n\r\n# choices\r\n\r\nMAIN_SUMMARY_CHOICES = [\r\n SEPARATOR_MODIFY_JWT,\r\n MAIN_SUMMARY_CHOICES_MODIFY,\r\n # vulnerability\r\n SEPARATOR_VULNERABILITIES,\r\n MAIN_SUMMARY_CHOICES_NONE_ALG,\r\n MAIN_SUMMARY_CHOICES_RSA_CONFUSION,\r\n # guess key\r\n SEPARATOR_GUESS_KEY,\r\n MAIN_SUMMARY_CHOICES_BRUTE_FORCE,\r\n MAIN_SUMMARY_CHOICES_SIGN,\r\n MAIN_SUMMARY_CHOICES_VERIFY,\r\n # advanced\r\n SEPARATOR_ADVANCED,\r\n MAIN_SUMMARY_CHOICES_KID,\r\n MAIN_SUMMARY_CHOICES_JKU,\r\n MAIN_SUMMARY_CHOICES_X5U,\r\n SEPARATOR_QUIT,\r\n MAIN_SUMMARY_CHOICES_QUIT,\r\n]\r\nMAIN_SUMMARY_PROMPT_PEM = \"Please enter your public key (.pem)\"\r\nMAIN_SUMMARY_PROMPT_WORDLIST = \"Please enter your wordlist (.txt)\"\r\nMAIN_SUMMARY_PROMPT_KEY = \"Please enter your key\"\r\nMAIN_SUMMARY_PROMPT_INJECTION = \"Please enter your injection\"\r\nMAIN_SUMMARY_PROMPT_JWKS = \"Url of your jwks is stored (your external ip)\"\r\n# modify_summary\r\nMODIFY_SUMMARY_QUESTION = \"What do you want to do?\"\r\n\r\nSEPARATOR_HEADER = Separator(\r\n \"---------------------------- Header ----------------------------\",\r\n)\r\nSEPARATOR_PAYLOAD = Separator(\r\n \"---------------------------- Payload ----------------------------\",\r\n)\r\nMODIFY_SUMMARY_CHOICES_ADD_HEADER = \"add header value\"\r\nMODIFY_SUMMARY_CHOICES_ADD_PAYLOAD = \"add payload value\"\r\nMODIFY_SUMMARY_CHOICES_RETURN = \"Return\"\r\n\r\nMODIFY_SUMMARY_PROMPT_VALUE = \"Please enter a value\"\r\nMODIFY_SUMMARY_PROMPT_KEY = \"Please enter a key\"\r\n","repo_name":"winterwolf32/JWT-","sub_path":"myjwt/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11776549968","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom scipy.stats import multivariate_normal\n\nPLOT_COLORS = ['red', 'green', 'blue', 'orange'] # Colors for your plots\nK = 4 # Number of Gaussians in the mixture model\nNUM_TRIALS = 3 # Number of trials to run (can be adjusted for debugging)\nUNLABELED = -1 # Cluster label for unlabeled data points (do not change)\n\n\ndef main(is_semi_supervised, trial_num):\n \"\"\"Problem 3: EM for Gaussian Mixture Models (unsupervised and semi-supervised)\"\"\"\n print('Running {} EM algorithm...'\n .format('semi-supervised' if is_semi_supervised else 'unsupervised'))\n\n # Load dataset\n train_path = os.path.join('.', 'train.csv')\n x_all, z_all = load_gmm_dataset(train_path)\n\n # Split into labeled and unlabeled examples\n labeled_idxs = (z_all != UNLABELED).squeeze()\n x_tilde = x_all[labeled_idxs, :] # Labeled examples\n z_tilde = z_all[labeled_idxs, :] # Corresponding labels\n x = 
x_all[~labeled_idxs, :] # Unlabeled examples\n\n # *** START CODE HERE ***\n # (1) Initialize mu and sigma by splitting the n_examples data points uniformly at random\n # into K groups, then calculating the sample mean and covariance for each group\n np.random.shuffle(x)\n result = np.array_split(x, K)\n result = np.stack(result, axis=0) #(4, 245, 2)\n mu = np.mean(result, axis=1) #(4, 2)\n sigma = []\n for part in result:\n sigma.append(np.cov(part.T))\n sigma = np.array(sigma) #(4, 2, 2)\n # (2) Initialize phi to place equal probability on each Gaussian\n # phi should be a numpy array of shape (K,)\n phi = np.full((K,), 1/K) # (4,)\n # (3) Initialize the w values to place equal probability on each Gaussian\n # w should be a numpy array of shape (m, K)\n m = x.shape[0]\n w = np.full((m, K), 1/K) # (980, 4)\n n = x.shape[0]\n # *** END CODE HERE ***\n\n if is_semi_supervised:\n w = run_semi_supervised_em(x, x_tilde, z_tilde, w, phi, mu, sigma)\n else:\n w = run_em(x, w, phi, mu, sigma)\n\n # Plot your predictions\n z_pred = np.zeros(n)\n if w is not None: # Just a placeholder for the starter code\n for i in range(n):\n z_pred[i] = np.argmax(w[i])\n\n plot_gmm_preds(x, z_pred, is_semi_supervised, plot_id=trial_num)\n\n\ndef run_em(x, w, phi, mu, sigma):\n \"\"\"Problem 3(d): EM Algorithm (unsupervised).\n\n See inline comments for instructions.\n\n Args:\n x: Design matrix of shape (n_examples, dim).\n w: Initial weight matrix of shape (n_examples, k).\n phi: Initial mixture prior, of shape (k,).\n mu: Initial cluster means, list of k arrays of shape (dim,).\n sigma: Initial cluster covariances, list of k arrays of shape (dim, dim).\n\n Returns:\n Updated weight matrix of shape (n_examples, k) resulting from EM algorithm.\n More specifically, w[i, j] should contain the probability of\n example x^(i) belonging to the j-th Gaussian in the mixture.\n \"\"\"\n # No need to change any of these parameters\n eps = 1e-3 # Convergence threshold\n max_iter = 1000\n\n # Stop when the absolute change in log-likelihood is < eps\n # See below for explanation of the convergence criterion\n it = 0\n ll = prev_ll = None\n while it < max_iter and (prev_ll is None or np.abs(ll - prev_ll) >= eps):\n it = it +1\n pass # Just a placeholder for the starter code\n # *** START CODE HERE\n # (1) E-step: Update your estimates in w\n likelihood = []\n for j in range(K):\n likelihood.append(multivariate_normal.pdf(x, mean=mu[j], cov=sigma[j]))\n likelihood = np.array(likelihood).T # (980, 4)\n numerator = likelihood * phi # (980, 4)\n denominator = numerator.sum(axis=1)[:, np.newaxis] # (980, 1)\n w = numerator / denominator # (980, 4)\n\n # (2) M-step: Update the model parameters phi, mu, and sigma\n for j in range(K):\n phi[j] = w[:, j].mean()\n w_sum_j = w[:, j].sum()\n mu[j] = np.sum(w[:, j].reshape(x.shape[0], 1)*x, axis=0) / w_sum_j # (980, 4) (980, 2)\n\n diff = (x - mu[j]).T\n top = (w[:, j] * diff) @ diff.T\n sigma[j] = top/w_sum_j\n\n # (3) Compute the log-likelihood of the data to check for convergence.\n # By log-likelihood, we mean `ll = sum_x[log(sum_z[p(x|z) * p(z)])]`.\n # We define convergence by the first iteration where abs(ll - prev_ll) < eps.\n prev_ll = ll\n pdf = np.array([phi[j] * multivariate_normal.pdf(x, mean=mu[j], cov=sigma[j]) for j in range(K)])\n ll = np.sum(np.log(np.sum(pdf, axis=0)))\n print('unsup', it)\n\n # Hint: For debugging, recall part (a). 
We showed that ll should be monotonically increasing.\n # *** END CODE HERE ***\n return w\n\n\ndef run_semi_supervised_em(x, x_tilde, z_tilde, w, phi, mu, sigma):\n \"\"\"Problem 3(e): Semi-Supervised EM Algorithm.\n\n See inline comments for instructions.\n\n Args:\n x: Design matrix of unlabeled examples of shape (n_examples_unobs, dim).\n x_tilde: Design matrix of labeled examples of shape (n_examples_obs, dim).\n z_tilde: Array of labels of shape (n_examples_obs, 1).\n w: Initial weight matrix of shape (n_examples, k).\n phi: Initial mixture prior, of shape (k,).\n mu: Initial cluster means, list of k arrays of shape (dim,).\n sigma: Initial cluster covariances, list of k arrays of shape (dim, dim).\n\n Returns:\n Updated weight matrix of shape (n_examples, k) resulting from semi-supervised EM algorithm.\n More specifically, w[i, j] should contain the probability of\n example x^(i) belonging to the j-th Gaussian in the mixture.\n \"\"\"\n # No need to change any of these parameters\n alpha = 20. # Weight for the labeled examples\n eps = 1e-3 # Convergence threshold\n max_iter = 1000\n\n # Stop when the absolute change in log-likelihood is < eps\n # See below for explanation of the convergence criterion\n it = 0\n ll = prev_ll = None\n\n while it < max_iter and (prev_ll is None or np.abs(ll - prev_ll) >= eps):\n it = it +1\n pass # Just a placeholder for the starter code\n # *** START CODE HERE ***\n # (1) E-step: Update your estimates in w\n # labeled data\n likelihood = []\n for j in range(K):\n likelihood.append(multivariate_normal.pdf(x, mean=mu[j], cov=sigma[j]))\n likelihood = np.array(likelihood).T # (980, 4)\n numerator = likelihood * phi # (980, 4)\n denominator = numerator.sum(axis=1)[:, np.newaxis] # (980, 1)\n w = numerator / denominator # (980, 4)\n n_tilde = x_tilde.shape[0]\n # labeled data (w for l does not change)\n w_l = np.ones((n_tilde, K)) * (1 / K) # (20, 4)\n for i in range(n_tilde):\n for j in range(K):\n if j == z_tilde[i]:\n w_l[i, j] = 1\n else:\n w_l[i, j] = 0\n\n # (2) M-step: Update the model parameters phi, mu, and sigma\n n = x.shape[0]\n\n # M-step\n weight_all = np.concatenate((w_l, w)) # (1000, 4)\n weight_sum = np.sum(weight_all, axis=0) # (4,)\n for j in range(K):\n w_u_j = w[:, j] # (n_unlabel,)\n w_l_j = w_l[:, j] # (n_label,)\n total_weight = w_u_j.sum() + alpha * w_l_j.sum()\n mu[j] = (np.sum(w_u_j.reshape(-1, 1) * x, axis=0) + alpha * np.sum(w_l_j.reshape(-1, 1) * x_tilde,\n axis=0)) / total_weight\n phi[j] = total_weight / (n + alpha * n_tilde)\n sigma_u = np.dot((x - mu[j]).T, (x - mu[j]) * w_u_j[:, np.newaxis])\n sigma_l = np.dot((x_tilde - mu[j]).T, (x_tilde - mu[j]) * w_l_j[:, np.newaxis])\n sigma[j] = (sigma_u + alpha * sigma_l) / total_weight\n\n # (3) Compute the log-likelihood of the data to check for convergence.\n prev_ll = ll\n pdf = np.array([phi[j] * multivariate_normal.pdf(x, mean=mu[j], cov=sigma[j]) for j in range(K)])\n ll = np.sum(np.log(np.sum(pdf, axis=0)))\n print('semi', it)\n # Hint: Make sure to include alpha in your calculation of ll.\n # Hint: For debugging, recall part (a). 
We showed that ll should be monotonically increasing.\n # *** END CODE HERE ***\n\n return w\n\n\n# *** START CODE HERE ***\n# *** END CODE HERE ***\n\n\ndef plot_gmm_preds(x, z, with_supervision, plot_id):\n \"\"\"Plot GMM predictions on a 2D dataset `x` with labels `z`.\n\n Write to the output directory, including `plot_id`\n in the name, and appending 'ss' if the GMM had supervision.\n\n NOTE: You do not need to edit this function.\n \"\"\"\n plt.figure(figsize=(12, 8))\n plt.title('{} GMM Predictions'.format('Semi-supervised' if with_supervision else 'Unsupervised'))\n plt.xlabel('x_1')\n plt.ylabel('x_2')\n\n for x_1, x_2, z_ in zip(x[:, 0], x[:, 1], z):\n color = 'gray' if z_ < 0 else PLOT_COLORS[int(z_)]\n alpha = 0.25 if z_ < 0 else 0.75\n plt.scatter(x_1, x_2, marker='.', c=color, alpha=alpha)\n\n file_name = 'pred{}_{}.pdf'.format('_ss' if with_supervision else '', plot_id)\n save_path = os.path.join('.', file_name)\n plt.savefig(save_path)\n\n\ndef load_gmm_dataset(csv_path):\n \"\"\"Load dataset for Gaussian Mixture Model.\n\n Args:\n csv_path: Path to CSV file containing dataset.\n\n Returns:\n x: NumPy array shape (n_examples, dim)\n z: NumPy array shape (n_exampls, 1)\n\n NOTE: You do not need to edit this function.\n \"\"\"\n\n # Load headers\n with open(csv_path, 'r') as csv_fh:\n headers = csv_fh.readline().strip().split(',')\n\n # Load features and labels\n x_cols = [i for i in range(len(headers)) if headers[i].startswith('x')]\n z_cols = [i for i in range(len(headers)) if headers[i] == 'z']\n\n x = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=x_cols, dtype=float)\n z = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=z_cols, dtype=float)\n\n if z.ndim == 1:\n z = np.expand_dims(z, axis=-1)\n\n return x, z\n\n\nif __name__ == '__main__':\n np.random.seed(229)\n # Run NUM_TRIALS trials to see how different initializations\n # affect the final predictions with and without supervision\n for t in range(NUM_TRIALS):\n main(is_semi_supervised=False, trial_num=t)\n\n # *** START CODE HERE ***\n # Once you've implemented the semi-supervised version,\n # uncomment the following line.\n # You do not need to add any other lines in this code block.\n main(is_semi_supervised=True, trial_num=t)\n # *** END CODE HERE ***\n","repo_name":"xingzix/cs229hw","sub_path":"ps3/src/semi_supervised_em/gmm.py","file_name":"gmm.py","file_ext":"py","file_size_in_byte":10755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27913752304","text":"#import python dependencies\nimport numpy as np\n\n#import SQL Alchemy dependencies\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\n#Create engine and other set up tasks to use SQLAlchemy to access the sqlite database\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n#my resulting tables\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# import Flask\nfrom flask import Flask, jsonify\n\n# Create an app, being sure to pass __name__\napp = Flask(__name__)\n\n\n# Define what to do when a user hits the index route\n@app.route(\"/\")\ndef home():\n print(\"Server received request for 'Home' page...\")\n return (\n f\"Welcome to my Surf's up Homepage!
<br/>\"\n        f\"Available Routes:<br/>
\"\n        f\"Here's how to see the precipitation information: /api/v1.0/precipitation<br/>
\"\n        f\"Here's how to see the station information: /api/v1.0/stations<br/>
\"\n        f\"Here's how to see the temperature observations: /api/v1.0/tobs<br/>
\"\n        f\"Enter a start date in this format (yyyy-mm-dd) to get some summary statistics starting from that date: /api/v1.0/yyyy-mm-dd<br/>
\"\n        f\"Enter a start and end date in this format (yyyy-mm-dd) to get some summary statistics between those two dates: /api/v1.0/yyyy-mm-dd/yyyy-mm-dd<br/>
\"\n    )\n\n#Define what to do when a user hits the precipitation route\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n    # Create our session (link) from Python to the DB\n    session = Session(engine)\n\n    \"\"\"Return a list of all dates and prcp values\"\"\"\n    # Query all prcp\n    results = session.query(Measurement.date, Measurement.prcp).\\\n        filter(Measurement.date >= '2016-08-23').\\\n        order_by(Measurement.date).all()\n\n    session.close()\n\n    # Create a dictionary from the row data and append to a list of all_prcp\n    all_prcp = []\n    for date, prcp in results:\n        prcp_dict = {}\n        prcp_dict[date] = prcp\n        all_prcp.append(prcp_dict)\n\n\n    #return the result\n    return jsonify(all_prcp)\n\n#Define what to do when a user hits the stations route\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n    # Create our session (link) from Python to the DB\n    session = Session(engine)\n\n    \"\"\"Return a list of all stations\"\"\"\n    # Query all stations\n    stationlist = session.query(Station.station).all()\n\n    session.close()\n\n    # Create a dictionary from the row data and append to a list of stations\n    all_stations= []\n    for station in stationlist:\n        station_dict = {}\n        station_dict[\"Station\"] = station\n        all_stations.append(station_dict)\n\n    #return the result\n    return jsonify(all_stations)\n \n#Define what to do when a user hits the tobs route\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n    # Create our session (link) from Python to the DB\n    session = Session(engine)\n\n    \"\"\"Return a list of all dates and tobs values\"\"\"\n    #Query the data \n    query = '''\n    SELECT\n        date as Date,\n        tobs as Temp\n    FROM\n        measurement\n    WHERE\n        date >= '2016-08-23'\n    '''\n    ## EXECUTE MAPS TO WITH SESSION AND FETCHALL MAPS TO ALL\n    temp_data = engine.execute(query).fetchall()\n\n    session.close()\n\n    # Create a dictionary from the row data and append to a list of temp obs for the past year given the last data point has a date of 8/23/2017\n    all_temps= []\n    for date, tobs in temp_data:\n        temps_dict = {}\n        temps_dict[\"Date\"] = date\n        temps_dict[\"Tobs\"] = tobs\n        all_temps.append(temps_dict)\n\n    #return the result\n    return jsonify(all_temps)\n\n@app.route(\"/api/v1.0/<start>\")\ndef getstartdate(start):\n    # Create our session (link) from Python to the DB\n    session = Session(engine)\n\n    #Query the data \n    temps_start = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n        filter(Measurement.date >= start).all()\n\n    session.close()\n\n    # Create a dictionary from the row data and append to a list of all temps from a starting date.\n    all_tempsstart= []\n    for min, avg, max in temps_start:\n        temps_s_dict = {}\n        temps_s_dict[\"Min\"] = min\n        temps_s_dict[\"Avg\"] = avg\n        temps_s_dict[\"Max\"] = max\n        all_tempsstart.append(temps_s_dict)\n\n\n    #return the result\n    return jsonify(all_tempsstart)\n\n@app.route(\"/api/v1.0/<start>/<end>\")\ndef getstartend(start, end):\n    # Create our session (link) from Python to the DB\n    session = Session(engine)\n\n    \"\"\"Return min, avg, and max temperatures between the start and end dates\"\"\"\n    #Query the data \n    temps_start_end = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n        filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n\n    session.close()\n\n    # Create a dictionary from the row data and append to a list of all temp stats from a starting date and end date. 
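\n    # Note: temps_start_end holds a single (min, avg, max) row from the aggregate query; the loop names min and max shadow Python builtins.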
\n all_tempsstartend = []\n for min, avg, max in temps_start_end:\n temps_s_e_dict = {}\n temps_s_e_dict[\"Min\"] = min\n temps_s_e_dict[\"Avg\"] = avg\n temps_s_e_dict[\"Max\"] = max\n all_tempsstartend.append(temps_s_e_dict)\n\n #return the result\n return jsonify(all_tempsstartend)\n \n#ending for the app\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"tariere/sqlalchemy-challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23407415948","text":"# Find the first missing postive entry\n\ndef missing_positive(A):\n\tn = len(A)\n\n\t# Swap so that A[i-1] = i\n\tfor i in range(n):\n\t\twhile 1 <= A[i] <= n and A[i] != A[A[i]-1]:\n\t\t\tA[A[i]-1], A[i] = A[i], A[A[i]-1]\n\n\t# 2nd pass: Find the first position where the violation occurs\n\tfor i in range(n):\n\t\tif i != A[i]-1:\n\t\t\treturn i+1\n\nA = [3,5,4,-1,5,1,-1]\nprint(\"Given array: {}\".format(A))\nres = missing_positive(A)\nprint(\"1st missing positive number: {}\".format(res))\n","repo_name":"sheelabhadra/Elements-Programming-Interviews","sub_path":"Honors Class/first_missing_positive.py","file_name":"first_missing_positive.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44410623781","text":"from datetime import datetime\nimport json\nimport requests\n\n\nhead = '''# Top Fullstack Frameworks\nA list of popular github projects related to Fullstack web frameworks (ranked by stars automatically)\n\n'''\ntail = '\\n*Last Automatic Update: {}*'\n\nwarning = \"\\n⚠️ No longer maintained ⚠️\\n\\n\"\n\ndeprecated_repos = []\nrepos = list()\n\n\ndef main():\n access_token = get_access_token()\n\n with open('list.txt', 'r') as f:\n for url in f.readlines():\n url, name = url.strip().split(' ')\n if url.startswith('https://github.com/'):\n repo_api = 'https://api.github.com/repos/{}'.format(url[19:])\n\n r = requests.get(repo_api, headers={'Authorization': 'token {}'.format(access_token)})\n if r.status_code != 200:\n raise ValueError('Can not retrieve from {}'.format(url))\n repo = json.loads(r.content)\n if name != \"-\":\n repo[\"name\"] = name\n\n commit_api = 'https://api.github.com/repos/{}/commits/{}'.format(url[19:], repo['default_branch'])\n\n r = requests.get(commit_api, headers={'Authorization': 'token {}'.format(access_token)})\n if r.status_code != 200:\n raise ValueError('Can not retrieve from {}'.format(url))\n commit = json.loads(r.content)\n\n repo['last_commit_date'] = datetime.fromisoformat(commit['commit']['committer']['date'][:-1])\n repos.append(repo)\n\n repos.sort(key=lambda r: r['stargazers_count'], reverse=True)\n save_ranking(repos)\n\n\ndef get_access_token():\n with open('access_token.txt', 'r') as f:\n return f.read().strip()\n\n\ndef save_ranking(repos):\n with open('README.md', 'w') as f:\n f.write(head)\n for repo in (r for r in repos if not(is_deprecated(r))):\n f.write(repo_text(repo))\n f.write(warning)\n for repo in (r for r in repos if is_deprecated(r)):\n f.write(repo_text(repo))\n f.write(tail.format(datetime.now().strftime('%Y-%m-%dT%H:%M:%S%Z')))\n\n\ndef is_deprecated(repo):\n return repo[\"html_url\"] in deprecated_repos or (datetime.now() - repo['last_commit_date']).days > 365\n\n\ndef repo_text(repo):\n repo_user_and_name = '/'.join(repo['html_url'].split('/')[-2:])\n text = \"\"\n text += f\"- 
[{repo['name']}]({repo['html_url']}): {repo['description']} \\n\\n \"\n text += f\"[![GitHub stars](https://img.shields.io/github/stars/{repo_user_and_name}.svg?style=social)]({repo['html_url']}) \"\n text += f\"[![GitHub issues](https://img.shields.io/github/issues/{repo_user_and_name}.svg)]({repo['html_url']}/issues) \"\n text += f\"[![GitHub last commit](https://img.shields.io/github/last-commit/{repo_user_and_name})]({repo['html_url']}/commits) \"\n text += \"\\n\"\n return text\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"stradivari96/fullstack-framework-stars","sub_path":"list2md.py","file_name":"list2md.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35620314408","text":"from flask import Flask\nfrom flask.ext.rq2 import RQ\nfrom flask_restful import Api\nfrom flask_env_settings import Settings\n\n\napp = Flask(__name__)\napp.config.from_object(\"docsbox.settings\")\n\nSettings(app, rules={\n \"REDIS_JOB_TIMEOUT\": (int, 60 * 10),\n \"ORIGINAL_FILE_TTL\": (int, 60 * 10),\n \"RESULT_FILE_TTL\": (int, 60 * 60 * 24),\n\n \"LIBREOFFICE_PATH\": (str, \"/usr/lib/libreoffice/program/\"),\n\n \"THUMBNAILS_DPI\": (int, 90),\n \"THUMBNAILS_QUANTIZE\": (bool, False),\n \"THUMBNAILS_QUANTIZE_COLORS\": (int, 128),\n \"THUMBNAILS_QUANTIZE_COLORSPACE\": (str, \"rgb\"),\n})\n\napi = Api(app)\nrq = RQ(app)\n\nfrom docsbox.docs.views import DocumentView, DocumentCreateView\n \napi.add_resource(DocumentView, \"/api/v1/\")\napi.add_resource(DocumentCreateView, \"/api/v1/\")\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"dveselov/docsbox","sub_path":"docsbox/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"61"} +{"seq_id":"12013452760","text":"import constants as key\r\nfrom telegram.ext import *\r\nimport responses as R\r\nimport mysqlcon as my\r\n\r\nprint(\"bot started...\")\r\n\r\ndef start_commmand(update,context):\r\n update.message.reply_text('Type something to get started')\r\n\r\n\r\ndef handle_message(update,context):\r\n text=str(update.message.text).lower()\r\n response = my.mysqlcon(text)\r\n #response2 = sample_responses(input_text)\r\n update.message.reply_text(response)\r\n #update.message.reply_text(response2)\r\n\r\n\r\ndef error(update,context):\r\n print(f\"Update {update} caused error {context.error}\")\r\n\r\ndef main():\r\n updater= Updater(key.API_KEY,use_context=True)\r\n dp=updater.dispatcher\r\n\r\n dp.add_handler(CommandHandler(\"start\",start_commmand))\r\n\r\n dp.add_handler(MessageHandler(Filters.text, handle_message))\r\n\r\n dp.add_error_handler(error)\r\n\r\n updater.start_polling(5)\r\n updater.idle()\r\n\r\nmain()\r\n","repo_name":"saygunhan/remotesql","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"29021015477","text":"#==========================================================================================#\n# >>>>> ПОДКЛЮЧЕНИЕ БИБЛИОТЕК И МОДУЛЕЙ <<<<< #\n#==========================================================================================#\n\nfrom dublib.Methods import ReadJSON, WriteJSON\nfrom telebot import types\n\n\nimport os\n\n#==========================================================================================#\n# >>>>> СОЗДАНИЕ ПОЛЬЗОВАТЕЛЯ И 
ДАННЫХ ДЛЯ НЕГО <<<<< #\n#==========================================================================================#\n\n# Описательная структура пользователя.\nclass UserData:\n\n\t# Создаёт нового пользователя.\n\tdef __CreateUser(self):\n\t\t# Создание экземпляра пользователя.\n\t\tself.__User = {\n\t\t\t\"chat-id\": None,\n\t\t\t\"premium\": False\n\t\t}\n\n\t\t# Создание папки файлов пользователя.\n\t\tos.makedirs(\"Data/Files/\" + self.__UserID)\n\n\t\t# Создание папки архивов пользователя.\n\t\tos.makedirs(\"Data/Archives/\" + self.__UserID)\n\n\t\t# Сохранение файла пользователя.\n\t\tself.save()\n\n\t# Конструктор.\n\tdef __init__(self, UserID: str):\n\n\t\t#---> Генерация статических свойств.\n\t\t#==========================================================================================#\n\t\t# ID пользователя.\n\t\tself.__UserID = str(UserID)\n\t\t\n\t\t# Данные пользователя.\n\t\tself.__User = None\n\n\t\t#---> Инициализация пользователя.\n\t\t#==========================================================================================#\n\t\t# Список названий файлов в директории пользователя.\n\t\tFiles = list()\n\n\t\t# Получение списка файлов в директории.\n\t\tFiles = os.listdir(\"Data/Users\")\n\t\t\n\t\t# Фильтрация только файлов формата JSON.\n\t\tFiles = list(filter(lambda x: x.endswith(\".json\"), Files))\n\n\t\t# Если пользователя не существует.\n\t\tif self.__UserID + \".json\" not in Files:\n\t\t\tself.__CreateUser()\n\n\t\t# Иначе читаем данные пользователя.\n\t\telse:\n\t\t\tself.__User = ReadJSON(\"Data/Users/\" + self.__UserID + \".json\")\n\n\t# Возвращает ID пользователя.\n\tdef getUserID(self) -> str:\n\t\treturn self.__UserID\n\n\t# Сохраняет данные пользователя.\n\tdef save(self):\n\t\t# Сохранение файла пользователя.\n\t\tWriteJSON(\"Data/Users/\" + self.__UserID + \".json\", self.__User)","repo_name":"kostevich/TelegramMediaArchiverBot","sub_path":"Source/UserData.py","file_name":"UserData.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7234412337","text":"\nfrom django.urls import path\nfrom .views import inicio, search, detail, index\nfrom . 
import views\n\nurlpatterns = [\n\n path('login/',views.login2, name = 'login '),\n path('register/', views.register, name='register'),\n path('home/', views.inicio, name = \"home\"),\n path(\"search\", search, name=\"search\"),\n path(\"\", detail, name=\"detail\"),\n path(\"\", index, name=\"index\"),\n path('fav//', views.favourite_add, name='favourite_add'),\n path('favourite/', views.favourite_list, name='favourite_list'),\n path ('index/', views.user_logout, name= \"user_logout\"),\n\n]","repo_name":"Sarah13c/Cfood-project","sub_path":"cfood/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16091705327","text":"import numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\n\n# The SEIR model differential equations.\ndef Euler(SEIR_0, t, params):\n \"\"\"\n Computes the Derivatives by Semi Implicit Euler Method\n :return:\n \"\"\"\n\n N, alpha, beta, gamma, omega_i, omega_j = params\n\n # Vetor variaveis incognitas\n Si0, Sj0, Ei0, Ej0, Ii0, Ij0, Ri0, Rj0 = SEIR_0\n\n Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj = [Si0], [Sj0], [Ei0], [Ej0], [Ii0], [Ij0], [Ri0], [Rj0]\n\n dt = t[1] - t[0]\n for _ in t[1:]:\n dSidt = - beta * omega_i * Si[-1] * (Ii[-1] + Ij[-1]) / N\n dSjdt = - beta * omega_j * Sj[-1] * (Ii[-1] + Ij[-1]) / N\n dEidt = - dSidt - alpha * Ei[-1]\n dEjdt = - dSjdt - alpha * Ej[-1]\n dIidt = alpha * Ei[-1] - gamma * Ii[-1]\n dIjdt = alpha * Ej[-1] - gamma * Ij[-1]\n dRidt = gamma * Ii[-1]\n dRjdt = gamma * Ij[-1]\n next_Si = Si[-1] + dSidt * dt\n next_Sj = Sj[-1] + dSjdt * dt\n next_Ei = Ei[-1] + dEidt * dt\n next_Ej = Ej[-1] + dEjdt * dt\n next_Ii = Ii[-1] + dIidt * dt\n next_Ij = Ij[-1] + dIjdt * dt\n next_Ri = Ri[-1] + dRidt * dt\n next_Rj = Rj[-1] + dRjdt * dt\n Si.append(next_Si)\n Sj.append(next_Sj)\n Ei.append(next_Ei)\n Ej.append(next_Ej)\n Ii.append(next_Ii)\n Ij.append(next_Ij)\n Ri.append(next_Ri)\n Rj.append(next_Rj)\n\n return np.stack([Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj]).T\n\n\ndef HUM_analysis(SEIR, t, covid_parameters):\n \"\"\"\n Provides H (ward) U (ICU) M (deaths) variables, in a post-processment\n :return:\n \"\"\"\n Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj = SEIR\n\n cp = covid_parameters\n\n alpha = cp.alpha\n gamma = cp.gamma\n\n taxa_mortalidade_i = cp.mortality_rate_elderly\n taxa_mortalidade_j = cp.mortality_rate_young\n\n los_leito = cp.los_ward\n los_uti = cp.los_icu\n\n delay_leito = cp.delay_ward\n delay_uti = cp.delay_icu\n\n tax_int_i = cp.internation_rate_ward_elderly\n tax_int_j = cp.internation_rate_ward_young\n\n tax_uti_i = cp.internation_rate_icu_elderly\n tax_uti_j = cp.internation_rate_icu_young\n\n # Leitos normais demandados\n Hi0 = Ii[0] * tax_int_i\n Hj0 = Ij[0] * tax_int_j\n # Leitos UTIs demandados\n Ui0 = Ii[0] * tax_uti_i\n Uj0 = Ij[0] * tax_uti_j\n # Obitos\n Mi0 = Ri[0] * taxa_mortalidade_i\n Mj0 = Rj[0] * taxa_mortalidade_j\n\n # print(Ei[:10])\n\n Hi, Hj, Ui, Uj, Mi, Mj = [Hi0], [Hj0], [Ui0], [Uj0], [Mi0], [Mj0]\n # Ei, Ej, Ii, Ij = [Ei], [Ej], [Ii], [Ij]\n dt = t[1] - t[0]\n for i in t[1:]:\n # Leitos Normais demandados\n dHidt = tax_int_i * alpha * Ei[i-1] - Hi[i-1] / (los_leito + delay_leito)\n dHjdt = tax_int_j * alpha * Ej[i-1] - Hj[i-1] / (los_leito + delay_leito)\n # Leitos UTIs demandados\n dUidt = tax_uti_i * alpha * Ei[i-1] - Ui[i-1] / (los_uti + delay_uti)\n dUjdt = tax_uti_j * alpha * Ej[i-1] - Uj[i-1] / (los_uti + delay_uti)\n # Removidos\n dRidt = gamma * Ii[i-1]\n dRjdt = gamma * Ij[i-1]\n # 
Obitos\n dMidt = taxa_mortalidade_i * dRidt\n dMjdt = taxa_mortalidade_j * dRjdt\n next_Hi = Hi[i-1] + dHidt * dt\n next_Hj = Hj[i-1] + dHjdt * dt\n next_Ui = Ui[i-1] + dUidt * dt\n next_Uj = Uj[i-1] + dUjdt * dt\n next_Mi = dMidt * dt\n next_Mj = dMjdt * dt\n Hi.append(next_Hi)\n Hj.append(next_Hj)\n Ui.append(next_Ui)\n Uj.append(next_Uj)\n Mi.append(next_Mi)\n Mj.append(next_Mj)\n ret = np.stack([Hi, Hj, Ui, Uj, Mi, Mj]).T\n\n Hi, Hj, Ui, Uj, Mi, Mj = ret.T\n\n return Hi, Hj, Ui, Uj, Mi, Mj\n\n\ndef run_SEIR_ODE_model(demograph_parameters, covid_parameters, model_parameters) -> pd.DataFrame:\n \"\"\"\n Runs the simulation\n \"\"\"\n # from scipy.integrate import odeint\n\n dp = demograph_parameters\n cp = covid_parameters\n mp = model_parameters\n\n N = dp.population\n\n Ei0 = mp.init_exposed_elderly # Ee0\n Ej0 = mp.init_exposed_young # Ey0\n Ii0 = mp.init_infected_elderly # Ie0\n Ij0 = mp.init_infected_young # Iy0\n Ri0 = mp.init_removed_elderly # Re0\n Rj0 = mp.init_removed_young # Ry0\n t_max = mp.t_max\n\n # Variaveis apresentadas em base diaria\n # A grid of time points (in days)\n t = range(t_max)\n # dt = .1\n # t = np.linspace(0, t_max, int(t_max/dt) + 1)\n\n # CONDICOES INICIAIS\n # Suscetiveis\n Si0 = N * dp.population_rate_elderly - Ii0 - Ri0 - Ei0 # Suscetiveis idosos\n Sj0 = N * (1 - dp.population_rate_elderly) - Ij0 - Rj0 - Ej0 # Suscetiveis jovens\n\n # Initial conditions vector\n SEIR_0 = Si0, Sj0, Ei0, Ej0, Ii0, Ij0, Ri0, Rj0\n\n alpha = cp.alpha\n beta = cp.beta\n gamma = cp.gamma\n\n omega_i = mp.contact_reduction_elderly\n omega_j = mp.contact_reduction_young\n\n # PARAMETROS PARA CALCULAR DERIVADAS\n args = (N, alpha, beta, gamma, omega_i, omega_j)\n\n # Integrate the SIR equations over the time grid, t\n # ret = odeint(deriv, y0, t, args)\n # Integrate the SEIR equations over the time grid, t\n ret = Euler(SEIR_0, t, args)\n # Update the variables\n Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj = ret.T\n SEIR = Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj\n\n # POST PROCESS to obtain the hospital demand (ward and ICUs) and deaths\n HUM = HUM_analysis(SEIR, t, cp)\n\n Hi, Hj, Ui, Uj, Mi, Mj = HUM\n\n print(Hi[:10])\n print(max(Mi))\n print(max(Mj))\n\n # health_system_colapse_identifier(Hi, Hj, Ui, Uj, dp, mp)\n\n df = pd.DataFrame({'Si': Si, 'Sj': Sj, 'Ei': Ei, 'Ej': Ej, 'Ii': Ii, 'Ij': Ij, 'Ri': Ri, 'Rj': Rj,\n 'Hi': Hi, 'Hj': Hj, 'Ui': Ui, 'Uj': Uj, 'Mi': Mi, 'Mj': Mj}, index=t)\n\n return df\n\n\ndef health_system_colapse_identifier(Hi, Hj, Ui, Uj, dp, mp):\n \"\"\"\n Performs a post_processing analysis,\n forecast the date to a load of the health system for 30,50,80,100 %\n considers the inital date as today.\n \"\"\"\n H = Hi + Hj\n U = Ui + Uj\n\n capacidade_leitos = dp.bed_ward\n capacidade_UTIs = dp.bed_icu\n\n lotacao = mp.lotation\n\n t_max = mp.t_max\n\n # IDENTIFICADOR DE DIAS DE COLAPSOS\n # Dia em que colapsa o sistema de saude: 30, 50, 80, 100% capacidade\n dia_colapso_leitos_30 = np.min(np.where(H > capacidade_leitos*lotacao[0]))\n dia_colapso_leitos_50 = np.min(np.where(H > capacidade_leitos*lotacao[1]))\n dia_colapso_leitos_80 = np.min(np.where(H > capacidade_leitos*lotacao[2]))\n dia_colapso_leitos_100 = np.min(np.where(H > capacidade_leitos*lotacao[3]))\n dia_colapso_leitos = (dia_colapso_leitos_30, dia_colapso_leitos_50,\n dia_colapso_leitos_80, dia_colapso_leitos_100)\n print(dia_colapso_leitos)\n\n dia_colapso_UTIs_30 = np.min(np.where(U > capacidade_UTIs*lotacao[0]))\n dia_colapso_UTIs_50 = np.min(np.where(U > capacidade_UTIs*lotacao[1]))\n dia_colapso_UTIs_80 = 
np.min(np.where(U > capacidade_UTIs*lotacao[2]))\n dia_colapso_UTIs_100 = np.min(np.where(U > capacidade_UTIs*lotacao[3]))\n dia_colapso_UTIs = (dia_colapso_UTIs_30, dia_colapso_UTIs_50,\n dia_colapso_UTIs_80,dia_colapso_UTIs_100)\n print(dia_colapso_UTIs)\n\n # TimeSeries\n datelist = [d.strftime('%d/%m/%Y')\n for d in pd.date_range(datetime.today(), periods = t_max)]\n\n print('Dia em que colapsa o sistema de saude (leitos comuns): 30, 50, 80, 100% capacidade')\n\n print(datelist[dia_colapso_leitos[0]])\n print(datelist[dia_colapso_leitos[1]])\n print(datelist[dia_colapso_leitos[2]])\n print(datelist[dia_colapso_leitos[3]])\n\n print('Dia em que colapsa o sistema de saude (UTI): 30, 50, 80, 100% capacidade')\n\n print(datelist[dia_colapso_UTIs[0]])\n print(datelist[dia_colapso_UTIs[1]])\n print(datelist[dia_colapso_UTIs[2]])\n print(datelist[dia_colapso_UTIs[3]])\n","repo_name":"ralfcosta/vertical","sub_path":"functions/model_functions.py","file_name":"model_functions.py","file_ext":"py","file_size_in_byte":7807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"71818524993","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 28 10:38:29 2022\n\n@author: antoiovi\nhttps://tkdocs.com/tutorial/grid.html#resize\n\nModificare nella riga 120\n self.columnconfigure(0,weight=0)\n OPPURE\n self.columnconfigure(0,weight=1)\n \n se metto weight=1 il frame con il plot si espande e contrae\n se metto weigth=0 il frame con il plot viene nascosto dal frame adiacente\n\"\"\"\n\n\nimport tkinter as tk\nfrom tkinter import BOTH, ttk\nfrom turtle import width\n\n\nfrom matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg, NavigationToolbar2Tk)\n\nfrom matplotlib.figure import Figure\n\nimport numpy as np\n\nclass ReportNotes(tk.Frame):\n \n def set_message(self,message):\n self.text_box.config(state='normal')\n self.text_box.delete(\"1.0\", tk.END)\n self.text_box.insert(tk.END, message)\n self.text_box.config(state='disabled')\n\n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent, width=100, height=60,*args, **kwargs)\n \n self.text_box = tk.Text(self)\n ys = ttk.Scrollbar(self, orient = 'vertical', command = self.text_box.yview)\n xs = ttk.Scrollbar(self, orient = 'horizontal', command = self.text_box.xview)\n self.text_box['yscrollcommand'] = ys.set\n self.text_box['xscrollcommand'] = xs.set\n self.text_box.insert('end', \".......\\n...\\n...\")\n self.text_box.grid(column = 0, row = 0, sticky = 'nwes')\n xs.grid(column = 0, row = 1, sticky = 'we')\n ys.grid(column = 1, row = 0, sticky = 'ns')\n self.grid_columnconfigure(0, weight = 1)\n self.grid_rowconfigure(0, weight = 1)\n self.grid_propagate(False)\n \n \nclass FramePlot(tk.Frame):\n x=0\n def button1_click(self):\n self.x=self.x+1\n \n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent, *args, **kwargs,bg='red')\n container=tk.Frame(self)\n container.grid(column=0, row=0,sticky='news')\n button1=ttk.Button(self,text=\"Bottone 1\",command=self.button1_click)\n #button1.grid(column=1,row=0,sticky='we')\n self.columnconfigure(0,weight=2)\n fig = Figure(figsize=(4, 4))\n t = np.arange(0, 30, .1)\n ax = fig.add_subplot()\n line, = ax.plot(t, 2 * np.sin(2 * np.pi * t))\n ax.set_xlabel(\"time [s]\")\n ax.set_ylabel(\"f(t)\")\n \n xmin,xmax = ax.get_xlim()\n\n ymin,ymax = ax.get_ylim()\n print(\"xmin,xmax \",xmin,xmax)\n print(\"ymin,ymax \",ymin,ymax)\n ax_h=ax.bbox.height\n ax_w = ax.bbox.width\n 
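# Log the rendered Axes size in pixels (width and height of its bounding box).\n        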
print(\"Larghezza ed altezza del plot H, W \",ax_h,ax_w)\n\n '''\n CREA UN CANVAS CON DENTRO LA Figure\n '''\n canvas = FigureCanvasTkAgg(fig, master=container) # A tk.DrawingArea.\n # Render the Figure.\n canvas.draw()\n # AGGIUNGO IL TOOLBAR AL CANVAS\n # Funziona solo con pack\n toolbar = NavigationToolbar2Tk(canvas, container)\n toolbar.update()\n self.hbar=tk.Scrollbar(container,orient=tk.HORIZONTAL)\n self.vbar=tk.Scrollbar(container,orient=tk.VERTICAL)\n\n #canvas.get_tk_widget().config(bg='#FFFFFF',scrollregion=(0,0,ax_w+100,ax_h+100))\n canvas.get_tk_widget().config(bg='#FFFFFF',scrollregion=(0,0,ax_w+500,ax_h+100))\n # Queste righe cambiano la dimensione del grafico \n # Imposto la larghezza del plot piu un margine per essere sicuro che venga disegnato tutto\n canvas.get_tk_widget().config(width=ax_w+100,height=ax_h+100)\n #canvas.get_tk_widget().config(width=ax_w+400,height=ax_h+100)\n canvas.get_tk_widget().config(xscrollcommand=self.hbar.set, yscrollcommand=self.vbar.set)\n\n self.hbar.pack(side = tk.BOTTOM, fill=tk.X)\n self.hbar.config(command=canvas.get_tk_widget().xview)\n \n self.vbar.pack(side = tk.RIGHT, fill=tk.Y)\n self.vbar.config(command=canvas.get_tk_widget().yview)\n canvas.get_tk_widget().pack(side=tk.BOTTOM,fill=tk.BOTH,expand = True)\n \n\n\n \nclass MainApplication(tk.Frame):\n\n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent, *args, **kwargs)\n framePlot=FramePlot(self)\n framePlot.grid(column=0,row=0,sticky='news')\n report=ReportNotes(self)\n report.grid(column=1,row=0,sticky='news')\n # Se metto weight =1 a colonna 0, il frame con il plot si espande e contrae\n # contraendo e espandendo il grafico, quindi bisogna metter weight=0\n #self.columnconfigure(0,weight=0)\n self.columnconfigure(0,weight=0)\n self.columnconfigure(1,weight=1)\n \n\n\n\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n root.title(\"Scrollbar on plot\")\n content=tk.Frame(root,bg='green')\n #tk.Tk ha la proprieta geometry,; tk.Frame non c'e l'ha\n root.geometry('1000x400')\n # styck (NSEW) espande il content in tutta la root \n content.grid(column=0,row=0,sticky='NWES') \n # styck (NSEW) espande mainapplication nel content\n MainApplication(content).grid(column=0,row=0,sticky='WENS')#.pack(side=\"top\", fill=\"both\", expand=True)\n # Senza questa riga content non occupa tutto il frame root \n content.columnconfigure(0,weight=1)\n content.rowconfigure(0,weight=1)\n # Senza questa riga root non occupa tutto il frame della applicazione \n root.columnconfigure(0,weight=1)\n root.rowconfigure(0,weight=1)\n #root.pack( fill='x')\n root.mainloop()","repo_name":"antoiovi/Tkinter_reference","sub_path":"Matplotlib/Plot_streching_hiding.py","file_name":"Plot_streching_hiding.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22790612266","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 1 22:33:25 2020\n\nMateria: Programaci+on Avazada 'A'\n@author: José León Alarcón\n@author: Emerson Palacios Balderramo\n\n\"\"\"\nfrom random import randint\nfrom functools import reduce\nimport pandas as pd \nimport os \n\nclass TicTacToe:\n def __init__(self):\n self.tablero = [[\"\", \"\", \"\"],\n [\"\", \"\", \"\"],\n [\"\", \"\", \"\"]]\n \n \n def tableroLLeno(self):\n ####################################################################################\n ###################### USO DE FUNCIONES DE ORDEN SUPERIOR #########################\n 
####################################################################################\n lineasCasillas = map(lambda x: len(x), list(map(lambda x: list(filter(lambda j: j != '', x)), self.tablero)))\n total = reduce(lambda x,y: x+y,lineasCasillas)\n return True if total == 9 else False\n \n def cambiaTurno(self, turnoAnterior):\n if turnoAnterior == 0: #Jugó el ordenador\n return 1\n return 0\n \n def casillaOcupada(self, fil, col):\n if self.tablero[fil][col] != \"\":\n return True\n return False\n \n ####################################################################################\n ###################### IMPLEMENTACIÓN DE FUNCIÓN RECURSIVA #########################\n ####################################################################################\n def formateaTablero(self, i= 0, j = 0):\n if j == len(self.tablero):\n j = 0\n i +=1\n try:\n self.tablero[i][j] = ''\n except:\n return\n return self.formateaTablero(i, j+1)\n \n def actualizarEstadistica(self, jugador, estado):\n \n archivo = pd.read_csv('estadisticas.csv', sep=\",\")\n try:\n archivo.loc[archivo['Jugador'] == jugador, estado['Key']] = estado['valor']\n archivo.to_csv('estadisticas.csv', index=None)\n except KeyError:\n print(\"error\")\n pass\n \n def leerEstadisticas(self, jugador):\n archivo = pd.read_csv('estadisticas.csv', sep=',')\n ####################################################################################\n ###################### USO DE FUNCIONES DE ORDEN SUPERIOR #########################\n ####################################################################################\n jugadorExiste = len(list(filter(lambda x: x == jugador, archivo['Jugador'])))\n if jugadorExiste != 0:\n #si existe \n indice = list(archivo['Jugador']).index(jugador)\n return {\n 'Jugador': archivo['Jugador'][indice], \n 'Ganadas': archivo['Ganadas'][indice], \n 'Perdidas': archivo['Perdidas'][indice], \n 'Empate': archivo['Empate'][indice]}\n nuevoJugador = {\n 'Jugador': jugador,\n 'Ganadas': 0,\n 'Perdidas': 0, \n 'Empate': 0}\n nuevo = pd.DataFrame(columns=['Jugador', 'Ganadas', 'Perdidas', 'Empate'])\n nuevo = nuevo.append(nuevoJugador, ignore_index=True )\n nuevo.to_csv('estadisticas.csv', index=None, mode=\"a\", header=not os.path.isfile('estadisticas.csv'))\n return nuevoJugador\n \n \n def jugadaUsuario(self, fil, col, turno):\n if not self.casillaOcupada(fil, col):\n self.tablero[fil][col] = str(turno)\n \n if self.tableroLLeno():\n return -1\n return self.tablero\n elif self.tableroLLeno():\n return -1 #\n return None\n \n def alguienGano(self, turno):\n ####################################################################################\n ###################### USO DE FUNCIONES DE ORDEN SUPERIOR #########################\n ####################################################################################\n ganador = list(map(lambda x: list(filter(lambda j: j == turno, x)), self.tablero))\n cantidad = list(map(lambda x: len(x), ganador))\n try:\n cantidad.index(3)\n return True\n except:\n pass\n #evalua columnas \n for i in range(0, len(self.tablero)):\n filas = 0\n for j in range(0, len(self.tablero)):\n if self.tablero[j][i] == turno:\n filas += 1\n if filas == 3:\n return True\n #evalua diagonal principal\n diagonalp = 0\n for i in range(0, len(self.tablero)):\n if self.tablero[i][i] == turno:\n diagonalp+=1\n if diagonalp == 3:\n return True\n \n #evalua diagonal secuandaria\n diagonals = 0\n for i in range(0, len(self.tablero)):\n if self.tablero[i][len(self.tablero)-(i+1)] == turno:\n diagonals +=1\n if 
diagonals == 3:\n return True\n return False\n \n \n def intentaGanarPc(self, computador):\n \n #intenta ganar en fila\n for i in range(0, len(self.tablero)):\n fila = 0\n for j in range(0, len(self.tablero)):\n if self.tablero[i][j] == computador:\n fila += 1\n if fila == 2:\n #bloquea la fila \n for j in range(0, len(self.tablero)):\n if self.tablero[i][j] == '':\n self.tablero[i][j] = computador\n return i, j, computador\n \n #intenta bloquear columnas \n for i in range(0, len(self.tablero)):\n columna = 0\n for j in range(0, len(self.tablero)):\n if self.tablero[j][i] == computador:\n columna += 1\n if columna == 2:\n for j in range(0, len(self.tablero)):\n if self.tablero[j][i] == '':\n self.tablero[j][i] = computador\n return j, i, computador\n \n #intenta bloquear diagonales \n #diagonal principal\n diagonalp = 0\n for i in range(0, len(self.tablero)):\n if self.tablero[i][i] == computador:\n diagonalp += 1\n if diagonalp == 2:\n for i in range(0, len(self.tablero)):\n if self.tablero[i][i]== '':\n self.tablero[i][i] = computador\n return i, i, computador\n \n #diagonal secundaria \n diagonals = 0\n for i in range(0, len(self.tablero)):\n if self.tablero[i][len(self.tablero)-(i+1)] == computador:\n diagonals +=1\n if diagonals == 2:\n for i in range(0, len(self.tablero)):\n if self.tablero[i][len(self.tablero)-(i+1)] == '':\n self.tablero[i][len(self.tablero)-(i+1)] = computador \n return i, len(self.tablero)-(i+1), computador\n \n return None, None, computador\n \n ## FUNCIÓN DE PRIMERA CLASE ###\n def cambiarFicha(self, turno):\n return \"O\" if turno == \"X\" else \"X\"\n \n def jugadaComputador(self, turno):\n fil = None\n col = None\n \n #uso de función de primera clase\n ficha = self.cambiarFicha\n computador = ficha(turno)\n \n \n fil, col, computador = self.intentaGanarPc(computador)\n if fil != None and col != None:\n return fil, col, computador\n \n #intenta bloquear fila\n for i in range(0, len(self.tablero)):\n fila = 0\n for j in range(0, len(self.tablero)):\n if self.tablero[i][j] == turno:\n fila += 1\n if fila == 2:\n #bloquea la fila \n for j in range(0, len(self.tablero)):\n if self.tablero[i][j] == '':\n self.tablero[i][j] = computador\n return i, j, computador\n \n #intenta bloquear columnas \n for i in range(0, len(self.tablero)):\n columna = 0\n for j in range(0, len(self.tablero)):\n if self.tablero[j][i] == turno:\n columna += 1\n if columna == 2:\n for j in range(0, len(self.tablero)):\n if self.tablero[j][i] == '':\n self.tablero[j][i] = computador\n return j, i, computador\n \n #intenta bloquear diagonales \n #diagonal principal\n diagonalp = 0\n for i in range(0, len(self.tablero)):\n if self.tablero[i][i] == turno:\n diagonalp += 1\n if diagonalp == 2:\n for i in range(0, len(self.tablero)):\n if self.tablero[i][i]== '':\n self.tablero[i][i] = computador\n return i, i, computador\n \n #diagonal secundaria \n diagonals = 0\n for i in range(0, len(self.tablero)):\n if self.tablero[i][len(self.tablero)-(i+1)] == turno:\n diagonals +=1\n if diagonals == 2:\n for i in range(0, len(self.tablero)):\n if self.tablero[i][len(self.tablero)-(i+1)] == '':\n self.tablero[i][len(self.tablero)-(i+1)] = computador \n return i, len(self.tablero)-(i+1), computador\n #juega para random\n \n while True:\n fil = randint(0, 2)\n col = randint(0, 2)\n if not self.casillaOcupada(fil, col):\n self.tablero[fil][col] = computador \n return fil, col, computador\n \n \n \n \n \n\n\n \n \n \n \n \n\n \n 
","repo_name":"Nicromano/Tic-Tac-Toe","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":9860,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72193280835","text":"import tkinter as tk\nfrom tkinter import ttk\nimport sys\nimport json\n\nsys.path.append( '..' )\nfrom src.backend import backend\n\nclass CapturePrints:\n def __init__(self):\n self.contents = []\n\n def write(self, text):\n self.contents.append(text)\n\n def get_contents(self):\n return \"\".join(self.contents)\n\nclass GUI:\n def __init__(self, master):\n self.master = master\n master.title(\"政审小工具\") # 设置窗口标题\n master.geometry(\"640x480\") # 设置窗口大小\n \n # 创建 Notebook\n self.notebook = ttk.Notebook(master)\n self.notebook.pack(expand=True, fill=\"both\")\n\n # 第一个标签页\n self.tab1 = ttk.Frame(self.notebook)\n self.notebook.add(self.tab1, text=\"标准政审\")\n self.create_first_tab(self.tab1)\n\n # 第二个标签页\n self.tab2 = ttk.Frame(self.notebook)\n self.notebook.add(self.tab2, text=\"最近20把深度分析\")\n self.create_second_tab(self.tab2) \n \n \n def create_first_tab(self, master):\n \n # radiobutton\n self.choice_var = tk.StringVar(value=\"dropdown\") # 默认选择dropdown\n\n rb_dropdown = tk.Radiobutton(master, text=\"从下拉菜单选择\", variable=self.choice_var, value=\"dropdown\", command=self.toggle_input_method)\n rb_dropdown.grid(row=0, column=0, sticky='w', padx=10, pady=10)\n\n rb_manual = tk.Radiobutton(master, text=\"手动输入\", variable=self.choice_var, value=\"manual\", command=self.toggle_input_method)\n rb_manual.grid(row=0, column=1, sticky='w', padx=10, pady=10) \n \n # maunal input ID and name\n self.label_player_name_param = tk.Label(master, text=\"怎么称呼?\")\n self.label_player_name_param.grid(row=1, column=0, sticky='w', padx=10, pady=0) # pady doubled\n\n self.entry_player_name_param = tk.Entry(master)\n self.entry_player_name_param.grid(row=1, column=1, sticky='w', padx=10, pady=0) # pady doubled\n\n self.label_accout_ID_param = tk.Label(master, text=\"dota2ID\")\n self.label_accout_ID_param.grid(row=2, column=0, sticky='w', padx=10, pady=0) # pady doubled\n \n self.entry_accout_ID_param = tk.Entry(master)\n self.entry_accout_ID_param.grid(row=2, column=1, sticky='w', padx=10, pady=0) # pady doubled\n \n # drop down input\n self.label_logged_player_name = tk.Label(master, text=\"已登录玩家名\")\n self.label_logged_player_name.grid(row=3, column=0, sticky='w', padx=10, pady=10) \n\n json_path=backend.get_accountID_path()\n \n data = {}\n try:\n with open(json_path, 'r') as json_file:\n data = json.load(json_file)\n except (FileNotFoundError, json.JSONDecodeError): # 文件不存在或JSON解码失败\n pass\n\n self.logged_player_name_values = data # 使用json文件中的键作为下拉框的值\n self.logged_player_name_var = tk.StringVar(master)\n self.combo_logged_player_name = ttk.Combobox(master, textvariable=self.logged_player_name_var, values=list(self.logged_player_name_values.keys()))\n self.combo_logged_player_name.grid(row=3, column=1, sticky='w', padx=10, pady=10)\n\n\n self.label_limit_param = tk.Label(master, text=\"抽取多少把比赛?\")\n self.label_limit_param.grid(row=4, column=0, sticky='w', padx=10, pady=10) # pady doubled\n\n self.entry_limit_param = tk.Entry(master)\n self.entry_limit_param.grid(row=4, column=1, sticky='w', padx=10, pady=10) # pady doubled\n\n self.label_lobby_type_param = tk.Label(master, text=\"比赛类型是?\")\n self.label_lobby_type_param.grid(row=5, column=0, sticky='w', padx=10, pady=10) # pady doubled\n\n self.dropdown_values = {\n \"天梯\": 7,\n \"普通\": 0,\n \"全部\": -1\n 
}\n\n self.dropdown_var = tk.StringVar(master)\n self.dropdown_var.set(\"天梯\") # 设置默认选项\n self.dropdown_menu = ttk.Combobox(master, textvariable=self.dropdown_var, values=list(self.dropdown_values.keys()))\n self.dropdown_menu.grid(row=5, column=1, sticky='w', padx=10, pady=10) # pady doubled\n\n self.submit_button = tk.Button(master, text=\"一键政审!\", command=self.submit)\n self.submit_button.grid(row=6, column=1, sticky='w', padx=10, pady=20) # Increased pady for more spacing\n\n self.result_text = tk.Text(master, wrap=tk.WORD, width=40, height=10)\n self.result_text.grid(row=0, column=2, rowspan=8, padx=(10,20), pady=(5,20), sticky='nsew') # Added right and bottom padding\n\n # 设置默认值\n self.entry_limit_param.insert(0, \"1000\")\n\n # Adjust the grid weights to allow the text box to expand\n master.grid_rowconfigure(6, weight=1) # This will allow the last row to expand\n master.grid_columnconfigure(2, weight=1) # This will allow the third column to expand\n self.toggle_input_method() # 使初始配置生效\n \n def create_second_tab(self, master):\n pass \n \n \n def submit(self):\n limit_param = self.entry_limit_param.get()\n lobby_type_param = self.dropdown_values[self.dropdown_var.get()] \n \n choice = self.choice_var.get()\n if choice == \"dropdown\":\n player_name_param = self.logged_player_name_var.get()\n accout_ID_param = str(self.logged_player_name_values[player_name_param])\n else:\n player_name_param = self.entry_player_name_param.get()\n accout_ID_param = self.entry_accout_ID_param.get() \n \n \n capture_prints = CapturePrints()\n original_stdout = sys.stdout # 保存原始stdout\n sys.stdout = capture_prints # 重定向print输出\n\n # 调用后端逻辑\n self.call_backend_logic(player_name_param, accout_ID_param, limit_param, lobby_type_param)\n\n sys.stdout = original_stdout # 恢复原始stdout\n\n output = capture_prints.get_contents()\n \n # delete the first 4 line\n lines = output.split('\\n')\n del lines[0:4]\n output = '\\n'.join(lines)\n \n print(output)\n\n # 在Text小部件中显示捕获的输出\n self.result_text.delete(1.0, tk.END) # 清空Text小部件内容\n self.result_text.insert(tk.END, output) # 插入新文本\n \n def toggle_input_method(self):\n choice = self.choice_var.get()\n if choice == \"dropdown\":\n self.entry_player_name_param.config(state=tk.DISABLED)\n self.entry_accout_ID_param.config(state=tk.DISABLED)\n self.combo_logged_player_name.config(state=tk.NORMAL)\n else:\n self.entry_player_name_param.config(state=tk.NORMAL)\n self.entry_accout_ID_param.config(state=tk.NORMAL)\n self.combo_logged_player_name.config(state=tk.DISABLED) \n \n \n \n def call_backend_logic(self, player_name_param, accout_ID_param, limit_param, lobby_type_param):\n \"\"\"call the back end function.\n\n Args:\n player_name_param (_type_): _description_\n accout_ID_param (_type_): _description_\n limit_param (_type_): _description_\n lobby_type_param (_type_): _description_\n\n Returns:\n _type_: _description_\n \"\"\"\n result_text=backend.analyze_custom_input(player_name_param,accout_ID_param,limit_param,lobby_type_param)\n return result_text\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n gui = GUI(root)\n root.mainloop()\n","repo_name":"BCSZSZ/dota2_MMR_graph","sub_path":"src/GUI/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":7677,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"5546959127","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on 3 march 2022\r\n\r\n@auth: Marie-Anne Melis\r\n\"\"\"\r\n\r\nimport dash\r\nfrom dash.dependencies import Input, Output\r\nimport 
dash_daq as daq\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport dash_bootstrap_components as dbc\r\n\r\n\r\n\r\napp = dash.Dash(__name__)\r\n\r\napp = dash.Dash(external_stylesheets=[ dbc.themes.FLATLY])\r\n\r\napp.layout = dbc.Container([\r\n dbc.Row([dbc.Col([html.H1(id = 'H1', children = 'Dash, bootstrap and dependent Gauges')],xl=12,lg=12,md = 12,sm=12,xs = 12)],style = {'textAlign':'center', 'marginTop':30, 'marginBottom':30}),\r\n\r\n \r\n dbc.Row([ \r\n dbc.Col([\r\n \r\n daq.Gauge(\r\n id='my-gauge-1',\r\n color={\"gradient\":True,\"ranges\":{\"red\":[0,6],\"yellow\":[6,8],\"green\":[8,10]}},\r\n label=\"Select first value\",\r\n value=6\r\n ),\r\n dcc.Slider(\r\n id='my-gauge-slider-1',\r\n min=0,\r\n max=10,\r\n step=1,\r\n value=5\r\n )],xl=6,lg=6,md = 6,sm=12,xs = 12),\r\n dbc.Col([\r\n daq.Gauge(\r\n id='my-gauge-2',\r\n color={\"gradient\":True,\"ranges\":{\"red\":[0,6],\"yellow\":[6,8],\"green\":[8,10]}},\r\n label=\"Select second value\",\r\n value=6\r\n ),\r\n dcc.Slider(\r\n id='my-gauge-slider-2',\r\n min=0,\r\n max=10,\r\n step=1,\r\n value=5\r\n )],xl=6,lg=6,md = 6,sm=12,xs = 12),\r\n ]),\r\n dbc.Row([ \r\n dbc.Col([\r\n html.Br(),\r\n daq.Gauge(\r\n id='summary-gauge',\r\n color={\"gradient\":True,\"ranges\":{\"red\":[0,6],\"yellow\":[6,8],\"green\":[8,10]}},\r\n label='Average'\r\n )\r\n ],xl=12,lg=12,md = 12,sm=12,xs = 12),\r\n ])\r\n \r\n],fluid = False)\r\n\r\n\r\n\r\n@app.callback([Output(component_id= 'my-gauge-1', component_property = 'value'),\r\n Output(component_id='my-gauge-2', component_property='value') ],\r\n [Input('my-gauge-slider-1', 'value'),\r\n Input('my-gauge-slider-2', 'value')])\r\n\r\ndef update_singlegauges(slider1,slider2):\r\n return (slider1,slider2)\r\n\r\n@app.callback([Output(component_id= 'summary-gauge', component_property = 'value')],\r\n [Input('my-gauge-slider-1', 'value'),\r\n Input('my-gauge-slider-2', 'value')])\r\n\r\ndef update_summarygauge(slider1,slider2):\r\n value_out = (slider1 + slider2) /2\r\n return [value_out]\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server()\r\n\r\n","repo_name":"tigi/dash-dependent-gauges","sub_path":"talking-gauges2.py","file_name":"talking-gauges2.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22454972900","text":"import sys\nimport pytest\n\nfrom core.wilcoxontest import *\n\n\nclass TestPermutationTest(object):\n def setup_method(self, test_method):\n self.cases = [8.50, 9.48, 8.65, 8.16, 8.83, 7.76, 8.63]\n self.controls = [8.27, 8.20, 8.25, 8.14, 9.00, 8.10, 7.20, 8.32, 7.70]\n self.expected_case_rank_sum = 75\n self.expected_pvalue = 0.057\n\n def test_permutation_pvalue(self):\n pvalue = calculate_permutation_pvalue(\n self.cases, self.controls, num_permutations=100000\n )\n\n assert abs(pvalue - self.expected_pvalue) < 0.01\n\n\nclass TestPermutationTestWithTies(object):\n def setup_method(self, test_method):\n self.cases = [0.45, 0.50, 0.61, 0.63, 0.75, 0.85, 0.93]\n self.controls = [0.44, 0.45, 0.52, 0.53, 0.56, 0.58, 0.58, 0.65, 0.79]\n self.expected_case_rank_sum = 71.5\n self.expected_pvalue = 0.105\n\n def test_permutation_pvalue(self):\n pvalue = calculate_permutation_pvalue(\n self.cases, self.controls, num_permutations=100000\n )\n\n assert abs(pvalue - self.expected_pvalue) < 0.01\n\n def test_approximate_pvalue(self):\n pvalue = calculate_approximate_pvalue(self.cases, self.controls)\n assert abs(pvalue - 
self.expected_pvalue) < 0.01\n","repo_name":"Illumina/ExpansionHunterDenovo","sub_path":"scripts/tests/test_wilcoxon.py","file_name":"test_wilcoxon.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"61"} +{"seq_id":"8515314892","text":"import modconsmu\nimport pylab\nimport time\nimport threading\n\nsmu = modconsmu.smu()\n\npylab.ion()\n\noutput = []\n\ntick = .01\n\ndef showData():\n\ti = pylab.subplot(1, 2, 1)\n\ti.hold(True)\n\tpylab.ylabel('current')\n\tv = pylab.subplot(1, 2, 2)\n\tv.hold(True)\n\tpylab.ylabel('voltage')\n\tpylab.draw()\n\twhile True:\n\t\ttimeNow = time.time() \n\t\toutput.append(smu.update())\n\t\ti.plot(timeNow, output[-1][1], 'k.')\n\t\tv.plot(timeNow, output[-1][0], 'k.')\n\t\ti.axis([timeNow-10,timeNow+1,-.2,.2])\n\t\tv.axis([timeNow-10,timeNow+1,-10,10])\n\t\tpylab.draw()\n\t\ttime.sleep(tick)\n\nshowThread = threading.Thread(target=showData)\nshowThread.daemon = True\nshowThread.start()\n\n","repo_name":"itdaniher/Olin-SMUs","sub_path":"awesomeInterfaceV0.py","file_name":"awesomeInterfaceV0.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"23463669221","text":"f_in = 'D-small-attempt0.in'\nf_out = 'D-small-attempt0.out'\n\nw1 = 'GABRIEL'\nw2 = 'RICHARD'\n\nf = open(f_in, 'r')\no = open(f_out, 'w')\n\nT = int(f.readline())\n\ndef main():\n \"\"\" Brute force taking advantage of limitations: X, R, C <= 4 \"\"\"\n with f:\n data = f.readlines()\n\n winner = w1\n for case_num_minus1, line in enumerate(data):\n case = [int(i) for i in line.split()] # [X, R, C]\n x = case[0]\n r = case[1]\n c = case[2]\n\n if (x == 4 and (r < 3 or c < 3)) or (x > r and x > c) or ((r*c)%x != 0):\n winner = w2\n elif (x == 3 and (r < 2 or c < 2)):\n winner = w2\n else:\n pass\n\n o.write((\"Case #{0}: {1}\\n\").format(str(case_num_minus1 + 1), winner))\n winner = w1 # Reset\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_158/1058.py","file_name":"1058.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3813197706","text":"from test.pages.SettingPage import *\nimport unittest\nclass Test_Setting_track_delete(unittest.TestCase):\n \"\"\"路线绘制—删除测试\"\"\"\n\n @classmethod #类方法\n def setUpClass(cls):\n cls.setting = SettingPage()\n cls.setting.login()\n cls.setting.iframe1()\n cls.setting.base_setting()\n cls.setting.iframe01()\n cls.setting.setting()\n cls.setting.iframe0()\n\n\n def test_track_delete(self):\n self.setting.map_draw()\n self.setting.iframe3()\n self.setting.track_delete()\n self.setting.parentframe()\n self.setting.delete_confirm()\n\n @classmethod\n def tearDownClass(cls):\n cls.setting.quit_driver()","repo_name":"rico-o/autoTest","sub_path":"test/case/setting/TestSetting_tarck_delete.py","file_name":"TestSetting_tarck_delete.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70449705476","text":"import os\nimport sklearn\nimport subprocess\nimport bitarray\nimport copy\nimport warnings\n\nfrom genome_integration.variants import BimFile\nfrom .. 
samples import Sample\nfrom .gcta_ma_utils import *\n\n\n\nclass FamSample(Sample):\n \"\"\"\n Extends the sample class, will also contain family ID and sample ID.\n\n Attributes\n ----------\n\n special to this class\n\n fid: fid from the plink file\n iid: iid from the plink file\n\n\n \"\"\"\n def __init__(self, fid, iid, sex, name, phenotype=None):\n if phenotype == \"-9\":\n phenotype = None\n\n super().__init__(name, phenotype)\n self.fid = fid\n self.iid = iid\n self.sex = sex\n self.phenotype = phenotype\n\n\n def __str__(self):\n #sex family members are not supported\n if self.phenotype is None:\n return f'{self.fid} {self.iid} 0 0 {self.sex} -9'\n else:\n return f'{self.fid} {self.iid} 0 0 {self.sex} {self.phenotype}'\n\n\nclass FamFile:\n \"\"\"\n implements a fam file\n\n Attributes\n ----------\n fam_loc: str\n path to the fam file.\n\n sample_names: list\n sample names of the fam. Currently using ~__~\n\n fam_samples: dict\n dictionary with sample names as keys and FamSample as the value.\n\n \"\"\"\n\n def __init__(self, fam_loc):\n self.fam_loc = fam_loc\n self.sample_names = []\n self.fam_samples = {}\n\n\n with open(fam_loc, \"r\") as f:\n full_file_bytes = f.read()\n\n lines = full_file_bytes.split(\"\\n\")\n if len(lines[-1]) == 0:\n lines.pop()\n\n splits_array = [x.split() for x in lines]\n self.sample_names = [f'{split[0]}~__~{split[1]}' for split in splits_array]\n\n if len(set(self.sample_names)) != len(self.sample_names):\n raise ValueError(f\"{self.fam_loc} contains multiple individuals with the same ID.\")\n\n\n self.fam_samples = {self.sample_names[i]: FamSample(x[0], x[1], x[4], self.sample_names[i], x[5])\n for i,x in enumerate(splits_array)}\n\n #old array.\n # for line in lines:\n # split = line.split()\n # sample_name = f'{split[0]}~__~{split[1]}'\n # if sample_name in self.sample_names:\n # raise ValueError(f\"{self.fam_loc} contains multiple individuals with the same ID.\")\n #\n # if split[5] == \"-9\":\n # this_individual = (split[0], split[1], split[4], sample_name)\n # else:\n # this_individual = FamSample(split[0], split[1], split[4], sample_name, split[5])\n #\n # self.sample_names.append(this_individual.name)\n # self.fam_samples[this_individual.name] = this_individual\n\n def _write_fam(self, file_name):\n with open(file_name, 'w') as f:\n for sample in self.sample_names:\n f.write(f'{self.fam_samples[sample]}\\n')\\\n\n\n\nclass PlinkFile:\n \"\"\"\n A reader for the plink bed file format.\n\n requires a valid location of the bed file.\n\n Attributes\n ----------\n bed_loc: str\n of the bed file location\n\n bim_loc: str\n of the bim file location\n\n fam_loc: str\n of the fam file location\n\n genotypes: float numpy array of variants on columns samples on the rows.\n Numpy array of genotypes default is number of minor alleles per variant and position.\n not initialized, need to call read_bed_file_into_numpy_array\n\n bim_data: BimFile object.\n contains all the variants from the bim file.\n\n fam_data: FamFile object\n contains all the Samples from the bim file\n\n _decoder: dict\n bit dict of the file encoding.\n\n Methods\n -------\n read_bed_file_into_numpy_array(self, allele_2_as_zero=True, missing_encoding=3)\n reads in the bed file takes about 5 seconds for 5,000 individuals ~14,000 variants.\n saves it to the class.\n\n prune_for_a_region(self, region):\n prunes for a StartEndRegion\n\n\n harmonize_genotypes(self, other_plink_file)\n Harmonized another PlinkFile object to this files' alleles.\n (flips alleles, and flips genotypes) (MAJOR 
and minor will be false names.)\n\n \"\"\"\n\n def __init__(self, bfile_location):\n\n self.bed_loc = bfile_location + \".bed\"\n self.bim_loc = bfile_location + \".bim\"\n self.fam_loc = bfile_location + \".fam\"\n\n self.genotypes = None\n\n if not os.path.exists(self.bed_loc):\n raise FileNotFoundError(f\"Could not find bimfile: {self.bed_loc}\")\n\n if not os.path.exists(self.bim_loc):\n raise FileNotFoundError(f\"Could not find bimfile: {self.bim_loc}\")\n\n if not os.path.exists(self.bim_loc):\n raise FileNotFoundError(f\"Could not find bimfile: {self.fam_loc}\")\n\n #variants\n self.bim_data = BimFile(self.bim_loc)\n\n if len(self.bim_data.snp_names) > 100000:\n print(\"Warning, Plinkfile with more than 100,000 variants will consume a lot of time and memory.\")\n\n self.fam_data = FamFile(self.fam_loc)\n self._decoder = {0: bitarray.bitarray('00'), #homoz A1 (usually minor)\n 1: bitarray.bitarray('01'), #heteroz\n 2: bitarray.bitarray('11'), #homoz A2 (usually major)\n 3: bitarray.bitarray('10'), # missing\n }\n\n def read_bed_file_into_numpy_array(self, allele_2_as_zero=True, missing_encoding=3, dtype=float):\n \"\"\"\n Reads a bed file into a numpy array\n\n :param allele_2_as_zero: bool\n allele 2 (often the major allele in plink) is encoded as zero, making an increase in the minor allele an\n increase in number. This is opposite to the plinkio encoding.\n But this makes the minor allele often also the effect allele.\n\n :param missing_encoding: int\n encodes missing values as three, plinkio default.\n :return: n indivduals by m variants numpy array (floats) of genotypes.\n \"\"\"\n self._missing_encoding = missing_encoding\n self._allele_2_as_zero = allele_2_as_zero\n\n with open(self.bed_loc, \"rb\") as bed_file:\n magick = bed_file.read(3)\n if magick != b'l\\x1b\\x01':\n raise ValueError(\"Plink file magic string is not correct.\")\n\n all_bytes = bed_file.read()\n\n num_variants = len(self.bim_data.snp_names)\n num_individuals = len(self.fam_data.sample_names)\n\n bytes_to_read = int(np.ceil(num_individuals / 4))\n\n if len(all_bytes) != (bytes_to_read*num_variants):\n ValueError(f\"{self.bed_loc} has an incorrect number of bytes: expected {(bytes_to_read*num_variants)}, found: {len(all_bytes)}\")\n\n\n genotypes = np.zeros( (num_individuals, num_variants), dtype=np.uint8)\n\n offset = 0\n for i in range(num_variants):\n bits = bitarray.bitarray(endian=\"little\")\n bits.frombytes(all_bytes[offset:(offset+bytes_to_read)])\n array = bits.decode(self._decoder)[:num_individuals]\n genotypes[:,i] = array\n offset+=bytes_to_read\n\n #convert genotypes if necessary (default)\n if allele_2_as_zero:\n tmp_geno = copy.deepcopy(genotypes)\n tmp_geno[genotypes == 2] = 0\n tmp_geno[genotypes == 0] = 2\n genotypes = tmp_geno\n\n genotypes = np.array(genotypes, dtype=dtype)\n genotypes[genotypes == 3] = missing_encoding\n\n self.genotypes = genotypes\n return genotypes\n\n\n def prune_for_a_region(self, region):\n \"\"\"\n Prunes for a region in the plink file.\n\n :param region: StartEndRegion\n region to prune for\n :return: self\n with the variants outside the region removed.\n \"\"\"\n\n if self.genotypes is None:\n warnings.warn(\"Reference genotypes where not loaded.\"\n \"Reading them now, this could take prohibitively long or use a lot of memory\",\n RuntimeWarning)\n self.read_bed_file_into_numpy_array()\n\n variants_to_keep = []\n variants_to_delete = []\n for snp_name in self.bim_data.snp_names:\n if region.snp_object_in_region(self.bim_data.bim_results[snp_name]):\n 
variants_to_keep.append(snp_name)\n else:\n variants_to_delete.append(snp_name)\n\n return self.prune_for_a_list_of_snps(variants_to_keep)\n\n\n def prune_for_a_list_of_snps(self, snp_list, verbose=False):\n \"\"\"\n prunes a list of variants,\n\n :param snp_list: list of variants that should be at least partially overlapping with the variants in the\n .bim_data attribute of this class\n\n :param verbose: print things about what is happening\n :return: self, with only the variants specified in the snp_list.\n \"\"\"\n\n if self.genotypes is None:\n warnings.warn(\"Genotypes where not loaded.\"\n \"Reading them now, this could take prohibitively long or use a lot of memory\",\n RuntimeWarning)\n self.read_bed_file_into_numpy_array()\n\n\n snps_to_keep = set(snp_list) & set(self.bim_data.snp_names)\n\n if len(snps_to_keep) == 0:\n raise ValueError(\"No overlapping SNPs and therefore none will be kept\")\n elif verbose == True:\n print(f\"Pruning for variants, keeping {len(snps_to_keep)} snps\")\n\n ##prune the bim data, this needs to be sorted by position, as this is retained by plink\n indices_to_keep = sorted(np.asarray([self.bim_data.snp_names.index(x) for x in snps_to_keep], dtype=int))\n # Remove the variants\n\n variants_to_remove = [x for x in self.bim_data.snp_names if x not in snps_to_keep]\n for snp_name_to_remove in variants_to_remove:\n self.bim_data.snp_names.remove(snp_name_to_remove)\n del self.bim_data.bim_results[snp_name_to_remove]\n\n # Keep the remaining genotypes\n self.genotypes = self.genotypes[:,indices_to_keep]\n\n return self\n\n\n def output_genotypes_to_bed_file(self, output_prepend):\n \"\"\"\n Writes a bed file to the final list.\n\n :param output_prepend: This is the output prepend for after which .bed, .bim .fam are appended.\n :return: Nothing, but bed, bim fam are written\n \"\"\"\n\n bed_filename, bim_filename, fam_filename = [f'{output_prepend}{x}' for x in ['.bed', '.bim', '.fam']]\n\n if self.genotypes is None:\n warnings.warn(\"Genotypes where not loaded.\"\n \"Reading them now, this could take prohibitively long or use a lot of memory\",\n RuntimeWarning)\n self.read_bed_file_into_numpy_array()\n\n self.bim_data._write_bim(bim_filename)\n self.fam_data._write_fam(fam_filename)\n\n write_encoder = self._decoder\n write_encoder[self._missing_encoding] = bitarray.bitarray('10')\n\n\n if self._allele_2_as_zero:\n tmp_geno = copy.deepcopy(self.genotypes)\n tmp_geno[self.genotypes == 2] = 0\n tmp_geno[self.genotypes == 0] = 2\n genotypes = tmp_geno\n else:\n genotypes = self.genotypes\n\n\n #now the harder part, write the bed file.\n with open(bed_filename, 'wb') as f:\n f.write(b'l\\x1b\\x01')\n for i in np.arange(genotypes.shape[1]):\n genotype_vector = genotypes[:,i]\n bits = bitarray.bitarray(endian=\"little\")\n bits.encode(write_encoder, genotype_vector)\n f.write(bits.tobytes())\n\n\n\n def harmonize_genotypes(self, other_plink_file):\n \"\"\"\n Harmonizes the other plink file to the alleles of self.\n requires that the variants are the same between files.\n requires that the variants have the same alleles\n\n WARNING: Will mean that the other plink file will have flipped major and minor alleles.\n\n :param other_plink_file: Plink File to harmonize\n :return: PlinkFile\n \"\"\"\n if set(self.bim_data.snp_names) != set(other_plink_file.bim_data.snp_names):\n raise ValueError(\"Both genotype files need to have exactly the same set of variants.\")\n\n if self.bim_data.snp_names != other_plink_file.bim_data.snp_names:\n #This is a runtime check to ensure 
that the variants are ordered the same.\n #One assumption is that plink orders the variants bades on position,\n #so this should not be reached, otherwise ordering functionality needs to be implemented.\n raise ValueError(\"Ordering of the variants needs to be exactly the same.\")\n\n\n indices_to_flip = []\n for snp_name in self.bim_data.snp_names:\n own_alleles = [self.bim_data.bim_results[snp_name].minor_allele,\n self.bim_data.bim_results[snp_name].major_allele]\n\n other_alleles = [other_plink_file.bim_data.bim_results[snp_name].minor_allele,\n other_plink_file.bim_data.bim_results[snp_name].major_allele]\n\n if own_alleles != other_alleles:\n indices_to_flip.append(other_plink_file.bim_data.snp_names.index(snp_name))\n\n if set(own_alleles) != set(other_alleles):\n raise ValueError(f\"Alleles sets are not the same for SNP \"\n f\"{self.bim_data.bim_results[snp_name].snp_name}\")\n\n if self.genotypes is None:\n warnings.warn(\"Reference genotypes where not loaded.\"\n \"Reading them now, This could take prohibitively long or use a lot of memory\",\n RuntimeWarning)\n self.read_bed_file_into_numpy_array()\n\n if other_plink_file.genotypes is None:\n warnings.warn(\"Reference genotypes where not loaded.\"\n \"Reading them now. This could take prohibitively long or use a lot of memory\",\n RuntimeWarning)\n other_plink_file.read_bed_file_into_numpy_array()\n\n print(f\"Flipping the alleles of {len(indices_to_flip)} variant\")\n\n for indice in indices_to_flip:\n snp_name = other_plink_file.bim_data.snp_names[indice]\n old_major = other_plink_file.bim_data.bim_results[snp_name].major_allele\n other_plink_file.bim_data.bim_results[snp_name].major_allele = other_plink_file.bim_data.bim_results[snp_name].minor_allele\n other_plink_file.bim_data.bim_results[snp_name].minor_allele = old_major\n other_plink_file.genotypes[other_plink_file.genotypes[:, indice] != 3, indice] = (\n 2 - other_plink_file.genotypes[other_plink_file.genotypes[:, indice] != 3, indice]\n )\n\n return other_plink_file, other_plink_file.genotypes\n\n\ndef read_region_from_plink(bed_file, out_location, region, variants=None):\n\n \"\"\"\n Reads a region from a plink file and writes it to an output.\n\n This function can be used if you want to read only a small part of a plink file.\n\n\n\n :param bed_file: str\n prepend filelocation of a bed file.\n :param out_location:\n prepend file location of the pruned file.\n :param region: StartEndRegion\n Region to look for.\n :param variants: iterable of str\n iterable containing the variant names to keep for analysis.\n :return: None\n \"\"\"\n if variants is None:\n subprocess.run([\"plink\",\n \"--bfile\", bed_file,\n \"--chr\", str(region.chromosome),\n \"--from-bp\", str(region.start),\n \"--to-bp\", str(region.end),\n \"--make-bed\", \"--out\", out_location\n ],\n check=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL\n )\n else:\n\n variant_file = out_location + \"_variants_to_extract\"\n with open(variant_file, \"w\") as f:\n for variant in variants:\n f.write(f\"{variant}\\n\")\n\n subprocess.run([\"plink\",\n \"--bfile\", bed_file,\n \"--chr\", str(region.chromosome),\n \"--from-bp\", str(region.start),\n \"--to-bp\", str(region.end),\n \"--extract\", variant_file,\n \"--make-bed\", \"--out\", out_location\n ],\n check=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL\n )\n subprocess.run([\"rm\", variant_file], check=True)\n\n\n\ndef plink_isolate_clump(bed_file, associations, threshold, r_sq=0.5 ,tmploc=\"\", return_snp_file=False):\n 
\"\"\"\n will prune for ld in a list of snps. from a bed file location.\n will output a list after prune.\n\n :param bed_file:\n :param associations:\n :return: list of snps after prune\n \"\"\"\n clump_file = tmploc + \"_\" + str(threshold) +\"_clump_file.txt\"\n tmp_plink_out = tmploc + \"_\" + str(threshold) + \"_plink_out\"\n snp_out = tmploc + \"_\" + str(threshold) + \"clumped.txt\"\n\n association_lines = [\"SNP\\tP\"]\n [association_lines.append(\"{}\\t{}\".format(\n associations[x].snp_name,\n associations[x].wald_p_val\n )) for x in associations.keys()]\n\n write_list_to_newline_separated_file(association_lines, clump_file)\n\n subprocess.run([\"plink --bfile \" + bed_file +\n \" --clump \" + clump_file +\n \" --clump-p1 \" + str(threshold) +\n \" --clump-r2 \" + str(r_sq) +\n \" --clump-kb 1000 \" +\n \" --out \" + tmp_plink_out],\n shell=True,\n check=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n\n clumpdat = read_newline_separated_file_into_list(tmp_plink_out + \".clumped\")\n\n snps_to_keep = [x.split()[2] for x in clumpdat[1:] if len(x.split())]\n\n if return_snp_file:\n write_list_to_newline_separated_file(snps_to_keep, snp_out)\n\n subprocess.run([\"rm \" + tmp_plink_out + \".* \" + clump_file ], shell=True, check=True)\n\n if return_snp_file:\n return snps_to_keep, snp_out\n\n else:\n return snps_to_keep\n\n\ndef isolate_snps_of_interest_make_bed(ma_file, exposure_name, b_file,\n tmp_file_prepend, plink_files_out, calculate_ld = False,\n individuals_to_isolate=None, no_palindromic=False):\n \"\"\"\n\n Isolate snps of interest for a gene, and make a bed file\n\n :param ma_file:\n :param exposure_name:\n :param b_file:\n :param tmp_file_prepend:\n :param plink_files_out:\n :param calculate_ld:\n :return: the name_of the bedfile with only the snps\n \"\"\"\n\n ma_data = MaFile(ma_file, exposure_name)\n\n snps_file = tmp_file_prepend + \"_snps\"\n\n # write the snps to isolate\n write_list_to_newline_separated_file(ma_data.snp_names(no_palindromic=no_palindromic), snps_file)\n\n #basic command list, will be appended to if other options are specified.\n command_list = ['plink',\n '--bfile', b_file,\n '--extract', snps_file,\n '--make-bed',\n '--out', plink_files_out\n ]\n\n if calculate_ld:\n command_list += ['--r', 'square',]\n\n if individuals_to_isolate is not None:\n # make a file with individuals.\n if not isinstance(individuals_to_isolate, list):\n raise ValueError(\"individuals for isolation needs to be a list of strings.\")\n\n # write the file\n individuals_file = tmp_file_prepend + \"_iids\"\n with open(individuals_file, 'w') as f:\n for individual in individuals_to_isolate:\n f.write(f'{individual}\\n')\n\n # now add it to the command\n command_list += ['--keep', individuals_file]\n\n # now run plink to isolate the files, and return the snplist, plink filename and eqtl ma file.\n tmp = subprocess.run(command_list,\n check=True,\n stdout=subprocess.DEVNULL, # to DEVNULL, because plink saves a log of everything\n stderr=subprocess.DEVNULL\n )\n\n bim_file = BimFile(plink_files_out + '.bim')\n\n return ma_data, bim_file\n\n\ndef score_individuals(genetic_associations, bed_file, tmp_file = \"tmp_score\", p_value_thresh = 1):\n \"\"\"\n Used to score individual.\n :param genetic_associations:\n :param bed_file: prepend of a bed file\n :param tmp_file: prepend of temporary files.\n :param p_value_thresh: p value threshold of which the genetic associations should be part of.\n :return: dict with keys corresponding to individuals,\n values: tuple with 
the phenotype [0] and score [1] of the individual.\n \"\"\"\n\n\n file_for_scoring = tmp_file + \"_snps_beta.txt\"\n pos_name_scoring = tmp_file + \"_posname_beta.txt\"\n prepend_for_plink = tmp_file + \"_score\"\n\n with open(file_for_scoring, \"w\") as f:\n for snp in genetic_associations.keys():\n tmp_assoc = genetic_associations[snp]\n if tmp_assoc.wald_p_val < p_value_thresh:\n f.write(\"{}\\t{}\\t{}\\n\".format(tmp_assoc.snp_name, tmp_assoc.minor_allele, tmp_assoc.beta))\n\n with open(pos_name_scoring, \"w\") as f:\n for snp in genetic_associations.keys():\n tmp_assoc = genetic_associations[snp]\n if tmp_assoc.wald_p_val < p_value_thresh:\n f.write(\"{}\\t{}\\t{}\\n\".format(\"{}:{}\".format(tmp_assoc.chromosome, tmp_assoc.position), tmp_assoc.minor_allele, tmp_assoc.beta))\n try:\n subprocess.run([\"plink\",\n \"--allow-no-sex\",\n \"--bfile\", bed_file,\n \"--score\", file_for_scoring,\n \"--out\", prepend_for_plink + \".snp_name\"\n ],\n check=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL\n )\n profile_loc = prepend_for_plink + \".snp_name.profile\"\n\n except subprocess.CalledProcessError:\n # something went wrong. Now trying it with snps which have their name as position.\n subprocess.run([\"plink\",\n \"--allow-no-sex\",\n \"--bfile\", bed_file,\n \"--score\", pos_name_scoring,\n \"--out\", prepend_for_plink + \".pos_name\"\n ],\n check=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL\n )\n profile_loc = prepend_for_plink + \".pos_name.profile\"\n\n # scoring done, now read the file.\n pheno_score = {}\n with open(profile_loc, \"r\") as f:\n f.readline()\n for line in f:\n split = line.split()\n pheno_score[split[1]] = (float(split[2]), float(split[5]))\n\n subprocess.run(['rm ' + file_for_scoring + \" \" + pos_name_scoring + \" \" + prepend_for_plink + \".*\"], shell=True, check=True)\n\n return pheno_score\n\n\n\ndef score_and_assess_auc(genetic_associations, bed_file, tmp_file = \"tmp_score\", p_value_thresh = 1.0, resolution = 500):\n \"\"\"\n Using scoring, we determine the auc\n\n :param genetic_associations:\n :param bed_file:\n :param tmp_file:\n :param p_value_thresh:\n :param resolution:\n :return:\n \"\"\"\n\n pheno_score = score_individuals(genetic_associations, bed_file, tmp_file, p_value_thresh)\n\n pheno = np.array([pheno_score[x][0] for x in pheno_score.keys()])\n scores = np.array([pheno_score[x][1] for x in pheno_score.keys()])\n\n thresholds = np.arange(min(scores), max(scores), (max(scores) - min(scores)) / resolution)\n\n # Using 2 as the phenotype celiac disease.\n tpr = [sum(pheno[scores > x] == 2.0) / sum(pheno == 2.0) for x in thresholds]\n fpr = [sum(pheno[scores > x] == 1.0) / sum(pheno == 1.0) for x in thresholds]\n\n auc = sklearn.metrics.auc(fpr,tpr)\n\n return tpr, fpr, auc\n","repo_name":"adriaan-vd-graaf/genome_integration","sub_path":"genome_integration/utils/plink_utils.py","file_name":"plink_utils.py","file_ext":"py","file_size_in_byte":24353,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"28958629067","text":"\"\"\"\nSandbox for testing\n\"\"\"\nimport os\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom SemanticNetwork import SemanticNetwork\nfrom itertools import count\n\nprint('Loading graph...')\nnetwork = SemanticNetwork('adj_matrix.csv', 'sp100.csv')\nG = network.graph\n\nprint('\\n################## Connected Components ##################')\n# Connected components\n# One large component 
w/93 companies\n# 7 remaining companies all disconnected\ncomponents = sorted(nx.connected_components(G), key=len, reverse=True)\nprint('Sizes of connected components: ', [len(c) for c in components])\nprint('Disconnected nodes: ', components[1:])\n\nprint('\\n################## Distance Measures ##################')\n# Distance measures\nbig = G.subgraph(components[0])\ncenter = nx.center(big)\ndiameter = nx.diameter(big)\neccentricity = nx.eccentricity(big)\nperiphery = nx.periphery(big)\nradius = nx.radius(big)\nprint('Radius:', radius)\nprint('Diameter:', diameter)\nprint('Eccentricity (min):', np.min(list(eccentricity.values())))\nprint('Eccentricity (max):', np.max(list(eccentricity.values())))\nprint('Eccentricity (mean):', np.mean(list(eccentricity.values())))\nprint('Size of center:', len(center))\nprint('Size of periphery:', len(periphery))\n\n# Compute and plot degree distribution\nprint('\\n################## Degree ##################')\ndegree_sequence = [d for n, d in G.degree()] # degree sequence\nplt.figure(1)\nplt.hist(degree_sequence, bins=range(min(degree_sequence), max(degree_sequence) + 10, 10), color='b')\nplt.title(\"Degree Histogram\")\nplt.ylabel(\"Count\")\nplt.xlabel(\"Degree\")\n\navg_degree = np.mean(degree_sequence)\nprint('Average Degree: ', avg_degree)\n\n# Which companies have highest degree?\nprint('Companies with highest degree:')\ndegrees = sorted(G.degree, key=lambda x: x[1], reverse=True)\nfor i in range(10):\n print(degrees[i])\n\n# Compute and plot local clustering coefficients\nprint('\\n################## Local Clustering Coefficient ##################')\nclustering = nx.clustering(G,weight='weight')\nclustering_sequence = list(clustering.values())\nplt.figure(2)\nplt.hist(clustering_sequence, bins=np.arange(min(clustering_sequence), max(clustering_sequence) + 0.001, 0.001), color='b')\nplt.title(\"Local Clustering Coefficient Histogram\")\nplt.ylabel(\"Count\")\nplt.xlabel(\"Local Clustering Coefficient\")\n\navg_clustering = np.mean(clustering_sequence)\nprint('Average Local Clustering Coefficient: ', avg_clustering)\n\n# Which companies have highest clustering coefficients?\nprint('Companies with highest clustering coefficient:')\nclust = sorted(clustering.items(), key=lambda x: x[1], reverse=True)\nfor i in range(10):\n print(clust[i])\n\n\n# Betweenness centrality\nprint('\\n################## Betweenness Centrality ##################')\nbetweenness = nx.betweenness_centrality(G,weight='weight')\nbetweenness_sequence = list(betweenness.values())\nplt.figure(3)\nplt.hist(betweenness_sequence, bins=np.arange(min(betweenness_sequence), max(betweenness_sequence) + 0.001, 0.001), color='b')\nplt.title(\"Betweenness Centrality Histogram\")\nplt.ylabel(\"Count\")\nplt.xlabel(\"Betweenness Centrality\")\n\navg_betweenness = np.mean(betweenness_sequence)\nprint('Average Betweenness Centrality: ', avg_betweenness)\n\n# Which companies have highest betweenness centrality?\nprint('Companies with highest betweenness centrality:')\nbet = sorted(betweenness.items(), key=lambda x: x[1], reverse=True)\nfor i in range(10):\n print(bet[i])\n\n# Degree centrality\nprint('\\n################## Degree Centrality ##################')\ndegree_centrality = nx.degree_centrality(G)\ndegree_centrality_sequence = list(degree_centrality.values())\nplt.figure(3)\nplt.hist(degree_centrality_sequence, bins=np.arange(min(degree_centrality_sequence), max(degree_centrality_sequence) + 0.001, 0.001), color='b')\nplt.title(\"Degree Centrality 
Histogram\")\nplt.ylabel(\"Count\")\nplt.xlabel(\"Degree Centrality\")\n\navg_degree_centrality = np.mean(degree_centrality_sequence)\nprint('Average Degree Centrality: ', avg_degree_centrality)\n\n# Which companies have highest degree centrality?\nprint('Companies with highest degree centrality:')\ndeg = sorted(degree_centrality.items(), key=lambda x: x[1], reverse=True)\nfor i in range(10):\n print(deg[i])\n\n# Eigenvector centrality\nprint('\\n################## Eigenvector Centrality ##################')\neig_centrality = nx.eigenvector_centrality(G,weight='weight')\neig_centrality_sequence = list(eig_centrality.values())\nplt.figure(3)\nplt.hist(eig_centrality_sequence, bins=np.arange(min(eig_centrality_sequence), max(eig_centrality_sequence) + 0.001, 0.001), color='b')\nplt.title(\"Eigenvector Centrality Histogram\")\nplt.ylabel(\"Count\")\nplt.xlabel(\"Eigenvector Centrality\")\n\navg_eig_centrality = np.mean(eig_centrality_sequence)\nprint('Average Eigenvector Centrality: ', avg_eig_centrality)\n\n# Which companies have highest eigenvector centrality?\nprint('Companies with highest eigenvector centrality:')\neig = sorted(eig_centrality.items(), key=lambda x: x[1], reverse=True)\nfor i in range(10):\n print(eig[i])\n\n\n# Map node classes to colors and use to plot graph\ncolor_mapping = {'Communication Services':'#15F5F0',\n\t\t\t\t 'Consumer Discretionary':'#FA3D35',\n\t\t\t\t 'Consumer Staples':'#F99B37',\n\t\t\t\t 'Energy':'#FAF574',\n\t\t\t\t 'Financials':'#88FA74',\n\t\t\t\t 'Health Care':'#FBB5E3',\n\t\t\t\t 'Industrials':'#D3AAFB',\n\t\t\t\t 'Information Technology':'#3392FF',\n\t\t\t\t 'Materials':'#FC40FA',\n\t\t\t\t 'Real Estate':'#859FD5',\n\t\t\t\t 'Utilities':'#CACBC3'}\ncolors = [color_mapping[nx.get_node_attributes(G,'sector')[n]] for n in G.nodes()]\nsizes = [x+10 for x in degree_sequence]\nplt.figure(4)\nplt.title('Semantic Network')\nnx.draw_networkx(G, pos=nx.spring_layout(G), with_labels=True, node_size=sizes, node_color=colors, alpha=0.9, width=0.1, font_size=6, font_color='black', font_weight='bold')\n\n# What if we throw out small edges?\nedgelist = list(G.edges.data('weight', default=0))\nedgelist.sort(key=lambda x:x[2], reverse=True)\nsmall_edgelist = [x for x in edgelist if x[2] >= 10]\n\nplt.figure(5)\nplt.title('Semantic Network: Edge Weight >= 10')\nnx.draw_networkx(G, edgelist=small_edgelist, with_labels=True, node_size=sizes, node_color=colors, alpha=0.9, width=0.1, font_size=6, font_color='black', font_weight='bold')\nplt.show()\n","repo_name":"sethkimmel3/SemanticNetworkStockMarketAnalysis","sub_path":"sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":6245,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"3210597223","text":"class Node:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\n\nclass HashTable:\n INITIAL_CAPACITY = 50\n\n def __init__(self):\n self.capacity = HashTable.INITIAL_CAPACITY\n self.size = 0\n self.buckets = [None] * self.capacity\n\n def hash(self, key):\n hash_sum = 0\n\n for idx, c in enumerate(key):\n hash_sum += (idx + len(key)) ** ord(c)\n hash_sum = hash_sum % self.capacity\n\n return hash_sum\n\n def insert(self, key, value):\n self.size += 1\n idx = self.hash(key)\n node = self.buckets[idx]\n new_node = Node(key, value)\n\n if node is None:\n self.buckets[idx] = new_node\n return\n\n while node.next:\n node = node.next\n node.next = new_node\n\n def find(self, key):\n idx = self.hash(key)\n node 
= self.buckets[idx]\n\n        while node is not None and node.key != key:\n            node = node.next\n\n        if node is None:\n            return None\n\n        return node.value\n\n    def remove(self, key):\n        idx = self.hash(key)\n        node = self.buckets[idx]\n\n        prev = None\n        while node is not None and node.key != key:\n            prev = node\n            node = node.next\n\n        if node is None:\n            return None\n\n        self.size -= 1\n        result = node.value\n        if prev is None:\n            self.buckets[idx] = node.next\n        else:\n            prev.next = node.next\n\n        return result\n","repo_name":"Psykepro/DataStructures","sub_path":"Python/HashTable.py","file_name":"HashTable.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43045846962","text":"__author__ = 'jaishankar'\n\nimport threading\nimport sys\nfrom PySide.QtCore import *\nfrom PySide.QtGui import *\nfrom manageMembers import *\n\n\nclass CompetitionCentralWidget(QWidget):\n    '''\n    a QWidget with a typical box layout\n    '''\n    def __init__(self):\n        QWidget.__init__(self)\n        self.addbutton = QPushButton('Add Participant', self)\n        # self.addbutton.setMinimumHeight(70)\n        # self.addbutton.setMinimumWidth(70)\n        self.resbutton = QPushButton('Add Result', self)\n        # self.resbutton.setMinimumHeight(70)\n        # self.resbutton.setMinimumWidth(70)\n        # self.members = dbAll(\"SELECT id, fname, lname FROM PARTICIPANT\")\n        # print(self.members)\n        # self.data_list = self.members\n        # self.header = [\"Id\", \"First Name\", \"Surname\"]\n        # table_model = MyTableModel(self, self.data_list, self.header)\n        # self.membersbox = QComboBox()\n        # self.membersbox.setModel(table_model)\n        #\n        #\n        # self.membersbox.setMinimumWidth(250)\n        #self.formLayout.addRow(self.grouplbl,self.group)\n        self.members = dbAll(\"SELECT id, fname, lname FROM PARTICIPANT\")\n        #self.membersbox = QLineEdit()\n        self.membersbox = QComboBox()\n\n        #self.membersbox.setPlaceholderText(\"Enter Name\")\n        print(self.members)\n        completernames = []\n        for member in self.members:\n            self.membersbox.addItem(\"%s %s\"%(member[1],member[2]), userData=member[0].__str__())\n            completernames.append(\"%s %s\"%(member[1],member[2]))\n        self.membersbox.setEditable(True)\n\n        completer = QCompleter(completernames)\n        completer.setCaseSensitivity(Qt.CaseInsensitive)\n        completer.setCompletionMode(QCompleter.PopupCompletion)\n        self.membersbox.setCompleter(completer)\n\n        hLayout1 = QHBoxLayout()\n\n        vLayout1 = QVBoxLayout()\n\n        vLayout2 = QVBoxLayout()\n        # add the widgets vertically in that order\n        vLayout1.addWidget(self.membersbox)\n        vLayout1.addWidget(self.addbutton)\n\n\n        vLayout2.addWidget(self.resbutton)\n\n        hLayout1.addLayout(vLayout1)\n        hLayout1.addLayout(vLayout2)\n\n\n        hLayout1.addSpacing(5)\n\n        self.setLayout(hLayout1)\n\n\n\n\n\n\n\nclass CompetitionWindow(QMainWindow):\n    def __init__(self):\n\n        QMainWindow.__init__(self)\n        self.setGeometry(300, 100, 800, 400)\n        self.setWindowTitle('Manage Members')\n        self.exit = QAction('Exit', self)\n        self.exit.setStatusTip('Exit Members')\n        menubar = self.menuBar()\n        file = menubar.addMenu('&File')\n        file.addAction(self.exit)\n        self.statusBar()\n\n        widget = CompetitionCentralWidget()\n\n        self.setCentralWidget(widget)\n\n    currentInstance = None\n\n    @classmethod\n    def getCurrentInstance(cls):\n        if cls.currentInstance is None:\n            cls.currentInstance = CompetitionWindow()\n        return 
cls.currentInstance\n\n","repo_name":"jaishankarh/WeightliftingApp","sub_path":"competition.py","file_name":"competition.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33141517063","text":"from entity import MazeRunner\nfrom constants import *\nfrom vectors import Vector2D\nfrom random import randint\nfrom modes import Mode\nfrom stacks import Stack\nfrom animation import Animation, AnimationGroup\n\n\n# noinspection PyMethodOverriding\n# noinspection PyAttributeOutsideInit\nclass Ghost(MazeRunner):\n def __init__(self, nodes, spritesheet, row):\n MazeRunner.__init__(self, nodes, spritesheet)\n self.name = \"ghost\"\n self.color = PINK\n self.goal = Vector2D()\n self.modeStack = self.setup_mode_stack()\n self.mode = self.modeStack.pop()\n self.modeTimer = 0\n self.spawnNode = self.find_spawn_node()\n self.set_guide_stack()\n self.set_start_position()\n self.points = 200\n self.released = True\n self.pelletsForRelease = 0\n self.bannedDirections = []\n self.speed = GHOST_SPEED\n\n self.animate = AnimationGroup()\n self.animateName = \"left\"\n self.define_animations(row)\n self.animate.set_animation(self.animateName, 0)\n self.image = self.animate.get_image()\n self.previousDirection = self.direction\n self.started = True\n self.hide = True\n\n def set_guide_stack(self):\n self.guide = Stack()\n self.guide.push(UP)\n\n def get_valid_directions(self):\n valid_directions = []\n for key in self.node.neighbors.keys():\n if self.node.neighbors[key] is not None:\n if not (key == self.direction * -1):\n if not self.mode.name == \"SPAWN\":\n if not self.node.is_home_entrance:\n if key not in self.bannedDirections:\n valid_directions.append(key)\n else:\n if key != DOWN:\n valid_directions.append(key)\n else:\n valid_directions.append(key)\n if len(valid_directions) == 0:\n valid_directions.append(self.force_backtrack())\n return valid_directions\n\n def force_backtrack(self):\n if self.direction * -1 == UP:\n return UP\n if self.direction * -1 == DOWN:\n return DOWN\n if self.direction * -1 == LEFT:\n return LEFT\n if self.direction * -1 == RIGHT:\n return RIGHT\n\n def get_closest_direction(self, valid_directions):\n distances = []\n for direction in valid_directions:\n diff_vector = self.node.position + direction * TILE_WIDTH - self.goal\n distances.append(diff_vector.magnitude_squared())\n index = distances.index(min(distances))\n return valid_directions[index]\n\n def move_by_self(self):\n if self.overshot_target():\n self.node = self.target\n self.portal()\n valid_directions = self.get_valid_directions()\n self.direction = self.get_closest_direction(valid_directions)\n self.target = self.node.neighbors[self.direction]\n self.set_position()\n if self.mode.name == \"SPAWN\":\n if self.position == self.goal:\n # self.mode = self.modeStack.pop()\n self.mode = Mode(\"GUIDE\", speedmult=0.2)\n if self.mode.name == \"GUIDE\":\n if self.guide.is_empty():\n self.mode = self.modeStack.pop()\n self.set_guide_stack()\n self.started = True\n else:\n self.direction = self.guide.pop()\n self.target = self.node.neighbors[self.direction]\n self.set_position()\n\n def scatter_goal(self):\n self.goal = Vector2D(SCREENSIZE[0], 0)\n\n def chase_goal(self, pacman):\n self.goal = pacman.position\n\n def mode_update(self, dt):\n self.modeTimer += dt\n\n if self.mode.time is not None:\n if self.modeTimer >= self.mode.time:\n self.mode = self.modeStack.pop()\n self.modeTimer = 0\n\n def find_start_node(self):\n 
for node in self.nodes.homelist:\n if node.is_ghost_start:\n return node\n return None\n\n def set_start_position(self):\n self.node = self.find_start_node()\n self.target = self.node\n self.set_position()\n\n def freight_mode(self):\n if self.mode.name != \"SPAWN\":\n if self.mode.name != \"FREIGHT\":\n if self.mode.time is not None:\n dt = self.mode.time - self.modeTimer\n self.modeStack.push(Mode(name=self.mode.name, time=dt))\n else:\n self.modeStack.push(Mode(name=self.mode.name))\n self.mode = Mode(\"FREIGHT\", time=7, speedmult=0.5)\n self.modeTimer = 0\n self.animateName = \"freight\"\n self.animate.set_animation(self.animateName, 0)\n else:\n self.mode = Mode(\"FREIGHT\", time=7, speedmult=0.5)\n self.modeTimer = 0\n\n def check_direction_change(self):\n if self.direction != self.previousDirection:\n self.previousDirection = self.direction\n\n if self.mode.name == \"SPAWN\":\n self.set_spawn_images()\n elif self.mode.name != \"FREIGHT\":\n self.set_normal_images()\n\n def spawn_mode(self):\n self.mode = Mode(\"SPAWN\", speedmult=2)\n self.modeTimer = 0\n self.set_spawn_images()\n\n def random_goal(self):\n x = randint(0, N_COLS * TILE_WIDTH)\n y = randint(0, N_ROWS * TILE_HEIGHT)\n self.goal = Vector2D(x, y)\n\n def spawn_goal(self):\n self.goal = self.spawnNode.position\n\n @staticmethod\n def setup_mode_stack():\n modes = Stack()\n modes.push(Mode(name=\"CHASE\"))\n modes.push(Mode(name=\"SCATTER\", time=5))\n modes.push(Mode(name=\"CHASE\", time=20))\n modes.push(Mode(name=\"SCATTER\", time=7))\n modes.push(Mode(name=\"CHASE\", time=20))\n modes.push(Mode(name=\"SCATTER\", time=7))\n modes.push(Mode(name=\"CHASE\", time=20))\n modes.push(Mode(name=\"SCATTER\", time=7))\n return modes\n\n def find_spawn_node(self):\n for node in self.nodes.homelist:\n if node.is_spawn_node:\n return node\n return None\n\n def set_spawn_images(self):\n if self.started:\n if self.direction == LEFT:\n self.animateName = \"spawnleft\"\n if self.direction == RIGHT:\n self.animateName = \"spawnright\"\n if self.direction == UP:\n self.animateName = \"spawnup\"\n if self.direction == DOWN:\n self.animateName = \"spawndown\"\n self.animate.set_animation(self.animateName, 0)\n else:\n self.set_normal_images()\n\n def set_normal_images(self):\n if self.direction == LEFT:\n self.animateName = \"left\"\n if self.direction == RIGHT:\n self.animateName = \"right\"\n if self.direction == UP:\n self.animateName = \"up\"\n if self.direction == DOWN:\n self.animateName = \"down\"\n self.animate.set_animation(self.animateName, 0)\n\n def update(self, dt, pacman):\n speed_modifier = self.speed * self.mode.speedmult\n self.position += self.direction * speed_modifier * dt\n self.mode_update(dt)\n self.check_direction_change()\n self.image = self.animate.loop(dt)\n if self.mode.name == \"CHASE\":\n self.chase_goal(pacman)\n elif self.mode.name == \"SCATTER\":\n self.scatter_goal()\n elif self.mode.name == \"FREIGHT\":\n self.random_goal()\n elif self.mode.name == \"SPAWN\":\n self.spawn_goal()\n self.move_by_self()\n\n def define_animations(self, row):\n animation = Animation(\"up\")\n\n animation.speed = 10\n animation.add_frame(self.spritesheet.get_image(0, row, 32, 32))\n animation.add_frame(self.spritesheet.get_image(1, row, 32, 32))\n self.animate.add(animation)\n\n animation = Animation(\"down\")\n animation.speed = 10\n animation.add_frame(self.spritesheet.get_image(2, row, 32, 32))\n animation.add_frame(self.spritesheet.get_image(3, row, 32, 32))\n self.animate.add(animation)\n\n animation = 
Animation(\"left\")\n animation.speed = 10\n animation.add_frame(self.spritesheet.get_image(4, row, 32, 32))\n animation.add_frame(self.spritesheet.get_image(5, row, 32, 32))\n self.animate.add(animation)\n\n animation = Animation(\"right\")\n animation.speed = 10\n animation.add_frame(self.spritesheet.get_image(6, row, 32, 32))\n animation.add_frame(self.spritesheet.get_image(7, row, 32, 32))\n self.animate.add(animation)\n\n animation = Animation(\"freight\")\n animation.speed = 10\n for i in range(25):\n animation.add_frame(self.spritesheet.get_image(0, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(1, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(2, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(3, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(0, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(1, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(2, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(3, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(0, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(1, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(2, 6, 32, 32))\n animation.add_frame(self.spritesheet.get_image(3, 6, 32, 32))\n self.animate.add(animation)\n\n animation = Animation(\"spawnup\")\n animation.speed = 10\n animation.add_frame(self.spritesheet.get_image(4, 6, 32, 32))\n self.animate.add(animation)\n\n animation = Animation(\"spawndown\")\n animation.speed = 10\n animation.add_frame(self.spritesheet.get_image(5, 6, 32, 32))\n self.animate.add(animation)\n\n animation = Animation(\"spawnleft\")\n animation.speed = 10\n animation.add_frame(self.spritesheet.get_image(6, 6, 32, 32))\n self.animate.add(animation)\n\n animation = Animation(\"spawnright\")\n animation.speed = 10\n animation.add_frame(self.spritesheet.get_image(7, 6, 32, 32))\n self.animate.add(animation)\n\n\nclass Blinky(Ghost):\n def __init__(self, nodes, spritesheet):\n Ghost.__init__(self, nodes, spritesheet, 2)\n self.name = \"blinky\"\n self.color = RED\n self.image = self.spritesheet.get_image(0, 2, 32, 32)\n\n def set_start_position(self):\n start_node = self.find_start_node()\n self.node = start_node\n self.target = self.node\n self.set_position()\n\n\nclass Pinky(Ghost):\n def __init__(self, nodes, spritesheet):\n Ghost.__init__(self, nodes, spritesheet, 3)\n self.name = \"pinky\"\n self.color = PINK\n self.image = self.spritesheet.get_image(0, 3, 32, 32)\n\n def scatter_goal(self):\n self.goal = Vector2D()\n\n def chase_goal(self, pacman):\n self.goal = pacman.position + pacman.direction * TILE_WIDTH * 4\n\n def set_start_position(self):\n start_node = self.find_start_node()\n self.node = start_node.neighbors[DOWN]\n self.target = self.node\n self.set_position()\n\n\nclass Inky(Ghost):\n def __init__(self, nodes, spritesheet):\n Ghost.__init__(self, nodes, spritesheet, 4)\n self.name = \"inky\"\n self.color = BLUE\n self.released = False\n self.pelletsForRelease = 30\n self.bannedDirections = [RIGHT]\n self.image = self.spritesheet.get_image(0, 4, 32, 32)\n\n def scatter_goal(self):\n self.goal = Vector2D()\n\n def chase_goal(self, pacman):\n self.goal = pacman.position - pacman.direction * TILE_WIDTH * 4\n\n def set_start_position(self):\n start_node = self.find_start_node()\n pinky_node = start_node.neighbors[DOWN]\n self.node = pinky_node.neighbors[LEFT]\n self.target = self.node\n self.set_position()\n\n\n# TODO: original movement pattern\nclass Clyde(Ghost):\n def 
__init__(self, nodes, spritesheet):\n Ghost.__init__(self, nodes, spritesheet, 5)\n self.name = \"clyde\"\n self.color = ORANGE\n self.released = False\n self.pelletsForRelease = 60\n self.bannedDirections = [LEFT]\n self.image = self.spritesheet.get_image(0, 5, 32, 32)\n\n def scatter_goal(self):\n self.goal = Vector2D()\n\n def chase_goal(self, pacman):\n self.goal = pacman.position + pacman.direction * TILE_WIDTH * 4\n\n def set_start_position(self):\n start_node = self.find_start_node()\n pinky_node = start_node.neighbors[DOWN]\n self.node = pinky_node.neighbors[RIGHT]\n self.target = self.node\n self.set_position()\n\n\nclass Ghosts(object):\n def __init__(self, nodes, spritesheet):\n self.nodes = nodes\n self.ghosts = [Blinky(nodes, spritesheet),\n Pinky(nodes, spritesheet),\n Inky(nodes, spritesheet),\n Clyde(nodes, spritesheet)]\n\n def __iter__(self):\n return iter(self.ghosts)\n\n def update(self, dt, pacman):\n for ghost in self.ghosts:\n ghost.update(dt, pacman)\n\n def freight_mode(self):\n for ghost in self:\n ghost.freight_mode()\n\n def render(self, screen):\n for ghost in self.ghosts:\n ghost.render(screen)\n\n def release(self, num_pellets_eaten):\n for ghost in self:\n if not ghost.released:\n if num_pellets_eaten >= ghost.pelletsForRelease:\n ghost.bannedDirections = []\n ghost.spawn_mode()\n ghost.released = True\n","repo_name":"PaulSeptember/Python-Pacman","sub_path":"ghosts.py","file_name":"ghosts.py","file_ext":"py","file_size_in_byte":14015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39826982267","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nimport pandas as pd\nimport csv\nimport json\nimport sys\nfrom sklearn.decomposition import PCA\nimport numpy as np\n\n\n# %%\ninput = sys.stdin.readlines()\ndataInString = input[0]\ndatalist = list(map(int, dataInString.split()))\n\nwith open('./user.json') as json_file:\n data = json.load(json_file)\ndata_file = open('data_file.csv', 'w')\ncsv_writer = csv.writer(data_file)\ncount = 0\nfor user in data:\n if count == 0:\n header = user.keys()\n csv_writer.writerow(header)\n count += 1\n csv_writer.writerow(user.values())\ndata_file.close()\ndata = pd.read_csv('./data_file.csv')\n\n\n# %%\nmodel = PCA(n_components=3)\n\n\n# %%\nmodel.fit(data.to_numpy()[:,1:].astype(float))\n\n# %% [markdown]\n# Test\n\n# %%\n\ntransformed_data = model.transform(data.to_numpy()[:,1:].astype(float))\n\ndef get_neighbors(user, top_n):\n transformed = model.transform(np.array(user).reshape(1, 12))\n dists = sorted(zip(np.sum((transformed_data - transformed)**2, axis=1), data['id']))\n return [name for d,name in dists[:top_n]]\n \n \n\n\n# %%\nprint(get_neighbors(datalist, 25))\n\n\n# %%\n\n\n","repo_name":"CS161-LOOKUP/LookUp-Backend","sub_path":"pca_method.py","file_name":"pca_method.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3305263836","text":"#!/usr/bin/python3\nfrom openpyxl import load_workbook\n\nwb = load_workbook(filename = 'student.xlsx')\ns = wb['Sheet1']\n\ni = 2\nlist = []\nmid = 'C' + str(i)\nwhile s[mid].value != None :\n\tmid = 'C' + str(i)\n\tfinal = 'D' + str(i)\n\thw = 'E' + str(i)\n\tatt = 'F' + str(i)\n\t\n\tif s[mid].value == None : break\n \n\ttotal = 'G' + str(i)\n\ts['G' + str(i)] = s[mid].value * 0.30 + s[final].value * 0.35 + s[hw].value * 0.34 + 
s[att].value\n\t\t\n\tlist.append(s[total].value)\n\ti += 1\n\nlist.sort()\nprint(list)\n\n# weights: mid 30, final 35, hw 34, att 1","repo_name":"hyeyoung21/BigDataProcess","sub_path":"hw1/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30653546824","text":"import matplotlib.pyplot as plt \nfrom pandas import *\n\nempage=[22,45,55,65,15,35,45,62,45,62,20,34,75,65,45,43,25,34,45]\nbins=[0,10,20,30,40,50,60,70,80]\n\nplt.hist(empage,bins,histtype='bar',rwidth=0.8,color='cyan')\n\nplt.xlabel('age')\nplt.ylabel('no of Employees')\nplt.title(\"Histogram example\")\nplt.legend()\nplt.show()","repo_name":"adityabhalsod/GTU-MCA-Practical","sub_path":"Semester III/Programming in Python (4639304)/p19.py","file_name":"p19.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"33345997548","text":"# @author Hillebrand, Fabian\n# @date 2019\n\nimport numpy as np\nimport scipy as sp\nimport math\nimport time\nimport tarfile\n\nhartreeToEV = 27.21138602\n\ndef const_coeffs(s=0.0, py=0.0, pz=0.0, px=0.0):\n  \"\"\"\n  Creates coefficients for separated tunnelling to each orbital.\n  The energies are set to zero.\n  \"\"\"\n  cc = np.array([s, py, pz, px]) != 0.0\n  coeffs = np.empty((sum(cc),4),)\n  ene = np.zeros(sum(cc))\n  if s != 0.0:\n    coeffs[sum(cc[:1])-1] = [s**0.5, 0.0, 0.0, 0.0]\n  if py != 0.0:\n    coeffs[sum(cc[:2])-1] = [0.0, py**0.5, 0.0, 0.0]\n  if pz != 0.0:\n    coeffs[sum(cc[:3])-1] = [0.0, 0.0, pz**0.5, 0.0]\n  if px != 0.0:\n    coeffs[sum(cc[:4])-1] = [0.0, 0.0, 0.0, px**0.5]\n  return [coeffs], [ene]\n\ndef read_PDOS(filename, eMin=0.0, eMax=0.0):\n  \"\"\"\n  Reads coefficients from a *.pdos file and uses these to construct tip \n  coefficients. The eigenvalues are shifted such that the Fermi energy \n  is at 0 and scaled such that the units are in eV.\n\n  Note: This function does not currently support spin (for the tip).\n\n  pdos         A list containing matrices. 
Rows correspond to eigenvalues\n while columns to orbitals.\n eigs A list containing arrays for eigenvalues per spin.\n \"\"\"\n # Open file\n if type(filename) is not tarfile.ExFileObject:\n f = open(filename)\n else: # Tar file already open\n f = filename\n\n lines = list(line for line in (l.strip() for l in f) if line)\n # TODO spins\n noSpins = 1\n noEigsTotal = len(lines)-2\n noDer = len(lines[1].split()[5:])\n\n homoEnergies = [float(lines[0].split()[-2])*hartreeToEV]\n pdos = [np.empty((noEigsTotal,noDer))]\n eigs = [np.empty((noEigsTotal))]\n\n # Read all coefficients, cut later\n for lineIdx, line in enumerate(lines[2:]):\n parts = line.split()\n eigs[0][lineIdx] = float(parts[1])*hartreeToEV\n pdos[0][lineIdx,:] = [float(val) for val in parts[3:]]\n\n # Cut coefficients to energy range\n startIdx = [None] * noSpins\n for spinIdx in range(noSpins):\n try:\n startIdx[spinIdx] = np.where(eigs[spinIdx] >= eMin+homoEnergies[spinIdx])[0][0]\n except:\n startIdx[spinIdx] = 0\n endIdx = [None] * noSpins\n for spinIdx in range(noSpins):\n try:\n endIdx[spinIdx] = np.where(eigs[spinIdx] > eMax+homoEnergies[spinIdx])[0][0]\n except:\n endIdx[spinIdx] = len(eigs[spinIdx])\n if endIdx <= startIdx:\n raise ValueError(\"Restricted energy-range too restrictive: endIdx <= startIdx\")\n\n eigs = [eigs[spinIdx][startIdx[spinIdx]:endIdx[spinIdx]] \\\n - homoEnergies[spinIdx] for spinIdx in range(noSpins)]\n pdos = [pdos[spinIdx][startIdx[spinIdx]:endIdx[spinIdx],:] \\\n for spinIdx in range(noSpins)]\n\n return pdos, eigs\n\nclass TipCoefficients:\n \"\"\"\n Structure that provides access to tip coefficients for each point in a list\n of grids. \n\n The coefficients can be rotated for the grid. The rotation is given through\n two different grids corresponding to the points of rotation and the rotated\n points. 
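Concretely, pos[i] supplies the points of rotation and pos[i+1] the rotated points for tunnelling i, so a list of N grids defines N-1 tunnellings.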
The axis of rotation is fixed in the x-y-plane.\n\n This structure provides access to the coefficients via bracket operators.\n The following structure is provided: [itunnel,ispin,iene][iorb,x,y,z].\n Note that this evaluation may be performed lazily.\n \"\"\"\n\n def __init__(self, mpi_rank=0, mpi_size=1, mpi_comm=None):\n self.mpi_rank = mpi_rank\n self.mpi_size = mpi_size\n self.mpi_comm = mpi_comm\n\n ### ------------------------------------------------------------------------\n ### Read function for coefficients\n ### ------------------------------------------------------------------------\n\n def read_coefficients(self, norbs, pdos_list, emin, emax):\n \"\"\"\n Reads coefficients from files or via command line if given in the\n shape (s, py, pz, px).\n Coefficients are broadcasted to all MPI processes.\n \"\"\"\n self._norbs = norbs\n self._singles = None\n self._ene = None\n self._type = None\n self._grid_dim = None\n self._ntunnels = None\n self._coeffs = None\n # Read untransformed tip coefficients and energies\n if self.mpi_rank == 0:\n self._singles = []\n idx = 0 # Index of input argument\n while idx < len(pdos_list):\n # tar.gz file instead\n if pdos_list[idx].endswith(\"tar.gz\"):\n tar = tarfile.open(pdos_list[idx], \"r:gz\")\n for member in tar.getmembers():\n single, self._ene = read_PDOS(tar.extractfile(member), emin, emax)\n for ispin in range(len(single)):\n single[ispin] = \\\n single[ispin][:,:(self.norbs+1)**2]**0.5\n self._singles.append(single)\n assert self.type != \"constant\", \"Tried to mix tip types!\"\n self._type = \"gaussian\"\n idx += 1\n # Normal input\n else:\n try:\n single, self._ene = const_coeffs(\n s=float(pdos_list[idx]),\n py=float(pdos_list[idx+1]),\n pz=float(pdos_list[idx+2]),\n px=float(pdos_list[idx+3]))\n assert self.type != \"gaussian\", \"Tried to mix tip types!\"\n self._type = \"constant\"\n idx += 4\n except ValueError:\n single, self._ene = read_PDOS(pdos_list[idx],\n emin, emax)\n # Take square root to obtain proper coefficients\n for ispin in range(len(single)):\n single[ispin] = \\\n single[ispin][:,:(self.norbs+1)**2]**0.5\n idx += 1\n assert self.type != \"constant\", \"Tried to mix tip types!\"\n self._type = \"gaussian\"\n self._singles.append(single)\n # Broadcast untransformed coefficients and energies\n if self.mpi_comm is not None:\n self._singles = self.mpi_comm.bcast(self._singles, root=0)\n self._ene = self.mpi_comm.bcast(self._ene, root=0)\n self._type = self.mpi_comm.bcast(self._type, root=0)\n\n ### ------------------------------------------------------------------------\n ### Initialize coefficients\n ### ------------------------------------------------------------------------\n\n def initialize(self, pos, rotate=False):\n \"\"\" \n Computes rotational matrix if necessary.\n This method does not communicate. 
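(With MPI, for instance, each rank would call this on its own slice of the position grids.)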
Positions should be split up \n before hand to avoid unnecessary calculations.\n \"\"\"\n self._grid_dim = np.shape(pos[0])[1:]\n self._ntunnels = len(pos)-1\n # s-orbtial on tip only\n if self.norbs == 0:\n return\n\n start = time.time()\n # p-orbital on tip\n self._rot_matrix = [None]*self.ntunnels\n if not rotate:\n for itunnel in range(self.ntunnels):\n self._rot_matrix[itunnel] = 1\n else:\n npoints = np.prod(self.grid_dim)\n shifted_pos = []\n rm_data = []\n for i in range(self.ntunnels):\n shifted_pos.append(np.array([\n pos[i+1][0]-pos[i][0],\n pos[i+1][1]-pos[i][1],\n pos[i+1][2]-pos[i][2]]\n ).reshape(3,npoints))\n rm_data.append(np.empty(9*npoints))\n # Sparse matrix storage as COO\n ihelper = np.arange(3*npoints,dtype=int)\n # rows = [0,1,2,...,0,1,2,...,0,1,2,...]\n rm_rows = np.tile(ihelper,3)\n # cols = [0,0,0,1,1,1,2,2,2,...]\n rm_cols = np.repeat(ihelper,3)\n del ihelper\n # Matrix data\n for itunnel in range(self.ntunnels):\n v = np.array([0.0,0.0,-1.0])\n # Rotated vector\n w = shifted_pos[itunnel]\n w /= np.linalg.norm(w,axis=0)\n # Rotation axis (no rotation around x-y)\n n = np.cross(v,w,axisb=0).transpose()\n # Trigonometric values\n cosa = np.dot(v,w)\n sina = (1-cosa**2)**0.5\n\n # R = [[n[0]**2*(1-cosa)+cosa, n[0]*n[1]*(1-cosa)-n[2]*sina, n[0]*n[2]*(1-cosa)+n[1]*sina],\n # [n[1]*n[0]*(1-cosa)+n[2]*sina, n[1]**2*(1-cosa)+cosa, n[1]*n[2]*(1-cosa)-n[0]*sina],\n # [n[2]*n[1]*(1-cosa)-n[1]*sina, n[2]*n[1]*(1-cosa)+n[0]*sina, n[2]**2*(1-cosa)+cosa]]\n # Permutation matrix for betas: (y,z,x) -> (x,y,z)\n # P = [[0,0,1],\n # [1,0,0],\n # [0,1,0]]\n # Note: The rotational matrix R is with respect to the sample which is R^T for the tip\n # --> R to R^T for going from tip to sample, R^T to R for gradient\n rm_data[itunnel][:npoints] = n[1]**2*(1-cosa)+cosa\n rm_data[itunnel][npoints:2*npoints] = n[2]*n[1]*(1-cosa)-n[0]*sina\n rm_data[itunnel][2*npoints:3*npoints] = n[0]*n[1]*(1-cosa)+n[2]*sina\n rm_data[itunnel][3*npoints:4*npoints] = n[1]*n[2]*(1-cosa)+n[0]*sina\n rm_data[itunnel][4*npoints:5*npoints] = n[2]**2*(1-cosa)+cosa\n rm_data[itunnel][5*npoints:6*npoints] = n[0]*n[2]*(1-cosa)-n[1]*sina\n rm_data[itunnel][6*npoints:7*npoints] = n[1]*n[0]*(1-cosa)-n[2]*sina\n rm_data[itunnel][7*npoints:8*npoints] = n[2]*n[0]*(1-cosa)+n[1]*sina\n rm_data[itunnel][8*npoints:9*npoints] = n[0]**2*(1-cosa)+cosa\n\n # Build large matrices\n for itunnel in range(self.ntunnels):\n self._rot_matrix[itunnel] = \\\n sp.sparse.csr_matrix(sp.sparse.coo_matrix((rm_data[itunnel],\n (rm_rows,rm_cols)), shape=(3*npoints, 3*npoints)))\n\n end = time.time()\n print(\"Rotational matrices took {} seconds\".format(end-start))\n\n ### ------------------------------------------------------------------------\n ### Access operators\n ### ------------------------------------------------------------------------\n\n @property\n def ene(self):\n \"\"\" List of energies per spin. \"\"\"\n return self._ene\n @property\n def singles(self):\n \"\"\" Untransformed coefficients. \"\"\"\n return self._singles\n @property\n def type(self):\n \"\"\" Either gaussian or constant. \"\"\"\n return self._type\n @property\n def grid_dim(self):\n \"\"\" Dimension of grid for tip positions. \"\"\"\n return self._grid_dim\n @property\n def norbs(self):\n \"\"\" Number of tip orbitals. \"\"\"\n return self._norbs\n @property\n def nspin(self):\n \"\"\" Number of spins. \"\"\"\n return len(self.ene)\n @property\n def ntunnels(self):\n \"\"\" Number of tunnellings. 
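Equal to len(pos)-1, as set in initialize.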
\"\"\"\n return self._ntunnels\n\n def __getitem__(self, ituple):\n \"\"\" Takes an index tuple (itunnel,ispin,iene). \"\"\"\n # Unpack indices\n itunnel, ispin, iene = ituple\n if self._coeffs is not None \\\n and self._cene == iene \\\n and self._cspin == ispin \\\n and self._ctunnel == itunnel:\n return self._coeffs\n # Storage container\n self._coeffs = np.empty(((self.norbs+1)**2,)+self.grid_dim)\n\n # s-orbitals: Are never rotated\n self._coeffs[0].fill(self._singles[itunnel][ispin][iene,0])\n if self.norbs > 0:\n # p-orbitals: Are rotated like vectors\n self._coeffs[1].fill(self.singles[itunnel][ispin][iene,1])\n self._coeffs[2].fill(self.singles[itunnel][ispin][iene,2])\n self._coeffs[3].fill(self.singles[itunnel][ispin][iene,3])\n # Flat view of coeffs[1:4]\n flat_coeffs = self._coeffs[1:4].ravel()\n # Provoke write into flat view instead of overwriting variable with [:]\n flat_coeffs[:] = self._rot_matrix[itunnel]*self._coeffs[1:4].flatten()\n # Save some information\n self._ctunnel, self._cspin, self._cene = ituple\n return self._coeffs\n","repo_name":"nanotech-empa/cp2k-spm-tools","sub_path":"hrstm_tools/tip_coeffs.py","file_name":"tip_coeffs.py","file_ext":"py","file_size_in_byte":12472,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"693357099","text":"from histogram import Histogram\nfrom radixtree import RadixTree\nfrom binarytree import BinaryTree\nimport timeit\nimport string\nimport pickle\n\n\nrad = RadixTree()\nbin = BinaryTree()\n\n\ndef get_full_word_list():\n bad = string.punctuation + \"\\n\"\n table = str.maketrans(bad, \" \"*len(bad))\n words = []\n with open(\"data/downandout.txt\", 'r') as f:\n for i in f:\n if i == \"\\n\":\n continue\n words.extend(i.translate(table).strip().lower().split(\" \"))\n words = [i for i in words if i.strip() != \"\"]\n return words\n\n\ndef test_inserts(words, tree):\n hist = Histogram(tree)\n for i in words:\n hist.add_word(i)\n\n\ndef test_frequency(word, hist):\n hist.frequency(word)\n\n\nWORDS_ALL = get_full_word_list()\nWORDS_100 = WORDS_ALL[:100]\n\n\nif __name__ == \"__main__\":\n radix_freq = Histogram(RadixTree)\n binary_freq = Histogram(BinaryTree)\n for word in WORDS_ALL:\n radix_freq.add_word(word)\n binary_freq.add_word(word)\n\n print(\"radix_space: \", len(pickle.dumps(radix_freq)))\n print(\"binary_space: \", len(pickle.dumps(binary_freq)))\n\n radix_insert_test = \"test_inserts(WORDS_100, RadixTree)\"\n binary_insert_test = \"test_inserts(WORDS_100, BinaryTree)\"\n radix_freq_test = \"test_frequency('{}', radix_freq)\".format(WORDS_ALL[-1])\n binary_freq_test = \"test_frequency('{}',binary_freq)\".format(WORDS_ALL[-1])\n\n trial_nums = [10, 100, 1000, 10000]\n\n row = [\"num\", \"radix_insert\", \"binary_insert\",\n \"radix_freq\", \"binary_freq\"]\n print(row)\n\n tests = [radix_insert_test,\n binary_insert_test,\n radix_freq_test,\n binary_freq_test]\n\n for num in trial_nums:\n row = [str(num)]\n for test in tests:\n timer = timeit.Timer(test, globals=globals())\n result = timer.timeit(number=num)\n row.append(result)\n print(row)\n","repo_name":"MakeSchool-17/twitter-bot-python-hDeraj","sub_path":"histogram_benchmark.py","file_name":"histogram_benchmark.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4255183408","text":"from franz.openrdf.connect import ag_connect\nfrom franz.openrdf.query.query import QueryLanguage\n\nfrom plotly.subplots 
import make_subplots\nimport plotly.graph_objects as go\n\nclass ExplorationSemanticGraph:\n conn=None\n def __init__(self):\n self.conn = ag_connect('ppitriplificator', create=False, clear=False)\n \n def load_obo(self):\n goa={}\n flag=False\n f=open(\"go.obo\",\"r\")\n for line in f:\n l=line.replace('\\n','')\n if(l.find('[Term]')!=-1):\n id_=\"\"\n name=\"\"\n branch=\"\"\n flag=True\n \n if(flag):\n if(l.startswith('id: GO')):\n id_=l.split(\": \")[1]\n \n if(l.startswith('name: ')):\n name=l.split(\": \")[1]\n \n if(l.startswith('namespace:')):\n branch=l.split(\": \")[1]\n if(id_!=\"\" and name!=\"\" and branch!=\"\"):\n goa[id_] = { 'name': name, 'branch': branch }\n \n f.close() \n \n return goa \n \n def run_query_bp_hostPathogen(self):\n for ds in ['predprin', 'hpidb']:\n print('----> Dataset ', ds)\n \n mapp={ '83331': 'Mycobacterium tuberculosis', '83332': 'Mycobacterium tuberculosis', '9606': 'Homo sapiens', '83332': 'Escherichia coli', '233413':'Mycobacterium tuberculosis', '83334': 'Escherichia coli', '93061': 'Staphylococcus aureus', '233413': 'Mycobacterium tuberculosis', '208964': 'Pseudomonas aeruginosa', '287': 'Pseudomonas aeruginosa', '562': 'Escherichia coli', '1280': 'Staphylococcus aureus', '158879': 'Staphylococcus aureus', '93061': 'Staphylococcus aureus', '574521': 'Escherichia coli' }\n goa=self.load_obo()\n \n for bra in ['cc', 'bp', 'mf']:\n print('\\t----> branch ', bra)\n \n res={}\n taxons=set()\n query=\"\"\"\n prefix ontoppi: \n prefix ppiprov: \n prefix rdfs: \n\n select distinct ?nameds ?uniprot1 ?uniprot2 ?taxon1 ?taxon2 (group_concat(distinct ?bp1; separator=\" | \") as ?processes_protein_1) (group_concat(distinct ?bp2; separator=\" | \") as ?processes_protein_2) where {\n ?interaction ontoppi:participant1 ?protein1 . \n ?protein1 ontoppi:hasUniprotCorrespondent ?uniprot1 . \n ?protein1 ontoppi:hasGO_\"\"\"+bra+\"\"\"_annotation ?bp1 . \n ?protein1 ontoppi:fromOrganism ?taxon1 .\n\n ?interaction ontoppi:participant2 ?protein2 . \n ?protein2 ontoppi:hasGO_\"\"\"+bra+\"\"\"_annotation ?bp2 . \n ?protein2 ontoppi:hasUniprotCorrespondent ?uniprot2 . \n ?protein2 ontoppi:fromOrganism ?taxon2 .\n \n ?interaction ontoppi:belongsTo ?dataset . 
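# each interaction is tied to its source dataset; the label is matched below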
\n ?dataset rdfs:label ?nameds .\n \t\n filter (?taxon1 != ?taxon2 && regex(?nameds, '\"\"\"+ds+\"\"\"', 'i')) .\n \n } group by ?nameds ?uniprot1 ?uniprot2 ?taxon1 ?taxon2\n \"\"\"\n tuple_query = self.conn.prepareTupleQuery(QueryLanguage.SPARQL, query)\n tuple_query.setIncludeInferred(True) #if false it does not retrieve inferred triples\n result = tuple_query.evaluate()\n with result:\n for binding_set in result:\n #print(\"%s %s %s %s\" % (str(binding_set.getValue(\"taxon1\")), binding_set.getValue(\"taxon2\"), binding_set.getValue(\"processes_protein_1\"), binding_set.getValue(\"processes_protein_2\") ) ) \n t1 = str(binding_set.getValue(\"taxon1\")).split('/')[-1].replace('>','')\n t2 = str(binding_set.getValue(\"taxon2\")).split('/')[-1].replace('>','')\n \n if(not t1 in taxons):\n taxons.add(t1)\n if(not t2 in taxons):\n taxons.add(t2)\n \n if(t1=='9606' or t2=='9606'): \n \"\"\"\n if(t1=='9606'):\n t1=t1+'_'+t2\n \n if(t2=='9606'):\n t2=t2+'_'+t1\n \"\"\"\n if(t1 in mapp.keys() and t2 in mapp.keys()):\n t1=mapp[t1]\n t2=mapp[t2] \n \n if(not t1 in res.keys()):\n res[t1]={}\n prs1 = str(binding_set.getValue(\"processes_protein_1\")).replace('\"','').split(' | ')\n for bp in prs1:\n if(not bp in res[t1].keys() and bp in goa.keys()):\n res[t1][bp]=0\n \"\"\"else:\n if(not bp in goa.keys()):\n print(t1, bp)\"\"\"\n \n if(bp in res[t1].keys()):\n res[t1][bp]+=1\n \n if(not t2 in res.keys()):\n res[t2]={}\n prs2 = str(binding_set.getValue(\"processes_protein_2\")).replace('\"','').split(' | ')\n for bp in prs2:\n if(not bp in res[t2].keys() and bp in goa.keys()):\n res[t2][bp]=0\n \"\"\"else:\n if(not bp in goa.keys()):\n print(t2, bp)\"\"\"\n \n if(bp in res[t2].keys()):\n res[t2][bp]+=1\n \n for t in taxons:\n print(t)\n \n plts={}\n \n ins=1\n r=1\n c=1\n f=open(ds+\"_\"+bra+\"_results_enrichment.tsv\",\"w\")\n for k in res.keys():\n x=[]\n y=[] \n sorted_=dict(sorted(res[k].items(), key=lambda item: item[1], reverse=True))\n total=sum(res[k].values())\n for s in sorted_:\n perc=sorted_[s]/total\n #print(k, s, goa[s]['name'], sorted_[s], perc)\n if(len(x)<5):\n x.append(goa[s]['name'])\n y.append(perc*100)\n \n f.write(\"%s\\t%s\\t%s\\t%i\\t%.4f\\n\" %(k, s, goa[s]['name'], sorted_[s], perc) )\n \n \"\"\"if(k in mapp.keys()):\n if(not mapp[k] in mapp.keys()):\n plts[mapp[k]]=[x, y]\"\"\"\n \n plts[k]=[x, y, r, c]\n c+=1\n if(ins%2==0):\n r+=1\n c=1\n ins+=1\n f.close()\n \n \"\"\"\n labels=tuple(list(plts.keys()))\n fig = make_subplots( rows=r, cols=2, subplot_titles=labels)\n for k in plts.keys():\n fig.add_trace(go.Bar(x=plts[k][0], y=plts[k][1]), row=plts[k][2], col=plts[k][3]) \n fig.update_layout(height=700, width=700, title_text=\"Most popular \"+bra.upper()+\" annotations\")\n fig.write_image('panel_'+bra+'.png')\n \"\"\"\n \n\"\"\"\n prefix ontoppi: \n prefix ppiprov: \n prefix rdfs: \n\n select distinct ?uniprot1 ?uniprot2 ?taxon1 ?taxon2 (group_concat(distinct ?bp1; separator=\" | \") as ?processes_protein_1) (group_concat(distinct ?bp2; separator=\" | \") as ?processes_protein_2) where {\n ?interaction ontoppi:participant1 ?protein1 . \n ?protein1 ontoppi:hasUniprotCorrespondent ?uniprot1 . \n ?protein1 ontoppi:hasGO_cc_annotation ?bp1 . \n ?protein1 ontoppi:fromOrganism ?taxon1 .\n\n ?interaction ontoppi:participant2 ?protein2 . \n ?protein2 ontoppi:hasGO_cc_annotation ?bp2 . \n ?protein2 ontoppi:hasUniprotCorrespondent ?uniprot2 . 
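# reference query: same pattern as above, without the dataset filter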
\n ?protein2 ontoppi:fromOrganism ?taxon2 .\n \t\n filter (?taxon1 != ?taxon2 ) .\n \n } group by ?uniprot1 ?uniprot2 ?taxon1 ?taxon2\n\"\"\" \n \n\"\"\"\nDistinct taxons \nprefix ontoppi: \n prefix ppiprov: \n prefix rdfs: \n\n select distinct ?taxon ?dataset where {\n ?protein rdf:type ontoppi:PairComponent . \n ?protein ontoppi:fromOrganism ?taxon .\n \t\n } \n\"\"\" \n\"\"\"\nExperiments and datasets and the number of ppis in each of them\nprefix ontoppi: \n prefix ppiprov: \n prefix rdfs: \n\n select ?exp ?dataset (count(?interaction) as ?number_ppis) where {\n ?exp ontoppi:hasDataset ?dataset . \n ?interaction ontoppi:belongsTo ?dataset .\n } group by ?exp ?dataset\n\"\"\"\n \na=ExplorationSemanticGraph()\na.run_query_bp_hostPathogen()\n\n","repo_name":"YasCoMa/ppintegrator","sub_path":"hostpathogen_application/analysis_semnatic_graph.py","file_name":"analysis_semnatic_graph.py","file_ext":"py","file_size_in_byte":10539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70597653635","text":"import pyglet\nfrom pyglet.window import key\nimport math\nimport resources\n\nimport menu\nimport game\n\n# Show bounding boxes\nDEBUG = False\n\nclass GameController:\n ''' Main Game Object to handle overall game logic '''\n def __init__(self, window):\n self.window = window\n self.active_env = None\n self.menu_env = menu.MainMenu(self.start_game, self.exit, window)\n self.level_env = game.Level(self.start_menu, self.window)\n self.start_menu()\n\n def start_menu(self):\n self.window.remove_handlers(self.active_env)\n self.active_env = self.menu_env\n self.window.push_handlers(self.active_env)\n\n def start_game(self):\n self.window.remove_handlers(self.active_env)\n self.active_env = self.level_env\n self.window.push_handlers(self.active_env)\n\n def exit(self):\n pyglet.app.exit()\n\n def draw(self):\n ''' Main draw method '''\n self.window.clear()\n if self.active_env:\n self.active_env.draw()\n\n def update(self, dt):\n if self.active_env:\n self.active_env.update(dt)\n\n \n# 21 x 15\nwindow = pyglet.window.Window(1080, 768)\nwindow.set_location(400, 50)\ngameController = GameController(window)\npyglet.clock.schedule_interval(gameController.update, 1/120.0)\n\n@window.event\ndef on_draw():\n gameController.draw()\n\npyglet.app.run()\n\n\n","repo_name":"JayMil/pyglet_games","sub_path":"pocs/top-view/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39884058618","text":"\r\nfrom math import *\r\nimport numpy as np\r\nfrom scipy import linalg as linalg\r\nfrom Indeterminacy import *\r\n\r\ndef AmendolaPreset(preStrain = 0.036,Q = 0.048e2):\r\n '''\r\n This function is a preset structure of Amendola's rigid top simplex simulation in Amendola, A., Carpentieri, G., de Oliveira, M., Skelton, R. E., & Fraternali, F. (2014). Experimental investigation of the softening-stiffening response of tensegrity prisms under compressive loading. Composite Structures, 117(1), 234–243. 
https://doi.org/10.1016/j.compstruct.2014.06.022\r\n    Returns the structure class\r\n    '''\r\n    # A Amendola\r\n    sc = 0 #length scale (mm)\r\n    l0 = 0.03408 #side tendon length (for simplex)\r\n    H = 0.02229\r\n    # b0 = sqrt(s0**2+(2*l0**2/sqrt(3)))\r\n    # l0 = sqrt((H**2-s0**2)/(1/3*(sqrt(3)-2)))\r\n    D1 = l0\r\n    # V,C,T,e_per,l = simplex(H,D1,1.125)\r\n    # T = T*0.0014\r\n    E1 = 112.3e6 #Bar stiffness\r\n    E2 = 5.48e9 #Tendon stiffness\r\n    E3 = 10000e9 #Simulated rigid top\r\n    E = np.concatenate((E1*np.ones((3,1)),E2*np.ones((3,1)),E3*np.ones((6,1))),axis = 0)\r\n    LW = 1200 #Linewidth for printing (should be just T if T>1)\r\n    T =[0.0015,0.0001]\r\n    ctrlE = True\r\n    Struct = simplex(H,D1,T,E1 = E,E2 = E,shrink = -preStrain,LW = LW)\r\n    return sc,Q,Struct\r\n\r\n\r\ndef FraternaliPreset(preStrain = 0.036,Q = 0.18e3/3):\r\n    '''\r\n    This function is a preset structure of Fraternali's simplex simulation in Fraternali, F., Carpentieri, G., & Amendola, A. (2015). On the mechanical modeling of the extreme softening/stiffening response of axially loaded tensegrity prisms. Journal of the Mechanics and Physics of Solids. https://doi.org/10.1016/j.jmps.2014.10.010\r\n    Returns the structure class\r\n    Q is the load applied to each node in newtons; the total load is 3*Q\r\n    preStrain is the approximate delta in strain applied to the bars and tendons\r\n    '''\r\n    # Fraternali\r\n    sc = 0 #length scale (um)\r\n    s0 = 0.080 #side tendon length (for simplex)\r\n    l0 = 0.132 #Base tendon length (for simplex)\r\n    H = sqrt(s0**2+1/3*(sqrt(3)-2)*l0**2)\r\n    b0 = sqrt(s0**2+(2*l0**2/sqrt(3)))\r\n    D1 = l0\r\n    # V,C,T,e_per,l = simplex(H,D1,1.125)\r\n    # T = T*0.0014\r\n    E1 = 203.3e9 #Bar stiffness\r\n    E2 = 5.48e9 #Tendon stiffness\r\n    E = np.concatenate((E1*np.ones((3,1)),E2*np.ones((9,1))),axis = 0)\r\n    LW = 1000 #Linewidth for printing (should be just T if T>1)\r\n    T =[0.00341,0.000378]\r\n    Struct = simplex(H,D1,T,E1 = E, E2 = E,shrink =-preStrain,LW = LW)\r\n    ctrlE = True #Takes stiffness from above not from class\r\n    return sc,Q,Struct\r\n\r\nclass simplex:\r\n    '''This class defines the various parameters in a simplex structure.\r\n    The outputs are:\r\n    V: Set of nodal coordinates (nx6) with the columns 1-3 representing the nodes and columns 4-6 representing the confinement of the structure\r\n    C: Set of connectivities of the structure (bx3) where column 1 represents whether the member is a bar (1) or a tendon (2)\r\n    T: Thickness of each member\r\n    e_: The prescribed elongation percent of each member\r\n    l: the loads applied to the structure\r\n    This class requires inputs of the geometric dimensions (Height H and width w), the bar and tendon thicknesses, as well as the tendon radius (p). 
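For example, simplex(0.0223, 0.0341, [0.0015, 0.0001]) builds roughly the geometry of the Amendola preset above (illustrative values).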
They take option arguments of the pre and post pyrolysis stiffnesses, defined shrinkage (not the experimental shrinkage), and an option to confine the structure for just the loading step.\r\n '''\r\n def __init__(self,H,w,T,E1 = 2.7e9, E2 = 36e9,shrink = None, postConfine = False,LW = 5):\r\n self.name = 'simplex'\r\n self.height = H\r\n self.width = w\r\n self.E1 = E1\r\n self.E2 = E2\r\n self.T1 = T\r\n self.LW = LW\r\n\r\n #Shrinkage\r\n if shrink == None:\r\n #Add global shrinkage onto initial geometry\r\n SHRNK = [-(0.7391434 + 0.1007027*exp(-0.08831393*t*2)) for t in T] #Experimental equation\r\n\r\n H = H*(1+SHRNK[0]) #Adjust height to minimize applied shrinkage\r\n w = w*(1+SHRNK[0])\r\n e_ = [0,SHRNK[1]-SHRNK[0]] #Only apply differential shrinkage\r\n e_trans = list(map(lambda t: -(0.0003*(2*t)**2-0.0054*(2*t)+0.8251),T))\r\n T = [t*(1+e) for (t,e) in zip(T,e_trans)] #Adjust to post pyrolysis shrinkage\r\n else:\r\n e_ = [-shrink,0]\r\n e_trans = [0 for t in T]\r\n\r\n self.T2 = T\r\n self.e_trans = e_trans\r\n\r\n #w is the length of the base tendons\r\n R = w*sin(pi/6)/sin(2*pi/3) #radius of the triangle vertices\r\n n = 3 #Number of bars\r\n thetal = -2*pi/3 #Angle between nodes\r\n thetah = -(pi/2-pi/n) #twist angle\r\n\r\n #Vertices\r\n V = [(R, 0, -H/2, 0, 0, 1),(R*cos(thetal), R*sin(thetal), -H/2, 0, 0, 1),(R*cos(2*thetal), R*sin(2*thetal),-H/2, 0, 0, 1), (R*cos(thetah), R*sin(thetah), H/2, 0, 0, 0),(R*cos(thetal+thetah), R*sin(thetal+thetah), H/2, 0, 0, 0),(R*cos(2*thetal+thetah), R*sin(2*thetal+thetah), H/2, 0, 0, 0)]\r\n self.V = V\r\n\r\n #Connectivity\r\n self.C = [(1, 0, 4),(1, 1, 5),(1, 2, 3),(2, 0, 3),(2, 1, 4),(2, 2, 5),(2, 0, 1),(2, 1, 2),(2, 2, 0),(2, 3, 4),(2, 4, 5),(2, 5, 3)]\r\n\r\n # Loads\r\n self.l = [3,4,5] #Coordinates of V that the loads are applied to\r\n self.e_ = e_\r\n\r\n # self.LW = 1\r\n\r\n if postConfine:\r\n self.postConfine = [(0., 1., 1.),\r\n (1., 1., 1.),\r\n (0., 0., 1.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.)]\r\n else:\r\n self.postConfine = [(0., 0., 1.),\r\n (0., 0., 1.),\r\n (0., 0., 1.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.)]\r\n\r\n\r\nclass icosahedron:\r\n '''This class defines the various parameters in an icosahedron structure.\r\n The outputs are:\r\n V: Set of nodal coordinates (nx6) with the columns 1-3 representing the nodes and columns 4-6 representing the confinement of the structure\r\n C: Set of connectivities of the structure (bx3) where column 1 represents whether the member is a bar (1) or a tendon (2)\r\n T: Thickness of each member\r\n e_: The prescribed elongation percent of each member\r\n l: the loads applied to the structure\r\n This class requires inputs of the geometric dimensions (Height H and width w), the bar and tendon thicknesses, as well as the tendon radius (p). 
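Note that only H drives the vertex coordinates below; w is stored but unused in the geometry, so icosahedron(0.02, 0.02, [0.0015, 0.0001]) is a representative call (illustrative values).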
They take option arguments of the pre and post pyrolysis stiffnesses, defined shrinkage (not the experimental shrinkage), and an option to confine the structure for just the loading step.\r\n '''\r\n def __init__(self,H,w,T,E1 = 2.7e9, E2 = 36e9,shrink = None, postConfine = False,LW = 5):\r\n self.name = 'icos'\r\n self.height = H\r\n self.width = w\r\n self.E1 = E1\r\n self.E2 = E2\r\n self.T1 = T\r\n self.LW = LW\r\n\r\n #Shrinkage\r\n if shrink == None:\r\n #Add global shrinkage onto initial geometry\r\n SHRNK = [-(0.7391434 + 0.1007027*exp(-0.08831393*t*2)) for t in T] #Experimental equation\r\n H = H*(1+SHRNK[0]) #Adjust height to minimize applied shrinkage\r\n e_ = [0,SHRNK[1]-SHRNK[0]] #Only apply differential shrinkage\r\n e_trans = list(map(lambda t: -(0.0003*(2*t)**2-0.0054*(2*t)+0.8251),T))\r\n T = [t*(1+e) for (t,e) in zip(T,e_trans)] #Adjust to post pyrolysis shrinkage\r\n else:\r\n e_ = [-shrink,0]\r\n e_trans = [0 for t in T]\r\n\r\n self.T2 = T\r\n self.e_trans = e_trans\r\n\r\n #Vertices\r\n h = H\r\n V = [(h/2, 0, -h/4, 0,0,0),\r\n (h/2, 0, h/4,0,0,1),\r\n (-h/2, 0, -h/4, 0,0,0),\r\n (-h/2, 0, h/4, 0,0,0),\r\n (h/4, h/2, 0, 0,0,0),\r\n (h/4, -h/2, 0, 0,0,1),\r\n (-h/4, -h/2, 0, 0,0,0),\r\n (-h/4, h/2, 0, 0,0,0), #top 21\r\n (0, h/4, h/2, 0,0,0),\r\n (0, h/4, -h/2, 0,0,0), #top 27\r\n (0, -h/4, -h/2, 0,0,0),\r\n (0, -h/4, h/2, 0,0,1)]\r\n\r\n Vert = [tuple(v[:3]) for v in V]\r\n J = subtract(Vert[2],Vert[9])\r\n K = subtract(Vert[7],Vert[9])\r\n Tn = mult(1/norm(cross(J,K)),cross(J,K))\r\n N = mult(1/norm(J),J)\r\n R = mult(1/norm(cross(Tn,N)),cross(Tn,N))\r\n\r\n B = [N]+[R]+[Tn] #Basis matrix\r\n\r\n for i,v in enumerate(V): #Rotate icos to desired position\r\n V[i] = tuple(vProduct(v[:3],B)+[v[3]]+[v[4]]+[v[5]])\r\n self.V = V\r\n\r\n #Connectivity\r\n self.C = [(1, 1, 3),(1, 5, 4),(1, 11, 10),(1, 0, 2),(1, 6, 7),(1, 8, 9),(2, 1, 11),(2, 1, 5),(2, 5, 11),(2, 5, 0),(2, 5, 10),(2, 11, 3),(2, 11, 6),(2, 1, 4),(2, 1, 8),(2, 4, 0),(2, 0, 9),(2, 0, 10),(2, 2, 6),(2, 2, 7),(2, 2, 10),(2, 2, 9),(2, 3, 6),(2, 3, 7),(2, 3, 8),(2, 4, 8),(2, 4, 9),(2, 6, 10),(2, 7, 8),(2, 7, 9)]\r\n\r\n #loads\r\n self.l = [2,7,9] #The nodes that the load is applied on\r\n self.e_ = e_\r\n\r\n self.LW = LW\r\n\r\n\r\n if postConfine:\r\n self.postConfine = [(0., 0., 0.),\r\n (1., 1., 1.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (1., 0., 1.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 1.)]\r\n else:\r\n self.postConfine = [(0., 0., 0.),\r\n (0., 0., 1.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 1.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 1.)]\r\n\r\n\r\n\r\nclass tetrakaidecahedron:\r\n '''\r\n This class defines the various parameters in a tetrakaidecahedron structure.\r\n The outputs are:\r\n V: Set of nodal coordinates (nx6) with the columns 1-3 representing the nodes and columns 4-6 representing the confinement of the structure\r\n C: Set of connectivities of the structure (bx3) where column 1 represents whether the member is a bar (1) or a tendon (2)\r\n T: Thickness of each member\r\n e_: The prescribed elongation percent of each member\r\n l: the loads applied to the structure\r\n This class requires inputs of the geometric dimensions (Height H and width w), the bar and tendon thicknesses, as well as the tendon radius (p). 
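The twist angle phi below was form-found for a 3:2 height-to-width ratio, so a representative call is tetrakaidecahedron(30., 20., [1.5, 0.5]) (illustrative values).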
They take option arguments of the pre and post pyrolysis stiffnesses, defined shrinkage (not the experimental shrinkage), and an option to confine the structure for just the loading step.\r\n '''\r\n def __init__(self,H,w,T,E1 = 2.7e9, E2 = 36e9,shrink = None, postConfine = False,LW = 1):\r\n self.name = 'tetkai'\r\n self.height = H\r\n self.width = w\r\n self.E1 = E1\r\n self.E2 = E2\r\n self.T1 = T\r\n self.LW = 1\r\n\r\n #Shrinkage\r\n if shrink == None:\r\n #Add global shrinkage onto initial geometry\r\n SHRNK = [-(0.7391434 + 0.1007027*exp(-0.08831393*t*2)) for t in T] #Experimental equation\r\n H = H*(1+SHRNK[0]) #Adjust height to minimize applied shrinkage\r\n w = w*(1+SHRNK[0])\r\n e_ = [0,SHRNK[1]-SHRNK[0]] #Only apply differential shrinkage\r\n e_trans = list(map(lambda t: -(0.0003*(2*t)**2-0.0054*(2*t)+0.8251),T))\r\n T = [t*(1+e) for (t,e) in zip(T,e_trans)] #Adjust to post pyrolysis shrinkage\r\n else:\r\n e_ = [-shrink,0]\r\n e_trans = [0 for t in T]\r\n\r\n self.T2 = T\r\n self.e_trans = e_trans\r\n\r\n h = H\r\n phi = 0.3287663062125372\r\n if round(h/w,2) != 1.5:\r\n print('H:W must be 3:2 for tetkai\\nIf you need a different config, you must run form finding to find new phi and replace in code')\r\n # Nodes\r\n #Define nodes of the cell\r\n\r\n V = [(-w/2*sin(phi/2),-w/2*cos(phi/2),-h/2,1,1,1),\r\n (-w/2*cos(phi/2),w/2*sin(phi/2),-h/2,0,0,1),\r\n (w/2*sin(phi/2),w/2*cos(phi/2),-h/2,0,0,1),\r\n (w/2*cos(phi/2),-w/2*sin(phi/2),-h/2,0,0,1),\r\n (w/2*sin(phi/2),-w/2*cos(phi/2),h/2,0,0,0),\r\n (-w/2*cos(phi/2),-w/2*sin(phi/2),h/2,0,0,0),\r\n (-w/2*sin(phi/2),w/2*cos(phi/2),h/2,0,0,0),\r\n (w/2*cos(phi/2),w/2*sin(phi/2),h/2,0,0,0),\r\n (-h/2,-w/2*cos(phi/2),w/2*sin(phi/2),0,0,0),\r\n (-h/2, w/2*sin(phi/2),w/2*cos(phi/2),0,0,0),\r\n (-h/2,w/2*cos(phi/2),-w/2*sin(phi/2),0,0,0),\r\n (-h/2, -w/2*sin(phi/2),-w/2*cos(phi/2),0,0,0),\r\n (h/2,-w/2*cos(phi/2),-w/2*sin(phi/2),0,0,0),\r\n (h/2, -w/2*sin(phi/2),w/2*cos(phi/2),0,0,0),\r\n (h/2,w/2*cos(phi/2),w/2*sin(phi/2),0,0,0),\r\n (h/2, w/2*sin(phi/2),-w/2*cos(phi/2),0,0,0),\r\n (-w/2*sin(phi/2),-h/2,w/2*cos(phi/2),0,0,0),\r\n (-w/2*cos(phi/2),-h/2,-w/2*sin(phi/2),0,0,0),\r\n (w/2*sin(phi/2),-h/2,-w/2*cos(phi/2),0,0,0),\r\n (w/2*cos(phi/2),-h/2,w/2*sin(phi/2),0,0,0),\r\n (w/2*sin(phi/2),h/2,w/2*cos(phi/2),0,0,0),\r\n (-w/2*cos(phi/2),h/2,w/2*sin(phi/2),0,0,0),\r\n (-w/2*sin(phi/2),h/2,-w/2*cos(phi/2),0,0,0),\r\n (w/2*cos(phi/2),h/2,-w/2*sin(phi/2),0,0,0)]\r\n\r\n # Members\r\n C =[(1,0, 14),(1,1, 19),(1,2, 8),(1,3, 21),(1,9, 18),(1,11, 20),(1,13, 22),(1,15, 16),(1,4, 10),(1,5, 23),(1,6, 12),(1,7, 17),(2,0, 1),(2,0, 3),(2,1, 2),(2,2, 3),(2,14, 15),(2,15, 3),(2,19, 18),(2,18, 0),(2,8, 11),(2,11, 1),(2,21, 22),(2,22, 2),(2,8, 17),(2,17, 18),(2,19, 12),(2,12, 15),(2,14, 23),(2,23, 22),(2,21,10),(2,10, 11),(2,10, 9),(2,9, 8),(2,17, 16),(2,16, 19),(2,12, 13),(2,13, 14),(2,23, 20),(2,20, 21),(2,4, 16),(2,5, 9),(2,6, 20),(2,7, 13),(2,7, 4),(2,6, 7),(2,4,5),(2,5,6)]\r\n\r\n self.V = V\r\n self.C = C\r\n\r\n # Loads applied to the structure\r\n self.l = [4,5,6,7] #The nodes that the load is applied on\r\n self.e_ = e_\r\n\r\n self.LW = 1\r\n\r\n\r\n if postConfine:\r\n self.postConfine = [(1., 1., 1.),\r\n (0., 0., 1.),\r\n (0., 0., 1.),\r\n (0., 0., 1.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 
0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.)]\r\n else:\r\n self.postConfine = [(0., 0., 1.),\r\n (0., 0., 1.),\r\n (0., 0., 1.),\r\n (0., 0., 1.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.),\r\n (0., 0., 0.)]\r\n\r\n\r\nclass hangNet:\r\n\r\n def __init__(self):\r\n self.sc = -3\r\n self.Q = 100 #N\r\n self.LW = 10\r\n self.name = 'hanging Net'\r\n self.height = H\r\n self.p = p\r\n self.width = w\r\n self.E1 = 202e9\r\n self.E2 = 202e9\r\n\r\n V = [[-961.,-305,155,1,1,1],\r\n [-961,305,155,1,1,1],\r\n [-305,-961,-146.,1,1,1],\r\n [-305,-305,0,0,0,0],\r\n [-305,305,0,0,0,0],\r\n [-305,961,-146,1,1,1],\r\n [305,-961,-146,1,1,1],\r\n [305,-305,0,0,0,0],\r\n [305,305,0,0,0,0],\r\n [305,961,-146,1,1,1],\r\n [961,-305,155,1,1,1],\r\n [961,305,155,1,1,1]]\r\n\r\n C = [[1,0,3],\r\n [1,3,7],\r\n [1,10,7],\r\n [1,1,4],\r\n [1,4,8],\r\n [1,11,8],\r\n [1,2,3],\r\n [1,3,4],\r\n [1,5,4],\r\n [1,6,7],\r\n [1,7,8],\r\n [1,9,8]]\r\n\r\n D = 0.42\r\n # Member thickness\r\n T = [D/2,D/2]\r\n\r\n e_trans = [0,0]\r\n\r\n A,_,_ = EQTR3(np.asarray(V),np.asarray(C)); #Create the Equilibrium matrix\r\n SS = linalg.null_space(A)\r\n ss = SS/np.linalg.norm(SS)\r\n e_ = np.squeeze(-0.01*ss)\r\n e_[[(1),(4),(7),(10)]] = -0.25\r\n\r\n # l = [2,5,8,11] = -Q\r\n l = [2,11]\r\n\r\n self.V = V\r\n self.C = C\r\n self.T1 = T\r\n self.e_trans = e_trans\r\n self.T2 = T\r\n self.e_ = e_\r\n self.l = l\r\n self.postConfine = False\r\n\r\n\r\n# # 3 bar m = 2 (Pellegrino example)\r\n# def MemberNet():\r\n # sc= -3;\r\n # Q = 0.005;\r\n # E = 2e9\r\n # LW = 1\r\n # V,C,T,e_per,l = MemberNet()\r\n # dims = 2\r\n# # Initial Nodal Positions\r\n# V = [[0.,0,0,1,1,1],\r\n# [160.,0,0,0,0,1],\r\n# [320.,0,0,0,0,1],\r\n# [480.,0,0,1,1,1]])\r\n#\r\n# # Connectivity\r\n# C = [[1,0,1],\r\n# [1,1,2],\r\n# [1,2,3]]\r\n#\r\n# # Member thickness\r\n# T = [[3],\r\n# [3],\r\n# [3]]\r\n#\r\n# # Elongation percent\r\n# e_ = [[0],\r\n# [-0.01],\r\n# [0]]\r\n#\r\n# # Define load on the system\r\n# l = [1,2] #loaded nodes\r\n#\r\n# self.V = V\r\n# self.C = C\r\n# self.T = T\r\n# self.e_ = e_\r\n# self.l = l\r\n# # return (V,C,T,e_,l)\r\n\r\n\r\ndef hedron4(w):\r\n\r\n\r\n # w = 40\r\n\r\n # Initial Nodal Positions\r\n V = [[-w/2, 0, 0, 0, 0, 0],\r\n [0, -w/2, 0, 0, 0, 0],\r\n [w/2, 0, 0, 0, 0, 0],\r\n [0, w/2, 0, 0, 0, 0],\r\n [0, 0, w/2, 0, 0, 0],\r\n [0, 0, -w/2, 0, 0, 0]]\r\n\r\n\r\n # Connectivity\r\n C = [[1, 0, 4],\r\n [1, 1, 4],\r\n [1, 2, 4],\r\n [1, 3, 4],\r\n [1, 0, 1],\r\n [1, 1, 2],\r\n [1, 2, 3],\r\n [1, 3, 0],\r\n [1, 0, 5],\r\n [1, 1, 5],\r\n [1, 2, 5],\r\n [1, 3, 5],\r\n [1, 4, 5]]\r\n\r\n # Member thickness\r\n T = [[3],\r\n [3],\r\n [3],\r\n [3],\r\n [3],\r\n [3],\r\n [3],\r\n [3],\r\n [3],\r\n [3],\r\n [3],\r\n [3],\r\n [0.75]]\r\n\r\n gamma = .605\r\n delta = 0.01\r\n #Shrinkage percent per bar\r\n e_ = [[-gamma],\r\n [-gamma],\r\n [-gamma],\r\n [-gamma],\r\n [-gamma],\r\n [-gamma],\r\n [-gamma],\r\n [-gamma],\r\n [-gamma],\r\n [-gamma],\r\n [-gamma],\r\n [-gamma],\r\n [-(gamma+delta)]]\r\n\r\n l = []\r\n return (V,C,T,e_,l)\r\n\r\nclass StackedSimplex:\r\n def __init__(self,H,w,T,n = 3,m = 2,E1 = 2.7e9, E2 = 36e9,shrink = None, postConfine = False,LW = 1, theta2 = 0.12739665775465892):\r\n self.name = 
'StackedSimplex'\r\n self.height = H\r\n self.width = w\r\n self.E1 = E1\r\n self.E2 = E2\r\n self.T1 = T\r\n self.LW = 1\r\n\r\n #Shrinkage\r\n if shrink == None:\r\n #Add global shrinkage onto initial geometry\r\n SHRNK = [-(0.7391434 + 0.1007027*exp(-0.08831393*t*2)) for t in T] #Experimental equation\r\n H = H*(1+SHRNK[0]) #Adjust height to minimize applied shrinkage\r\n w = w*(1+SHRNK[0])\r\n e_ = [0,SHRNK[1]-SHRNK[0]] #Only apply differential shrinkage\r\n e_trans = list(map(lambda t: -(0.0003*(2*t)**2-0.0054*(2*t)+0.8251),T))\r\n T = [t*(1+e) for (t,e) in zip(T,e_trans)] #Adjust to post pyrolysis shrinkage\r\n else:\r\n e_ = [-shrink,0]\r\n e_trans = [0 for t in T]\r\n\r\n self.T2 = T\r\n self.e_trans = e_trans\r\n\r\n h = H\r\n\r\n handed = -1\r\n r = w*sin(pi/6)/sin(2*pi/3) #radius of the triangle vertices\r\n theta = handed*(pi/2 - pi/n) #the twist angle\r\n phi = handed*2*pi/n #the angle between vertices\r\n delta = (pi/2 - pi/n) #the twist angle\r\n v = n*3 #number of vertices\r\n angle = delta*180/pi\r\n zeta = 2*pi/n #the angle between vertices\r\n gamma = (zeta/2-delta)*(1+2*(n%2)) # the angle of rotation of each prism to the last\r\n R = np.array([[cos(gamma), -sin(gamma), 0],\r\n [sin(gamma), cos(gamma), 0],\r\n [0, 0, 1]])\r\n D = np.zeros((3*n,3));\r\n D[:,2] = 0.7*h;\r\n\r\n b = n #number of struts\r\n t = 5*n #number of tendons\r\n\r\n Q = np.zeros((v,3)) #number of vertices by 3 coordinates\r\n W = np.zeros((2*v,3))\r\n #from polar coordinates\r\n count = n+1\r\n for k in range(len(Q[:,1])+1):\r\n if k <= n:\r\n Q[k-1,:] = [r*cos((k-1)*phi), r*sin((k-1)*phi), 0]\r\n\r\n if k > n:\r\n Q[count-1,:] = [r*cos((count-1)*phi/2+theta), r*sin((count-1)*phi/2+theta), h-0.3*h*(count%2)]\r\n count = count+1\r\n W[:v,:] = Q\r\n\r\n handed = 1\r\n phi = handed*2*pi/n #the angle between vertices\r\n\r\n count = n+1\r\n for k in range(7): #Second cell\r\n if k <= n:\r\n Q[k-1,:] = [r*cos((k-1)*phi), r*sin((k-1)*phi), 0]\r\n\r\n if k > n:\r\n Q[count-1,:] = [r*cos((count-1)*phi+theta2), r*sin((count-1)*phi+theta2), h]\r\n count = count+1\r\n\r\n Q = Q+D #stacks cell\r\n Q = Q.dot(np.linalg.matrix_power(R,int(-(handed+1)/2))) #rotates the cell\r\n W[v:2*v,:] = Q\r\n W = np.delete(W,slice(15,18), 0)\r\n #Bars\r\n B = np.array([[0,3],[1,5],[2,7],\r\n [8,9],[6,10],[4,11]])\r\n B = np.concatenate((np.ones((len(B),1)),B),axis = 1)\r\n\r\n #Tendons\r\n Tend = np.array([[0,5],[1,7],[2,3],#vertical 1\r\n [0,4],[1,6],[2,8],#vertical 2\r\n [0,1],[1,2],[2,0],#horizontal 1\r\n [3,4],[4,5],[5,6],#horizontal 2\r\n [6,7],[7,8],[8,3],#Vertical 3\r\n [4,9],[8,10],[6,11],\r\n [9,10],[10,11],[11,9],\r\n [5,11],[7,10],[3,9]])\r\n Tend = np.concatenate((2*np.ones((len(Tend),1)),Tend),axis = 1)\r\n\r\n\r\n C = np.concatenate((B,Tend),axis = 0)\r\n V = W[:]\r\n\r\n #Remove duplicate rows\r\n dups = []\r\n for i,v in enumerate(V[:,:3]):\r\n W = np.delete(V,range(i),axis = 0)[:]\r\n for j,w in enumerate(W[:,:3]):\r\n if (abs(v-w)<0.01).all() and j+i!=i:\r\n dups+=[(i,j+i)]\r\n dups = np.array(dups)\r\n V = np.delete(V,dups[:,1],axis = 0)\r\n\r\n e_ = [-(0.1391434 + 0.1007027*exp(-0.08831393*t*2)) for t in T]\r\n e_ = np.reshape(e_,(len(T),1))\r\n\r\n V = np.concatenate((V,np.zeros((len(V),3))),axis = 1)\r\n # V[0,3:] = [1,1,1] #kinematic constraints\r\n V[0:3,3:] = [0,0,1] #kinematic constraints\r\n\r\n l = [11,10,9]\r\n\r\n self.V = V\r\n self.C = C\r\n self.e_trans = e_trans\r\n self.T2 = T\r\n self.e_ = e_\r\n self.l = l\r\n self.postConfine = 
False\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"cwisont/Pin-Joint-Structure-Analysis","sub_path":"Structures_Class.py","file_name":"Structures_Class.py","file_ext":"py","file_size_in_byte":25211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37208498608","text":"import argparse\nimport cv2\nfrom imutils.video import FPS\nimport imutils\nimport time\nimport numpy as np\n\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()#\nap.add_argument(\"-v\", \"--video\", help=\"path to the video file\")\nargs = vars(ap.parse_args())\n\nvs = cv2.VideoCapture(args[\"video\"])\n# Get frame count\nnFrames = int(vs.get(cv2.CAP_PROP_FRAME_COUNT)) \n# Get width and height of video stream\nframeWidth = int(vs.get(cv2.CAP_PROP_FRAME_WIDTH)) \nframeHeight = int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\nprint(\"This feed is {} px by {} px\".format(frameWidth, frameHeight))\n\nframeCounter =-1\nfps = FPS().start()\n\"\"\"\nbytes = ''\nwhile True:\n\tbytes += vs.read(1024)\n\ta = bytes.find('\\xff\\xd8')\n\tb = bytes.find('\\xff\\xd9')\n\tif a != -1 and b != -1:\n\t\tjpg = bytes[a:b+2]\n\t\tbytes = bytes[b+2:]\n\t\ti = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_COLOR)\n\t\tcv2.imshow('i', i)\n\t\tprint(i.shape)\n\t\tkey = cv2.waitKey(1) & 0xFF\n\t\tif key == ord(\"q\"):\n\t\t\tbreak\n\"\"\"\nblank = np.zeros((3280, 500))\nwhile True:\n\tframe = vs.read()[1]\n\tframeCounter += 1\n\tif frame is not None:\n\t\tprint(\"Frame shape is {}\".format(frame.shape))\n\t\t#print(frame[100, 100, :])\n\t\tframe = imutils.resize(frame, width=1024) # Otherwise this will get out of hand\n\t\tprint(\"Resized shape is {}\".format(frame.shape))\n\t\tcv2.putText(frame, \"Frame: {}/{}\".format(frameCounter, nFrames), (10, 20),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n\t\tcv2.imshow(\"Real time feed\", frame)\n\t\t\n\t\tkey = cv2.waitKey(1) & 0xFF\n\t\tif key == ord(\"q\"):\n\t\t\tbreak\n\telse:\n\t\tbreak\n\t#time.sleep(0.2)\n\tfps.update()\n\n\"\"\"\nbytes=''\nwhile True:\n\tbytes+=vs.read(1024)[1]\n\ta = bytes.find('\\xff\\xd8') # JPEG start\n\tb = bytes.find('\\xff\\xd9') # JPEG end\n\tif a!=-1 and b!=-1:\n\t\tjpg = bytes[a:b+2] # actual image\n\t\tbytes= bytes[b+2:] # other informations\n\n\t\t# decode to colored image ( another option is cv2.IMREAD_GRAYSCALE )\n\t\timg = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR) \n\t\tcv2.imshow('Window name',img) # display image while receiving data\n\t\tif cv2.waitKey(1) ==27: # if user hit esc\n\t\t exit(0) # exit program\n\"\"\"\nfps.stop()\nprint(\"Average: {} fps\".format(fps.fps()))\nprint(\"Number of frames: {}\".format(frameCounter+1))\nvs.release()\ncv2.destroyAllWindows()\n\n","repo_name":"AlexBdx/Heli","sub_path":"playVideo.py","file_name":"playVideo.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37346248124","text":"import shapely.geometry as sg\nimport RandMap\nimport math\n\nrm = RandMap.RandMap()\n\n\ndef strset_to_poly(obstacle_set):\n poly_set = []\n for obstacle in obstacle_set:\n temp = []\n for seg in obstacle:\n temp.append(seg.coords[0])\n poly = sg.Polygon(temp)\n poly_set.append(poly)\n return poly_set\n\n\ndef get_bounding_space_cell(poly):\n bounds = poly.bounds\n width = bounds[2] - bounds[0]\n height = bounds[3] - bounds[1]\n center = ((width / 2) + bounds[0], (height / 2) + bounds[1])\n if width > 
height:\n points = [(bounds[0], center[1] + (width / 2)), (bounds[0], center[1] - (width / 2)),\n (bounds[2], center[1] + (width / 2)), (bounds[2], center[1] - (width / 2))] # LU, LL, RU, RL\n return width, points\n else:\n points = [(center[0] - (height / 2), bounds[3]), (center[0] - (height / 2), bounds[1]),\n (center[0] + (height / 2), bounds[3]), (center[0] + (height / 2), bounds[1])] # LU, LL, RU, RL\n return height, points\n\n\ndef decompose(cell, obstacles, total_height, resolution):\n cell_poly = sg.Polygon([cell.coordinates[0], cell.coordinates[1], cell.coordinates[3], cell.coordinates[2]])\n if cell_poly.intersects(obstacles[0]):\n # Cell is at least partially within the environment boundary\n dim = total_height / (2 ** cell.height)\n centroid = (cell.coordinates[0][0] + (dim / 2), cell.coordinates[0][1] - (dim / 2))\n factor = 10.0 ** 3\n centroid = (math.trunc(centroid[0] * factor) / factor, math.trunc(centroid[1] * factor) / factor)\n for obstacle in obstacles[1:]:\n if cell_poly.within(obstacle):\n cell.label = \"full\"\n return\n elif cell_poly.intersects(obstacle) or (cell_poly.intersects(obstacles[0]) and not cell_poly.within(obstacles[0])):\n cell.label = \"mixed\"\n if (total_height / (2 ** (cell.height + 1))) > resolution:\n # Children will not be below min resolution limit\n for i in range(4): # LU, LL, RU, RL child cell order\n new_coords = [] # LU, LL, RU, RL\n if i == 0:\n new_coords = [cell.coordinates[0], (cell.coordinates[0][0], cell.coordinates[0][1] - (dim / 2)),\n (cell.coordinates[0][0] + (dim / 2), cell.coordinates[0][1]), centroid]\n elif i == 1:\n new_coords = [(cell.coordinates[0][0], cell.coordinates[0][1] - (dim / 2)), cell.coordinates[1],\n centroid, (cell.coordinates[1][0] + (dim / 2), cell.coordinates[1][1])]\n elif i == 2:\n new_coords = [(cell.coordinates[0][0] + (dim / 2), cell.coordinates[0][1]), centroid,\n cell.coordinates[2], (cell.coordinates[2][0], cell.coordinates[2][1] - (dim / 2))]\n else:\n new_coords = [centroid, (cell.coordinates[1][0] + (dim / 2), cell.coordinates[1][1]),\n (cell.coordinates[2][0], cell.coordinates[2][1] - (dim / 2)), cell.coordinates[3]]\n new_cell = RandMap.CellNode(cell.height + 1, new_coords)\n new_cell.parent = cell\n new_cell.which_child = i\n cell.children[i] = new_cell\n for child in cell.children:\n decompose(child, obstacles, total_height, resolution)\n if cell.height == 0:\n rm.root = cell\n return rm\n else:\n return\n # For loop completed without returning\n cell.label = \"empty\"\n cell.centroid = centroid\n return\n else:\n # Cell is outside boundary\n cell.label = \"outside\"\n return\n\n\n\n\n","repo_name":"iandeboo1/CSCI-534---Motion-Planning-Project","sub_path":"RandDecomp.py","file_name":"RandDecomp.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31291843544","text":"import sqlite3\nimport os\n\ndb_abs_path = os.path.dirname(os.path.realpath(__file__)) + '/phonebook.db'\nconn = sqlite3.connect(db_abs_path)\nc = conn.cursor()\n\n\ndef show_records():\n try:\n records = c.execute(\"SELECT c.id, c.name, c.phone FROM contacts AS c\")\n\n print(\"Contacts:\")\n print(\"#############\")\n for row in records:\n print(\"ID: \", row[0]),\n print(\"Name: \", row[1]),\n print(\"Phone: \", row[2])\n\n print(\"\\n\")\n except:\n print(\"Something went wrong, please run db_init.py to initialize the database.\")\n 
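# also close on the failure path; sqlite3 tolerates the extra module-level close() below\n        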
conn.close()\n\n\nshow_records()\n\nconn.close()\n","repo_name":"MikeKorsikov/PythonClasses","sub_path":"Lesson53n/HW53n/db/show_tables.py","file_name":"show_tables.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4092338874","text":"#kFlight by KK4TEE\n#Started 2014-9-24\n#Cumulative time so far: around 22 hours\n#Please keep in mind this is my first time using pygame\n#and only my 2nd python project. That said, I hope you can\n#gain some use from my work here. Cheers!\n#\n#ToDo list:\n# write a proper timing routine\n# code vertical speed and altitude bars\n# add more text readouts\n# internalize navball texture\n\nimport time\nimport os, sys\nimport pygame\nfrom pygame.locals import *\nfrom math import pi, radians, sin, cos, tan\nfrom copy import *\nimport json\nimport urllib2\n\nimport config\n\nif not pygame.font: \n print ('Warning, fonts disabled')\nif not pygame.mixer: \n print ('Warning, sound disabled')\n\n\ndef telemetry(oldNews):\n #I need to figure out how to use error handling properly.\n # The program will still occasionally crash with key errors\n #if an update fails partway through.\n\n oldNews = deepcopy(oldNews)\n try:\n tele = json.load(urllib2.urlopen(config.URL + \\\n 'VesselName=v.name' + '&' +\\\n 'BodyName=v.body' + '&' +\\\n 'RadarAlt=v.heightFromTerrain' + '&' +\\\n 'MET=v.missionTime' + '&' +\\\n 'Altitude=v.altitude' + '&' +\\\n 'ApA=o.ApA' + '&' +\\\n 'PeA=o.PeA' + '&' +\\\n \\\n 'Pitch=n.pitch' + '&' +\\\n 'Roll=n.roll' + '&' +\\\n 'Heading=n.heading' + '&' +\\\n \\\n 'LiquidFuel=r.resource[LiquidFuel]' + '&' +\\\n 'LiquidFuelMax=r.resourceMax[LiquidFuel]' + '&' +\\\n 'Oxidizer=r.resource[Oxidizer]' + '&' +\\\n 'OxidizerMax=r.resourceMax[Oxidizer]' + '&' +\\\n 'MonoPropellant=r.resource[MonoPropellant]' + '&' +\\\n 'MonoPropellantMax=r.resourceMax[MonoPropellant]' + '&' +\\\n 'ElectricCharge=r.resource[ElectricCharge]' + '&' +\\\n 'ElectricChargeMax=r.resourceMax[ElectricCharge]' + '&' +\\\n \\\n 'Throttle=f.throttle' + '&' +\\\n 'Light=v.lightValue' + '&' +\\\n 'Brake=v.brakeValue' + '&' +\\\n 'Gear=v.gearValue' + '&' +\\\n 'SAS=v.sasValue' + '&' +\\\n 'RCS=v.rcsValue'\n ))\n #print tele\n if tele['Pitch'] > 0:\n pass\n return tele\n except:\n print ('Telemachus update failed')\n return oldNews\n\n\ndef clamp(num, minn, maxn):\n if num < minn:\n return minn\n elif num > maxn:\n return maxn\n else:\n return num\n\n\ndef rot_center(image, angle):\n \"\"\"rotate an image while keeping its center and size\"\"\"\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image\n\n\ndef percentScale(lowVal, highVal, currentVal):\n scaleDiff = float(highVal-lowVal)\n spotOnScale = float(currentVal - lowVal)\n return (spotOnScale/scaleDiff)*100\n\ndef formatPercentAsString(val):\n rv = clamp(round(val,2),-10,100)\n return \"{:3.2f}\".format(rv)\n\ndef drawTextReadouts(screen, position, tele):\n x, y = position\n boxX, boxY = (430,700)\n color = (0,255,0)\n fontSize = 24\n yLineSpace = 10\n fontHeader = pygame.font.Font(None, 30)\n text = 'Flight Status'\n Header = fontHeader.render(text, 1, (0, 255, 0))\n tx, ty = fontHeader.size(text)\n screen.blit(Header, (x+boxX/2-tx/2, y+ty/2))\n\n font = pygame.font.Font(None, fontSize)\n xLine = x + 10\n yLine = y + fontSize + 5\n\n #Ship's Name\n text = 
'Vessel:'\n    tx, ty = font.size(text)\n    screen.blit(font.render(text, 1, color), (xLine, yLine + ty/2))\n    tx, ty = font.size(tele['VesselName'])\n    xLine = x + boxX - 5 - tx\n    screen.blit(font.render(tele['VesselName'], 1, color), (xLine, yLine + ty/2))\n    xLine = x + 10\n    yLine += ty + yLineSpace\n\n    #Body Name\n    text = 'Planetary Body:'\n    tx, ty = font.size(text)\n    screen.blit(font.render(text, 1, color), (xLine, yLine + ty/2))\n    tx, ty = font.size(tele['BodyName'])\n    xLine = x + boxX - 5 - tx\n    screen.blit(font.render(tele['BodyName'], 1, color), (xLine, yLine + ty/2))\n    xLine = x + 10\n    yLine += ty + yLineSpace\n\n    #Radar Altitude\n    text = 'Radar Altitude:'\n    tx, ty = font.size(text)\n    screen.blit(font.render(text, 1, color), (xLine, yLine + ty/2))\n    tx, ty = font.size(str(tele['RadarAlt']))\n    xLine = x + boxX - 5 - tx\n    screen.blit(font.render(str(tele['RadarAlt']), 1, color), (xLine, yLine + ty/2))\n    xLine = x + 10\n    yLine += ty + yLineSpace\n\n\n    #Box border\n    pygame.draw.rect(screen, (128, 128, 128), \\\n                     (x,y,boxX,boxY), 4)\n\n\ndef drawPrimaryFlightDisplay(screen, position, tele):\n    x, y = position\n    boxX, boxY = (600,535)\n    #boxX, boxY = (525,525)\n    sidecut = 5.0 #this is a fraction\n    PFDbackground = pygame.Surface((boxX,boxY))\n    PFDbackground.set_colorkey((255,255,254))\n    PFDbackground.fill((0, 0, 0))\n    pygame.draw.circle(PFDbackground, (255,255,254), (boxX/2, boxY/2), (boxY-20)/2, 0)\n    pygame.draw.rect(PFDbackground, (0,0,0), \\\n                     (0, 0,boxX/sidecut, boxY), 0)\n    pygame.draw.rect(PFDbackground, (0,0,0), \\\n                     (boxX-boxX/sidecut, 0,boxX, boxY), 0)\n\n\n    screen.blit(PFDbackground, position)\n\n    #Ship stencil\n    pygame.draw.rect(screen, (255,255,200), \\\n                     (boxX/2-boxX/5/2,boxY/2,boxX/5,5), 2)\n    pygame.draw.line(screen, (255,255,200), (boxX/2-boxX/5, boxY/2), \\\n                     (boxX/2+boxX/5, boxY/2), 3)\n    #Box border\n    pygame.draw.rect(screen, (128, 128, 128), \\\n                     (x,y,boxX,boxY), 4)\n    #pygame.draw.circle(screen, (0,0,200), (x+boxX/2, y+boxY/2), (boxY-20)/2, 1)\n\n\n\n\ndef drawLEDBox(screen, label, position, lit, color):\n    x, y = position\n    boxX, boxY = (160,80)\n    font = pygame.font.Font(None, 42)\n    lx, ly = font.size(label)\n    #print \"LED font position: \" + str(lx) + \" - \" + str(ly)\n    if lit is True or lit == 'True' or (lit > 0 and lit <= 1): #Accept bool or string True\n        pygame.draw.rect(screen, color, \\\n                     (x,y,boxX,boxY), 0)\n        text = font.render(label, 1, (255, 255, 255))\n        screen.blit(text, (x+boxX/2-lx/2, y+boxY/2-ly/2))\n    else:\n        pygame.draw.rect(screen, (50, 50, 50), \\\n                     (x,y,boxX,boxY), 0)\n        text = font.render(label, 1, (1, 1, 1))\n        screen.blit(text, (x+boxX/2-lx/2, y+boxY/2-ly/2))\n\n    #Box border\n    pygame.draw.rect(screen, (128, 128, 128), \\\n                     (x,y,boxX,boxY), 4)\n\n\n\ndef drawAnalogGauge(screen, gaugeLabel, position, lowVal, highVal, currentVal):\n    x, y = position #Modify this def to add ability to resize the gauges automatically\n    percentOfGaugeRange = percentScale(lowVal, highVal, currentVal)\n    inverseGaugeFill = ((5*pi/4) / (highVal-lowVal) * (highVal-currentVal))\n    gaugeFill = ((5*pi/4) / (highVal-lowVal) * currentVal)\n    fillThickness = 20\n    #Outer Ring\n    pygame.draw.circle(screen, (0,127,0), position, 100, 1)\n\n    font = pygame.font.Font(None, 28)\n    gtext0 = font.render(str(formatPercentAsString(percentOfGaugeRange)) + '%', 1, (20, 255, 20))\n    gtext1 = font.render(gaugeLabel, 1, (20, 255, 20))\n    x, y = position\n    lx, ly = font.size(gaugeLabel)\n    screen.blit(gtext0, (x-25, y-14))\n    screen.blit(gtext1, (x-lx/2, y+ly/2))\n\n    #arc(Surface, color, Rect, start_angle, stop_angle, 
width=1)\n if percentOfGaugeRange > 0 and percentOfGaugeRange < 15:\n #Red Section\n pygame.draw.arc(screen, (255, 0, 0), [x-99, y-99, 198, 198], \\\n inverseGaugeFill + pi/4, \\\n inverseGaugeFill + pi/4 + gaugeFill, \\\n fillThickness)\n elif percentOfGaugeRange >= 15 and percentOfGaugeRange < 33:\n #Yellow Section\n pygame.draw.arc(screen, (255, 255, 0), [x-99, y-99, 198, 198], \\\n inverseGaugeFill + pi/4, \\\n inverseGaugeFill + pi/4 + gaugeFill, \\\n fillThickness)\n elif percentOfGaugeRange >= 33 and percentOfGaugeRange <= 100:\n #Green Section\n pygame.draw.arc(screen, (0, 255, 0), [x-99, y-99, 198, 198], \\\n inverseGaugeFill + pi/4, \\\n inverseGaugeFill + pi/4 + gaugeFill, \\\n fillThickness)\n else:\n #White Section\n pygame.draw.arc(screen, (255, 255, 255), [x-99, y-99, 198, 198], \\\n inverseGaugeFill + pi/4, \\\n inverseGaugeFill + pi/4 + gaugeFill, \\\n fillThickness)\n\nclass PyManMain:\n \"\"\"The Main PyMan Class - This class handles the main\n initialization and creating of the Game.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize\"\"\"\n \"\"\"Initialize PyGame\"\"\"\n pygame.init()\n \"\"\"Set the window Size\"\"\"\n self.width, self.height = config.ScreenDimensions\n self.display_trace_color = (0, 255, 0)\n \"\"\"Create the Screen\"\"\"\n if config.GoFullscreen is True:\n self.screen = pygame.display.set_mode((0,0), FULLSCREEN | DOUBLEBUF, 0)\n self.background = pygame.Surface(pygame.display.list_modes()[0])\n print (\"Going fullscreen\")\n else:\n self.screen = pygame.display.set_mode((self.width, self.height), 0, 0)\n #Note: RESIZEABLE can go as the 2nd to last option, but things get messy\n print (\"Resolution set to \" + str(config.ScreenDimensions))\n self.background = pygame.Surface(self.screen.get_size())\n pygame.display.set_caption('kFlight by KK4TEE')\n\n #self.navImg = pygame.image.load(os.path.join('data', 'navball.png'))\n nixie0 = pygame.image.load(os.path.join('data', 'nixie_tube', '0.png'))\n nixie1 = pygame.image.load(os.path.join('data', 'nixie_tube', '1.png'))\n nixie2 = pygame.image.load(os.path.join('data', 'nixie_tube', '2.png'))\n nixie3 = pygame.image.load(os.path.join('data', 'nixie_tube', '3.png'))\n nixie4 = pygame.image.load(os.path.join('data', 'nixie_tube', '4.png'))\n nixie5 = pygame.image.load(os.path.join('data', 'nixie_tube', '5.png'))\n nixie6 = pygame.image.load(os.path.join('data', 'nixie_tube', '6.png'))\n nixie7 = pygame.image.load(os.path.join('data', 'nixie_tube', '7.png'))\n nixie8 = pygame.image.load(os.path.join('data', 'nixie_tube', '8.png'))\n nixie9 = pygame.image.load(os.path.join('data', 'nixie_tube', '9.png'))\n nixiedl = pygame.image.load(os.path.join('data', 'nixie_tube', 'dl.png'))\n nixiedr = pygame.image.load(os.path.join('data', 'nixie_tube', 'dr.png'))\n self.nixieList = [nixie0, nixie1, nixie2, nixie3, nixie4, nixie5,\n nixie6, nixie7, nixie8, nixie9, nixiedl, nixiedr]\n self.navImg = pygame.Surface((1400,1400))\n pygame.draw.rect(self.navImg, (128,128,255), \\\n (0, 0,1400, 700), 0)\n pygame.draw.rect(self.navImg, (128,64,0), \\\n (0, 700,1400, 1400), 0)\n\n self.navImg.convert()\n self.navImg = pygame.transform.scale(self.navImg, (1400,1400))\n\n #Set up the 8-ball layer for the Primary Flight Display\n #self.navball = pygame.display.set_mode((800, 800), 0, 0)\n\n #Set up the PFD Layer\n #self.PFD = pygame.display.set_mode((600, 525), 0, 0)\n\n #Set up the black background layer\n self.background = self.background.convert()\n self.background.fill((0, 0, 0))\n\n\n def AnalogGauges(self, xOffset, yOffset, 
radius, tele):\n        xOffset += radius/2\n        yOffset += radius/2\n        if tele['MonoPropellant'] != -1:\n            drawAnalogGauge(self.screen, \"Mono Prop\", (xOffset, yOffset), 0,\\\n                            tele['MonoPropellantMax'], tele['MonoPropellant'])\n        xOffset += radius + config.SpaceBetweenGaugesX\n        if tele['LiquidFuel'] != -1:\n            drawAnalogGauge(self.screen, \"Liquid Fuel\", (xOffset, yOffset), 0, \\\n                            tele['LiquidFuelMax'], tele['LiquidFuel'])\n        xOffset += radius + config.SpaceBetweenGaugesX\n        if tele['Oxidizer'] != -1:\n            drawAnalogGauge(self.screen, \"Oxidizer\", (xOffset, yOffset), 0,\\\n                            tele['OxidizerMax'], tele['Oxidizer'])\n        xOffset += radius + config.SpaceBetweenGaugesX\n        if tele['ElectricCharge'] != -1:\n            drawAnalogGauge(self.screen, \"Electricity\", (xOffset, yOffset), 0,\\\n                            tele['ElectricChargeMax'], tele['ElectricCharge'])\n        xOffset += radius + config.SpaceBetweenGaugesX\n        xOffset += radius + config.SpaceBetweenGaugesX\n\n\n    def LEDBoxes(self, x, y, boxX, boxY, tele):\n        xOffset, yOffset = x, y\n        drawLEDBox(self.screen, 'Alarm', (xOffset,yOffset), False, (255,0,0))\n        xOffset += boxX + 5\n        drawLEDBox(self.screen, 'Throttle', (xOffset,yOffset), tele['Throttle'], (220,220,20))\n\n        xOffset, yOffset = x, y + boxY + 5\n        drawLEDBox(self.screen, 'SAS', (xOffset,yOffset), tele['SAS'], (40,40,245))\n        xOffset += boxX + 5\n        drawLEDBox(self.screen, 'RCS', (xOffset,yOffset), tele['RCS'], (210,210,210))\n\n        xOffset, yOffset = x, yOffset + boxY + 5\n        drawLEDBox(self.screen, 'Gear', (xOffset,yOffset), tele['Gear'], (40,245,40))\n        xOffset += boxX + 5\n        drawLEDBox(self.screen, 'Brake', (xOffset,yOffset), tele['Brake'], (245,40,40))\n\n        xOffset, yOffset = x, yOffset + boxY + 5\n        drawLEDBox(self.screen, 'Light', (xOffset,yOffset), tele['Light'], (245,245,40))\n        xOffset += boxX + 5\n        drawLEDBox(self.screen, 'G-Force', (xOffset,yOffset), False, (245,40,40))\n\n        xOffset, yOffset = x, yOffset + boxY + 5\n        drawLEDBox(self.screen, 'EVA', (xOffset,yOffset), False, (245,245,40))\n        xOffset += boxX + 5\n        drawLEDBox(self.screen, 'Landed', (xOffset,yOffset), False, (245,40,40))\n\n        xOffset, yOffset = x, yOffset + boxY + 5\n        if self.tele['RadarAlt'] != -1:\n            drawLEDBox(self.screen, 'SurfRadar', (xOffset,yOffset), True, (245,245,40))\n        else:\n            drawLEDBox(self.screen, 'SurfRadar', (xOffset,yOffset), False, (245,245,40))\n        xOffset += boxX + 5\n        drawLEDBox(self.screen, 'Pilot Ejected', (xOffset,yOffset), False, (245,40,40))\n\n\n    def NavBallImg(self, screen, X, Y, roll, pitch, heading):\n        navballSize = (520,520)\n        #navballSize = (450,450)\n        nsX, nsY = navballSize\n        radius = nsX/2\n        navball = pygame.Surface(navballSize)\n        niX, niY = self.navImg.get_size()\n        #print \"Pitch: \" + str(pitch) + \" Roll: \" + str(roll)\n        #Roll\n        navImg = pygame.Surface((niX,niY))\n        text = str(int(round(heading)))\n        color = (255,255,255)\n        font = pygame.font.Font(None, 28)\n        tx, ty = font.size(text)\n        navImg.blit(self.navImg, (0, 3*pitch))\n        navball.blit(navImg, ( -niX/2 + nsX/2, -niY/2 + nsY/2 ))\n        navball.blit(rot_center(navball, roll), (0,0))\n#        navball.blit(navImg, (-niX/2+200, -niY/2+250))\n        navball.blit(font.render(text, 1, color), (nsX/2-ty/2, nsY/2- ty/2))\n        screen.blit(navball, (X - nsX/2,Y - nsY/2 -2))\n\n    def Nt(self, screen, X, Y, V):\n        nixieSize = self.nixieList[V].get_size()\n        niX, niY = nixieSize\n        nixie = pygame.Surface(nixieSize)\n        #nixie.blit(self.nixie0, (niX / 2 + nsX / 2, -niY / 2 + nsY / 2))\n        nixie.blit(self.nixieList[V], (0,0))\n        screen.blit(nixie, (X, Y))\n\n    def NixieReadout(self, screen, cX, cY, Num):\n        n = min(abs(int(Num)), 999999)\n        deltaWidth, deltaHeight = self.nixieList[0].get_size()\n        # Floor division is required here: with /, the quotients are floats and\n        # nixieList would be indexed with a float, raising TypeError on Python 3.\n        self.Nt(self.screen, cX, cY, (n // 100000) % 10)\n        cX += deltaWidth\n        self.Nt(self.screen, cX, cY, (n // 10000) % 10)\n        cX += deltaWidth\n        self.Nt(self.screen, cX, cY, (n // 1000) % 10)\n        cX += deltaWidth\n        self.Nt(self.screen, cX, cY, (n // 100) % 10)\n        cX += deltaWidth\n        self.Nt(self.screen, cX, cY, (n // 10) % 10)\n        cX += deltaWidth\n        self.Nt(self.screen, cX, cY, (n // 1) % 10)\n\n\n    def TextTelementry(self, xOffset, yOffset):\n        pass\n\n\n    def DrawBorder(self):\n        info = pygame.display.Info()\n        pygame.draw.rect(self.screen, (0, 128, 0), \\\n                        (2,2,info.current_w - 5,info.current_h - 5), 2)\n\n\n    def UserHandler(self):\n        pygameEvents = pygame.event.get()\n        keysPressedList = []\n        for event in pygameEvents:\n            if event.type == pygame.KEYDOWN:\n                if event.key == K_ESCAPE:\n                    print (\"Exiting by ESC\")\n                    sys.exit()\n                if event.key == K_q:\n                    print (\"q\")\n\n\n        ''' #Instead of using a queue, see what is being pressed RIGHT NOW\n        keysPressed = pygame.key.get_pressed()\n        if (keysPressed[K_ESCAPE]):\n            print \"Exiting by ESC\"\n            sys.exit()\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                print 'pygame Quit'\n                sys.exit()\n        '''\n\n    def MainLoop(self):\n        print (\"Resolutions detected: \"+str(pygame.display.list_modes()))\n        frameCount = 0\n        xM, yM = self.screen.get_size()\n        self.tele = {}\n        lasttime = time.time()\n        self.screen.blit(self.background, (0, 0))\n        self.DrawBorder()\n        programStartTime = time.time()\n\n        self.tele= {u'MonoPropellant': 189.084091375693,u'ApA': 0.0, u'PeA': 0.0,u'Altitude': 2109111.7047641, u'LiquidFuelMax': 720, u'RCS': u'False', u'ElectricChargeMax': 950.159999996424, u'Oxidizer': 478.732902340961, u'Roll': 8.91797637939453, u'Gear': u'False', u'SAS': u'False', u'Throttle': 0, u'BodyName': u'Kerbin', u'MonoPropellantMax': 190, u'Light': u'True', u'RadarAlt': -1, u'LiquidFuel': 391.690572273977, u'Heading': 174.4286, u'OxidizerMax': 880, u'VesselName': u'Mark1-2Pod (Kerbal X)', u'ElectricCharge': 950.006827276331, u'MET': 3456.6799228806, u'Brake': u'True', u'Pitch': -8.6469259262085}\n\n\n        while 1:\n            pygame.time.Clock().tick(config.RefreshRate)\n            self.UserHandler()\n            self.screen.blit(self.background, (0, 0))\n            #self.DrawBorder()\n            self.tele = telemetry(self.tele)\n            #print self.tele\n\n            self.NavBallImg(self.screen, 250 + 50, 250 + 25,\n                            -self.tele['Roll'], self.tele['Pitch'],\n                            self.tele['Heading'])\n            self.AnalogGauges(config.SpaceBetweenGaugesX + 5,\n                              yM - config.SpaceBetweenGaugesX - 200 - 5,\n                              200, self.tele)\n            self.LEDBoxes(xM-0-760,0, 160, 80, self.tele)\n            drawTextReadouts(self.screen, (xM-0-430, 0), self.tele)\n            drawPrimaryFlightDisplay(self.screen, (0,0), self.tele)\n\n            self.NixieReadout(self.screen, xM - 427, 150, self.tele['MET'])\n            self.NixieReadout(self.screen, xM - 427, 290, self.tele['Altitude'])\n            self.NixieReadout(self.screen, xM - 427, 430, self.tele['ApA'])\n            self.NixieReadout(self.screen, xM - 427, 560, self.tele['PeA'])\n\n            pygame.display.flip()\n            frameCount += 1\n\n            #####Debugging#####\n            currentTime = time.time()\n            looptime = currentTime - lasttime\n            lasttime = currentTime\n            print (\"\")\n            print (\"Frame Count: \" + str(frameCount))\n            print (\"Loop time: \" + str(looptime) + \"s\")\n            print (\"FramesPerSec:\" + str(1.0/looptime))\n            print (\"Avg FPS: \" + str(frameCount/(currentTime - programStartTime)) + \"FPS\")\n            print (\"Program Run: \" + str(currentTime - programStartTime))\n            #print self.tele\n            self.tele['Roll'] +=50*looptime\n\n            '''\n            if looptime < 1.0/config.RefreshRate:\n                
time.sleep(1.0/config.RefreshRate - looptime)\n                print \"Sleeping\" + str(1.0/config.RefreshRate - looptime)'''\n\n\nif __name__ == \"__main__\":\n    MainWindow = PyManMain()\n    MainWindow.MainLoop()\n","repo_name":"KK4TEE/kFlightPanel","sub_path":"kFlight.py","file_name":"kFlight.py","file_ext":"py","file_size_in_byte":19853,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"39883574359","text":"import sys\r\nfrom math import log\r\nimport numpy as np\r\nimport binascii\r\nimport cv2\r\n\r\nimport Pe2Img\r\n\r\n\r\nclass TutorialTest():\r\n    def __init__(self, _openPath=None, _savePath=None):\r\n        # Store the paths: the __main__ block below constructs this class with\r\n        # _openPath/_savePath keyword arguments, so the constructor must accept them.\r\n        self.m_cPe2Img = Pe2Img.Pe2Img()\r\n        self.m_openPath = _openPath\r\n        self.m_savePath = _savePath\r\n\r\n    def ConvertAndSave(self, _peData, _savePath):\r\n        peData = list()\r\n        # print('Processing ' + _openPath)\r\n        if len(_peData[0]) != 16: # If not 16 bytes per row\r\n            assert(False)\r\n\r\n        b = int((len(_peData)*16)**(0.5))\r\n        b = 2**(int(log(b)/log(2))+1)\r\n        a = int(len(_peData)*16/b)\r\n\r\n        for i in range(len(_peData)):\r\n            for j in range(len(_peData[i])):\r\n                peData.append(_peData[i][j])\r\n\r\n        if len(peData) > (a*b):\r\n            peData = peData[:a*b]\r\n\r\n        elif len(peData) < (a*b):\r\n            last = a*b - len(peData)\r\n            for i in range(last):  # range() was missing; iterating over an int raises TypeError\r\n                peData.append(0)\r\n\r\n        image = np.reshape(np.array(peData), (a, b))\r\n\r\n        cv2.imwrite(_savePath, image)\r\n\r\n    def ConvertRGB(self, _peData, _savePath):\r\n        peData = list()\r\n        pixelLen = int(len(_peData)*16/3) # 3 rgb values per pixel\r\n        width = int(pixelLen**0.5)\r\n        width = 2**(int(log(width)/log(2))+1)\r\n        height = int(pixelLen/width)\r\n\r\n        for i in range(len(_peData)):\r\n            for j in range(len(_peData[i])):\r\n                peData.append(_peData[i][j])\r\n\r\n        if len(peData) > (height*width*3):\r\n            peData = peData[:height*width*3]\r\n        \r\n        elif len(peData) < (height*width*3):\r\n            last = height*width*3 - len(peData)\r\n            for i in range(last):  # range() was missing here as well\r\n                peData.append(0)\r\n\r\n        rgb = np.reshape(np.array(peData), (height, width, 3))\r\n        cv2.imwrite(_savePath, rgb)\r\n\r\n    def main(self, _argv):\r\n\r\n        if (len(_argv) != 3):\r\n            print(\"Enter \\n1. a Pe's Section (Entire, Text, TextData) \\n2. a color scale(rgb, gray)\")\r\n            sys.exit()\r\n\r\n        peSections = _argv[1]\r\n        color = _argv[2]\r\n        \r\n        peData = list()\r\n\r\n        if peSections == 'Entire':\r\n            text = self.m_cPe2Img.GetEntire()\r\n\r\n        elif peSections == 'Text':\r\n            text = self.m_cPe2Img.GetTextSection()\r\n\r\n        elif peSections == 'TextData':\r\n            text = self.m_cPe2Img.GetTextDataSection()\r\n\r\n        elif peSections == 'Resource':\r\n            text = self.m_cPe2Img.GetRsrcSection()\r\n\r\n        text = binascii.hexlify(text)\r\n        bytesData = [text[i:i+2].decode('utf-8') for i in range(0, len(text), 2)]\r\n        peData = [int(bytesData[i], 16) for i in range(len(bytesData))]\r\n        peData = [peData[i:i+16] for i in range(0, len(peData), 16)]\r\n        print(peData[:30])\r\n\r\n        if color == 'rgb':\r\n            self.ConvertRGB(peData, self.m_savePath)  # pass the stored save path; the old call dropped it\r\n\r\n        elif color == 'gray':\r\n            self.ConvertAndSave(peData, self.m_savePath)\r\n\r\nif __name__ == '__main__':\r\n\r\n    openPath = '/home/yoon/paper/benign/Binary_PE/test2.vir'\r\n    savePath = '/home/yoon/paper/benign/image/2021-03-16_test2.vir_GetRsrc_rgb.png'\r\n\r\n    mainClass = TutorialTest(\r\n        _openPath = openPath,\r\n        _savePath = savePath\r\n    )\r\n    mainClass.main(sys.argv)\r\n","repo_name":"hoonyandu/ComputerVision","sub_path":"ConvertImage/TutorialTest.py","file_name":"TutorialTest.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"26732896166","text":"import string\n\nfrom fnf import FNFMaker\n\nopps = {\n    \"a\": \"x := x + y\",\n    \"b\": \"y := y + 2z\",\n    \"c\": \"x := 3x + z\",\n    \"d\": \"z := y − z\"\n}\n\n\ndef dependence_fun(c1, c2):\n    l1, r1 = opps[c1].split(\":=\")\n    l2, r2 = opps[c2].split(\":=\")\n    l1 = [c for c in l1 if c in string.ascii_letters]\n    r1 = [c for c in r1 if c in string.ascii_letters]\n    l2 = [c for c in l2 if c in string.ascii_letters]\n    r2 = [c for c in r2 if c in string.ascii_letters]\n\n    return any([c in r2 for c in l1]) or any([c in r1 for c in l2])\n\n\nword = \"baadcb\"\n\nif __name__ == '__main__':\n    maker = FNFMaker(\n        alphabet=list(opps),\n        dependence_fun=dependence_fun\n    )\n    print(\"Dependencies:\\n\", maker.get_dependencies())\n\n    G = maker.build_graph(word)\n    maker.draw_graph(G, word)\n    fnf = maker.getFNF(G, word)\n\n    print(\"FNF:\\n\", fnf)\n","repo_name":"wojtke/agh-tw","sub_path":"fnf_example.py","file_name":"fnf_example.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24958018022","text":"import os\n\nroot_dir = os.path.expanduser(\"~\")\n\ntrain_data_path = os.path.join(root_dir, \"/home/corpus/giga/giga_train_bin/*\")\n#eval_data_path = os.path.join(root_dir, \"/home/corpus/giga/giga_train_bin/*\")\ndecode_data_path = os.path.join(root_dir, \"/home/NHG/code/data/test/ductest\")\ntrain_ds_data_path = os.path.join(root_dir, \"/home/NHG/code/data/ds/ductest\")\nvocab_path = os.path.join(root_dir, \"/home/NHG/code/data/vocabulary/vocab\")\nlog_root = os.path.join(root_dir, \"/home/NHG/code/codespace/log_root\")\nconcept_vocab_path = os.path.join(root_dir, \"/home/NHG/code/data/vocabulary/concept_vocab\")\n\ntraintimes = 450000\nhidden_dim= 256\nemb_dim= 128\nbatch_size= 64\nmax_enc_steps=60\nmax_dec_steps=20\nbeam_size=8\nmin_dec_steps=2\nvocab_size=150000\nconcept_num = 2\n\nlr=0.15\nadagrad_init_acc=0.1\nrand_unif_init_mag=0.02\ntrunc_norm_init_std=1e-4\nmax_grad_norm=2.0\n\npointer_gen = True\nis_coverage = False\nDS_train = False\ncov_loss_wt = 1.0\nrein = 0.99\npi = 2.92\n\neps = 
1e-12\nmax_iterations = 2000000\n\nuse_gpu=True\n\nlr_coverage=0.15\n\nuse_maxpool_init_ctx = False\n","repo_name":"wprojectsn/codes","sub_path":"code/codespace/data_util/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"} +{"seq_id":"25852808734","text":"# vim: set encoding=utf-8\nfrom unittest import TestCase\n\nfrom mock import patch\n\nfrom regparser.layer.external_citations import ExternalCitationParser\nfrom regparser.tree.struct import Node\n\n\ndef get_citation(citations, text):\n \"\"\"\n Return the 1st citation whose text matches the given text\n \"\"\"\n matched = filter(lambda x: x['text'] == text, citations)\n if matched:\n return matched[0]\n return None\n\n\nclass ParseTest(TestCase):\n def test_public_law(self):\n \"\"\"\n Ensure that we successfully parse Public Law citations that look\n like the following: Public Law 111-203\n \"\"\"\n node = Node(\"Public Law 111-203\", label=['1005', '2'])\n citations = ExternalCitationParser(None).process(node)\n self.assertEqual(len(citations), 1)\n self.assertEqual(citations[0]['text'], node.text)\n self.assertEqual(citations[0]['citation_type'], 'PUBLIC_LAW')\n self.assertEqual(citations[0]['components'],\n {'congress': '111', 'lawnum': '203'})\n self.assertEqual(citations[0]['locations'], [0])\n self.assertTrue('url' in citations[0])\n\n def test_statues_at_large(self):\n \"\"\"\n Ensure that we successfully parse Statues at Large citations that\n look like the following: 122 Stat. 1375\n \"\"\"\n node = Node('122 Stat. 1375', label=['1003', '5'])\n citations = ExternalCitationParser(None).process(node)\n self.assertEqual(len(citations), 1)\n self.assertEqual(citations[0]['text'], node.text)\n self.assertEqual(citations[0]['citation_type'], 'STATUTES_AT_LARGE')\n self.assertEqual(citations[0]['components'],\n {'volume': '122', 'page': '1375'})\n self.assertEqual(citations[0]['locations'], [0])\n self.assertTrue('url' in citations[0])\n\n def test_cfr(self):\n \"\"\"Ensure that we successfully parse CFR references.\"\"\"\n node = Node(\"Ref 1: 12 CFR part 1026. 
Ref 2: 12 CFR 1026.13.\",\n label=['1003'])\n citations = ExternalCitationParser(None).process(node)\n\n cit_1026 = get_citation(citations, '12 CFR part 1026')\n self.assertEqual(cit_1026['citation_type'], 'CFR')\n self.assertEqual(cit_1026['components'],\n {'cfr_title': '12', 'part': '1026'})\n self.assertEqual(cit_1026['locations'], [0])\n self.assertTrue('url' in cit_1026)\n\n cit_1026_13 = get_citation(citations, '12 CFR 1026.13')\n self.assertEqual(cit_1026_13['citation_type'], 'CFR')\n self.assertEqual(cit_1026_13['components'],\n {'cfr_title': '12', 'part': '1026', 'section': '13'})\n self.assertEqual(cit_1026_13['locations'], [0])\n self.assertTrue('url' in cit_1026_13)\n\n def test_cfr_multiple(self):\n \"\"\"Ensure that we successfully parse multiple CFR references.\"\"\"\n node = Node(\"Some text 26 CFR 601.121 through 601.125 some more text\",\n label=['1003'])\n citations = ExternalCitationParser(None).process(node)\n\n cit_601_121 = get_citation(citations, '26 CFR 601.121')\n self.assertEqual(cit_601_121['citation_type'], 'CFR')\n self.assertEqual(cit_601_121['components'],\n {'cfr_title': '26', 'part': '601', 'section': '121'})\n self.assertEqual(cit_601_121['locations'], [0])\n self.assertTrue('url' in cit_601_121)\n\n cit_601_125 = get_citation(citations, '601.125')\n self.assertEqual(cit_601_125['citation_type'], 'CFR')\n self.assertEqual(cit_601_125['components'],\n {'cfr_title': '26', 'part': '601', 'section': '125'})\n self.assertEqual(cit_601_125['locations'], [0])\n self.assertTrue('url' in cit_601_125)\n\n def test_drop_self_referential_cfr(self):\n \"\"\"\n Ensure that CFR references that refer to the reg being parsed are\n not marked as external citations.\n \"\"\"\n node = Node(\"11 CFR 110.14\", label=['110', '1'])\n citations = ExternalCitationParser(None).process(node)\n self.assertEqual(None, citations)\n\n def test_custom(self):\n \"\"\"Ensure that custom citations are found. Also verify multiple\n matches are found and word boundaries respected\"\"\"\n node = Node(\"This has MAGIC text. Not magic, or MAGICAL, but MAGIC\")\n to_patch = ('regparser.layer.external_types.settings.'\n 'CUSTOM_CITATIONS')\n\n with patch.dict(to_patch, {'MAGIC': 'http://example.com/magic'}):\n citations = ExternalCitationParser(None).process(node)\n\n self.assertEqual(1, len(citations))\n self.assertEqual(citations[0], {'text': 'MAGIC',\n 'citation_type': 'OTHER',\n 'components': {},\n 'url': 'http://example.com/magic',\n 'locations': [0, 2]})\n","repo_name":"cmc333333/regulations-parser","sub_path":"tests/layer_external_citations_tests.py","file_name":"layer_external_citations_tests.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"26724097111","text":"\"\"\"\nA famous casino is suddenly faced with a sharp decline of their revenues.\nThey decide to offer Texas hold'em also online. 
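For instance, a plain high-card hand compared against a straight flush should
give "Loss" (illustrative call; the hand-string format is described below):
    PokerHand("KS 2H 5C JD TD").compare_with(PokerHand("2C 3C AC 4C 5C"))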
Can you help them\nby writing an algorithm that can rank poker hands?\nTask:\n\nCreate a poker hand that has a method to compare itself to another poker hand:\n compare_with(self, other_hand)\nA poker hand has a constructor that accepts a string containing 5 cards:\n PokerHand(hand)\nThe characteristics of the string of cards are:\nA space is used as card seperator\nEach card consists of two characters\nThe first character is the value of the card, valid characters are:\n2, 3, 4, 5, 6, 7, 8, 9, T(en), J(ack), Q(ueen), K(ing), A(ce)\nThe second character represents the suit, valid characters are:\nS(pades), H(earts), D(iamonds), C(lubs)\n\nThe result of your poker hand compare can be one of these 3 options:\n RESULT = [\"Loss\", \"Tie\", \"Win\"]\nApply the Texas Hold'em rules for ranking the cards.\nThere is no ranking for the suits.\n\n\"\"\"\n\n\nclass PokerHand(object):\n\n RESULT = [\"Loss\", \"Tie\", \"Win\"]\n card_value = {\"2\": 2, \"3\": 3, \"4\": 4,\n \"5\": 5, \"6\": 6, \"7\": 7,\n \"8\": 8, \"9\": 9, \"T\": 10,\n \"J\": 11, \"Q\": 12, \"K\": 13, \"A\": 14}\n final_value = []\n\n def __init__(self, hand):\n temp_hand_list = hand.split()\n self.hand_list = []\n for hand in temp_hand_list:\n self.hand_list.append((self.card_value[hand[0]], hand[1]))\n self.hand_list = sorted(self.hand_list, key=lambda x: x[0])\n self.card_numbers, self.card_shapes = zip(*self.hand_list)\n\n def compare_with(self, other):\n self.final_value = self.find_result()\n other.final_value = other.find_result()\n if self.final_value[0] > other.final_value[0]:\n return self.RESULT[2]\n elif self.final_value[0] < other.final_value[0]:\n return self.RESULT[0]\n elif self.final_value[0] == other.final_value[0]:\n if self.final_value[1] > other.final_value[1]:\n return self.RESULT[2]\n elif self.final_value[1] < other.final_value[1]:\n return self.RESULT[0]\n else:\n if self.final_value[0] in [8, 7, 6, 4, 3, 2, 1]:\n filtered_res = set(self.final_value[2]) ^ set(\n other.final_value[2])\n if len(filtered_res) == 0:\n return self.RESULT[1]\n elif max(filtered_res) in self.card_numbers:\n return self.RESULT[2]\n elif max(filtered_res) in other.card_numbers:\n return self.RESULT[0]\n else:\n return self.RESULT[1]\n\n def find_result(self):\n \"\"\"10: 'Royal flush' 9: 'Straight flush' 8: 'Four of a kind'\n 7: 'Full house' 6: 'Flush' 5: 'Straight' 4: 'Three of a kind'\n 3: 'Two pairs' 2: 'Pair' 1: 'Highcard'\"\"\"\n result_val = [0, 0]\n if self.check_flush():\n # 6: 'Flush'\n result_val = [6, max(self.card_numbers),\n list(set(self.card_numbers))]\n if self.check_straight():\n if result_val[0] == 6:\n if max(self.card_numbers) == 14:\n # 10: 'Royal flush'\n result_val = [10, max(self.card_numbers), list(\n set(self.card_numbers))]\n else:\n # 9: 'Straight flush'\n result_val = [9, max(self.card_numbers),\n list(set(self.card_numbers))]\n else:\n # 5: 'Straight'\n result_val = [5, max(self.card_numbers),\n list(set(self.card_numbers))]\n if result_val[0] < 8:\n res = self.check_n_of_a_kind()\n n = res[0]\n if result_val[0] < n:\n result_val = res\n return result_val\n\n def check_flush(self):\n \"\"\" Check for Flush\t5 cards of the same suit \"\"\"\n return True if len(set(self.card_shapes)) == 1 else False\n\n def check_straight(self):\n \"\"\"\n Straight - Sequence of 5 cards in increasing value\n (Ace can precede 2 and follow up King)\n \"\"\"\n L = self.card_numbers\n if list(range(len(L)+L[0]))[L[0]:] == list(L) or sum(L) == 28:\n return True\n return False\n\n def check_n_of_a_kind(self):\n \"\"\"\n Finds if 
there is a Four/Three/Two of a kind or\n if there is a full house\n \"\"\"\n count = 1\n L = self.card_numbers\n L1 = list(set(L))\n temp = 0\n temp1 = 0\n temp2 = 0\n if len(L1) < 5:\n for card_value in L1:\n if L.count(card_value) == 4:\n # Four of a kind so return 8\n return [8, card_value, L1]\n elif (L.count(card_value) == 2 and count == 3\n or L.count(card_value) == 3 and count == 2):\n # its full house so return 7\n if temp1 != 0:\n return [7, temp1, [temp1, card_value]]\n elif temp2 != 0:\n return [7, card_value, [card_value, temp2]]\n elif L.count(card_value) == 2 and count == 2:\n # if there are 2 pairs\n return [3, card_value, L1]\n elif L.count(card_value) > 1:\n temp = card_value\n if L.count(card_value) == 3:\n temp1 = card_value\n elif L.count(card_value) == 2:\n temp2 = card_value\n count = L.count(card_value)\n if count == 3:\n # Three of a kind so return 4\n return [4, temp, L1]\n elif count == 2:\n # Two cards with the same value\n return [2, temp, L1]\n return [1, max(L1), L1]\n\n\n# PokerHand(\"4H 5H 6S 2H 3H\").compare_with(PokerHand(\"KS AS TS QS JS\"))\n# PokerHand(\"2H 3H 4H 5H 6H\").compare_with(PokerHand(\"KS AS TS QS JS\"))\n# PokerHand(\"2H 3H 4H 5H 6H\").compare_with(PokerHand(\"AS AD AC AH JD\"))\n# PokerHand(\"AS AH 2H AD AC\").compare_with(PokerHand(\"JS JD JC JH 3D\"))\n# PokerHand(\"2S AH 2H AS AC\").compare_with(PokerHand(\"JS JD JC JH AD\"))\n# PokerHand(\"2S AH 2H AS AC\").compare_with(PokerHand(\"2H 3H 5H 6H 7H\"))\n# PokerHand(\"AS 3S 4S 8S 2S\").compare_with(PokerHand(\"2H 3H 5H 6H 7H\"))\n# PokerHand(\"2H 3H 5H 6H 7H\").compare_with(PokerHand(\"2S 3H 4H 5S 6C\"))\n# PokerHand(\"2S 3H 4H 5S 6C\").compare_with(PokerHand(\"3D 4C 5H 6H 2S\"))\n# Straight wins of three of a kind\n# PokerHand(\"2S 3H 4H 5S 6C\").compare_with(PokerHand(\"AH AC 5H 6H AS\"))\n# PokerHand(\"2S 2H 4H 5S 4C\").compare_with(PokerHand(\"AH AC 5H 6H AS\"))\n# 2 Pair wins of pair\n# PokerHand(\"2S 2H 4H 5S 4C\").compare_with(PokerHand(\"AH AC 5H 6H 7S\"))\nPokerHand(\"KH KC 3S 3H 3D\").compare_with(PokerHand(\"2H 2C 3S 3H 3D\"))\n# PokerHand(\"2S AH 4H 5S KC\").compare_with(PokerHand(\"AH AC 5H 6H 7S\"))\n# PokerHand(\"2S 3H 6H 7S 9C\").compare_with(PokerHand(\"7H 3C TH 6H 9S\"))\n# PokerHand(\"4S 5H 6H TS AC\").compare_with(PokerHand(\"3S 5H 6H TS AC\"))\n# PokerHand(\"2S AH 4H 5S 6C\").compare_with(PokerHand(\"AD 4C 5H 6H 2C\"))\n","repo_name":"kavikamal/Pratice-Codewars","sub_path":"pokerrank.py","file_name":"pokerrank.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"448085647","text":"\nprint('hub py imported')\n\n#system base\nimport time\nimport os\nimport subprocess\n#PyWebView\nimport webview\nimport importlib\n\n# request base\nimport io,urllib.request\nfrom zipfile import ZipFile\nimport json\n\n# from urllib.request import urlretrieve\n\n\n#utilcode Folder\nfrom utilcode.pyhtml import *\nfrom utilcode.Frameless import *\nfrom utilcode.hubfunctions import pyupdater,getapps,buildnews,newsscrap,ahkchecker\nfrom utilcode.pyjson import jsonwriter\nfrom utilcode.DBCooper import HubDB\n\n\n\n#py Updater Settings\nfrom client_config import ClientConfig\n\n\nextrapath=''\nif __name__ == '': \n extrapath= 'apps/hub/'\nAPP_NAME = ClientConfig.APP_NAME\nAPP_VERSION = ClientConfig.APP_VERSION\nAppInit=True\nSiteURL='https://botplace.me/'\nsizew=600\nsizeh=410\nDebugUI=False\n\n\nfilename= os.path.basename(__file__)\nfilename=filename.replace('.py','')\n#On UI Show. 
Auto stop On top after 3 secs from Ui show.\n# def on_shown():\n# print('pywebview window shown')\n# window.events.shown -= on_shown\n\n\n\n#On Load Events\ndef on_loaded():\n print('DOM is ready')\n global AppInit\n if AppInit == True:\n # Prevent Multi load\n AppInit=False\n\n\n #Site DB INIT\n global hubdata\n hubdata=HubDB(window,SiteURL)\n # print(hubdata.mydata['Botplace'][0]['userdata'])\n # print(webview.token)\n\n # window.evaluate_js(\"\"\"\n # remotelight('true')\n # \"\"\")\n # Update Funcs , Start Next UI\n jsrunner('toptitle','innerHTML',\"=\",ClientConfig.APP_NAME+''' V '''+ClientConfig.APP_VERSION,hubdata.window)\n\n pyupdater(hubdata)\n ahkchecker(hubdata)\n # getnews(hubdata)\n starthub(hubdata)\n else:\n #After first init\n window.events.loaded -= on_loaded\n # window.events.loaded -= on_loaded #One Init no load\n\n#allow to call your funcs with onclick=\"pywebview.api.Funcname()\" on your html.\n# jsrunner('maindata','innerHTML',\"=\",'')\ndef starthub(hubdata):\n \n setsizer(hubdata.window,80,80) #Set widow to 80% of W and H\n \n # window.load_url('http://localhost:8010/admin/')\n window.load_url(SiteURL+'/api/apphub/')\n jsrunner('toptitle','innerHTML',\"=\",APP_NAME+''' V '''+APP_VERSION,hubdata.window)\n \n #Build News cards\n # buildnews(hubdata,NewsHTML)\n getapps(hubdata)\n\n window.on_top = False\n\n # HubDB.Newsjson(hubdata)\n # print(hubdata.NewsJson)\n \n #test\n\n# prog bar\ndef localpip(name):\n try:\n process = subprocess.Popen([\"pip.exe\", 'install','-t','lib-dynload','-r', 'apps/'+name+'/requirements.txt'], stdout=subprocess.PIPE)\n for line in process.stdout:\n # print('data printer',line.decode().strip())\n jsrunner('wheelinfo','innerHTML',\"=\",line.decode().strip(),hubdata.window)\n\n return \"done\"\n except:\n return \"fail\"\n\n\ndef mydl(url,name,typer):\n print('DL',name,typer)\n apphandler=hubdata.applistnames.index(name)\n # print(listfix)\n apphandler=hubdata.mydata['Botplace'][0]['Apps'][apphandler]\n with urllib.request.urlopen(url) as Response:\n Length = Response.getheader('content-length')\n BlockSize = 1000000 # default value\n\n if Length:\n Length = int(Length)\n BlockSize = max(4096, Length // 20)\n\n print(\"UrlLib len, blocksize: \", Length, BlockSize)\n\n BufferAll = io.BytesIO()\n Size = 0\n while True:\n BufferNow = Response.read(BlockSize)\n if not BufferNow:\n break\n BufferAll.write(BufferNow)\n Size += len(BufferNow)\n if Length:\n jsrunner('updatestatus','setAttribute',\".\",('style','width:'+\"%d%%\" % (int((Size / Length)*100))),hubdata.window)\n\n Percent = int((Size / Length)*100)\n print(f\"download: {Percent}% {url}\")\n\n print(\"Buffer All len:\", len(BufferAll.getvalue()))\n\n ##### file Extract ####\n with ZipFile(BufferAll) as zfile:\n \n zfile.extractall('apps/'+name)\n zfile.close()\n\n ### settings json build ###\n dataslist=[\n {name: {\"appdata\": []}}]\n d_new={name:[v for x in dataslist \n for k,v in x.items() \n if k ==name] \n for name in set(list(y)[0] \n for y in dataslist)}\n # print(d_new)\n print('app ver',apphandler['ver'])\n d_new[name][0]['appdata'] = {'ver': apphandler['ver']} \n \n \n # jsonwriter(d_new)\n \n # if apphandler['apptype']['title']==\"python\":\n # tmphtml='''\n #

            # <div class="progress">
            #   <div id="wheelinfo" class="progress-bar" style="width:100%">Freeing willy</div>
            # </div>
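    # The dataslist/d_new comprehensions above boil down to building a per-app
    # settings skeleton; a plainer equivalent looks like this (sketch; the
    # helper name is an addition, not part of the original module):
    def _settings_skeleton(app_name):
        # e.g. {'MyApp': [{'appdata': []}]} -- 'appdata' is later filled with {'ver': ...}
        return {app_name: [{"appdata": []}]}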
\n \n # '''\n # jsrunner('apppagebtn','innerHTML',\"=\",tmphtml,hubdata.window)\n # localpip(name)\n \n \n \n json.dumps(d_new)\n with open('apps/'+name+'/'+name+'.json', mode='w') as f:\n f.write(json.dumps(d_new, indent=2))\n\n Apppagemodel(hubdata,apphandler['id'])\n\n \n \n \n\n\n\n\n\n#Allow connect Func call from HTML onclick=\"pywebview.api.Funcname()\" ** self must be added!!!\nclass Api:\n #main Hub\n # def mainreset(self):\n # newsscrap(hubdata,NewsHTML)\n\n \n ### Frame Utils Api ###\n def appdataget(self,objid):\n print('app get')\n Apppagemodel(hubdata,objid)\n Api.uncheck(objid)\n\n ### app button handler ###\n def appbtn(self,typer,objid):\n print('app get',typer,objid)\n apphandler=hubdata.applist.index(objid)\n # print(listfix)\n apphandler=hubdata.mydata['Botplace'][0]['Apps'][apphandler]\n\n if typer == \"Download\":\n tmphtml='''\n

            <div class="progress">
              <div id="updatestatus" class="progress-bar" style="width:0%">Downloading</div>
            </div>
\n \n '''\n jsrunner('apppagebtn','innerHTML',\"=\",tmphtml,hubdata.window)\n mydl(apphandler['link1'],apphandler['title'],apphandler['apptype']['title'])\n\n if typer == \"Update\":\n print('update app')\n tmphtml='''\n

            <div class="progress">
              <div id="updatestatus" class="progress-bar" style="width:0%">Updating</div>
            </div>
\n \n '''\n jsrunner('apppagebtn','innerHTML',\"=\",tmphtml,hubdata.window)\n\n if apphandler['apptype']['title']==\"ahk\":\n print('ahk')\n mydl(apphandler['updatelink'],apphandler['title'],apphandler['apptype']['title'])\n\n\n \n elif apphandler['apptype']['title']==\"python\":\n print('python')\n mydl(apphandler['updatelink'],apphandler['title'],apphandler['apptype']['title'])\n\n\n elif apphandler['apptype']['title']==\"exe\":\n print('exe')\n \n Apppagemodel(hubdata,apphandler['id'])\n \n if typer == \"Start\": \n print('start setected')\n if apphandler['apptype']['title']==\"ahk\":\n print('ahk')\n subprocess.run([\"AHK/AutoHotkeyU64.exe\", 'apps/'+apphandler['title']+'/'+apphandler['title']+'.ahk'])\n\n elif apphandler['apptype']['title']==\"python\":\n print('python')\n \n if os.path.isfile('BotPlace App Hub.exe'):\n \n subprocess.call(['BotPlace App Hub.exe', apphandler['title']], shell=True)\n else:\n print('else call')\n importlib.import_module('apps.'+apphandler['title']+'.'+apphandler['title'])\n # exec(open('apps/'+apphandler['title']+'/'+apphandler['title']+'.py').read())\n # except:\n # exec(open('apps/'+apphandler['title']+'/'+apphandler['title']+'.py').read())\n\n # importlib.import_module('apps.'+apphandler['title']+'.'+apphandler['title'])\n # exec(open('apps/'+apphandler['title']+'/'+apphandler['title']+'.py').read())\n\n elif apphandler['apptype']['title']==\"exe\":\n print('exe')\n subprocess.run(['apps/'+apphandler['title']+'/'+apphandler['title']+'.exe'])\n\n\n #uncheck app\n def uncheck(objid):\n print('obj unchk',objid)\n jsrunner('appcheckicon'+str(objid),'innerHTML',\"=\",'',hubdata.window)\n # print(hubdata.applist)\n listfix=hubdata.applist.index(objid)\n # print(listfix)\n hubdata.mydata['Botplace'][0]['Apps'][listfix]['checked']=True\n jsonwriter(hubdata.mydata)\n \n \n #Model API\n def modalcall(self,code,objid):\n # print(code,objid)\n if code==\"news\":\n newscardmodal(hubdata,objid)\n\n\n\n #Resize Window\n def resizedrag(self):\n doresize(window) #utilcode/Frameless.py window is the target object window.\n\n #Top Window Handlers\n def topbar(self,code):\n topbarhandler(code,window) #utilcode/Frameless.py window is the target object window.\n\n #Link to Site Top window Handler\n def bottbar(self,code,linkpage):\n botbarhandler(code,linkpage)\n \n\n\nDRAG_REGION_SELECTOR = '.pywebview-drag-region'\n\nif filename == 'hub' or __name__ == \"__main__\":\n\n api = Api()\n window = webview.create_window('Bot Place',\n extrapath+'dist/base.html',on_top=True,\n width=sizew, height=sizeh,\n resizable=True,\n frameless=True,js_api=api) \n # window.events.shown += on_shown\n window.events.loaded += on_loaded\n webview.start(debug=DebugUI, \\\n http_server=False, user_agent=None)\n","repo_name":"DizzyduckAR/BotPlace","sub_path":"apps/hub/hub.py","file_name":"hub.py","file_ext":"py","file_size_in_byte":10562,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"61"} +{"seq_id":"5699670366","text":"# Baekjoon Online Judge - 2839번. 설탕 배달\n\n# DP\nN = int(input())\ndp = [5001] * (N + 3) # N보다 1 큰 값을 지정. 
적절히 값을 크게 줬다.\ndp[3] = dp[5] = 1\nfor i in range(6, N + 1):\n # 점화식 세웠을 때 i-3번째 혹은 i-5번째의 최소값에서 + 1을 해주면 된다.\n dp[i] = min(dp[i-3], dp[i-5]) + 1\nif dp[N] >= 5001:\n print(-1)\nelse:\n print(dp[N])\n","repo_name":"wnstj-yang/Algorithm","sub_path":"BOJ/BOJ_2839_2.py","file_name":"BOJ_2839_2.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1543951238","text":"import glob\nimport os.path as osp\nfrom copy import deepcopy\nfrom pathlib import Path\nfrom typing import OrderedDict as OrderedDictType\nfrom typing import Union, List, Dict\n\nimport numpy as np\nimport torch\nfrom human_body_prior.body_model.body_model import BodyModel\nfrom human_body_prior.data.dataloader import VPoserDS\nfrom human_body_prior.tools.omni_tools import copy2cpu as c2c\nfrom human_body_prior.tools.omni_tools import create_list_chunks\nfrom human_body_prior.tools.omni_tools import makepath\nfrom loguru import logger\nfrom omegaconf import OmegaConf\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom moshpp.marker_layout.edit_tools import marker_layout_load\nfrom moshpp.marker_layout.edit_tools import randomize_marker_layout_vids\nfrom moshpp.mosh_head import MoSh\n\n\ndef compute_vertex_normal_batched(vertices: torch.Tensor, indices: torch.Tensor):\n from pytorch3d.structures import Meshes\n return Meshes(verts=vertices,\n faces=indices.expand(len(vertices), -1, -1)).verts_normals_packed().view(-1, vertices.shape[1], 3)\n\n\ndef drop_dict_as_pt(data_dict: Dict[str, torch.Tensor],\n out_dir: Union[str, Path],\n aggregate_method: str = 'concatenate'):\n v = None\n for k, v in data_dict.items():\n if aggregate_method == 'concatenate':\n v = np.concatenate(data_dict[k])\n elif aggregate_method == 'stack':\n v = np.stack(data_dict[k], axis=0)\n\n outfname = makepath(out_dir, '%s.pt' % k, isfile=True)\n # print('{} {} size {}, {}'.format(type(v), k, v[0].shape, v[100].shape))\n\n if osp.exists(outfname): continue\n torch.save(torch.from_numpy(np.asarray(v)), outfname)\n if v is not None:\n logger.debug(f'Dumped {len(v)} data points as pytorch pt files at: {out_dir}')\n\n\ndef dataset_exists(dataset_dir: Union[str, Path], split_names: List[str] = None) -> bool:\n \"\"\"\n This function checks whether a valid SOMA dataset directory exists at a location\n\n Args:\n dataset_dir:\n split_names:\n\n Returns:\n\n \"\"\"\n if dataset_dir is None: return False\n if split_names is None:\n split_names = ['train', 'vald']\n\n done = []\n for split_name in split_names:\n for k in ['root_orient', 'pose_body', 'betas', 'trans']:\n out_fname = osp.join(dataset_dir, split_name, f'{k}.pt')\n done.append(osp.exists(out_fname))\n return np.all(done)\n\n\ndef sort_markers_like_superset(markers: np.ndarray, labels: list, superset_labels: list):\n \"\"\"\n Given superset labels will adjust their order to superset labels and leave non-existing markers as zero\n Args:\n markers:\n labels:\n superset_labels:\n\n Returns:\n\n \"\"\"\n num_superset_labels = len(superset_labels)\n time_length = len(markers)\n markers_rearranged = np.zeros([time_length, num_superset_labels, 3])\n\n for t in range(time_length):\n mocap_lids = [labels[t].index(l) for l in superset_labels if l in labels[t]]\n superset_lids = [superset_labels.index(l) for l in superset_labels if l in labels[t]]\n assert len(mocap_lids) != 0\n markers_rearranged[t, superset_lids] = markers[t][mocap_lids]\n return markers_rearranged\n\n\ndef 
prepare_real_marker_from_mosh_stageii_pkls(marker_vids: OrderedDictType[str, int],\n ds_cfg: OrderedDictType):\n superset_labels = list(marker_vids.keys())\n\n wanted_fields = ['betas', 'expression', 'markers', 'pose_body',\n 'pose_eye', 'pose_hand', 'pose_jaw', 'root_orient', 'trans']\n\n def run(mosh_stageii_pkl_fnames):\n out_put_data = {}\n\n for mosh_stageii_pkl_fname in tqdm(mosh_stageii_pkl_fnames):\n # breakpoint() # todo replace with proper code\n # data = read_mosh_pkl(mosh_stageii_pkl_fname, superset_labels)\n data = MoSh.load_as_amass_npz(stageii_pkl_data_or_fname=mosh_stageii_pkl_fname, include_markers=True)\n data['markers'] = sort_markers_like_superset(data['markers_obs'], data['labels_obs'], superset_labels)\n\n if data['gender'] != ds_cfg.gender: continue\n\n time_length = len(data['markers'])\n\n if 'betas' in data:\n data['betas'] = np.repeat(data['betas'][:ds_cfg.num_betas][None], repeats=time_length, axis=0)\n\n # if ds_cfg.animate_face:\n # assert 'expression' in data, ValueError('face animation is enabled yet real marker data doesnt have expressions')\n # breakpoint()\n if ds_cfg.animate_face:\n if 'expression' in data:\n data['expression'] = data['expression'][:, :ds_cfg.num_expressions]\n else:\n data['expression'] = np.zeros([time_length, ds_cfg.num_expressions])\n else:\n for k in ['pose_eye', 'pose_jaw', 'expression']:\n if k in data: data.pop(k)\n if not ds_cfg.animate_hand and 'pose_hand' in data:\n data.pop('pose_hand')\n\n ds_rate = max(1, int(data['mocap_frame_rate'] // ds_cfg.unified_frame_rate))\n\n for k in wanted_fields:\n if k in data:\n data[k] = data[k][::ds_rate]\n\n for k in wanted_fields:\n if k not in data: continue\n if k not in out_put_data: out_put_data[k] = []\n for tIds in create_list_chunks(range(data[k].shape[0]),\n ds_cfg.num_timeseq_frames,\n ds_cfg.num_frames_overlap):\n out_put_data[k].append(c2c(data[k][tIds]).reshape(1, len(tIds), -1).astype(np.float32))\n\n # breakpoint()\n\n out_put_data = {k: np.concatenate(v) for k, v in out_put_data.items()}\n out_put_data['data_is_real'] = np.ones(len(out_put_data['trans'])).astype(np.bool)\n\n logger.debug('real data: {}'.format({k: v.shape for k, v in out_put_data.items()}))\n return out_put_data.copy()\n # return out_put_data\n\n return run\n\n\ndef put_markers_on_synthetic_body(marker_vids: OrderedDictType[str, int],\n marker_type_mask: OrderedDictType[str, np.ndarray],\n m2b_dist_array: np.ndarray, surface_model_fname: Union[str, Path],\n wrist_markers_on_stick: bool = False,\n num_random_vid_ring: int = 0, num_marker_layout_augmentation: int = 1,\n enable_rnd_vid_on_face_hands: bool = False, static_props_array: np.ndarray = None):\n wrist_mask = np.array([True if l in ['RIWR', 'ROWR', 'LIWR', 'LOWR'] else False for l in marker_vids.keys()])\n wrist_mask = wrist_mask[None, :, None]\n comp_device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n marker_layout_randomizer = randomize_marker_layout_vids(marker_vids=marker_vids,\n marker_type_mask=marker_type_mask,\n n_ring=num_random_vid_ring,\n enable_rnd_vid_on_face_hands=enable_rnd_vid_on_face_hands,\n surface_model_fname=surface_model_fname)\n\n def run(body_ds, ds_cfg):\n assert len(body_ds), ValueError('provided body dataset is empty!')\n\n dataloader = DataLoader(body_ds, batch_size=32, shuffle=False, num_workers=10, drop_last=False)\n\n data_dict = {}\n # todo: check on ds_cfg.surface_model_fname\n bm = BodyModel(surface_model_fname,\n num_betas=ds_cfg.num_betas,\n 
num_expressions=ds_cfg.num_expressions).to(comp_device)\n assert bm.model_type == ds_cfg.surface_model_type, \\\n ValueError(f'the model type of the body dataset is not the same as current:'\n ' {bm.model_type} != {ds_cfg.surface_model_type}')\n\n for body_parms in tqdm(dataloader):\n body_parms = {k: v.view(body_parms['trans'].shape[0] * ds_cfg.num_timeseq_frames, -1) for k, v in\n body_parms.items()}\n bs = body_parms['trans'].shape[0]\n body_parms['betas'] = body_parms['betas'][:, :ds_cfg.num_betas]\n if ds_cfg.animate_face:\n body_parms['expression'] = body_parms['expression'][:, :ds_cfg.num_expressions]\n body_parms['pose_eye'] = body_parms['expression'].new(np.zeros([bs, 6]))\n\n with torch.no_grad():\n\n body = bm(**{k: v.to(comp_device) for k, v in body_parms.items() if k not in ['joints']})\n vertices = body['v'] if isinstance(body, dict) else body.v\n faces = body['f'] if isinstance(body, dict) else body.f\n vn = compute_vertex_normal_batched(vertices, faces)\n\n cur_m2b_dist = m2b_dist_array.copy()\n if wrist_markers_on_stick:\n cur_m2b_dist[wrist_mask] = np.random.choice([0.0095, 0.039])\n\n for _ in range(num_marker_layout_augmentation):\n new_vids = np.array(list(marker_layout_randomizer().values()))\n # print(new_vids, vertices.shape, vn.shape, vertices.shape, new_vids.max())\n markers = c2c(vertices[:, new_vids] + vn[:, new_vids] * vn.new(cur_m2b_dist))\n # if np.any(np.isnan(markers)):#, ValueError('NaN value encountered in marker data')\n # breakpoint()\n\n if static_props_array is not None:\n prop_markers = []\n for fid in range(len(markers)):\n prob_id = np.random.choice(len(static_props_array))\n prop_markers.append(static_props_array[prob_id:prob_id + 1])\n\n body_parms['prop_markers'] = np.concatenate(prop_markers, axis=0)\n\n body_parms['markers'] = markers\n\n for k in body_parms.keys():\n if k not in data_dict: data_dict[k] = []\n data_dict[k].append(\n c2c(deepcopy(body_parms[k])).reshape(bs // ds_cfg.num_timeseq_frames,\n ds_cfg.num_timeseq_frames,\n -1).astype(np.float32))\n\n results = {k: np.concatenate(v, axis=0) for k, v in data_dict.items()}\n results['data_is_real'] = np.zeros(len(results['trans'])).astype(np.bool)\n logger.debug('synthetic data: {}'.format({k: v.shape for k, v in results.items()}))\n return results.copy()\n\n return run\n\n\ndef prepare_marker_dataset(marker_dataset_dir: Union[str, Path],\n superset_fname: Union[str, Path],\n body_dataset_dir: Union[str, Path],\n real_marker_amass_splits,\n amass_pkl_dir: Union[str, Path],\n wrist_markers_on_stick: bool,\n use_real_data_for: List[str],\n use_synt_data_for: List[str],\n surface_model_fname: Union[str, Path],\n num_random_vid_ring: int = 0,\n num_marker_layout_augmentation: int = 1,\n static_props_array: np.ndarray = None,\n enable_rnd_vid_on_face_hands: bool = True,\n babel: Dict[str, List[str]] = None):\n \"\"\"\n We use this to be able to creat synthetic marker data using different marker layouts\n\n Args:\n marker_dataset_dir:\n superset_fname:\n body_dataset_dir:\n real_marker_amass_splits:\n amass_pkl_dir:\n wrist_markers_on_stick:\n use_real_data_for:\n use_synt_data_for:\n surface_model_fname:\n num_random_vid_ring:\n num_marker_layout_augmentation:\n static_props_array:\n enable_rnd_vid_on_face_hands:\n babel:\n\n Returns:\n\n \"\"\"\n if dataset_exists(marker_dataset_dir):\n logger.debug(f'Marker dataset already exists at {marker_dataset_dir}')\n return\n\n log_fname = makepath(marker_dataset_dir, 'dataset.log', isfile=True)\n log_format = \"{module}:{function}:{line} -- {level} 
-- {message}\"\n ds_logger_id = logger.add(log_fname, format=log_format, enqueue=True)\n\n marker_meta = marker_layout_load(superset_fname)\n marker_vids = marker_meta['marker_vids'] # superset should a mapping between labels: str to list of vertex ids\n marker_type_mask = marker_meta['marker_type_mask']\n m2b_dist = np.ones(len(marker_vids)) * 0.0095\n for mask_type, marker_mask in marker_meta['marker_type_mask'].items():\n m2b_dist[marker_mask] = marker_meta['m2b_distance'][mask_type]\n m2b_dist = m2b_dist[None, :, None]\n\n logger.debug(f'Creating marker dataset with superset {superset_fname} which has {len(marker_vids)} markers.')\n logger.debug(f'Superset marker type distance {dict(marker_meta[\"m2b_distance\"])} meters.')\n if num_random_vid_ring > 0:\n logger.debug(f'Will place markers randomly on {num_random_vid_ring} ring neighbourhood of a vid.')\n\n body_ds_cfg_fname = osp.join(body_dataset_dir, 'settings.yaml')\n assert osp.exists(body_ds_cfg_fname), FileNotFoundError(body_ds_cfg_fname)\n body_ds_cfg = OmegaConf.load(body_ds_cfg_fname)\n\n for split_name in ['train', 'vald']:\n # for split_name in ['vald', 'train']:\n body_ds = VPoserDS(dataset_dir=osp.join(body_dataset_dir, split_name))\n assert len(body_ds), ValueError(f'No body dataset found at: {osp.join(body_dataset_dir, split_name)}')\n\n if dataset_exists(marker_dataset_dir, [split_name]): continue\n logger.debug(f'Preparing data files for split {split_name}')\n\n if use_synt_data_for is not None and split_name in use_synt_data_for:\n logger.debug(\n f'Preparing synthetic marker data for split {split_name} from '\n f'#{len(body_ds)} body parameters corresponding to datasets {body_ds_cfg.amass_splits[split_name]}')\n synt_data_dict = put_markers_on_synthetic_body(marker_vids=marker_vids,\n marker_type_mask=marker_type_mask,\n m2b_dist_array=m2b_dist,\n wrist_markers_on_stick=wrist_markers_on_stick,\n num_random_vid_ring=num_random_vid_ring,\n num_marker_layout_augmentation=num_marker_layout_augmentation,\n enable_rnd_vid_on_face_hands=enable_rnd_vid_on_face_hands,\n surface_model_fname=surface_model_fname,\n static_props_array=static_props_array)(body_ds, body_ds_cfg)\n logger.debug(f'#{len(synt_data_dict[\"trans\"])} synthetic data points created')\n else:\n logger.debug(\n f'Not using synthetic data for split {split_name} since use_synt_data_for ({use_synt_data_for}) does not include this split')\n synt_data_dict = {}\n\n if use_real_data_for and split_name in use_real_data_for:\n logger.debug('To be able to use real data for training you need to obtain real mocap '\n 'markers of AMASS from the respective original datasets and mosh them')\n mosh_stageii_pkl_fnames = []\n for ds_name in real_marker_amass_splits[split_name]:\n cur_mosh_stageii_pkl_fnames, use_babel = [], False\n if babel and ds_name in babel:\n cur_mosh_stageii_pkl_fnames = [fname.replace('.npz', '.pkl') for fname in babel[ds_name]]\n if cur_mosh_stageii_pkl_fnames: use_babel = True\n else:\n cur_mosh_stageii_pkl_fnames = glob.glob(osp.join(amass_pkl_dir, ds_name, '*/*_stageii.pkl'))\n\n logger.debug(\n f'Obtained {len(cur_mosh_stageii_pkl_fnames):05d} sequences for real mocap from AMASS subset {ds_name}. 
used_babel = {use_babel}')\n\n mosh_stageii_pkl_fnames.extend(cur_mosh_stageii_pkl_fnames)\n\n logger.debug(\n f\"Preparing real marker data for split {split_name} from #{len(mosh_stageii_pkl_fnames)} mosh_stageii_pkl_fnames {real_marker_amass_splits[split_name]}.\")\n real_data_dict = prepare_real_marker_from_mosh_stageii_pkls(marker_vids, body_ds_cfg)(\n mosh_stageii_pkl_fnames)\n logger.debug(\n f'#{len(real_data_dict[\"trans\"])} real data points extracted from #{len(mosh_stageii_pkl_fnames)} mosh_stageii_pkl_fnames')\n else:\n logger.debug(\n f'Not using real data for split {split_name} since use_real_data_for ({use_real_data_for}) does not have this split name')\n real_data_dict = {}\n\n data_keys = list(set(list(synt_data_dict.keys()) + list(real_data_dict.keys())))\n data_dict = {k: [] for k in data_keys}\n for k in data_keys:\n # add key to data showing real and synthetic data\n if k in synt_data_dict: data_dict[k].append(synt_data_dict[k])\n if k in real_data_dict: data_dict[k].append(real_data_dict[k])\n\n drop_dict_as_pt(data_dict=data_dict, out_dir=makepath(marker_dataset_dir, split_name))\n\n save_cfg = OmegaConf.create({\n 'real_marker_amass_splits': real_marker_amass_splits,\n 'synthetic_body_amass_splits': body_ds_cfg.amass_splits,\n 'superset_fname': superset_fname,\n 'wrist_markers_on_stick': wrist_markers_on_stick,\n 'use_real_data_for': use_real_data_for,\n 'use_synt_data_for': use_synt_data_for,\n 'body_dataset_dir': body_dataset_dir,\n 'surface_model_fname': surface_model_fname,\n 'num_random_vid_ring': num_random_vid_ring,\n 'enable_rnd_vid_on_face_hands': enable_rnd_vid_on_face_hands,\n 'num_marker_layout_augmentation': num_marker_layout_augmentation,\n 'babel': babel,\n 'num_prop_marker_max': static_props_array.shape[1] if static_props_array is not None else 0,\n 'unified_frame_rate': body_ds_cfg.unified_frame_rate,\n 'num_hand_var_perseq': body_ds_cfg.num_hand_var_perseq,\n 'num_betas': body_ds_cfg.num_betas,\n 'num_expressions': body_ds_cfg.num_expressions,\n 'gender': body_ds_cfg.gender,\n 'amass_pkl_dir': amass_pkl_dir,\n 'num_timeseq_frames': body_ds_cfg.num_timeseq_frames,\n 'num_frames_overlap': body_ds_cfg.num_frames_overlap,\n })\n OmegaConf.save(config=save_cfg, f=makepath(marker_dataset_dir, 'settings.yaml', isfile=True))\n\n logger.debug(f'marker_dataset_dir: {marker_dataset_dir}')\n logger.remove(ds_logger_id)\n return marker_dataset_dir\n","repo_name":"nghorbani/soma","sub_path":"src/soma/data/marker_dataset.py","file_name":"marker_dataset.py","file_ext":"py","file_size_in_byte":19155,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"61"} +{"seq_id":"17866233835","text":"import argparse\nimport csv\nimport json\n\nimport sklearn.manifold\nimport torch\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"MiniConf Portal Command Line\")\n parser.add_argument(\"papers\", default=False, help=\"paper file\")\n\n parser.add_argument(\"embeddings\", default=False, help=\"embeddings file to shrink\")\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n emb = torch.load(args.embeddings)\n out = sklearn.manifold.TSNE(n_components=2).fit_transform(emb.cpu().numpy())\n d = []\n with open(args.papers, \"r\") as f:\n abstracts = list(csv.DictReader(f))\n for i, row in enumerate(abstracts):\n d.append({\"id\": row[\"UID\"], \"pos\": out[i].tolist()})\n 
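    # Persisting the 2-D coordinates to disk instead of stdout would be a
    # one-line change; sketched here as a comment, and the filename is an
    # assumption rather than part of the original script:
    # with open("paper_positions.json", "w") as fh:
    #     json.dump(d, fh)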
print(json.dumps(d))\n","repo_name":"Mini-Conf/Mini-Conf","sub_path":"scripts/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":522,"dataset":"github-code","pt":"61"} +{"seq_id":"6999395043","text":"import os\nimport stat\nimport urllib2\nimport tempfile\nimport cookielib\n\nfrom .saml import HTTPNegotiateAuthHandler\n\nfrom . import version\n\n__author__ = 'Duncan Macleod '\n__credits__ = 'Scott Koranda '\n__version__ = version.__version__\n\nTMPDIR = tempfile.gettempdir()\n\nCOOKIE_JAR = os.path.join(TMPDIR, '%s_cookies' % os.getenv('USER'))\nLIGO_LOGIN_URL = 'login.ligo.org'\n\n\ndef request(url, debug=False):\n \"\"\"Request the given URL using LIGO.ORG SAML authentication.\n\n This requires an active Kerberos ticket for the user, to get one:\n\n .. code:: bash\n\n kinit albert.einstein@LIGO.ORG\n\n Parameters\n ----------\n url : `str`\n URL path for request\n debug : `bool`, optional\n Query in verbose debugging mode, default: `False`\n\n Returns\n -------\n response : `str`\n output of HTTP request\n \"\"\"\n # set debug to 1 to see all HTTP(s) traffic\n debug = int(debug)\n\n # need an instance of HTTPS handler to do HTTPS\n httpshandler = urllib2.HTTPSHandler(debuglevel=debug)\n\n # use a cookie jar to store session cookies\n jar = cookielib.LWPCookieJar()\n\n # if a cookier jar exists open it and read the cookies\n # and make sure it has the right permissions\n if os.path.exists(COOKIE_JAR):\n os.chmod(COOKIE_JAR, stat.S_IRUSR | stat.S_IWUSR)\n\n # set ignore_discard so that session cookies are preserved\n jar.load(COOKIE_JAR, ignore_discard=True)\n\n # create a cookie handler from the cookier jar\n cookiehandler = urllib2.HTTPCookieProcessor(jar)\n # need a redirect handler to follow redirects\n redirecthandler = urllib2.HTTPRedirectHandler()\n\n # need an auth handler that can do negotiation.\n # input parameter is the Kerberos service principal.\n principal = 'HTTP@%s' % LIGO_LOGIN_URL\n authhandler = HTTPNegotiateAuthHandler(service_principal=principal)\n\n # create the opener.\n opener = urllib2.build_opener(authhandler, cookiehandler, httpshandler,\n redirecthandler)\n\n # prepare the request object\n req = urllib2.Request(url)\n\n # use the opener and the request object to make the request.\n response = opener.open(req)\n\n # save the session cookies to a file so that they can\n # be used again without having to authenticate\n jar.save(COOKIE_JAR, ignore_discard=True)\n\n return response\n","repo_name":"lscsoft/cis","sub_path":"cis/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2911810862","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 6 08:21:24 2017\n\n@author: Nat\n\"\"\"\n\ndef myAutoArima(fullTs, config):\n '''\n entry point for arima model. 
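    A call sketch (hypothetical values; the config object must supply the
    fields used below, e.g. maxD, maxDD, kpssRegType, significance,
    recommendedFreq and the initial order lists):
        best = myAutoArima(ts_dataframe, config)
        # best == [p, d, q, P, D, Q, frequency]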
\n the function detects seasonality, finds both trend and seasonal\n differencing order to ensure stability of the timeseries, \n and systematically searches for the best parameters [p, q, P, Q]\n for reference see R forecast package from Hyndman\n https://github.com/robjhyndman/forecast/blob/master/R/newarima2.R\n input: univariate timeseries in either pandas dataframe \n or numpy array format.\n output: \n params for best SARIMAX model\n '''\n import sys\n import utilities as ut\n import numpy as np\n import itertools\n \n tsArray = ut.pdDFtoArray(fullTs)\n\n # find differencing order and generate diffed timeseries\n d = min(config.maxD, ut.findTrendDiff(tsArray, config.maxD, \n config.kpssRegType, \n config.significance))\n tsArrayDiff = np.diff(tsArray, n = d)\n\n # detect seasonality\n frequency = ut.seasonalDetection(tsArrayDiff, config.recommendedFreq, \n config.significance)\n if not isinstance(frequency, int):\n frequency = int(frequency)\n if frequency not in config.recommendedFreq + [0]:\n print(\"warning: auto-detected frequency %d may be incorrect.\" %frequency) \n \n if frequency <= 1:\n D = 0\n tsArraySeasonalDiff = tsArrayDiff\n else: \n # find seasonal differencing order and generate diffed timeseries\n D = min(config.maxDD, ut.findSeasonalDiff(tsArrayDiff, frequency,config.maxDD))\n tsArraySeasonalDiff = ut.seasonalDiff(tsArrayDiff, \n frequency = frequency, \n order = D, padding = False)\n \n curBestAIC = sys.maxsize\n pastParams = list()\n curBestModel = np.zeros(7)\n finished = False\n \n # search step of arima seasonal P, Q orders\n PQDeltas = [-1, 0, 1] if frequency > 1 else [0, 0, 0]\n # search step of arima non-seasonal p, q orders\n pqDeltas = [-1, 0, 1]\n # search space [p, q, P, Q] initialized as suggested by Hyndman\n nextParams = config.seasonalInitialOrder if frequency > 1 \\\n else config.nonSeasonalInitialOrder\n \n while not finished:\n modelList = list(map(lambda x: mySarimaxGridSearch(config, tsArraySeasonalDiff, frequency, x), nextParams))\n try:\n newBestAIC = min(item[4] for item in modelList)\n except IndexError: \n newBestAIC = sys.maxsize\n if newBestAIC == sys.maxsize:\n finished = True\n \n if newBestAIC < curBestAIC:\n curBestAIC = newBestAIC\n curBestModel = [item for item in modelList if item[4] == curBestAIC][0][0:4]\n \n # log used parameters to save time in consequent searches\n pastParams = pastParams + nextParams\n \n nextParams = list()\n for r in itertools.product(pqDeltas, pqDeltas, PQDeltas, PQDeltas): \n p = max(0, curBestModel[0] + r[0])\n q = max(0, curBestModel[1] + r[1])\n P = max(0, curBestModel[2] + r[2])\n Q = max(0, curBestModel[3] + r[3])\n nextParams = nextParams + [[p,q,P,Q]]\n nextParams.sort()\n nextParams = [item for item,_ in itertools.groupby(nextParams) \\\n if (sum(item)>0) & (item not in pastParams)]\n if nextParams == []:\n finished = True\n \n if curBestModel[2] + curBestModel[3] + D == 0: frequency = 0\n return [curBestModel[0], d, curBestModel[1], curBestModel[2], D, curBestModel[3], frequency]\n \ndef mySarimaxGridSearch(config, fullTs, frequency = 12, params = [1,0,1,0]):\n '''\n statsmodels reference:\n http://www.statsmodels.org/dev/statespace.html#seasonal-autoregressive-integrated-moving-average-with-exogenous-regressors-sarimax\n details through links to class SARIMAX and SARIMAXResults\n http://www.statsmodels.org/dev/examples/notebooks/generated/statespace_sarimax_stata.html\n '''\n import sys\n import statsmodels.api as sm\n if (params[0] > config.maxP) | (params[1] > config.maxQ) \\\n | (params[2] > 
config.maxPP) | (params[3] > config.maxQQ):\n return(params + [sys.maxsize]), False\n \n if params[2] + params[3] == 0: frequency = 0\n try:\n model = sm.tsa.statespace.SARIMAX(fullTs, \n order = (params[0], 0, params[1]), \n seasonal_order = (params[2], 0, params[3], frequency), \n trend = config.kpssRegType)\n modelfit = model.fit()\n aic = modelfit.aic\n except:\n aic = sys.maxsize\n return params + [aic]\n","repo_name":"natnij/timeseries","sub_path":"myArima.py","file_name":"myArima.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"46025632530","text":"# Autor: Eliazar Noa Llasccanoa\n# Código: 193003\n# Propósito del código: Este código implementa un trazador de rayos para renderizar una escena 3D con objetos\n# geométricos (esferas) y efectos de iluminación. El objetivo es generar una imagen\n# realista utilizando técnicas de sombreado y reflexión. Se pueden personalizar los\n# objetos de la escena y sus propiedades para obtener diferentes resultados.\n# Fecha: ---\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef normalizar(vector):\n return vector / np.linalg.norm(vector)\n\ndef reflejado(vector, eje):\n return vector - 2 * np.dot(vector, eje) * eje\n\ndef interseccion_esfera(centro, radio, origen_rayo, direccion_rayo):\n b = 2 * np.dot(direccion_rayo, origen_rayo - centro)\n c = np.linalg.norm(origen_rayo - centro) ** 2 - radio ** 2\n delta = b ** 2 - 4 * c\n if delta > 0:\n t1 = (-b + np.sqrt(delta)) / 2\n t2 = (-b - np.sqrt(delta)) / 2\n if t1 > 0 and t2 > 0:\n return min(t1, t2)\n return None\n\ndef objeto_intersectado_mas_cercano(objetos, origen_rayo, direccion_rayo):\n distancias = [interseccion_esfera(obj['centro'], obj['radio'], origen_rayo, direccion_rayo) for obj in objetos]\n objeto_mas_cercano = None\n distancia_minima = np.inf\n for indice, distancia in enumerate(distancias):\n if distancia and distancia < distancia_minima:\n distancia_minima = distancia\n objeto_mas_cercano = objetos[indice]\n return objeto_mas_cercano, distancia_minima\n# Configuración de la escena\n\nancho = 300\naltura = 200\n\nprofundidad_maxima = 3\n\ncamara = np.array([0, 0, 1.5])\nratio = float(ancho) / altura\npantalla = (-1, 1 / ratio, 1, -1 / ratio)# izquierda, arriba, derecha, abajo\n\nluz = {\n 'posicion': np.array([-1, 5, 1]),\n 'ambiente': np.array([0.7, 0.7, 0.7]),\n 'difuso': np.array([0.7, 0.7, 0.7]),\n 'especular': np.array([0.7, 0.7, 0.7])\n}\n# parámetros\npiso = -0.7\natras = -1\nradio1 = 0.5\ncenter = piso + radio1\nlejos = 3\n\nobjetos = [\n # Objeto 1: Esfera gris claro en la parte frontal izquierda\n { 'centro': np.array([0, piso + 0.5, -1.5 + atras]), 'radio': 0.5, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0.5, 0.5, 0.5]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Objeto 2: Esfera azul oscuro en la parte frontal derecha\n { 'centro': np.array([-1.5, piso + 0.5, -1 + atras]), 'radio': 0.5, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0, 0, 0.7]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Objeto 3: Esfera magenta (rojo y azul) en la parte frontal central\n { 'centro': np.array([1, piso + 0.3, -0.8 + atras]), 'radio': 0.3, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0.7, 0, 0.7]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Objeto 4: Esfera verde en la parte trasera derecha\n { 'centro': np.array([2.2, piso + 0.4, -1 
# Scene configuration\n\nancho = 300\naltura = 200\n\nprofundidad_maxima = 3\n\ncamara = np.array([0, 0, 1.5])\nratio = float(ancho) / altura\npantalla = (-1, 1 / ratio, 1, -1 / ratio)# left, top, right, bottom\n\nluz = {\n 'posicion': np.array([-1, 5, 1]),\n 'ambiente': np.array([0.7, 0.7, 0.7]),\n 'difuso': np.array([0.7, 0.7, 0.7]),\n 'especular': np.array([0.7, 0.7, 0.7])\n}\n# parameters\npiso = -0.7\natras = -1\nradio1 = 0.5\ncenter = piso + radio1\nlejos = 3\n\nobjetos = [\n # Object 1: light gray sphere at the front left\n { 'centro': np.array([0, piso + 0.5, -1.5 + atras]), 'radio': 0.5, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0.5, 0.5, 0.5]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Object 2: dark blue sphere at the front right\n { 'centro': np.array([-1.5, piso + 0.5, -1 + atras]), 'radio': 0.5, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0, 0, 0.7]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Object 3: magenta (red and blue) sphere at the front center\n { 'centro': np.array([1, piso + 0.3, -0.8 + atras]), 'radio': 0.3, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0.7, 0, 0.7]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Object 4: green sphere at the back right\n { 'centro': np.array([2.2, piso + 0.4, -1 + atras]), 'radio': 0.4, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0, 0.7, 0]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Object 5: red sphere at the back left\n { 'centro': np.array([-1.2, piso + 0.4, -2 + atras]), 'radio': 0.4, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0.7, 0, 0]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Object 6: red sphere at the far back\n { 'centro': np.array([1.6, piso + 1, -2 + atras]), 'radio': 1, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0.7, 0, 0]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Object 7: green sphere at the far front\n { 'centro': np.array([-0.6, piso + 0.3, -3 + atras]), 'radio': 0.3, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0, 0.7, 0]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Object 8: magenta (red and blue) sphere at the far back\n { 'centro': np.array([-1.5, piso + 0.8, -4 + atras]), 'radio': 0.8, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0.7, 0, 0.7]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Object 9: dark blue sphere at the far front\n { 'centro': np.array([0.8, piso + 0.7, -5 + atras]), 'radio': 0.7, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0, 0, 0.7]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.5 },\n # Object 10: 'infinite' floor plane, approximated by a huge sphere\n { 'centro': np.array([0, -10000, 0]), 'radio': 10000 + piso, 'ambiente': np.array([0.1, 0.1, 0.1]),\n 'difuso': np.array([0.5, 0.5, 0.5]), 'especular': np.array([1, 1, 1]), 'brillo': 100,\n 'reflexion': 0.2 }\n]\n\nimagen = np.zeros((altura, ancho, 3))\nfor i, y in enumerate(np.linspace(pantalla[1], pantalla[3], altura)):\n for j, x in enumerate(np.linspace(pantalla[0], pantalla[2], ancho)):\n # the screen is at the origin\n pixel = np.array([x, y, 0])\n origen = camara\n direccion = normalizar(pixel - origen)\n\n color = np.zeros((3))\n reflexion = 1\n \n for k in range(profundidad_maxima):\n # check for intersections\n objeto_mas_cercano, distancia_minima = objeto_intersectado_mas_cercano(objetos, origen, direccion)\n if objeto_mas_cercano is None:\n break\n interseccion = origen + distancia_minima * direccion\n normal_a_superficie = normalizar(interseccion - objeto_mas_cercano['centro'])\n punto_desplazado = interseccion + 1e-5 * normal_a_superficie\n interseccion_con_luz = normalizar(luz['posicion'] - punto_desplazado)\n\n _, distancia_minima = objeto_intersectado_mas_cercano(objetos, punto_desplazado, interseccion_con_luz)\n interseccion_con_luz_distancia = np.linalg.norm(luz['posicion'] - interseccion)\n esta_sombreado = distancia_minima < interseccion_con_luz_distancia\n\n if esta_sombreado:\n break\n \n iluminacion = np.zeros((3))\n \n # ambient\n iluminacion += objeto_mas_cercano['ambiente'] * luz['ambiente']\n \n # diffuse\n iluminacion += objeto_mas_cercano['difuso'] * luz['difuso'] * np.dot(interseccion_con_luz, normal_a_superficie)\n \n # specular\n interseccion_a_camara = normalizar(camara - interseccion)\n H = normalizar(interseccion_con_luz + interseccion_a_camara)\n iluminacion += objeto_mas_cercano['especular'] * luz['especular'] * np.dot(normal_a_superficie, H) ** (objeto_mas_cercano['brillo'] / 4)\n\n # reflection\n color += reflexion * iluminacion\n reflexion *= 
objeto_mas_cercano['reflexion']\n \n origen = punto_desplazado\n direccion = reflejado(direccion, normal_a_superficie)\n \n imagen[i, j] = np.clip(color, 0, 1)\n print(\"%d/%d\" % (i + 1, altura))\n \nplt.imsave('imagen2.png', imagen)\nimgplot = plt.imshow(imagen)\nplt.show()\n","repo_name":"EliazarNoaLlas/Curso_GraficaII","sub_path":"glutdlls37beta/GRAFICAII/UnidadIII/193003_Laboratorio/EjercicioB2.py","file_name":"EjercicioB2.py","file_ext":"py","file_size_in_byte":7321,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"60860087","text":"try:\n import ssl\n SSL_AVAILABLE = True\nexcept:\n SSL_AVAILABLE = False\n\nimport sys\nimport socket\nimport logging\nimport uuid\nimport platform\nimport os\n\nfrom functools import wraps\n\nfrom .authentication import (MySQL41AuthPlugin, PlainAuthPlugin,\n Sha256MemoryAuthPlugin)\nfrom .errors import InterfaceError, OperationalError, ProgrammingError\nfrom .compat import PY3, STRING_TYPES, UNICODE_TYPES\nfrom .crud import Schema\nfrom .constants import SSLMode, Auth\nfrom .helpers import get_item_or_attr\nfrom .protocol import Protocol, MessageReaderWriter\nfrom .result import Result, RowResult, DocResult\nfrom .statement import SqlStatement, AddStatement, quote_identifier\nfrom .protobuf import Protobuf\n\n\n_DROP_DATABASE_QUERY = \"DROP DATABASE IF EXISTS `{0}`\"\n_CREATE_DATABASE_QUERY = \"CREATE DATABASE IF NOT EXISTS `{0}`\"\n_LOGGER = logging.getLogger(\"mysqlx\")\n\nclass SocketStream(object):\n \"\"\"Implements a socket stream.\"\"\"\n def __init__(self):\n self._socket = None\n self._is_ssl = False\n self._is_socket = False\n self._host = None\n\n def connect(self, params):\n \"\"\"Connects to a TCP service.\n\n Args:\n params (tuple): The connection parameters.\n\n Raises:\n :class:`mysqlx.InterfaceError`: If Unix socket is not supported.\n \"\"\"\n try:\n self._socket = socket.create_connection(params)\n self._host = params[0]\n except ValueError:\n try:\n self._socket = socket.socket(socket.AF_UNIX)\n self._is_socket = True\n self._socket.connect(params)\n except AttributeError:\n raise InterfaceError(\"Unix socket unsupported\")\n\n def read(self, count):\n \"\"\"Receive data from the socket.\n\n Args:\n count (int): Buffer size.\n\n Returns:\n bytes: The data received.\n \"\"\"\n if self._socket is None:\n raise OperationalError(\"MySQLx Connection not available\")\n buf = []\n while count > 0:\n data = self._socket.recv(count)\n if data == b\"\":\n raise RuntimeError(\"Unexpected connection close\")\n buf.append(data)\n count -= len(data)\n return b\"\".join(buf)\n\n def sendall(self, data):\n \"\"\"Send data to the socket.\n\n Args:\n data (bytes): The data to be sent.\n \"\"\"\n if self._socket is None:\n raise OperationalError(\"MySQLx Connection not available\")\n self._socket.sendall(data)\n\n def close(self):\n \"\"\"Close the socket.\"\"\"\n if not self._socket:\n return\n\n self._socket.close()\n self._socket = None\n\n def set_ssl(self, ssl_mode, ssl_ca, ssl_crl, ssl_cert, ssl_key):\n \"\"\"Set SSL parameters.\n\n Args:\n ssl_mode (str): SSL mode.\n ssl_ca (str): The certification authority certificate.\n ssl_crl (str): The certification revocation lists.\n ssl_cert (str): The certificate.\n ssl_key (str): The certificate key.\n\n Raises:\n :class:`mysqlx.RuntimeError`: If Python installation has no SSL\n support.\n :class:`mysqlx.InterfaceError`: If the parameters are invalid.\n \"\"\"\n if not SSL_AVAILABLE:\n self.close()\n raise RuntimeError(\"Python installation 
has no SSL support\")\n\n context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n context.load_default_certs()\n\n if ssl_ca:\n try:\n context.load_verify_locations(ssl_ca)\n context.verify_mode = ssl.CERT_REQUIRED\n except (IOError, ssl.SSLError) as err:\n self.close()\n raise InterfaceError(\"Invalid CA Certificate: {}\".format(err))\n\n if ssl_crl:\n try:\n context.load_verify_locations(ssl_crl)\n context.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF\n except (IOError, ssl.SSLError) as err:\n self.close()\n raise InterfaceError(\"Invalid CRL: {}\".format(err))\n\n if ssl_cert:\n try:\n context.load_cert_chain(ssl_cert, ssl_key)\n except (IOError, ssl.SSLError) as err:\n self.close()\n raise InterfaceError(\"Invalid Certificate/Key: {}\".format(err))\n\n self._socket = context.wrap_socket(self._socket)\n if ssl_mode == SSLMode.VERIFY_IDENTITY:\n hostnames = []\n # Windows does not return loopback aliases on gethostbyaddr\n if os.name == 'nt' and (self._host == 'localhost' or \\\n self._host == '127.0.0.1'):\n hostnames = ['localhost', '127.0.0.1']\n aliases = socket.gethostbyaddr(self._host)\n hostnames.extend([aliases[0]] + aliases[1])\n match_found = False\n errs = []\n for hostname in hostnames:\n try:\n ssl.match_hostname(self._socket.getpeercert(), hostname)\n except ssl.CertificateError as err:\n errs.append(err)\n else:\n match_found = True\n break\n if not match_found:\n self.close()\n raise InterfaceError(\"Unable to verify server identity: {}\"\n \"\".format(\", \".join(errs)))\n self._is_ssl = True\n\n def is_ssl(self):\n \"\"\"Verifies if SSL is being used.\n\n Returns:\n bool: Returns `True` if SSL is being used.\n \"\"\"\n return self._is_ssl\n\n def is_socket(self):\n \"\"\"Verifies if socket connection is being used.\n\n Returns:\n bool: Returns `True` if socket connection is being used.\n \"\"\"\n return self._is_socket\n\n def is_secure(self):\n \"\"\"Verifies if connection is secure.\n\n Returns:\n bool: Returns `True` if connection is secure.\n \"\"\"\n return self._is_ssl or self._is_socket\n\n def is_open(self):\n \"\"\"Verifies if connection is open.\n\n Returns:\n bool: Returns `True` if connection is open.\n \"\"\"\n return self._socket is not None\n\n\ndef catch_network_exception(func):\n \"\"\"Decorator used to catch socket.error or RuntimeError.\n\n Raises:\n :class:`mysqlx.InterfaceError`: If `socket.Error` or `RuntimeError`\n is raised.\n \"\"\"\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n \"\"\"Wrapper function.\"\"\"\n try:\n return func(self, *args, **kwargs)\n except (socket.error, RuntimeError):\n self.disconnect()\n raise InterfaceError(\"Cannot connect to host\")\n return wrapper\n\n\nclass Connection(object):\n \"\"\"Connection to a MySQL Server.\n\n Args:\n settings (dict): Dictionary with connection settings.\n \"\"\"\n def __init__(self, settings):\n self.settings = settings\n self.stream = SocketStream()\n self.reader_writer = None\n self.protocol = None\n self._user = settings.get(\"user\")\n self._password = settings.get(\"password\")\n self._schema = settings.get(\"schema\")\n self._active_result = None\n self._routers = settings.get(\"routers\", [])\n\n if 'host' in settings and settings['host']:\n self._routers.append({\n 'host': settings.get('host'),\n 'port': settings.get('port', None)\n })\n\n self._cur_router = -1\n self._can_failover = True\n self._ensure_priorities()\n self._routers.sort(key=lambda x: x['priority'], reverse=True)\n\n def fetch_active_result(self):\n \"\"\"Fetch active result.\"\"\"\n if self._active_result is not None:\n 
self._active_result.fetch_all()\n self._active_result = None\n\n def set_active_result(self, result):\n \"\"\"Set active result.\n\n Args:\n `Result`: It can be :class:`mysqlx.Result`,\n :class:`mysqlx.BufferingResult`,\n :class:`mysqlx.RowResult`, :class:`mysqlx.SqlResult` or\n :class:`mysqlx.DocResult`.\n \"\"\"\n self._active_result = result\n\n def _ensure_priorities(self):\n \"\"\"Ensure priorities.\n\n Raises:\n :class:`mysqlx.ProgrammingError`: If priorities are invalid.\n \"\"\"\n priority_count = 0\n priority = 100\n\n for router in self._routers:\n pri = router.get('priority', None)\n if pri is None:\n priority_count += 1\n router[\"priority\"] = priority\n elif pri > 100:\n raise ProgrammingError(\"The priorities must be between 0 and \"\n \"100\", 4007)\n priority -= 1\n\n if 0 < priority_count < len(self._routers):\n raise ProgrammingError(\"You must either assign no priority to any \"\n \"of the routers or give a priority for \"\n \"every router\", 4000)\n\n def _get_connection_params(self):\n \"\"\"Returns the connection parameters.\n\n Returns:\n tuple: The connection parameters.\n \"\"\"\n if not self._routers:\n self._can_failover = False\n if \"host\" in self.settings:\n return self.settings[\"host\"], self.settings.get(\"port\", 33060)\n if \"socket\" in self.settings:\n return self.settings[\"socket\"]\n return (\"localhost\", 33060,)\n\n # Reset routers status once all are tried\n if not self._can_failover or self._cur_router == -1:\n self._cur_router = -1\n self._can_failover = True\n for router in self._routers:\n router['available'] = True\n\n self._cur_router += 1\n host = self._routers[self._cur_router][\"host\"]\n port = self._routers[self._cur_router][\"port\"]\n\n if self._cur_router > 0:\n self._routers[self._cur_router-1][\"available\"] = False\n if self._cur_router >= len(self._routers) - 1:\n self._can_failover = False\n\n return (host, port,)\n\n def connect(self):\n \"\"\"Attempt to connect to the MySQL server.\n\n Raises:\n :class:`mysqlx.InterfaceError`: If fails to connect to the MySQL\n server.\n \"\"\"\n # Loop and check\n error = None\n while self._can_failover:\n try:\n self.stream.connect(self._get_connection_params())\n self.reader_writer = MessageReaderWriter(self.stream)\n self.protocol = Protocol(self.reader_writer)\n self._handle_capabilities()\n self._authenticate()\n return\n except socket.error as err:\n error = err\n\n if len(self._routers) <= 1:\n raise InterfaceError(\"Cannot connect to host: {0}\".format(error))\n raise InterfaceError(\"Failed to connect to any of the routers\", 4001)\n\n def _handle_capabilities(self):\n \"\"\"Handle capabilities.\n\n Raises:\n :class:`mysqlx.OperationalError`: If SSL is not enabled at the\n server.\n :class:`mysqlx.RuntimeError`: If support for SSL is not available\n in Python.\n \"\"\"\n if self.settings.get(\"ssl-mode\") == SSLMode.DISABLED:\n return\n if self.stream.is_socket():\n if self.settings.get(\"ssl-mode\"):\n _LOGGER.warning(\"SSL not required when using Unix socket.\")\n return\n\n data = self.protocol.get_capabilites().capabilities\n if not (get_item_or_attr(data[0], \"name\").lower() == \"tls\"\n if data else False):\n self.close_connection()\n raise OperationalError(\"SSL not enabled at server\")\n\n is_ol7 = False\n if platform.system() == \"Linux\":\n # pylint: disable=W1505\n distname, version, _ = platform.linux_distribution()\n try:\n is_ol7 = \"Oracle Linux\" in distname and \\\n version.split(\".\")[0] == \"7\"\n except IndexError:\n is_ol7 = False\n\n if sys.version_info < 
(2, 7, 9) and not is_ol7:\n self.close_connection()\n raise RuntimeError(\"The support for SSL is not available for \"\n \"this Python version\")\n\n self.protocol.set_capabilities(tls=True)\n self.stream.set_ssl(self.settings.get(\"ssl-mode\", SSLMode.REQUIRED),\n self.settings.get(\"ssl-ca\"),\n self.settings.get(\"ssl-crl\"),\n self.settings.get(\"ssl-cert\"),\n self.settings.get(\"ssl-key\"))\n\n def _authenticate(self):\n \"\"\"Authenticate with the MySQL server.\"\"\"\n auth = self.settings.get(\"auth\")\n if auth:\n if auth == Auth.PLAIN:\n self._authenticate_plain()\n elif auth == Auth.SHA256_MEMORY:\n self._authenticate_sha256_memory()\n elif auth == Auth.MYSQL41:\n self._authenticate_mysql41()\n elif self.stream.is_secure():\n # Use PLAIN if no auth provided and connection is secure\n self._authenticate_plain()\n else:\n # Use MYSQL41 if connection is not secure\n try:\n self._authenticate_mysql41()\n except InterfaceError:\n pass\n else:\n return\n # Try SHA256_MEMORY if MYSQL41 fails\n try:\n self._authenticate_sha256_memory()\n except InterfaceError:\n raise InterfaceError(\"Authentication failed using MYSQL41 and \"\n \"SHA256_MEMORY, check username and \"\n \"password or try a secure connection\")\n\n def _authenticate_mysql41(self):\n \"\"\"Authenticate with the MySQL server using `MySQL41AuthPlugin`.\"\"\"\n plugin = MySQL41AuthPlugin(self._user, self._password)\n self.protocol.send_auth_start(plugin.auth_name())\n extra_data = self.protocol.read_auth_continue()\n self.protocol.send_auth_continue(plugin.auth_data(extra_data))\n self.protocol.read_auth_ok()\n\n def _authenticate_plain(self):\n \"\"\"Authenticate with the MySQL server using `PlainAuthPlugin`.\"\"\"\n if not self.stream.is_secure():\n raise InterfaceError(\"PLAIN authentication is not allowed via \"\n \"unencrypted connection\")\n plugin = PlainAuthPlugin(self._user, self._password)\n self.protocol.send_auth_start(plugin.auth_name(),\n auth_data=plugin.auth_data())\n self.protocol.read_auth_ok()\n\n def _authenticate_sha256_memory(self):\n \"\"\"Authenticate with the MySQL server using `Sha256MemoryAuthPlugin`.\"\"\"\n plugin = Sha256MemoryAuthPlugin(self._user, self._password)\n self.protocol.send_auth_start(plugin.auth_name())\n extra_data = self.protocol.read_auth_continue()\n self.protocol.send_auth_continue(plugin.auth_data(extra_data))\n self.protocol.read_auth_ok()\n\n @catch_network_exception\n def send_sql(self, sql, *args):\n \"\"\"Execute a SQL statement.\n\n Args:\n sql (str): The SQL statement.\n *args: Arbitrary arguments.\n\n Raises:\n :class:`mysqlx.ProgrammingError`: If the SQL statement is not a\n valid string.\n \"\"\"\n if not isinstance(sql, STRING_TYPES):\n raise ProgrammingError(\"The SQL statement is not a valid string\")\n elif not PY3 and isinstance(sql, UNICODE_TYPES):\n self.protocol.send_execute_statement(\n \"sql\", bytes(bytearray(sql, \"utf-8\")), args)\n else:\n self.protocol.send_execute_statement(\"sql\", sql, args)\n\n @catch_network_exception\n def send_insert(self, statement):\n \"\"\"Send an insert statement.\n\n Args:\n statement (`Statement`): It can be :class:`mysqlx.InsertStatement`\n or :class:`mysqlx.AddStatement`.\n\n Returns:\n :class:`mysqlx.Result`: A result object.\n \"\"\"\n self.protocol.send_insert(statement)\n ids = None\n if isinstance(statement, AddStatement):\n ids = statement.ids\n return Result(self, ids)\n\n @catch_network_exception\n def find(self, statement):\n \"\"\"Send an find statement.\n\n Args:\n statement (`Statement`): It can be 
:class:`mysqlx.ReadStatement`\n or :class:`mysqlx.FindStatement`.\n\n Returns:\n `Result`: It can be :class:`mysqlx.DocResult` or\n :class:`mysqlx.RowResult`.\n \"\"\"\n self.protocol.send_find(statement)\n return DocResult(self) if statement.is_doc_based() else RowResult(self)\n\n @catch_network_exception\n def delete(self, statement):\n \"\"\"Send a delete statement.\n\n Args:\n statement (`Statement`): It can be :class:`mysqlx.RemoveStatement`\n or :class:`mysqlx.DeleteStatement`.\n\n Returns:\n :class:`mysqlx.Result`: The result object.\n \"\"\"\n self.protocol.send_delete(statement)\n return Result(self)\n\n @catch_network_exception\n def update(self, statement):\n \"\"\"Send an update statement.\n\n Args:\n statement (`Statement`): It can be :class:`mysqlx.ModifyStatement`\n or :class:`mysqlx.UpdateStatement`.\n\n Returns:\n :class:`mysqlx.Result`: The result object.\n \"\"\"\n self.protocol.send_update(statement)\n return Result(self)\n\n @catch_network_exception\n def execute_nonquery(self, namespace, cmd, raise_on_fail, *args):\n \"\"\"Execute a non-query command.\n\n Args:\n namespace (str): The namespace.\n cmd (str): The command.\n raise_on_fail (bool): `True` to raise on fail.\n *args: Arbitrary arguments.\n\n Raises:\n :class:`mysqlx.OperationalError`: On errors.\n\n Returns:\n :class:`mysqlx.Result`: The result object.\n \"\"\"\n try:\n self.protocol.send_execute_statement(namespace, cmd, args)\n return Result(self)\n except OperationalError:\n if raise_on_fail:\n raise\n\n @catch_network_exception\n def execute_sql_scalar(self, sql, *args):\n \"\"\"Execute a SQL statement and return a scalar.\n\n Args:\n sql (str): The SQL statement.\n *args: Arbitrary arguments.\n\n Raises:\n :class:`mysqlx.InterfaceError`: If no data found.\n\n Returns:\n The scalar value in the first column of the first row.\n \"\"\"\n self.protocol.send_execute_statement(\"sql\", sql, args)\n result = RowResult(self)\n result.fetch_all()\n if result.count == 0:\n raise InterfaceError(\"No data found\")\n return result[0][0]\n\n @catch_network_exception\n def get_row_result(self, cmd, *args):\n \"\"\"Returns the row result.\n\n Args:\n cmd (str): The command.\n *args: Arbitrary arguments.\n\n Returns:\n :class:`mysqlx.RowResult`: The result object.\n \"\"\"\n self.protocol.send_execute_statement(\"xplugin\", cmd, args)\n return RowResult(self)\n\n @catch_network_exception\n def read_row(self, result):\n \"\"\"Read row.\n\n Args:\n result (:class:`mysqlx.RowResult`): The result object.\n \"\"\"\n return self.protocol.read_row(result)\n\n @catch_network_exception\n def close_result(self, result):\n \"\"\"Close result.\n\n Args:\n result (:class:`mysqlx.Result`): The result object.\n \"\"\"\n self.protocol.close_result(result)\n\n @catch_network_exception\n def get_column_metadata(self, result):\n \"\"\"Get column metadata.\n\n Args:\n result (:class:`mysqlx.Result`): The result object.\n \"\"\"\n return self.protocol.get_column_metadata(result)\n\n def is_open(self):\n \"\"\"Check if connection is open.\n\n Returns:\n bool: `True` if connection is open.\n \"\"\"\n return self.stream.is_open()\n\n def disconnect(self):\n \"\"\"Disconnect from server.\"\"\"\n if not self.is_open():\n return\n self.stream.close()\n\n def close_session(self):\n \"\"\"Close a successfully authenticated session.\"\"\"\n if not self.is_open():\n return\n if self._active_result is not None:\n self._active_result.fetch_all()\n self.protocol.send_close()\n self.protocol.read_ok()\n self.stream.close()\n\n def close_connection(self):\n \"\"\"Announce to the server that the
client wants to close the\n connection. Discards any session state of the server.\n \"\"\"\n if not self.is_open():\n return\n if self._active_result is not None:\n self._active_result.fetch_all()\n self.protocol.send_connection_close()\n self.protocol.read_ok()\n self.stream.close()\n\n\nclass Session(object):\n \"\"\"Enables interaction with a X Protocol enabled MySQL Product.\n\n The functionality includes:\n\n - Accessing available schemas.\n - Schema management operations.\n - Enabling/disabling warning generation.\n - Retrieval of connection information.\n\n Args:\n settings (dict): Connection data used to connect to the database.\n \"\"\"\n def __init__(self, settings):\n self.use_pure = settings.get(\"use-pure\", Protobuf.use_pure)\n self._settings = settings\n self._connection = Connection(self._settings)\n self._connection.connect()\n\n @property\n def use_pure(self):\n \"\"\"bool: `True` to use pure Python Protobuf implementation.\n \"\"\"\n return Protobuf.use_pure\n\n @use_pure.setter\n def use_pure(self, value):\n if not isinstance(value, bool):\n raise ProgrammingError(\"'use_pure' option should be True or False\")\n Protobuf.set_use_pure(value)\n\n def is_open(self):\n \"\"\"Returns `True` if the session is open.\n\n Returns:\n bool: Returns `True` if the session is open.\n \"\"\"\n return self._connection.stream.is_open()\n\n def sql(self, sql):\n \"\"\"Creates a :class:`mysqlx.SqlStatement` object to allow running the\n SQL statement on the target MySQL Server.\n \"\"\"\n return SqlStatement(self._connection, sql)\n\n def get_connection(self):\n \"\"\"Returns the underlying connection.\n\n Returns:\n mysqlx.connection.Connection: The connection object.\n \"\"\"\n return self._connection\n\n def get_schemas(self):\n \"\"\"Returns the list of schemas in the current session.\n\n Returns:\n `list`: The list of schemas in the current session.\n\n .. 
versionadded:: 8.0.12\n \"\"\"\n result = self.sql(\"SHOW DATABASES\").execute()\n return [row[0] for row in result.fetch_all()]\n\n def get_schema(self, name):\n \"\"\"Retrieves a Schema object from the current session by it's name.\n\n Args:\n name (string): The name of the Schema object to be retrieved.\n\n Returns:\n mysqlx.Schema: The Schema object with the given name.\n \"\"\"\n return Schema(self, name)\n\n def get_default_schema(self):\n \"\"\"Retrieves a Schema object from the current session by the schema\n name configured in the connection settings.\n\n Returns:\n mysqlx.Schema: The Schema object with the given name at connect\n time.\n\n Raises:\n :class:`mysqlx.ProgrammingError`: If default schema not provided.\n \"\"\"\n if self._connection.settings.get(\"schema\"):\n return Schema(self, self._connection.settings[\"schema\"])\n raise ProgrammingError(\"Default schema not provided\")\n\n def drop_schema(self, name):\n \"\"\"Drops the schema with the specified name.\n\n Args:\n name (string): The name of the Schema object to be retrieved.\n \"\"\"\n self._connection.execute_nonquery(\n \"sql\", _DROP_DATABASE_QUERY.format(name), True)\n\n def create_schema(self, name):\n \"\"\"Creates a schema on the database and returns the corresponding\n object.\n\n Args:\n name (string): A string value indicating the schema name.\n \"\"\"\n self._connection.execute_nonquery(\n \"sql\", _CREATE_DATABASE_QUERY.format(name), True)\n return Schema(self, name)\n\n def start_transaction(self):\n \"\"\"Starts a transaction context on the server.\"\"\"\n self._connection.execute_nonquery(\"sql\", \"START TRANSACTION\", True)\n\n def commit(self):\n \"\"\"Commits all the operations executed after a call to\n startTransaction().\n \"\"\"\n self._connection.execute_nonquery(\"sql\", \"COMMIT\", True)\n\n def rollback(self):\n \"\"\"Discards all the operations executed after a call to\n startTransaction().\n \"\"\"\n self._connection.execute_nonquery(\"sql\", \"ROLLBACK\", True)\n\n def set_savepoint(self, name=None):\n \"\"\"Creates a transaction savepoint.\n\n If a name is not provided, one will be generated using the uuid.uuid1()\n function.\n\n Args:\n name (Optional[string]): The savepoint name.\n\n Returns:\n string: The savepoint name.\n \"\"\"\n if name is None:\n name = \"{0}\".format(uuid.uuid1())\n elif not isinstance(name, STRING_TYPES) or len(name.strip()) == 0:\n raise ProgrammingError(\"Invalid SAVEPOINT name\")\n self._connection.execute_nonquery(\"sql\", \"SAVEPOINT {0}\"\n \"\".format(quote_identifier(name)),\n True)\n return name\n\n def rollback_to(self, name):\n \"\"\"Rollback to a transaction savepoint with the given name.\n\n Args:\n name (string): The savepoint name.\n \"\"\"\n if not isinstance(name, STRING_TYPES) or len(name.strip()) == 0:\n raise ProgrammingError(\"Invalid SAVEPOINT name\")\n self._connection.execute_nonquery(\"sql\", \"ROLLBACK TO SAVEPOINT {0}\"\n \"\".format(quote_identifier(name)),\n True)\n\n def release_savepoint(self, name):\n \"\"\"Release a transaction savepoint with the given name.\n\n Args:\n name (string): The savepoint name.\n \"\"\"\n if not isinstance(name, STRING_TYPES) or len(name.strip()) == 0:\n raise ProgrammingError(\"Invalid SAVEPOINT name\")\n self._connection.execute_nonquery(\"sql\", \"RELEASE SAVEPOINT {0}\"\n \"\".format(quote_identifier(name)),\n True)\n\n def close(self):\n \"\"\"Closes the session.\"\"\"\n 
self._connection.close_session()\n","repo_name":"panoslin/DouYinSpider","sub_path":"venv/Lib/site-packages/mysqlx/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":27457,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"61"} +{"seq_id":"34406772144","text":"# BERT is a deep learning model for natural language processing (NLP), usually implemented with a deep learning framework (TensorFlow, PyTorch, etc.).\n\nfrom transformers import BertTokenizer, BertForSequenceClassification\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\n\n# Load the data\ndf = pd.read_csv('data.csv')\n\n# Split into training and validation sets\ntrain_texts, val_texts, train_labels, val_labels = train_test_split(df['text'].tolist(), df['label'].tolist(), random_state=42)\n\n# Load the pre-trained BERT model and tokenizer\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmodel = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\n\n# Define the dataset and data loaders\nclass TextDataset(Dataset):\n def __init__(self, texts, labels):\n self.texts = texts\n self.labels = labels\n self.tokenizer = tokenizer\n\n def __getitem__(self, idx):\n text = self.texts[idx]\n label = self.labels[idx]\n inputs = self.tokenizer.encode_plus(\n text,\n None,\n add_special_tokens=True,\n max_length=256,\n padding='max_length',\n truncation=True\n )\n input_ids = inputs['input_ids']\n attention_mask = inputs['attention_mask']\n\n return {\n 'input_ids': torch.tensor(input_ids, dtype=torch.long),\n 'attention_mask': torch.tensor(attention_mask, dtype=torch.long),\n 'labels': torch.tensor(label, dtype=torch.long)\n }\n\n def __len__(self):\n return len(self.texts)\n\ntrain_dataset = TextDataset(train_texts, train_labels)\nval_dataset = TextDataset(val_texts, val_labels)\n\ntrain_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)\nval_loader = DataLoader(val_dataset, batch_size=8)\n\n# Define the optimizer and loss function\noptimizer = torch.optim.Adam(model.parameters(), lr=3e-5)\nloss_fn = torch.nn.CrossEntropyLoss()\n\n# Train the model\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nmodel.to(device)\n\nepochs = 5\nfor epoch in range(epochs):\n train_loss = 0\n train_acc = 0\n model.train()\n for batch in train_loader:\n input_ids = batch['input_ids'].to(device)\n attention_mask = batch['attention_mask'].to(device)\n labels = batch['labels'].to(device)\n\n optimizer.zero_grad()\n\n outputs = model(input_ids, attention_mask=attention_mask, labels=labels)\n\n loss = loss_fn(outputs[1], labels)\n\n train_loss += loss.item()\n\n loss.backward()\n\n optimizer.step()\n\n train_acc += (outputs[1].argmax(1) == labels).sum().item()\n\n train_loss /= len(train_loader)\n train_acc /= len(train_dataset)\n\n val_loss = 0\n val_acc = 0\n model.eval()\n with torch.no_grad():\n for batch in val_loader:\n input_ids = batch['input_ids'].to(device)\n attention_mask = batch['attention_mask'].to(device)\n labels = batch['labels'].to(device)\n\n outputs = model(input_ids, attention_mask=attention_mask, labels=labels)\n\n loss = loss_fn(outputs[1], labels)\n\n val_loss += loss.item()\n\n val_acc += (outputs[1].argmax(1) == labels).sum().item()\n\n val_loss /= len(val_loader)\n val_acc /= len(val_dataset)\n\n print(f'Epoch {epoch + 1}: train_loss={train_loss:.4f}, train_acc={train_acc:.4f}, val_loss={val_loss:.4f}, val_acc={val_acc:.4f}')\n
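\n# Added note (not part of the original script): in practice you would usually persist\n# the fine-tuned weights before a separate evaluation run, e.g. with the standard\n# Hugging Face API (paths are illustrative):\n# model.save_pretrained('bert-finetuned')\n# tokenizer.save_pretrained('bert-finetuned')\n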
\n# Evaluate the model on the test set\ntest_texts = ['This is a positive text.', 'This is a negative text.']\ntest_labels = [1, 0]\n\ntest_dataset = TextDataset(test_texts, test_labels)\ntest_loader = DataLoader(test_dataset, batch_size=2)\n\nmodel.eval()\nwith torch.no_grad():\n test_preds = []\n for batch in test_loader:\n input_ids = batch['input_ids'].to(device)\n attention_mask = batch['attention_mask'].to(device)\n\n outputs = model(input_ids, attention_mask=attention_mask)\n\n test_preds.extend(outputs[0].argmax(1).tolist())\n\nprint(f'Test accuracy: {accuracy_score(test_labels, test_preds):.4f}')\nprint(f'Test precision, recall, f1-score: {precision_recall_fscore_support(test_labels, test_preds, average=\"binary\")}')\n\n\n# This code example shows how to use BERT for text classification.\n# The Transformers library makes it straightforward to work with BERT.\n# You may still need to adapt and tune the code for your specific application.\n\n","repo_name":"zachspace/AI-reference","sub_path":"BERD mBERD /BERD.py","file_name":"BERD.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29142724594","text":"# Simple script that scrapes ctftime for the most recent CTF data, then formats it into the ctf_data.json file.\n\nfrom bs4 import BeautifulSoup as bs\nfrom datetime import datetime\nimport requests\nimport json\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\nctfs = requests.get('https://ctftime.org/team/3135', headers=headers)\nsoup = bs(ctfs.text,'html.parser')\n\niter = soup.table\n\ndata = {\"ctfs\": []}\n\n# Gets data from the CTFTime API. Retries because the API intermittently returns a Forbidden response.\ndef fetchDataRepeat(link):\n ctfs = requests.get(f'https://ctftime.org/api/v1/{ link }', headers=headers)\n if \"Forbidden\" in ctfs.text:\n return fetchDataRepeat(link)\n return json.loads(ctfs.text)\n
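\n# Added note (not in the original script): the recursion above has no depth limit, so a\n# persistently failing API would eventually overflow the stack; a bounded variant could be:\n# def fetch_data(link, retries=5):\n#     for _ in range(retries):\n#         resp = requests.get(f'https://ctftime.org/api/v1/{link}', headers=headers)\n#         if 'Forbidden' not in resp.text:\n#             return json.loads(resp.text)\n#     raise RuntimeError('CTFTime API kept returning Forbidden')\n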
\n\nfor i in range(5):\n ctf = iter.contents[2 * i + 3]\n content = fetchDataRepeat(f'events/{ ctf.contents[2].contents[0][\"href\"][7:] }/')\n dt = {\n \"date\": content[\"finish\"],\n \"logo\": content[\"logo\"],\n \"desc\": content[\"url\"]\n }\n data[\"ctfs\"].append({\n \"ctf\": ctf.contents[2].getText(),\n \"placement\": ctf.contents[1].getText(),\n \"data\": dt\n })\n\n # limit 5 ctfs; if there are fewer than 5, break.\n if(2 * i + 5 > len(iter)): break\n\nwith open(\"ctf_data.json\", \"w\") as outfile:\n outfile.write(json.dumps(data, indent=4))","repo_name":"Teddygat0r/batmans-kitchen-homepage","sub_path":"public/ctf_data/fetchData.py","file_name":"fetchData.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31590680954","text":"#!/usr/bin/env python3\r\n#Python/Strings/Strings.py\r\n#James Maki\r\n#May 04, 2021\r\n\r\n#Strings are sequences of characters that are assigned using either 'single quotes' or \"double quotes\"\r\n\r\n#String Assigned to Variable\r\nx = \"String\"\r\nprint(x)\r\n\r\n#Multiline Strings | Single Quotes & Double Quotes\r\nx = \"\"\"\r\nHappy\r\nBirthday\r\n\"\"\"\r\ny = '''\r\nCharlie\r\nBrown\r\n'''\r\nprint(x + y)\r\n\r\n#Looping Through a String\r\nfor x in \"Python\":\r\n print(x) # Prints each character of the string, one line at a time\r\n\r\n#String Length\r\nx = \"Python\" # Length of 6\r\nprint(len(x)) # The len() function returns the length of a string\r\n\r\n#String Check - To check if a certain phrase or character is within a string, use the in keyword\r\nchk = \"I'm using Python 3.9.2 to Code\"\r\nprint(\"Python\" in chk) # Returns a boolean value of True\r\n#If Statement String Check\r\nif \"Python\" in chk:\r\n print(\"The word 'Python' is in the checked phrase!\")\r\n#If Not Statement String Check\r\nif \"2.7.3\" not in chk:\r\n print(\"Python 2.7.3 is not being used for this program!\")","repo_name":"jamaki-ibmi/Python","sub_path":"Strings/Strings.py","file_name":"Strings.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2723145015","text":"member = ['a','b','c','d', 'e']\nnumber = [88, 90, 85, 90, 88]\nindex = 1\nfor i in number:\n member.insert(index,i)\n index += 2\nprint(member)\n\nfor j in member:\n if isinstance(j,str):\n print(j, end=' ')\n else:\n print(j)\n\n\n# Method 1:\ncount = 0\nlength = len(member)\nwhile count < length:\n print(member[count], member[count+1])\n count += 2\n\n# Method 2:\n\nfor each in range(len(member)):\n if each%2 == 0:\n print(member[each], member[each+1])\n","repo_name":"sunzhongyuan/learnPython","sub_path":"010.py","file_name":"010.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10180516349","text":"import time\ndef func1():\n N = int(input(\"Enter N\"))\n\n # setA = list(map(str, input(\"Enter the words\").split()))\n\n count = 0\n while N > 0:\n t, c = map(int, input(\"Enter Number\").split())\n if c - t >= 2:\n count = count + 1\n N = N - 1\n print(count)\n\n\ndef perfectNumberUsingComprehension(number):\n start_time = time.time()\n divisorList = []\n list(divisorList.append(idx) for idx in range(1, number) if number % idx == 0)\n print(number, 'is a perfect number') if sum(divisorList) == number else print(number, 'is not a perfect number')\n end_time = time.time()\n print('Total time taken : ', end_time-start_time)\n\n\ndef perfectNumber(number):\n start_time = time.time()\n number = int(number)\n divisorList = []\n sum_ele = 0\n for idx in range(1, number):\n if number % idx == 0:\n divisorList.append(idx)\n\n for ele in divisorList:\n sum_ele = sum_ele + ele\n #print(sum_ele)\n\n if sum_ele == number:\n print(number, 'is a perfect number')\n else:\n print(number, 'is not a perfect number')\n\n end_time = time.time()\n print('Total time taken : ', end_time - start_time)\n
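\n# Added worked example (not in the original file): 28 is a perfect number because its\n# proper divisors 1 + 2 + 4 + 7 + 14 sum to 28, so perfectNumber(28) prints\n# '28 is a perfect number', while perfectNumber(27) prints '27 is not a perfect number'.\n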
\n\nfor _ in range(int(input('Enter the number of values you want to check\\t'))):\n perfectNumber(int(input(\"Enter the Number\\t\")))\n","repo_name":"amits0003/Selenium_Study_Files","sub_path":"problem_solving_section/perfectNumber.py","file_name":"perfectNumber.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70661332674","text":"import logging\nfrom datetime import datetime, timedelta\nimport os\nimport sys\nimport argparse\n\nfrom carto.auth import APIKeyAuthClient\nfrom carto.sql import SQLClient, CopySQLClient\n\nimport numpy as np\nfrom siphon.radarserver import RadarServer\nfrom siphon.cdmr import Dataset\n\n# Example of CopySQLClient usage to stream data from NEXRAD Level 2\n# data to CARTO.\n#\n# For more information see the following URLs:\n# - https://carto.com/blog/mapping-nexrad-radar-data/\n# - https://gist.github.com/stuartlynn/a7868cf8ca02931a6408\n# - http://nbviewer.jupyter.org/gist/dopplershift/356f2e14832e9b676207\n\n# Set up a logger\nlogger = logging.getLogger('nexrad_copy')\nlogger.setLevel(logging.INFO)\n\n# Parse input arguments\nparser = argparse.ArgumentParser(description=(\n 'Example of CopySQLClient usage to stream data from NEXRAD Level 2'\n ' data to CARTO.'\n))\n\nparser.add_argument('--base_url', type=str, dest='CARTO_BASE_URL',\n default=os.environ.get('CARTO_API_URL', ''),\n help=('Set the base URL. For example:'\n ' https://username.carto.com/'\n ' (defaults to env variable CARTO_API_URL)'))\n\nparser.add_argument('--api_key', dest='CARTO_API_KEY',\n default=os.environ.get('CARTO_API_KEY', ''),\n help=('Api key of the account'\n ' (defaults to env variable CARTO_API_KEY)'))\n\nargs = parser.parse_args()\n\nif not args.CARTO_BASE_URL or not args.CARTO_API_KEY:\n sys.exit(parser.print_usage())\n\nauth_client = APIKeyAuthClient(args.CARTO_BASE_URL, args.CARTO_API_KEY)\nsql_client = SQLClient(auth_client)\ncopy_client = CopySQLClient(auth_client)\n\n# Create a table suitable to receive the data\nlogger.info('Creating table nexrad_copy_example...')\nsql_client.send(\"\"\"CREATE TABLE IF NOT EXISTS nexrad_copy_example (\n the_geom geometry(Geometry,4326),\n reflectivity numeric\n)\"\"\")\nsql_client.send(\n \"SELECT CDB_CartodbfyTable(current_schema, 'nexrad_copy_example')\")\nlogger.info('Done')\n\nlogger.info('Trying to connect to the THREDDS radar query service')\nrs = RadarServer(\n 'http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/')\n\nlogger.info('Querying data from the station')\nquery = rs.query()\nquery.stations('KLVX').time(datetime.utcnow())\nassert rs.validate_query(query)\n\ncatalog = rs.get_catalog(query)\nlogger.info('Available datasets: %s' % catalog.datasets)\nlogger.info('Using the first one')\nds = list(catalog.datasets.values())[0]\ndata = Dataset(ds.access_urls['CdmRemote'])\nlogger.info('Got the following data: %s' % data.Title)\nlogger.info(data.Summary)\n\n\n# A helper method to clean up the data\ndef raw_to_masked_float(var, data):\n # Values come back signed. If the _Unsigned attribute is set, we\n # need to convert from the range [-128, 127] to [0, 255].\n if var._Unsigned:\n data = data & 0xFF\n\n # Mask missing points\n data = np.ma.array(data, mask=(data == 0))\n\n # Convert to float using the scale and offset\n return data * var.scale_factor + var.add_offset\n
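\n# Added illustrative note (the numbers are hypothetical, not taken from the dataset):\n# with scale_factor = 0.5 and add_offset = -33.0, a raw byte of 66 decodes to\n# 66 * 0.5 - 33.0 = 0.0 dBZ; raw zeros are masked out as missing before scaling.\n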
\n# We pull out the variables we need for azimuth and range, as well as\n# the data itself.\nlogger.info('Pulling out some of the variables...')\nsweep = 0\nref_var = data.variables['Reflectivity_HI']\nref_data = ref_var[sweep]\nrng = data.variables['distanceR_HI'][:]\naz = data.variables['azimuthR_HI'][sweep]\nlogger.info('ref_data.shape: {}'.format(ref_data.shape))\n\n\n# Calculate a (lat, lon) pair offset by a (dist, bearing) pair\n# https://stackoverflow.com/questions/7222382/get-lat-long-given-current-point-distance-and-bearing\ndef offset_by_meters(lat, lon, dist, bearing):\n R = 6378.1\n bearing_rads = np.deg2rad(bearing)[:, None]\n dist_km = dist / 1000.0\n\n lat1 = np.radians(lat)\n lon1 = np.radians(lon)\n\n lat2 = np.arcsin(np.sin(lat1) * np.cos(dist_km/R) +\n np.cos(lat1) * np.sin(dist_km/R) * np.cos(bearing_rads))\n lon2 = lon1 + np.arctan2(np.sin(bearing_rads) * np.sin(dist_km/R)\n * np.cos(lat1),\n np.cos(dist_km/R) - np.sin(lat1) * np.sin(lat2))\n\n return np.degrees(lat2), np.degrees(lon2)\n\n# Then convert the raw data to floating point values, and the relative\n# polar coordinates to lat/lon\nlogger.info('Converting the data...')\nref = raw_to_masked_float(ref_var, ref_data)\nlat, lon = offset_by_meters(\n data.StationLatitude, data.StationLongitude, rng, az)\nlogger.info('Done')\n\n\n# This generator builds the rows without needing to buffer all of them\n# in memory. Alternatively a StringIO (python2) or a BytesIO\n# (python3) buffer could be used.\ndef rows():\n for ix, iy in np.ndindex(ref.shape):\n value = ref[ix, iy]\n if value is np.ma.masked:\n continue\n\n row = u'SRID=4326;POINT({lon} {lat}),{reflectivity}\\n'.format(\n lon=lon[ix, iy],\n lat=lat[ix, iy],\n reflectivity=value\n )\n\n yield row.encode()\n\n# And finally stream the data to CARTO\nlogger.info('Executing COPY command...')\nresult = copy_client.copyfrom(\n ('COPY nexrad_copy_example(the_geom, reflectivity)'\n ' FROM stdin WITH (FORMAT csv)'),\n rows())\nlogger.info(result)\n","repo_name":"CartoDB/carto-python","sub_path":"examples/nexrad/nexrad_copy.py","file_name":"nexrad_copy.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"61"} +{"seq_id":"72350839875","text":"import numpy as np\nimport pygame\n\nfrom tank import Tank\n\nclass Controller:\n \"\"\"\n A class representing a game controller. Supports analog joystick input.\n \"\"\"\n def __init__(self, controller_id: int):\n \"\"\"\n Creates a new controller object with the given ID.\n\n Args:\n - controller_id: An integer representing the ID of the controller (e.g. 
0 for the first connected controller)\n \"\"\"\n pygame.init()\n pygame.joystick.init()\n\n self.controller = pygame.joystick.Joystick(controller_id)\n self.controller.init()\n\n def get_inputs(self):\n \"\"\"\n Returns a dictionary containing the current input from the controller.\n\n Returns:\n - A dictionary with the following keys:\n - move_direction: A numpy array with two floats in range [-1,1] representing the movement direction (left joystick)\n - turret_direction: A numpy array with two floats in range [-1,1] representing the turret direction (right joystick)\n - fire: A boolean indicating whether the fire button is currently pressed\n \"\"\"\n pygame.event.get()\n\n # Get joystick input\n move_direction = np.array([self.controller.get_axis(0), self.controller.get_axis(1)])\n turret_direction = np.array([self.controller.get_axis(2), self.controller.get_axis(3)])\n\n # Apply deadzone to joystick input\n move_direction = self.apply_deadzone(move_direction)\n turret_direction = self.apply_deadzone(turret_direction)\n\n # # Normalize joystick input\n # move_direction = move_direction / np.linalg.norm(move_direction)\n # turret_direction = turret_direction / np.linalg.norm(turret_direction)\n\n # Get fire button input (right analog trigger or right shoulder button)\n fire: bool = self.controller.get_axis(5) > -0.9 or \\\n\t\t\t\t\t\tself.controller.get_button(5)\n\n # Return input dictionary\n return {\"move_direction\": move_direction, \"turret_direction\": turret_direction, \"fire\": fire}\n\n def apply_deadzone(self, value, deadzone=0.1):\n \"\"\"\n Applies a deadzone to the given input value.\n\n Args:\n - value: A numpy array representing the input value\n - deadzone: A float representing the deadzone threshold (default: 0.1)\n\n Returns:\n - The input value unchanged if its magnitude is at or above the deadzone threshold, otherwise None.\n \"\"\"\n if np.linalg.norm(value) < deadzone:\n return None\n else:\n return value\n
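\n# Hypothetical usage sketch (added for illustration; the surrounding game loop and the\n# aim_and_shoot helper are assumptions, not part of this file):\n# controller = Controller(0)\n# inputs = controller.get_inputs()\n# if inputs['fire'] and inputs['turret_direction'] is not None:\n#     aim_and_shoot(inputs['turret_direction'])\n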
","repo_name":"simulatedScience/tanks_pygame","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6059468758","text":"#-*- coding: utf-8 -*-\nfrom datetime import datetime\n\nfrom django.test import TestCase\nfrom django.test import Client\n\nfrom bbb2012.polls.models import Poll, Choice\n\nclass PollTest(TestCase):\n \n def setUp(self):\n question=\"Quem será eliminado do BBB 2012\"\n now = datetime.now()\n self.poll = Poll.objects.create(question=question, pub_date=now)\n self.poll.choice_set.create(candidate=\"Fred\",votes=0,percentual=0.0,candidate_photo=\"fred.png\")\n self.poll.choice_set.create(candidate=\"Marcel\",votes=0,percentual=0.0,candidate_photo=\"marcel.png\")\n\n def test_dos_modelos(self):\n \n self.assertEqual(self.poll.choice_set.all().count(), 2)\n self.assertNotEqual(self.poll.choice_set.all().count(), 0)\n\n def test_same_candidate(self):\n self.assertNotEqual(self.poll.choice_set.all()[0].candidate, self.poll.choice_set.all()[1].candidate)\n\n #def test_de_voto(self):\n #c = Client()\n ## Casting the first vote\n #response = c.post('/polls/1/vote/', {'vote_id': '1',})\n ## Checking that there are no votes\n #self.assertEqual(response.status_code, 302)\n ## Checking that there is one vote\n #choice = Choice.objects.get(pk=1)\n #self.assertEqual(choice.votes, 1)","repo_name":"freddurao/bbb2012","sub_path":"polls/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14393696567","text":"import datetime\nimport csv\n\nbestand = 'inloggers.csv'\n\nvandaag = datetime.datetime.today()\ndate = vandaag.strftime(\"%a %d %b %Y %H:%M:%S\")\n\n# Use a loop here:\nnaam = input(\"Wat is je achternaam? \")\nvoorl = input(\"Wat zijn je voorletters? \")\ngbdatum = input(\"Wat is je geboortedatum? \")\nemail = input(\"Wat is je e-mail adres? \")\n# When the next person will log in is unknown, so write straight to the file\n\nwith open(bestand, 'w', newline='') as myCSVFile:\n writer = csv.writer(myCSVFile, delimiter=';')\n writer.writerow((date, naam, voorl, gbdatum, email))\n","repo_name":"EduardGarritsen/Python_Opdrachten","sub_path":"Les_9/Exercise 9_2.py","file_name":"Exercise 9_2.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9625507602","text":"\n\nT = int(input())\nfor tc in range(1, T + 1):\n N = int(input())\n cor = [0] * (201)\n for _ in range(N):\n a,b=map(int,input().split())\n a=(a+1)//2\n b=(b+1)//2\n if a>b:\n a,b=b,a\n\n for i in range(a,b+1):\n cor[i]+=1\n\n print(\"#{} {}\".format(tc, max(cor)))\n","repo_name":"PIN-devel/OJ","sub_path":"SWEA/4408_자기 방으로 돌아가기.py","file_name":"4408_자기 방으로 돌아가기.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19928907005","text":"#!/usr/bin/env python2.7\r\n# -*- coding: utf-8 -*-\r\n# Test\r\n#############################\r\n##########30/06/16###########\r\n#############################\r\n\r\n#hypothesis: the ball moves one px per iteration\r\n\r\nimport pygame\r\nfrom pygame.locals import *\r\nfrom math import pi, sqrt\r\nfrom random import random, randint\r\nfrom os import popen\r\nfrom pygame.draw import line\r\n\r\ndef signAlea():\r\n\tx = 0\r\n\twhile x == 0 :\r\n\t\tx = randint(-1,1)\r\n\treturn x\r\n\r\ndef sign(x):\r\n\tif x < 0 :\r\n\t\treturn -1\r\n\telse:\r\n\t\treturn 1\r\n\r\ndef getSystemResolutionOnLinux():\r\n\tscreen = popen(\"xrandr -q -d :0\").readlines()[0]\r\n\ttmp = screen.split()\r\n\treturn int( tmp[7] ) * 0.2, int( tmp[9][:-1] ) * 0.2\r\n\r\ndef appartenancePoint(x1, y1, x2, y2, w2, h2):\r\n\tif x2 <= x1 and y2 <= y1 and x1 < x2 + w2 and y1 < y2 +h2 :\r\n\t\treturn 1\r\n\telse:\r\n\t\treturn 0\r\n\r\ndef somme(l):\r\n\ts = 0\r\n\tfor i in l:\r\n\t\ts += i\r\n\treturn s\r\n\r\n#order given relative to object 1\r\ndef delimiterCollision(x1, y1, w1, h1, x2, y2, w2, h2):\r\n\tt = [ appartenancePoint(x1, y1, x2, y2, w2, h2) ]\r\n\tt += [ appartenancePoint(x1 + w1 - 1, y1, x2, y2, w2, h2) ]\r\n\tt += [ appartenancePoint(x1, y1 + h1 -1, x2, y2, w2, h2) ]\r\n\tt += [ appartenancePoint(x1 + w1 - 1, y1 + h1 - 1, x2, y2, w2, h2) ]\r\n\ts = somme(t)\r\n\r\n\tif s == 0 :\r\n\t\treturn NULL\r\n\telif s == 2 :\r\n\t\tif t[NO] and t[SO] :\r\n\t\t\treturn RIGHT\r\n\t\telif t[NE] and t[SE] :\r\n\t\t\treturn LEFT\r\n\t\telif t[NO] and t[NE] :\r\n\t\t\treturn DOWN\r\n\t\telif t[SO] and t[SE] :\r\n\t\t\treturn UP\r\n\telif s == 1 :\r\n\t\tif t[NO] :\r\n\t\t\tif appartenancePoint(x1, y1 + 1, x2, y2, w2, h2) :\r\n\t\t\t\treturn RIGHT\r\n\t\t\telse:\r\n\t\t\t\treturn DOWN\r\n\t\telif t[NE] :\r\n\t\t\tif appartenancePoint(x1 + w1 - 1, y1 + 1, x2, y2, w2, h2) :\r\n\t\t\t\treturn LEFT\r\n\t\t\telse:\r\n\t\t\t\treturn DOWN\r\n\t\telif t[SO] :\r\n\t\t\tif appartenancePoint(x1, y1 + h1 - 2, x2, y2, w2, h2) 
:\n\t\t\t\treturn RIGHT\n\t\t\telse:\n\t\t\t\treturn UP\n\t\telif t[SE] :\n\t\t\tif appartenancePoint(x1 + w1 - 1, y1 + h1 - 2, x2, y2, w2, h2) :\n\t\t\t\treturn LEFT\n\t\t\telse:\n\t\t\t\treturn UP\n\ndef switch(x):\n\tif x:\n\t\treturn False\n\telse:\n\t\treturn True\n\ndef moveBallTrig(x, y, dt, ball_angle, V):\n\treturn x + round( dt * V * cos(ball_angle) ), y - round( dt * V * sin(ball_angle) )\n\ndef moveBallVect(x, y, dt, v):\n\treturn x + round( dt * v[X] ), y - round( dt * v[Y] )\n\n#x1 : past, x2 : pres\ndef resoSysLinForAI(x1, y1, x2, y2, i):\n\tif x2 == x1 :\n\t\treturn -1, 0, 0\n\n\ty1 += BALL_H // 2\n\ty2 += BALL_H // 2\n\tx = GAMER_POS_X[i]\n\n\tif i == DEUX :\n\t\tx1 += BALL_W\n\t\tx2 += BALL_W\n\telse:\n\t\tx += GAMER_W\n\n\ta = ( y2 - y1 ) / ( x2 - x1 )\n\tb = y1 - a * x1\n\n\treturn int( a * x + b ), b ,a\n\ndef initBallAngle(ball_angle, a):\n\tball_angle = pi / 2\n\twhile pi / a <= ball_angle and ball_angle <= pi - pi / a or pi + pi / a <= ball_angle and ball_angle <= 2 * pi - pi / a :\n\t\tball_angle = 2 * pi * random()\n\treturn ball_angle\n\ndef initBallVit(V):\n\tvx = 0.0\n\tvy = 0.0\n\twhile True :\n\t\tvx = V * random() * signAlea()\n\t\tvy = sqrt( V ** 2 - vx ** 2 ) * signAlea()\n\t\tif abs(vy) / V < 0.8 :#cos( pi / 5 ) ~ 0.8\n\t\t\tbreak\n\treturn [ vx , vy ]\n\ndef changeBallVit(yb, yg, v, V):\n\ti = randint(0,1)\n\ti_ = ( i + 1 ) % 2\n\td = V - abs(v[i])\n\tc = 0.0\n\tyb += BALL_H // 2\n\tyg += GAMER_H // 2\n\n\tif yg - GAMER_H // 2 <= yb and yb <= yg + GAMER_H // 2 :\n\t\tc = abs( ( yb - yg ) / ( GAMER_H // 2 ) ) * 0.75\n\telse:\n\t\tc = 0.75\n\n\tv[i] = v[i] + d * c * sign(v[i])\n\tv[i_] = sqrt( V ** 2 - v[i] ** 2 ) * sign(v[i_])\n\n\treturn v\n\ndef printScore(font, score, cadre_score):\n\tcadre_score.fill(WHITE)\n\ttmp = pygame.Surface( ( int( CADRE_SCORE_W * 0.8 ) , int( CADRE_SCORE_H - CADRE_SCORE_W * 0.2 ) ) )\n\ttmp.fill(BLACK)\n\tcadre_score.blit(tmp, ( ( cadre_score.get_rect().w - tmp.get_rect().w ) // 2, ( cadre_score.get_rect().h - tmp.get_rect().h ) // 2 ))\n\ttext_score = font.render( str(score[0]) + \" - \" + str(score[1]), 1, WHITE)\n\tcadre_score.blit(text_score, ( ( cadre_score.get_rect().w - text_score.get_rect().w ) // 2, ( cadre_score.get_rect().h - text_score.get_rect().h ) // 2 ))\n\ndef main():\n\ttps = [ 0.0 , 0.0 ]\n\tscore = [ 0 , 0 ]\n\tgamer_pos_Y = [ [ 0 , 0 ] , [ 0 , 0 ] ]\n\tgamer_time = [ [ 0.0 , 0.0 ] , [ 0.0 , 0.0 ] ]\n\tgamer_move = [ STOP , STOP ]\n\tgamer_control = DEUX\n\tball_pos = [ [ 0 , 0 ] , [ 0 , 0 ] ]\n\tball_time = [ 0.0 , 0.0 ]\n\tball_vit_norm = 400.0 / 1000 * WIDTH_SC / 1229#premier terme en px/s\n\tball_vit_vect = [ ball_vit_norm * 0.85 , ball_vit_norm * sqrt(1-0.85**2) ]\n\t#ball_angle = pi / 2\n\torder = -1\n\ty = [ HEIGH_SC + 10 , -10 ]\n\n\tmarge = randint(1, GAMER_H // 2)#MARGE >= 1 sinon c'est le chaos !\n\ta = [ 0.0 , 0.0 ]\n\n\t#ball_angle = initBallAngle(ball_angle, 2.2)\n\t#ball_vit_vect = initBallVit(ball_vit_norm)\n\n\t# Initialisation de la fenêtre d'affichage\n\tpygame.init()\n\tscreen = pygame.display.set_mode((WIDTH_SC, HEIGH_SC), pygame.DOUBLEBUF | pygame.HWSURFACE)\n\tpygame.display.set_caption(\"Pong\")\n\n\tbg = pygame.Surface(screen.get_size())\n\n\tgamer = pygame.Surface( ( GAMER_W , GAMER_H ) )\n\tgamer.fill(WHITE)\n\tfor i in [ PRES , PAST ] :\n\t\tgamer_pos_Y[UN][i] = ( HEIGH_SC - GAMER_H ) // 2\n\t\tgamer_pos_Y[DEUX][i] = ( HEIGH_SC - GAMER_H ) // 2\n\n\tball = pygame.Surface( ( BALL_W , BALL_H ) )\n\tball.fill(BRIGHTBLUE)\n\tball_pos[X][PAST] = ( WIDTH_SC - BALL_W ) // 
2\n\tball_pos[Y][PAST] = ( HEIGH_SC - BALL_H ) // 2\n\n\tcroix = pygame.Surface( ( BALL_W , BALL_H ) )\n\ttmp = [ pygame.Surface( ( BALL_W // 4 , BALL_H ) ) , pygame.Surface( ( BALL_W , BALL_H // 4 ) ) ]\n\tcroix.fill(BLACK)\n\tfor i in range(2) :\n\t\ttmp[i].fill(RED)\n\t\tcroix.blit(tmp[i], ( ( ( i + 1 ) % 2 ) * ( BALL_W // 2 - BALL_W // 8 ) , ( i % 2 ) * ( BALL_H // 2 - BALL_H // 8 ) ))\n\tcroix.set_colorkey(BLACK)\n\n\t# Affichage d'un texte\n\tfont = pygame.font.SysFont(\"arial\", FONT_SIZE)\n\n\tcadre_score = pygame.Surface( ( CADRE_SCORE_W , CADRE_SCORE_H ) )\n\ttextpos_cadre_score = cadre_score.get_rect()\n\ttextpos_cadre_score.centerx = bg.get_rect().centerx\n\ttextpos_cadre_score.y = int( HEIGH_SC * 0.05 )\n\n\t# Boucle d'évènements\n\twhile 1:\n\t\ttps[PRES] = pygame.time.get_ticks()\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT or event.type == KEYDOWN and event.key == K_ESCAPE:\n\t\t\t\texit()\n\t\t\telif ( event.type == MOUSEBUTTONDOWN and event.button ) == MB_LEFT or ( ball_pos[X][PRES] + BALL_W <= 0 or WIDTH_SC <= ball_pos[X][PRES] ) :\n\t\t\t\tball_time[PAST] = tps[PRES]\n\t\t\t\tball_pos[X][PAST] = ( WIDTH_SC - BALL_W ) // 2\n\t\t\t\tball_pos[Y][PAST] = ( HEIGH_SC - BALL_H ) // 2\n\t\t\t\t#ball_angle = initBallAngle(ball_angle, 2.2)\n\t\t\t\tball_vit_vect = initBallVit(ball_vit_norm)\n\t\t\telif event.type == KEYDOWN and (event.unicode == 'w' or event.unicode == 'W') :\n\t\t\t\tgamer_move[UN] = DESC\n\t\t\t\tgamer_time[UN][PAST] = tps[PRES]\n\t\t\telif event.type == KEYDOWN and (event.unicode == 'x' or event.unicode == 'X') :\n\t\t\t\tgamer_move[UN] = STOP\n\t\t\t\tgamer_pos_Y[UN][PAST] = gamer_pos_Y[UN][PRES]\n\t\t\t#elif event.type == KEYUP and (event.unicode == 'x' or event.unicode == 'X') :\n\t\t\t\t#gamer_move[UN] = False\n\t\t\telif event.type == KEYDOWN and (event.unicode == 'c' or event.unicode == 'C') :\n\t\t\t\tgamer_move[UN] = MONT\n\t\t\t\tgamer_time[UN][PAST] = tps[PRES]\n\t\t\t#elif event.type == KEYUP and (event.unicode == 'c' or event.unicode == 'C') :\n\t\t\t\t#gamer_move[DEUX] = False\n\t\t\t#elif ( event.type == MOUSEBUTTONDOWN and event.button == MBSW_UP ) :#ajout d'un hertz\n\t\t\t\t#ball_vit_norm += 1.0 / 1000#vitesse en px/s\n\t\t\t#elif ( event.type == MOUSEBUTTONDOWN and event.button == MBSW_DOWN ) :\n\t\t\t\t#ball_vit_norm -= 1.0 / 1000#vitesse en px/s\n\n\t\t#ball_pos[X][PRES], ball_pos[Y][PRES] = moveBallTrig(ball_pos[X][PAST], ball_pos[Y][PAST], ball_time[PAST], tps[PRES], ball_angle)\n\t\tball_pos[X][PRES], ball_pos[Y][PRES] = moveBallVect(ball_pos[X][PAST], ball_pos[Y][PAST], tps[PRES] - ball_time[PAST], ball_vit_vect)\n\n\t\tif ball_pos[X][PRES] + BALL_W <= 0 or WIDTH_SC <= ball_pos[X][PRES] :\n\t\t\tball_time[PAST] = tps[PRES]\n\t\t\tball_pos[X][PAST] = ( WIDTH_SC - BALL_W ) // 2\n\t\t\tball_pos[Y][PAST] = ( HEIGH_SC - BALL_H ) // 2\n\t\t\t#ball_angle = initBallAngle(ball_angle, 2.2)\n\t\t\tball_vit_vect = initBallVit(ball_vit_norm)\n\t\t\tif ball_pos[X][PRES] < 0 :\n\t\t\t\tscore[1] += 1\n\t\t\telse:\n\t\t\t\tscore[0] += 1\n\t\t###\n\t\t#Artificial Intelligence\n\t\t###\n\t\tif 0 < ball_pos[X][PRES] - ball_pos[X][PAST] :\n\t\t\tif gamer_control != DEUX :\n\t\t\t\tmarge = randint(1, GAMER_H // 2)\n\t\t\tgamer_control = DEUX\n\t\telse:\n\t\t\tif gamer_control != UN :\n\t\t\t\tmarge = randint(1, GAMER_H // 2)\n\t\t\tgamer_control = UN\n\n\t\ty[PAST] = y[PRES]\n\t\ty[PRES], a[0], a[1] = resoSysLinForAI(ball_pos[X][PAST], ball_pos[Y][PAST], ball_pos[X][PRES], ball_pos[Y][PRES], gamer_control)\n\n\t\tif BALL_H // 2 
<= y[PRES] and y[PRES] + BALL_H // 2 < HEIGH_SC and abs( y[PRES] - y[PAST] ) <= 1 :\n\t\t\t#marge = 1\n\t\t\tif gamer_pos_Y[gamer_control][PRES] + GAMER_H // 2 - marge <= y[PRES] and y[PRES] < gamer_pos_Y[gamer_control][PRES] + GAMER_H // 2 + marge :#si la raquette est bien placée\n\t\t\t\tif gamer_move[gamer_control] != STOP :\n\t\t\t\t\tgamer_move[gamer_control] = STOP\n\t\t\t\t\tgamer_pos_Y[gamer_control][PAST] = gamer_pos_Y[gamer_control][PRES]\n\t\t\telse:\n\t\t\t\tif gamer_move[gamer_control] == STOP :\n\t\t\t\t\tif y[PRES] < gamer_pos_Y[gamer_control][PRES] :\n\t\t\t\t\t\tgamer_move[gamer_control] = MONT\n\t\t\t\t\t\tgamer_time[gamer_control][PAST] = tps[PRES]\n\t\t\t\t\telif gamer_pos_Y[gamer_control][PRES] + GAMER_H <= y[PRES] + BALL_H :\n\t\t\t\t\t\tgamer_move[gamer_control] = DESC\n\t\t\t\t\t\tgamer_time[gamer_control][PAST] = tps[PRES]\n\t\t###\n\t\tfor i in [ UN , DEUX ] :\n\t\t\tif gamer_move[i] != STOP :\n\t\t\t\tgamer_pos_Y[i][PRES] = gamer_pos_Y[i][PAST] - round( ( tps[PRES] - gamer_time[i][PAST] ) * GAMER_VIT * gamer_move[i] )\n\n\t\tif not( 0 <= ball_pos[Y][PRES] and ball_pos[Y][PRES] + BALL_H <= HEIGH_SC ) :\n\t\t\tif ball_pos[Y][PRES] < 0 :\n\t\t\t\tball_pos[Y][PRES] += 1\n\t\t\telse :\n\t\t\t\tball_pos[Y][PRES] -= 1\n\t\t\tball_vit_vect[Y] = -ball_vit_vect[Y]\n\t\t\t#ball_angle = - ball_angle\n\t\t\tball_time[PAST] = tps[PRES]\n\t\t\tball_pos[X][PAST] = ball_pos[X][PRES]\n\t\t\tball_pos[Y][PAST] = ball_pos[Y][PRES]\n\n\t\tfor i in [ UN , DEUX ] :\n\t\t\torder = delimiterCollision(ball_pos[X][PRES], ball_pos[Y][PRES], BALL_W, BALL_H, GAMER_POS_X[i], gamer_pos_Y[i][PRES], GAMER_W, GAMER_H)\n\t\t\tif order == LEFT :\n\t\t\t\tball_pos[X][PRES] -= 1\n\t\t\t\tball_vit_vect[X] = -ball_vit_vect[X]\n\t\t\t\tball_vit_vect = changeBallVit(ball_pos[Y][PRES], gamer_pos_Y[i][PRES], ball_vit_vect, ball_vit_norm)\n\t\t\t\t#ball_angle = pi - ball_angle\n\t\t\telif order == RIGHT :\n\t\t\t\tball_pos[X][PRES] += 1\n\t\t\t\tball_vit_vect[X] = -ball_vit_vect[X]\n\t\t\t\tball_vit_vect = changeBallVit(ball_pos[Y][PRES], gamer_pos_Y[i][PRES], ball_vit_vect, ball_vit_norm)\n\t\t\t\t#ball_angle = pi - ball_angle\n\t\t\telif order == DOWN :\n\t\t\t\tball_pos[Y][PRES] += 1\n\t\t\t\tball_vit_vect[Y] = -ball_vit_vect[Y]\n\t\t\t\t#ball_angle = - ball_angle\n\t\t\telif order == UP :\n\t\t\t\tball_pos[Y][PRES] -= 1\n\t\t\t\tball_vit_vect[Y] = -ball_vit_vect[Y]\n\t\t\t\t#ball_angle = - ball_angle\n\t\t\tif order != -1 :\n\t\t\t\tball_time[PAST] = tps[PRES]\n\t\t\t\tball_pos[X][PAST] = ball_pos[X][PRES]\n\t\t\t\tball_pos[Y][PAST] = ball_pos[Y][PRES]\n\n\t\t\tif gamer_move[i] != STOP:\n\t\t\t\tif gamer_pos_Y[i][PRES] < 0 :\n\t\t\t\t\tgamer_pos_Y[i][PRES] += 1\n\t\t\t\t\tgamer_pos_Y[i][PAST] = gamer_pos_Y[i][PRES]\n\t\t\t\t\tgamer_move[i] = STOP\n\t\t\t\telif HEIGH_SC < gamer_pos_Y[i][PRES] + GAMER_H :\n\t\t\t\t\tgamer_pos_Y[i][PRES] -= 1\n\t\t\t\t\tgamer_pos_Y[i][PAST] = gamer_pos_Y[i][PRES]\n\t\t\t\t\tgamer_move[i] = STOP\n\n\t\tif FREQ <= tps[PRES] - tps[PAST] :\n\t\t\tbg.fill(BLACK)\n\t\t\tbg.blit(gamer, (GAMER_POS_X[UN], gamer_pos_Y[UN][PRES]))\n\t\t\tbg.blit(gamer, (GAMER_POS_X[DEUX], gamer_pos_Y[DEUX][PRES]))\n\t\t\tprintScore(font, score, cadre_score)\n\t\t\tbg.blit(cadre_score, textpos_cadre_score)\n\t\t\tbg.blit(ball, (ball_pos[X][PRES], ball_pos[Y][PRES]))\n\t\t\tbg.blit(croix, (GAMER_POS_X[gamer_control] + ( gamer_control + 1 ) % 2 * GAMER_W - BALL_W // 2, y[PRES] - BALL_H // 2))\n\t\t\tline(bg, GREEN, (0, a[0]), (WIDTH_SC, a[0] + a[1] * WIDTH_SC ), 1)\n\t\t\tscreen.blit(bg, (0, 
0))\n\t\t\ttps[PAST] = tps[PRES]\n\t\t\tpygame.display.flip()\n\n#############################\n##########constants##########\n#############################\nWIDTH_SC, HEIGH_SC = getSystemResolutionOnLinux()\nWIDTH_SC = int( WIDTH_SC * 0.9 )\nHEIGH_SC = int( HEIGH_SC * 0.9 )\n\nFREQ = 4\n\nGAMER_W = 30 * WIDTH_SC // 1229\nGAMER_H = 260 * HEIGH_SC // 1229\n\nCADRE_SCORE_W = int( WIDTH_SC * 0.25 )\nCADRE_SCORE_H = int( HEIGH_SC * 0.2 )\n\nGAMER_POS_X = [ round( WIDTH_SC * 0.05 ) , round( WIDTH_SC * 0.95 ) - GAMER_W ]\n\nGAMER_VIT = 600.0 / 1000 * WIDTH_SC / 1229# first factor in px/s\n\nBALL_W = 30 * WIDTH_SC // 1229\nBALL_H = BALL_W\n#BALL_VIT = 400.0 / 1000 * WIDTH_SC / 1229# speed in px/s\n\nFONT_SIZE = int( CADRE_SCORE_W * 0.250 )\n\n#ADD_ANGLE = 20.0\n\n#Mouse Button, Scroll Wheel\nMB_LEFT, MB_MIDDLE, MB_RIGHT, MBSW_UP, MBSW_DOWN = 1, 2, 3, 4, 5\n\n#     R    G    B\nWHITE = (250, 250, 250)\nBLACK = ( 10, 10, 10)\nGREEN = ( 0, 155, 0)\nBRIGHTBLUE = ( 0, 50, 255)\nBROWN = (174, 94, 0)\nRED = (155, 0, 0)\n\n#############################\n########enumerations#########\n#############################\nPRES, PAST, DELAY = 0, 1, 2\nX, Y = 0, 1\nVERT, HOR = 0, 1\nNULL, UP, DOWN, RIGHT, LEFT = -1, 0, 1, 2, 3\nNO, NE, SO, SE = 0, 1, 2, 3# compass points\nMONT, STOP, DESC = 1, 0, -1\nUN, DEUX = 0, 1# player ONE, player TWO\n\n#############################\n#########dictionaries########\n#############################\nDIR = { NULL : \"NULL\" , UP : \"UP\" , DOWN : \"DOWN\" , RIGHT : \"RIGHT\" , LEFT : \"LEFT\" }\n\nif __name__ == '__main__' : main()\n","repo_name":"flaloup/repo2","sub_path":"pythonGit2.py","file_name":"pythonGit2.py","file_ext":"py","file_size_in_byte":13031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8894567291","text":"#v3 - Text fields, labels, images, a button and a function for the button.\r\n\r\nfrom PySide2.QtWidgets import QApplication, QWidget, QLabel, QLineEdit, QPushButton\r\nfrom PySide2.QtGui import QIcon, QPixmap\r\nimport mysql.connector\r\nimport sys\r\n\r\n# Communication with the database.\r\nbanco = mysql.connector.connect(\r\n    # The database access credentials must be provided here.\r\n    host='localhost',\r\n    user='root',\r\n    passwd=\"senha\",\r\n    database='confeitaria_suzana'\r\n)\r\n\r\n\r\n# Class that takes 'QWidget' as parameter (here, the parent class)\r\nclass Janela(QWidget):\r\n    def __init__(self): # Class \"constructor\", i.e., this method will call all the other methods\r\n        super().__init__() # of the class as well\r\n\r\n        # Basic window settings: title, size, color\r\n        self.setWindowTitle(\"Suzana Doces e Salgados\")\r\n        self.setGeometry(420, 180, 1080, 720) # left, top, width and height\r\n        self.setMinimumWidth(360)\r\n        self.setMinimumHeight(240)\r\n        self.setAutoFillBackground(True) # Enables changing the window background color\r\n        self.setStyleSheet('background-color: #ffc8c8;') # Sets the background color\r\n        icone = QIcon('cake.png')\r\n        self.setWindowIcon(icone)\r\n\r\n        # Added here because it could not be accessed otherwise.\r\n        '''These were initially in 'montar_formulario()', but that way they could not be accessed\r\n        from 'cadastrar()' because of a scoping problem. 
This note is kept here\r\n        so the adjustment is not forgotten later.\r\n        '''\r\n\r\n\r\n        # Call the other class methods\r\n        self.inserir_imagem()\r\n        self.montar_formulario()\r\n        self.botao()\r\n\r\n    # Window text information\r\n    def montar_formulario(self):\r\n        # Reference positions\r\n        dir_x = 450\r\n        dir_y = 300\r\n        esp = 80\r\n\r\n        # Labels\r\n        lbl_codigo = QLabel(' Código: ', self)\r\n        lbl_codigo.move(dir_x, dir_y)\r\n        lbl_descricao = QLabel('Descrição: ', self)\r\n        lbl_descricao.move(dir_x, dir_y + esp)\r\n        lbl_preco = QLabel('Preço(R$): ', self)\r\n        lbl_preco.move(dir_x, dir_y + 2*esp)\r\n\r\n        # Text boxes\r\n        global caixa_codigo, caixa_descricao, caixa_preco\r\n        caixa_codigo = QLineEdit(self)\r\n        caixa_codigo.move(dir_x + 70, dir_y-2)\r\n        caixa_codigo.setPlaceholderText('código do produto')\r\n        caixa_descricao = QLineEdit(self)\r\n        caixa_descricao.move(dir_x + 70, dir_y + esp - 2)\r\n        caixa_descricao.setPlaceholderText('descrição do produto')\r\n        caixa_preco = QLineEdit(self)\r\n        caixa_preco.move(dir_x + 70, dir_y + 2*esp - 2)\r\n        caixa_preco.setPlaceholderText('preço do produto')\r\n\r\n    # Inserts the window images\r\n    def inserir_imagem(self):\r\n        logo = QIcon('C:/Users/guigs/Documents/Suzana Soares/Logo/Conf_Suzana.png')\r\n        lbl_logo = QLabel('Logo', self)\r\n        pixmap1 = logo.pixmap(275, 275, QIcon.Active)\r\n        lbl_logo.setPixmap(pixmap1)\r\n        lbl_logo.move(430, 0)\r\n\r\n    # Defines the window buttons\r\n    def botao(self):\r\n        botao_cadastrar = QPushButton('Cadastrar', self)\r\n        botao_cadastrar.move(540, 560)\r\n        botao_cadastrar.clicked.connect(self.cadastrar)\r\n\r\n    def cadastrar(self):\r\n        codigo = caixa_codigo.text()\r\n        descricao = caixa_descricao.text()\r\n        preco = caixa_preco.text()\r\n\r\n        print(f'''O produto foi cadastrado com sucesso!\r\n        =-=-=-=- RESUMO -=-=-=-=\r\n        Código: {codigo}\r\n        Descrição: {descricao}\r\n        Preço(R$): {preco}''')\r\n\r\n        cursor = banco.cursor()\r\n        comando_SQL = 'insert into produtos_cadastrados (codigo, descricao, preco) value (%s,%s,%s);'\r\n        dados = (str(codigo), str(descricao), str(preco))\r\n        cursor.execute(comando_SQL, dados)\r\n        banco.commit()\r\n\r\n        caixa_codigo.clear()\r\n        caixa_descricao.clear()\r\n        caixa_preco.clear()\r\n\r\n\r\ndef executar():\r\n    interface_grafica = QApplication(sys.argv)\r\n\r\n    inst1 = Janela() # Create an instance of the Janela class\r\n    inst1.show() # Use '.show()', inherited from the parent class QWidget, to open the window\r\n    interface_grafica.exec_() # keeps the GUI instance running (event loop)\r\n    sys.exit(0) # allows closing the GUI and stopping the loop with the window's \"X\" button\r\n\r\n\r\nexecutar()","repo_name":"GuilhermeGBarreto/Python_Projects","sub_path":"Su Bolos e Salgados/Cadastro de Produtos/controlador_v3.py","file_name":"controlador_v3.py","file_ext":"py","file_size_in_byte":4618,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30632763889","text":"import numpy as np\nimport xarray as xr\nfrom python_utils.dataset_operations import *\n\n\ndef pressure_to_geowind(pressure, lon, lat, rho=1.225, Re=6371000, f='f-plane', f_central=None, slp=False):\n    '''\n    pressure_to_geowind.py\n\n    Computes geostrophic winds from sea level pressure field\n\n    Inputs\n    ------\n    pressure: np.ndarray, sea level pressure field in Pascal\n    lon: np.ndarray, longitude vector\n    lat: np.ndarray, latitude vector\n    rho = 1.225, air density\n    Re = 6371000, radius earth\n    f='f-plane' or 'real_world'\n    
f_central=None, central latitude for f-plane\n    slp = False, correct for surface friction (Proshutinsky and Johnson 1997) when using sea level pressure fields\n\n    Returns\n    -------\n    u, v: zonal and meridional wind components\n    '''\n\n    # Compute the distances between the gridpoints for computing the gradient\n    dist_x = np.diff(lon)[0] * (2 * np.pi * Re / 360) * np.cos(lat/360 * 2 * np.pi)\n    dist_y = np.diff(lat) * (2 * np.pi * Re / 360)\n\n    # Compute the gradient\n    dpdx = np.ones_like(pressure) * np.nan\n    dpdy = np.ones_like(pressure) * np.nan\n\n    for i in range(len(lat)):\n        dpdx[i, :] = np.gradient(pressure[i, :], dist_x[i])\n    for i in range(len(lon)):\n        dpdy[:, i] = np.gradient(pressure[:, i], np.mean(dist_y))\n\n    # Compute Coriolis parameter (latitudes are in degrees, so convert to radians)\n    if f == 'f-plane':\n        if f_central is None:\n            f_c = 2 * 7.2921e-5 * np.sin(np.deg2rad(np.median(lat)))\n        else:\n            f_c = 2 * 7.2921e-5 * np.sin(np.deg2rad(np.median(f_central)))\n\n        f_c = np.ones_like(dpdx) * f_c\n\n    elif f == 'real_world':\n        f_c = 2 * 7.2921e-5 * np.sin(np.deg2rad(lat))\n        f_c = f_c.repeat(len(lon)).reshape(dpdx.shape)\n\n    # Compute geostrophic velocity\n    u = 1/(rho * f_c) * dpdy\n    v = -1/(rho * f_c) * dpdx\n\n    if slp:\n        # Deflect and reduce winds for the impact of surface friction (Proshutinsky and Johnson 1997)\n        rad = np.pi/180\n        scale = .7\n        angle = 30\n\n        rot_mat = np.array([[np.cos(angle*rad), -np.sin(angle*rad)],\n                            [np.sin(angle*rad), np.cos(angle*rad)]])\n\n        a = u.shape\n        for i in range(a[0]):\n            for j in range(a[1]):\n\n                vec = np.array([u[i, j], v[i, j]])  # extract velocity vector\n                rot_vec = scale * np.dot(rot_mat, vec)  # rotate and scale\n\n                u[i, j] = rot_vec[0]\n                v[i, j] = rot_vec[1]\n\n    return u, v\n\n\ndef mean_diff_two_periods(src, period1=('1979', '1999'), period2=('2000', '2018'), param='slp', winter_only=False, lon180=False, fesom_output=False, scale=1):\n    '''\n    anomaly_two_periods.py\n\n    Computes the difference between two reference periods in one dataset.\n    Computation: period2 - period1\n\n    Inputs\n    ------\n    src (str or xr.DataArray or xr.Dataset)\n        path to dataset or dataset as xr.DataArray / xr.Dataset\n    period1, period2 (tuple)\n        tuples with start and end of each period\n    winter_only (bool, list)\n        if True only DJFMAM are considered; if a list is given, only the listed months are considered\n    scale (int, float)\n        scale factor (default=1)\n\n\n    Returns\n    -------\n    ds_p1\n\n    ds_p2\n\n    ds_delta\n        dataset containing the subtracted periods\n\n    '''\n\n    # Handle different input cases for src\n    if isinstance(src, str):\n        # Open file\n        ds = xr.open_dataset(src)  # .load()\n\n    elif isinstance(src, list) | isinstance(src, np.ndarray):\n        ds = xr.open_mfdataset(src, combine='by_coords', chunks={'nod2': 1e4})  # .load()\n\n    elif isinstance(src, xr.DataArray):\n        ds = src.to_dataset()  # .load()\n\n    elif isinstance(src, xr.Dataset):\n        ds = src  # .load()\n\n    # Apply cf conventions\n    if not fesom_output:\n        ds = dataset_to_cfconvention(ds, lon180=lon180)\n\n    # Select month\n    if winter_only:\n        if isinstance(winter_only, bool):\n            ds = select_winter_month(ds)\n        elif isinstance(winter_only, list):\n            ds = select_winter_month(ds, month=winter_only)\n\n    # Select two time periods\n    ds_p1 = ds.sel(time=slice(period1[0], period1[1]))\n    ds_p2 = ds.sel(time=slice(period2[0], period2[1]))\n\n    print('First period: ' + str(ds_p1.time[0].values), ' to ' + str(ds_p1.time[-1].values))\n    print('Second period: ' + str(ds_p2.time[0].values), ' to ' + str(ds_p2.time[-1].values))\n\n    # time mean\n    ds_p1 = ds_p1.mean(dim='time').compute()\n    ds_p2 = ds_p2.mean(dim='time').compute()\n\n    
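# note: both period means were evaluated eagerly above (.compute()), so the\n    # subtraction below operates on in-memory fields rather than lazy dask graphs\n    # Compute the 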
difference of the mean fields and scale\n    ds_delta = (ds_p2[param] - ds_p1[param]) * scale\n    ds_delta = ds_delta.compute()\n\n    return ds_p1, ds_p2, ds_delta\n\n\ndef create_wind_anomaly_netCDF(ds, lon_range, lat_range, lon360=True, savepath=None):\n    '''\n    Create_wind_anomaly_netCDF.py\n\n    Creates global netCDF files with wind anomalies for use in fesom2.1\n\n    Inputs\n    ------\n    ds, xr.dataarray\n        slp data\n    lon_range, tuple\n        (min_lon, max_lon)\n    lat_range, tuple\n        (min_lat, max_lat)\n    lon360, bool\n        convert longitudes to 0-360° range (default is True)\n    savepath, string\n        path to save the netCDF file\n\n    Returns\n    -------\n    ds_new, xr.dataset\n        global dataset with wind anomalies\n\n    '''\n\n    # Find the nearest values to the given ranges\n    lon_min = ds.lon.sel(lon=lon_range[0], method='nearest').values\n    lon_max = ds.lon.sel(lon=lon_range[-1], method='nearest').values\n    lat_min = ds.lat.sel(lat=lat_range[0], method='nearest').values\n    lat_max = ds.lat.sel(lat=lat_range[-1], method='nearest').values\n\n    # Compute geostrophic wind\n    slp = ds.values\n    lon = ds.lon.values\n    lat = ds.lat.values\n\n    u, v = pressure_to_geowind(slp, lon, lat, f='f-plane', f_central=75, slp=True)\n\n    # Set all values outside range to 0\n    LON, LAT = np.meshgrid(lon, lat)\n\n    u_anom = np.where((LON >= lon_min) & (LON <= lon_max) &\n                      (LAT >= lat_min) & (LAT <= lat_max), u, 0)\n    v_anom = np.where((LON >= lon_min) & (LON <= lon_max) &\n                      (LAT >= lat_min) & (LAT <= lat_max), v, 0)\n\n    ds_new = xr.Dataset({\n        'uanom': xr.DataArray(\n            data=u_anom,  # enter data here\n            dims=['lat', 'lon'],\n            coords={'lat': lat, 'lon': lon},\n            attrs={\n                'units': 'm/s',\n                'description': 'geostrophic wind anomaly from JRA55 slp difference 1979-1999, 2000-2018, rotated by 30° and scaled by a factor of 0.7 (Proshutinsky and Johnson 1997)'\n            }\n        ),\n        'vanom': xr.DataArray(\n            data=v_anom,  # enter data here\n            dims=['lat', 'lon'],\n            coords={'lat': lat, 'lon': lon},\n            attrs={\n                'units': 'm/s',\n                'description': 'geostrophic wind anomaly from JRA55 slp difference 1979-1999, 2000-2018, rotated by 30° and scaled by a factor of 0.7 (Proshutinsky and Johnson 1997)'\n            }\n        ),\n    })\n\n    if lon360:\n        ds_new.coords['lon'] = np.where(\n            ds_new.lon.values < 0, ds_new.lon.values + 360, ds_new.lon.values)\n        ds_new = ds_new.sortby(ds_new.lon)\n\n    # Add Time coordinate\n    ds_new = ds_new.assign_coords({'time': 1}).expand_dims('time')\n\n    if savepath:\n        ds_new.to_netcdf(savepath)\n\n    return ds_new\n","repo_name":"FinnHeu/python_utils","sub_path":"python_utils/bs_anomalies.py","file_name":"bs_anomalies.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74477177153","text":"import helper as hp\nimport numpy as np\nimport time\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom scipy.spatial import distance\nimport os\n\n\n\"\"\"\nThis is the object class Adversary. 
It contains all info and properties of an adversary and has only black-box acces to \na ML model.\n\"\"\"\n\nclass Adversary(object):\n\tdef __init__(self, b, n_features, strategy, API):\n\t\tif NotValidBudget(b):\n\t\t\traise Exception('not a valid initialisation: budget')\n\t\tself.b = b\n\t\tself.q = 0\n\t\tself.SetStrategy(strategy)\n\n\t\tself.API = API\n\n\t\tself.NEG = 0\n\t\tself.POS = 1\n\n\t\tself.n_features = n_features\n\n\t\tself.x_trn = []\n\t\tself.y_trn = []\n\n\t\tself.x_val = [] # Normally, the adversary does not have this data set\n\t\tself.y_val = [] # Normally, the adversary does not have this data set\n\n\t\tself.x_new = []\n\t\tself.y_new = []\n\n\t\tself.time_start = 0\n\t\tself.time_end = 0\n\n\t\tself.rundata = []\n\n\t\tself.model = None\n\n\t# -------------------------------Simple checkers and setters------------------------------\n\n\tdef SetStrategy(self, strategy):\n\t\tif strategy == 'monoAdaptive':\n\t\t\tself.nbinit = 0\n\t\t\tself.qprrnd = 2\n\t\t\tself.strategy = strategy\n\t\telif strategy == 'adaptive':\n\t\t\tself.strategy = 'adaptive'\n\t\t\tself.SetRounds(1)\n\t\telse:\n\t\t\traise ValidStrategyException\n\n\tdef RemoveFromBudget(self,rm):\n\t\tif (self.b-rm)<0:\n\t\t\traise NotEnoughBudget()\n\t\telse:\n\t\t\tself.b -= rm\n\t\t\tself.q += rm\n\n\tdef SetRounds(self, r):\n\t\tassert (r>0), \"the number of rounds must be positive\"\n\t\tassert (r str:\n maxLen = 0\n length = len(s)\n for i in range(length):\n curLen = 1\n startIdx = endIdx = i\n for j in range(1, length):\n leftIdx = i - j\n rightIdx = i + j\n if leftIdx < 0 or rightIdx > (length - 1):\n break\n if s[leftIdx] != s[rightIdx]:\n break\n curLen += 2\n startIdx = leftIdx\n endIdx = rightIdx\n if curLen > maxLen:\n maxLen = curLen\n resStart = startIdx\n resEnd = endIdx\n\n curLen = 0\n startIdx = endIdx = i\n for j in range(0, length):\n leftIdx = i - j\n rightIdx = i + j + 1\n if (leftIdx < 0) or (rightIdx > length - 1):\n break\n if s[leftIdx] != s[rightIdx]:\n break\n curLen += 2\n startIdx = leftIdx\n endIdx = rightIdx\n if curLen > maxLen:\n maxLen = curLen\n resStart = startIdx\n resEnd = endIdx\n \n result = \"\"\n for idx in range(resStart, resEnd + 1):\n result+=s[idx]\n return result\n\ndef main():\n s = \"abaabbaaBA\"\n print(Solution().longestPalindrome(s))\n\nif __name__ == \"__main__\":\n main()\n \n# @lc code=end\n\n","repo_name":"ha233/.leetcode","sub_path":"5.longest-palindromic-substring.py","file_name":"5.longest-palindromic-substring.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23566082901","text":"from math import log\r\nfrom math import pow\r\n\r\ndef binaryDiv(x):\r\n\txd2 = x//2\r\n\treturn int(x - xd2), int(xd2)\r\n\r\ndef solve(s):\r\n\tl = s.split(' ')\r\n\tn = int(l[0])\r\n\tk = int(l[1])\r\n\tbeforeLevel = int(log(k, 2))\r\n\tnumInBeforeLevels = pow(2, beforeLevel) - 1\r\n\tkLeft = k - numInBeforeLevels\r\n\tnLeft = n - numInBeforeLevels\r\n\tnumIncurrentLevel = numInBeforeLevels + 1\r\n\tq = nLeft // numIncurrentLevel\r\n\tr = nLeft % numIncurrentLevel\r\n\tif \tkLeft <= r:\r\n\t\tchoosedGap = q + 1\r\n\telse:\r\n\t\tchoosedGap = q\r\n\treturn '{} {}'.format(*binaryDiv(choosedGap - 1))\r\n\t\t\t\r\n\t\r\ndef getInputFromFile(filename):\r\n\tf = open(filename, 'rt')\r\n\tn = int(f.readline().rstrip())\r\n\tinputs = []\r\n\tfor i in range(0, n):\r\n\t\tline = f.readline().rstrip()\r\n\t\tinputs.append(line)\r\n\tf.close()\r\n\treturn n, 
inputs\t\r\n\t\r\n\t\r\ndef getAns(filename, outputFile):\r\n\tn, inputs = getInputFromFile(filename)\r\n\tres = []\r\n\tfor i in range(0, n):\r\n\t\tans = solve(inputs[i])\r\n\t\tres.append('Case #{}: {}'.format(i+1, ans))\r\n\tf = open(outputFile, 'wt')\r\n\ts = '\\n'.join(res) + '\\n'\r\n\tprint(s)\r\n\tf.write(s)\r\n\tf.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1015.py","file_name":"1015.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33203018605","text":"import dem as d\nimport numpy as np\nimport scipy.ndimage.morphology as morph\n\ngrid_name = 'ca_bath_near'\nslope_grid_name = 'ca_bath_near_slope'\nmaximum_pit_depth = 200\n\n\ndem = d.Elevation.load(grid_name)\n'''\nslope_mask = d.Mask.load(slope_grid_name)\n\nbase_of_slope_mask = d.Mask()\nbase_of_slope_mask._copy_info_from_grid(slope_mask, True)\n\nbase_of_slope_mask._griddata = slope_mask._griddata.astype(np.float64) + morph.binary_erosion(slope_mask._griddata).astype(np.float64)\ni = np.where((base_of_slope_mask._griddata == 1) & (dem._griddata <= -600))\n\nrc = zip(i[0].tolist(),i[1].tolist())\noutlets = base_of_slope_mask._rowscols_to_xy(rc)\n\ndem._griddata[slope_mask._griddata == 0] = np.NaN\n'''\n\nfilled_dem = d.FilledElevation(elevation = dem, outlets = outlets, display_output = True, maximum_pit_depth = maximum_pit_depth)\nfd = d.FlowDirectionD8(flooded_dem = filled_dem)\narea = d.GeographicArea(flow_direction = fd)\n\nfilled_dem.save(grid_name + '_filled')\nfd.save(grid_name + '_flow_direction')\narea.save(grid_name + '_area')\n\n","repo_name":"stgl/Submarine","sub_path":"Gebco/scripts/calcFilledSlope.py","file_name":"calcFilledSlope.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19883841375","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom time import sleep\n\nimport jenkinsapi\nfrom jenkinsapi.jenkins import Jenkins\nfrom jenkinsapi.utils.crumb_requester import CrumbRequester\n\nfrom Logger import logger\nfrom .config_loader import get_jenkins_cfg as cfg\nfrom .notification_bot import send_build_info\n\nBASE_URL = cfg()[\"url\"]\nCRUMB = CrumbRequester(baseurl=BASE_URL)\nJENKINS = Jenkins(BASE_URL, requester=CRUMB)\nCHECK_BUILD_LIST = dict()\nTRACKING_JOBS = cfg()['jobs']\nTIMEOUT = 10\n\n\ndef jenkins_checker_thread():\n    \"\"\"Periodically check the progress of the tracked builds\"\"\"\n    while True:\n        logger.info('Jenkins checker run at %s', datetime.now())\n        global JENKINS\n        # Iterate over the jobs and add new builds to the tracked ones\n        for job in TRACKING_JOBS:\n            try:\n                last_build = JENKINS.get_job(job).get_last_build()\n                last_num = last_build.get_number()\n            except jenkinsapi.custom_exceptions.NoBuildData:\n                last_build = None\n                last_num = 0\n            except ConnectionError as error:\n                logger.error('Ошибка получения последнего билда с дженкинса.\\n'\n                             'Отслеживаемая джоба: %s\\n'\n                             'Текст ошибки: %s', job, error)\n                continue\n            # If there were no builds before, update the last build\n            if TRACKING_JOBS[job] == 0:\n                TRACKING_JOBS[job] = last_num\n                logger.info('Обновлён последний билд для джобы %s', job)\n            # Check the last build; if its number is greater than the current one, it is new\n            if TRACKING_JOBS[job] < last_num:\n                logger.info('Обнаружен новый билд %s джобы %s, '\n                            'добавлен в отслеживаемые',\n                            last_num, job)\n                # Use a dedicated key 
format for tracking\n                tracking_job = str(job) + '.' + str(last_num)\n                # Mark it as not processed yet\n                CHECK_BUILD_LIST[tracking_job] = 'NOT_READY'\n                TRACKING_JOBS[job] = last_num\n                # Prepare the data to send to Telegram\n                job_in_telegram = job.replace('_', '\\_').replace('/', '\\/')\n                job_for_tracking = job + '.' + str(last_num)\n                # Send info about the new build to Telegram\n                send_build_info('*Запущен новый билд '\n                                f'{last_num}\\!*\\n\\n'\n                                f'Джоба\\: {job_in_telegram}',\n                                job_for_tracking)\n        if len(CHECK_BUILD_LIST) == 0:\n            logger.info('Нет билдов для отслеживания')\n        else:\n            builds_str = ''\n            # Prepare the build names from the list for further processing\n            for job in CHECK_BUILD_LIST:\n                build = str(job).split(\".\")[1]\n                builds_str += f'{build} '\n            logger.info('Есть билды для отслеживания: %s', builds_str)\n            untracking_builds = dict()\n            # Iterate over the tracked builds\n            for tracking_job in CHECK_BUILD_LIST:\n                # Prepare the build data\n                job = str(tracking_job).split(\".\")[0]\n                build = str(tracking_job).split(\".\")[1]\n                logger.info('Проверяем билд %s джобы %s', build, job)\n                build_obj = JENKINS.get_job(job).get_build(int(build))\n                status = build_obj.get_status()\n                logger.info('Билд №%s. Статус: %s',\n                            build,\n                            status)\n                # The status can vary;\n                # if it is None, the build is still in progress\n                if status is not None:\n                    # If the status is not None, the build has finished, but give it\n                    # one extra step to refresh the status. It settles on the 2nd\n                    # pass and may change from SUCCESS to any other state,\n                    # a Jenkins quirk\n                    if CHECK_BUILD_LIST[tracking_job] == 'NOT_READY':\n                        CHECK_BUILD_LIST[tracking_job] = 'READY'\n                    # If the build is in the READY state, the real status can be read\n                    elif CHECK_BUILD_LIST[tracking_job] == 'READY':\n                        result_build = '🟦 Неизвестно'\n                        if status == 'SUCCESS':\n                            result_build = '🟩 Успешно'\n                        elif status == 'ABORTED':\n                            result_build = '🟪 Отменён'\n                        elif status == 'FAILURE':\n                            result_build = '🟥 Провалена сборка билда'\n                        elif status == 'UNSTABLE':\n                            result_build = '🟨 Есть ошибки в тестах'\n                        # If the build is finished, add it to untracking_builds\n                        untracking_builds[job] = build\n                        # Try to get the build duration\n                        try:\n                            duration = str(build_obj.get_duration()).split(\n                                '.', maxsplit=1)[0].replace(':', '\\:')\n                        except Exception as e:\n                            duration = '\\?'\n                            logger.error('Ошибка при разборе отчёта\\n%s', e)\n                        # Prepare the data for Telegram\n                        job_in_url = tracking_job.split('.')[0] \\\n                            .replace('/', '/job/')\n                        job_in_telegram = job.replace('_', '\\_').replace('/',\n                                                                         '\\/')\n                        # Send the final message about build completion\n                        send_build_info(\n                            f'*Сборка билда {build} завершена\\!*\\n\\n'\n                            f'Джоба\\: {job_in_telegram}\\n'\n                            f'Результат\\: {result_build}\\n'\n                            f'Длительность\\: {duration}\\n',\n                            tracking_job,\n                            True)\n            # Remove all finished builds from the tracked ones\n            for untracking_job in untracking_builds:\n                element = (f'{untracking_job}.'\n                           f'{untracking_builds[untracking_job]}')\n                CHECK_BUILD_LIST.pop(element)\n        logger.info('Проверка завершена, запущено ожидание в %s секунд',\n                    TIMEOUT)\n        sleep(TIMEOUT)\n","repo_name":"nuPATEXHuK/CI_bot","sub_path":"funcs/jenkins_checker_app.py","file_name":"jenkins_checker_app.py","file_ext":"py","file_size_in_byte":7850,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39286980778","text":"# coding=utf-8\nimport tensorflow as tf\nimport numpy as np\nfrom six.moves import cPickle as pickle\nfrom six.moves import range\n\n\n
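# accuracy(): percentage of samples whose predicted argmax matches the\n# argmax of the one-hot label (both arrays have shape (m, num_labels)).\ndef 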
accuracy(predictions, labels):\n    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n            / predictions.shape[0])\n\n\npickle_file = \"notMNIST.pickle\"\nwith open(pickle_file, 'rb') as f:\n    save = pickle.load(f)\n    train_dataset = save['train_dataset']\n    train_labels = save['train_labels']\n    valid_dataset = save['valid_dataset']\n    valid_labels = save['valid_labels']\n    test_dataset = save['test_dataset']\n    test_labels = save['test_labels']\n    del save\n    print('Training set', train_dataset.shape, train_labels.shape)\n    print('Validation set', valid_dataset.shape, valid_labels.shape)\n    print('Test set', test_dataset.shape, test_labels.shape)\n\nimage_size = 28\nnum_labels = 10\n\n\ndef reformat(dataset, labels):\n    \"\"\"Reformat the images\"\"\"\n    # flatten each image into a vector: [28,28] ===> [, 28*28]\n    dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n    # build one-hot label vectors\n    labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)\n    return dataset, labels\n\n\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)\n\nbatch_size = 128\nhidden_units = 1024\n\ngraph = tf.Graph()\nwith graph.as_default():\n\n    # Input data. For the training data, we use a placeholder that will be fed\n    # at run time with a training minibatch.\n    with tf.name_scope('input'):\n        with tf.name_scope('tf_train_dataset'):\n            tf_train_dataset = tf.placeholder(tf.float32,\n                                              shape=(batch_size, image_size * image_size))\n        with tf.name_scope('tf_train_labels'):\n            tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n        with tf.name_scope('tf_valid_dataset'):\n            tf_valid_dataset = tf.constant(valid_dataset)\n        with tf.name_scope('tf_test_dataset'):\n            tf_test_dataset = tf.constant(test_dataset)\n\n    # neural_network\n    with tf.name_scope('weights'):\n        weights_1 = tf.Variable(\n            tf.truncated_normal([image_size * image_size, hidden_units]))\n    with tf.name_scope('biases_1'):\n        biases_1 = tf.Variable(tf.zeros([hidden_units]))\n    # Training computation.\n\n    with tf.name_scope('weights_2'):\n        weights_2 = tf.Variable(tf.truncated_normal([hidden_units, num_labels]))\n    with tf.name_scope('biases_2'):\n        biases_2 = tf.Variable(tf.zeros([num_labels]))\n    with tf.name_scope('hidden_logits'):\n        hidden_logits = tf.matmul(tf.nn.relu(tf.matmul(tf_train_dataset, weights_1) + biases_1), weights_2) + biases_2\n\n    with tf.name_scope('loss'):\n        loss = tf.reduce_mean(\n            tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=hidden_logits))\n\n    # Optimizer.\n    with tf.name_scope('optimizer'):\n        optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n\n    # Predictions for the training, validation, and test data.\n    with tf.name_scope('train_prediction'):\n        train_prediction = tf.nn.softmax(hidden_logits)\n    with tf.name_scope('valid_prediction'):\n        valid_prediction = tf.nn.softmax(tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights_1) + biases_1), weights_2) + biases_2)\n    with tf.name_scope('test_prediction'):\n        test_prediction = tf.nn.softmax(tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights_1) + biases_1), weights_2) + biases_2)\n\nnum_steps = 3001\n\nwith tf.Session(graph=graph) as session:\n    tf.global_variables_initializer().run()\n    writer = 
tf.summary.FileWriter(\"log/\", session.graph)\n print(\"Initialized\")\n for step in range(num_steps):\n # Pick an offset within the training data, which has been randomized.\n # Note: we could use better randomization across epochs.\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n # Generate a minibatch.\n batch_data = train_dataset[offset:(offset + batch_size), :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n # Prepare a dictionary telling the session where to feed the minibatch.\n # The key of the dictionary is the placeholder node of the graph to be fed,\n # and the value is the numpy array to feed to it.\n feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n\n if (step % 500 == 0):\n print(\"Minibatch loss at step %d: %f\" % (step, l))\n print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n print(\"Validation accuracy: %.1f%%\" % accuracy(\n valid_prediction.eval(), valid_labels))\n print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))\n","repo_name":"STHSF/DeepLearning","sub_path":"notMNIST/deepclassify.py","file_name":"deepclassify.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"35862543531","text":"class Solution:\n def maxScoreWords(self, words: List[str], letters: List[str], score: List[int]) -> int:\n\n def is_possible(c1, c2):\n c3 = Counter(c1)\n c3.subtract(c2)\n return c3, min(c3.values()) >= 0\n\n def dfs(i, c):\n if i == len(data):\n return 0\n\n nc, possible = is_possible(c, data[i][1])\n\n if possible:\n a = data[i][0] + dfs(i + 1, nc)\n else:\n a = 0\n\n return max(a, dfs(i + 1, c))\n\n data = []\n\n for word in words:\n w_count = Counter(word)\n\n nc, possible = is_possible(Counter(letters), w_count)\n\n if possible:\n w_score = sum([score[ord(ch) - ord('a')] for ch in word])\n else:\n w_score = 0\n\n data.append((w_score, w_count))\n\n return dfs(0, Counter(letters))\n","repo_name":"pbelskiy/contest","sub_path":"leetcode.com/1255_maximum_score_words_formed_by_letters/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16921893065","text":"import copy\nimport json\nimport logging\nimport math\n\n# Standard imports\nimport os\nimport pickle\nfrom typing import Dict\n\n# Third party imports\nimport numpy as np\nimport pandas\nimport rasterio as rio\nimport xarray as xr\nfrom rasterio.profiles import DefaultGTiffProfile\nfrom rasterio.windows import Window\n\n# CARS imports\nfrom cars.core import constants as cst\nfrom cars.core import outputs\nfrom cars.core.utils import safe_makedirs\nfrom cars.data_structures import cars_dict, dataframe_converter\n\n# cars dataset dtype\nCARS_DS_TYPE_ARRAY = \"arrays\"\nCARS_DS_TYPE_POINTS = \"points\"\nCARS_DS_TYPE_DICT = \"dict\"\n\n# cars_dataset names\nTILES_INFO_FILE = \"tiles_info.json\"\nOVERLAP_FILE = \"overlaps.npy\"\nGRID_FILE = \"grid.npy\"\nPROFILE_FILE = \"profile.json\"\n\n# single tile names\nATTRIBUTE_FILE = \"attributes.json\"\nDATASET_FILE = \"dataset\"\nDATAFRAME_FILE = \"dataframe.csv\"\nCARSDICT_FILE = \"cars_dict\"\n\nPROFILE = \"profile\"\nWINDOW = \"window\"\nOVERLAPS = \"overlaps\"\nATTRIBUTES = \"attributes\"\nSAVING_INFO = \"saving_info\"\n\n\nclass 
CarsDataset:\n    \"\"\"\n    CarsDataset.\n\n    Internal CARS structure for organizing tiles\n    (xr.Datasets or pd.DataFrames).\n    \"\"\"\n\n    def __init__(self, dataset_type, load_from_disk=None):\n        \"\"\"\n        Init function of CarsDataset.\n        If a path is provided, restore CarsDataset saved on disk.\n\n        :param dataset_type: type of dataset : 'arrays', 'points' or 'dict'\n        :type dataset_type: str\n        :param load_from_disk: path to saved CarsDataset\n        :type load_from_disk: str\n\n        \"\"\"\n\n        self.dataset_type = dataset_type\n        if dataset_type not in [\n            CARS_DS_TYPE_ARRAY,\n            CARS_DS_TYPE_POINTS,\n            CARS_DS_TYPE_DICT,\n        ]:\n            raise ValueError(\"wrong dataset type\")\n\n        self.tiles = None\n        self.tiles_info = {}\n        self._tiling_grid = None\n        self.overlaps = None\n        self.attributes = {}\n\n        if load_from_disk is not None:\n            self.load_cars_dataset_from_disk(load_from_disk)\n\n    def __repr__(self):\n        \"\"\"\n        Repr function\n        :return: printable self CarsDataset\n        \"\"\"\n        return self.custom_print()\n\n    def __str__(self):\n        \"\"\"\n        Str function\n        :return: printable self CarsDataset\n        \"\"\"\n        return self.custom_print()\n\n    def custom_print(self):\n        \"\"\"\n        Return string of self\n        :return: printable self\n        \"\"\"\n\n        res = str(self.__class__) + \": \\n\" \"dataset_type: \" + str(\n            self.dataset_type\n        ) + \"\\n\" + \"shape: \" + str(self.shape) + \"\\n\" + \"tiling_grid: \" + str(\n            self._tiling_grid\n        ) + \"\\n\" + \"overlaps: \" + str(\n            self.overlaps\n        ) + \"\\n\" + \"tiles_info: \" + str(\n            self.tiles_info\n        ) + \"\\n\" + \"attributes: \" + str(\n            self.attributes\n        ) + \"\\n\" + \"tiles:\" + str(\n            self.tiles\n        )\n        return res\n\n    @property\n    def shape(self):\n        \"\"\"\n        Return the shape of tiling grid (nb_row, nb_col)\n        :return: shape of grid\n        \"\"\"\n        return self.tiling_grid.shape[0], self.tiling_grid.shape[1]\n\n    @property\n    def tiling_grid(self):\n        \"\"\"\n        Tiling grid, containing pixel windows of tiles\n\n        :return: tiling grid, of shape [N, M, 4],\n            containing [row_min, row_max, col_min, col_max]\n        :rtype: np.ndarray\n        \"\"\"\n        return self._tiling_grid\n\n    @tiling_grid.setter\n    def tiling_grid(self, new_grid):\n        \"\"\"\n        Set tiling_grid\n\n        :param new_grid: new grid\n        :type new_grid: np.ndarray\n        \"\"\"\n        self._tiling_grid = new_grid\n        # reset overlaps to zeros\n        self.overlaps = np.zeros(new_grid.shape)\n        # fill dataset grid with Nones\n        self.generate_none_tiles()\n\n    def __getitem__(self, key):\n        \"\"\"\n        Get item : return the [row, col] dataset\n\n        :param key: tuple index\n\n        :return: tile\n        :rtype: xr.Dataset or pd.DataFrame\n        \"\"\"\n\n        if isinstance(key, (tuple, list)):\n            if len(key) == 2:\n                res = self.tiles[key[0]][key[1]]\n            elif len(key) == 1:\n                res = self.tiles[key[0]]\n            else:\n                raise ValueError(\"Too many indexes, expected 1 or 2\")\n        else:\n            if isinstance(key, int):\n                res = self.tiles[key]\n            else:\n                raise ValueError(\"Index type not supported\")\n\n        return res\n\n    def __setitem__(self, key, newvalue):\n        \"\"\"\n        Set new tile\n\n        :param key: tuple of row and col indexes\n        :type key: tuple(int, int)\n        :param newvalue: tile to set\n        \"\"\"\n        if isinstance(key, (tuple, list)):\n            if len(key) == 2:\n                self.tiles[key[0]][key[1]] = newvalue\n            else:\n                raise ValueError(\"Too many indexes, expected 2\")\n        else:\n            raise ValueError(\"Index type not supported\")\n\n    def load_single_tile(self, tile_path_name: str):\n        \"\"\"\n        Load a single tile\n\n        :param tile_path_name: Path of tile to load\n        :type tile_path_name: str\n\n        :return: single tile\n        :rtype: xarray Dataset or Pandas dataframe\n\n        \"\"\"\n\n        functions = {\n            
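# dispatch table: one loader per CarsDataset flavour\n            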
CARS_DS_TYPE_ARRAY: load_single_tile_array,\n            CARS_DS_TYPE_POINTS: load_single_tile_points,\n            CARS_DS_TYPE_DICT: load_single_tile_dict,\n        }\n\n        return functions[self.dataset_type](tile_path_name)\n\n    def save_single_tile(self, tile, tile_path_name: str):\n        \"\"\"\n        Save xarray Dataset or Panda dataframe to file\n\n        :param tile: tile to save\n        :type tile: xr.Dataset or pd.DataFrame\n        :param tile_path_name: Path of file to save in\n        \"\"\"\n\n        functions = {\n            CARS_DS_TYPE_ARRAY: save_single_tile_array,\n            CARS_DS_TYPE_POINTS: save_single_tile_points,\n            CARS_DS_TYPE_DICT: save_single_tile_dict,\n        }\n\n        return functions[self.dataset_type](tile, tile_path_name)\n\n    def run_save(self, future_result, file_name: str, **kwargs):\n        \"\"\"\n        Save future result when it arrives\n\n        :param future_result: xarray.Dataset received\n        :param file_name: filename to save data to\n        \"\"\"\n\n        functions = {\n            CARS_DS_TYPE_ARRAY: run_save_arrays,\n            CARS_DS_TYPE_POINTS: run_save_points,\n        }\n\n        return functions[self.dataset_type](future_result, file_name, **kwargs)\n\n    def get_window_as_dict(self, row, col, from_terrain=False, resolution=1):\n        \"\"\"\n        Get window in pixels for rasterio. Set from_terrain if tiling grid\n        was defined in geographic coordinates.\n\n        :param row: row\n        :type row: int\n        :param col: col\n        :type col: int\n        :param from_terrain: true if in terrain coordinates\n        :type from_terrain: bool\n        :param resolution: resolution\n        :type resolution: float\n\n        :return: New window : {\n            \"row_min\" : row_min ,\n            \"row_max\" : row_max\n            \"col_min\" : col_min\n            \"col_max\" : col_max\n            }\n        :rtype: Dict\n\n        \"\"\"\n\n        row_min = np.min(self.tiling_grid[:, :, 0])\n        col_min = np.min(self.tiling_grid[:, :, 2])\n        col_max = np.max(self.tiling_grid[:, :, 3])\n\n        window_arr = np.copy(self.tiling_grid[row, col, :])\n\n        if from_terrain:\n            # row -> y axis : reversed by convention\n            window = np.array(\n                [\n                    col_max - window_arr[3],\n                    col_max - window_arr[2],\n                    window_arr[0] - row_min,\n                    window_arr[1] - row_min,\n                ]\n            )\n\n        else:\n            window = np.array(\n                [\n                    window_arr[0] - row_min,\n                    window_arr[1] - row_min,\n                    window_arr[2] - col_min,\n                    window_arr[3] - col_min,\n                ]\n            )\n\n        # normalize with resolution\n        window = np.round(window / resolution)\n\n        new_window = {\n            \"row_min\": int(window[0]),\n            \"row_max\": int(window[1]),\n            \"col_min\": int(window[2]),\n            \"col_max\": int(window[3]),\n        }\n\n        return new_window\n\n    def create_grid(\n        self,\n        nb_col: int,\n        nb_row: int,\n        row_split: int,\n        col_split: int,\n        row_overlap: int,\n        col_overlap: int,\n    ):\n        \"\"\"\n        Generate grid of positions by splitting [0, nb_row]x[0, nb_col]\n        in splits of row_split x col_split size\n\n        :param nb_col : number of columns\n        :param nb_row : number of lines\n        :param col_split: width of splits\n        :param row_split: height of splits\n        :param col_overlap: overlap to apply on cols\n        :param row_overlap: overlap to apply on rows\n\n        \"\"\"\n        nb_col_splits = math.ceil(nb_col / col_split)\n        nb_row_splits = math.ceil(nb_row / row_split)\n\n        row_min, row_max = 0, nb_row\n        col_min, col_max = 0, nb_col\n\n        out_grid = np.ndarray(\n            shape=(nb_row_splits, nb_col_splits, 4), dtype=int\n        )\n\n        out_overlap = np.ndarray(\n            shape=(nb_row_splits, nb_col_splits, 4), dtype=int\n        )\n\n        for i in range(0, nb_row_splits):\n            for j in range(0, nb_col_splits):\n                row_down = row_min + row_split * i\n                col_left = col_min + col_split * j\n                row_up = min(row_max, row_min + (i + 1) * row_split)\n                col_right = min(col_max, col_min + (j + 1) * col_split)\n\n                
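# each grid cell stores its pixel window as\n                # [row_min, row_max, col_min, col_max]\n                out_grid[i, j, 0] = row_down\n                out_grid[i, j, 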
1] = row_up\n                out_grid[i, j, 2] = col_left\n                out_grid[i, j, 3] = col_right\n\n                # fill overlap [OL_row_down, OL_row_up, OL_col_left,\n                # OL_col_right]\n                out_overlap[i, j, 0] = row_down - max(\n                    row_min, row_down - row_overlap\n                )\n                out_overlap[i, j, 1] = (\n                    min(row_max, row_up + row_overlap) - row_up\n                )\n                out_overlap[i, j, 2] = col_left - max(\n                    col_min, col_left - col_overlap\n                )\n                out_overlap[i, j, 3] = (\n                    min(col_max, col_right + col_overlap) - col_right\n                )\n\n        self.tiling_grid = out_grid\n        self.overlaps = out_overlap\n\n    def generate_none_tiles(self):\n        \"\"\"\n        Generate the structure of data tiles, with Nones, according\n        to grid shape.\n\n        \"\"\"\n\n        self.tiles = create_none(\n            self.tiling_grid.shape[0], self.tiling_grid.shape[1]\n        )\n\n    def create_empty_copy(self, cars_ds):\n        \"\"\"\n        Copy attributes, grid, overlaps, and create Nones.\n\n        :param cars_ds: CarsDataset to copy\n        :type cars_ds: CarsDataset\n\n        \"\"\"\n\n        self.tiles_info = copy.deepcopy(cars_ds.tiles_info)\n        self.tiling_grid = copy.deepcopy(cars_ds.tiling_grid)\n        self.overlaps = copy.deepcopy(cars_ds.overlaps)\n\n        self.tiles = []\n        for _ in range(cars_ds.overlaps.shape[0]):\n            tiles_row = []\n            for _ in range(cars_ds.overlaps.shape[1]):\n                tiles_row.append(None)\n            self.tiles.append(tiles_row)\n\n    def generate_descriptor(\n        self, future_result, file_name, tag=None, dtype=None, nodata=None\n    ):\n        \"\"\"\n        Generate the rasterio descriptor for the given future result\n\n        Only works with a pixel tiling grid\n\n        :param future_result: Future result\n        :type future_result: xr.Dataset\n        :param file_name: file name to save futures to\n        :type file_name: str\n        :param tag: tag to save\n        :type tag: str\n        :param dtype: dtype\n        :type dtype: str\n        :param nodata: no data value\n        :type nodata: float\n        \"\"\"\n\n        # Get profile from 1st finished future\n        new_profile = get_profile_for_tag_dataset(future_result, tag)\n\n        if \"width\" not in new_profile or \"height\" not in new_profile:\n            logging.debug(\n                \"CarsDataset doesn't have a profile, default is given\"\n            )\n            new_profile = DefaultGTiffProfile(count=new_profile[\"count\"])\n            new_profile[\"height\"] = np.max(self.tiling_grid[:, :, 1])\n            new_profile[\"width\"] = np.max(self.tiling_grid[:, :, 3])\n\n        # Change dtype\n        new_profile[\"dtype\"] = dtype\n        if nodata is not None:\n            new_profile[\"nodata\"] = nodata\n\n        descriptor = rio.open(file_name, \"w\", **new_profile, BIGTIFF=\"IF_SAFER\")\n\n        return descriptor\n\n    def save_cars_dataset(self, directory):\n        \"\"\"\n        Save whole CarsDataset to given directory, including tiling grids,\n        attributes, overlaps, and all the xr.Dataset or pd.DataFrames.\n\n        :param directory: Path where to save self CarsDataset\n        :type directory: str\n\n        \"\"\"\n\n        # Create CarsDataset folder\n        safe_makedirs(directory)\n\n        if self.tiles is None:\n            logging.error(\"No tiles managed by CarsDatasets\")\n            raise RuntimeError(\"No tiles managed by CarsDatasets\")\n\n        # save tiles info\n        tiles_info_file = os.path.join(directory, TILES_INFO_FILE)\n        save_dict(self.tiles_info, tiles_info_file)\n\n        # save grid\n        grid_file = os.path.join(directory, GRID_FILE)\n        save_numpy_array(self.tiling_grid, grid_file)\n\n        # save overlap\n        overlap_file = os.path.join(directory, OVERLAP_FILE)\n        save_numpy_array(self.overlaps, overlap_file)\n\n        nb_rows, nb_cols = self.tiling_grid.shape[0], self.tiling_grid.shape[1]\n\n        # save each tile\n        for col in range(nb_cols):\n            for row in range(nb_rows):\n                # Get name\n                current_tile_path_name = create_tile_path(col, row, directory)\n\n                # save tile\n                
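# (tile data is pickled, its attributes go to a JSON sidecar;\n                # see the save_single_tile_* helpers)\n                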
self.save_single_tile(\n self.tiles[row][col], current_tile_path_name\n )\n\n def load_cars_dataset_from_disk(self, directory):\n \"\"\"\n Load whole CarsDataset from given directory\n\n :param directory: Path where is saved CarsDataset to load\n :type directory: str\n\n \"\"\"\n\n # get tiles info\n tiles_info_file = os.path.join(directory, TILES_INFO_FILE)\n self.tiles_info = load_dict(tiles_info_file)\n\n # load grid\n grid_file = os.path.join(directory, GRID_FILE)\n self.tiling_grid = load_numpy_array(grid_file)\n\n nb_rows, nb_cols = self.tiling_grid.shape[0], self.tiling_grid.shape[1]\n\n # load overlap\n overlap_file = os.path.join(directory, OVERLAP_FILE)\n self.overlaps = load_numpy_array(overlap_file)\n\n # load each tile\n self.tiles = []\n for row in range(nb_rows):\n tiles_row = []\n for col in range(nb_cols):\n # Get name\n current_tile_path_name = create_tile_path(col, row, directory)\n\n # load tile\n tiles_row.append(self.load_single_tile(current_tile_path_name))\n\n self.tiles.append(tiles_row)\n\n\ndef run_save_arrays(future_result, file_name, tag=None, descriptor=None):\n \"\"\"\n Save future when arrived\n\n :param future_result: xarray.Dataset received\n :type future_result: xarray.Dataset\n :param file_name: filename to save data to\n :type file_name: str\n :param tag: dataset tag to rasterize\n :type tag: str\n :param descriptor: rasterio descriptor\n \"\"\"\n # write future result using saved window and overlaps\n\n save_dataset(\n future_result,\n file_name,\n tag,\n use_windows_and_overlaps=True,\n descriptor=descriptor,\n )\n\n\ndef run_save_points(\n future_result, file_name, overwrite=False, save_points_cloud_by_pair=False\n):\n \"\"\"\n Save future result when arrived\n\n :param future_result: pandas Dataframe received\n :type future_result: pandas Dataframe\n :param file_name: filename to save data to\n :type file_name: str\n :param overwrite: overwrite file\n :type overwrite: bool\n\n \"\"\"\n\n if overwrite:\n # remove pickle file if already exists\n if os.path.exists(file_name):\n os.remove(file_name)\n # Save\n save_all_dataframe(\n future_result,\n file_name,\n save_points_cloud_by_pair=save_points_cloud_by_pair,\n overwrite=False,\n )\n\n\ndef load_single_tile_array(tile_path_name: str) -> xr.Dataset:\n \"\"\"\n Load a xarray tile\n\n :param tile_path_name: Path of tile to load\n :type tile_path_name: str\n\n :return: tile dataset\n :rtype: xr.Dataset\n\n \"\"\"\n\n # get dataset\n dataset_file_name = os.path.join(tile_path_name, DATASET_FILE)\n if not os.path.exists(dataset_file_name):\n logging.error(\"Tile {} does not exists\".format(dataset_file_name))\n return None\n with open(dataset_file_name, \"rb\") as handle:\n dataset = pickle.load(handle)\n\n # get attributes\n attributes_file_name = os.path.join(tile_path_name, ATTRIBUTE_FILE)\n attributes = load_dict(attributes_file_name)\n\n # Format transformation\n if PROFILE in attributes:\n attributes[PROFILE] = dict_profile_to_rio_profile(attributes[PROFILE])\n\n # add to dataset\n dataset.attrs.update(attributes)\n\n return dataset\n\n\ndef load_single_tile_points(tile_path_name: str):\n \"\"\"\n Load a panda dataframe\n\n :param tile_path_name: Path of tile to load\n :type tile_path_name: str\n\n :return: Tile dataframe\n :rtype: Panda dataframe\n\n \"\"\"\n\n # get dataframe\n dataframe_file_name = os.path.join(tile_path_name, DATAFRAME_FILE)\n\n if not os.path.exists(dataframe_file_name):\n logging.error(\"Tile {} does not exists\".format(dataframe_file_name))\n return None\n\n with 
open(dataframe_file_name, \"rb\") as handle:\n dataframe = pickle.load(handle)\n\n # get attributes\n attributes_file_name = os.path.join(tile_path_name, ATTRIBUTE_FILE)\n attributes = load_dict(attributes_file_name)\n\n # Format transformation\n\n # add to dataframe\n dataframe.attrs.update(attributes)\n\n return dataframe\n\n\ndef load_single_tile_dict(tile_path_name: str):\n \"\"\"\n Load a CarsDict\n\n :param tile_path_name: Path of tile to load\n :type tile_path_name: str\n\n :return: Tile dataframe\n :rtype: Panda dataframe\n\n \"\"\"\n\n # get dataframe\n dict_file_name = os.path.join(tile_path_name, CARSDICT_FILE)\n with open(dict_file_name, \"rb\") as handle:\n dict_cars = pickle.load(handle)\n\n # get attributes\n attributes_file_name = os.path.join(tile_path_name, ATTRIBUTE_FILE)\n attributes = load_dict(attributes_file_name)\n\n # Format transformation\n\n # add to dataframe\n dict_cars.attrs.update(attributes)\n\n return dict_cars\n\n\ndef save_single_tile_array(dataset: xr.Dataset, tile_path_name: str):\n \"\"\"\n Save xarray to directory, saving the data in a different file that\n the attributes (saved in a .json next to it).\n\n :param dataset: dataset to save\n :type dataset: xr.Dataset\n :param tile_path_name: Path of file to save in\n :type tile_path_name: str\n \"\"\"\n\n if dataset is None:\n logging.debug(\"Tile is None: not saved\")\n return\n\n # Create tile folder\n safe_makedirs(tile_path_name)\n\n # save attributes\n saved_dataset_attrs = copy.copy(dataset.attrs)\n attributes_file_name = os.path.join(tile_path_name, ATTRIBUTE_FILE)\n if dataset.attrs is None:\n attributes = {}\n else:\n attributes = dataset.attrs\n\n # Format transformation\n if PROFILE in attributes:\n attributes[PROFILE] = rio_profile_to_dict_profile(attributes[PROFILE])\n\n # dump\n # separate attributes\n dataset.attrs, custom_attributes = separate_dicts(\n attributes, [PROFILE, WINDOW, OVERLAPS, SAVING_INFO, ATTRIBUTES]\n )\n # save\n save_dict(custom_attributes, attributes_file_name)\n dataset_file_name = os.path.join(tile_path_name, DATASET_FILE)\n with open(dataset_file_name, \"wb\") as handle:\n pickle.dump(dataset, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n # Retrieve attrs\n dataset.attrs = saved_dataset_attrs\n\n\ndef save_single_tile_points(dataframe, tile_path_name: str):\n \"\"\"\n Save dataFrame to directory, saving the data in a different file that\n the attributes (saved in a .json next to it).\n\n :param dataframe: dataframe to save\n :type dataframe: pd.DataFrame\n :param tile_path_name: Path of file to save in\n :type tile_path_name: str\n \"\"\"\n if dataframe is None:\n logging.debug(\"Tile is None: not saved\")\n return\n # Create tile folder\n safe_makedirs(tile_path_name)\n\n # save attributes\n saved_dataframe_attrs = copy.copy(dataframe.attrs)\n attributes_file_name = os.path.join(tile_path_name, ATTRIBUTE_FILE)\n if dataframe.attrs is None:\n attributes = {}\n else:\n attributes = dataframe.attrs\n\n # Format transformation\n\n # dump\n # separate attributes\n dataframe.attrs, custom_attributes = separate_dicts(\n attributes, [SAVING_INFO, ATTRIBUTES]\n )\n # save\n save_dict(custom_attributes, attributes_file_name)\n dataframe_file_name = os.path.join(tile_path_name, DATAFRAME_FILE)\n with open(dataframe_file_name, \"wb\") as handle:\n pickle.dump(dataframe, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n # Retrieve attrs\n dataframe.attrs = saved_dataframe_attrs\n\n\ndef save_single_tile_dict(dict_cars, tile_path_name: str):\n \"\"\"\n Save cars_dict to directory, 
saving the data in a different file that\n the attributes (saved in a .json next to it).\n\n :param dict_cars: dataframe to save\n :type dict_cars: pd.DataFrame\n :param tile_path_name: Path of file to save in\n :type tile_path_name: str\n \"\"\"\n # Create tile folder\n safe_makedirs(tile_path_name)\n\n # save attributes\n saved_dict_cars_attrs = copy.copy(dict_cars.attrs)\n attributes_file_name = os.path.join(tile_path_name, ATTRIBUTE_FILE)\n if dict_cars.attrs is None:\n attributes = {}\n else:\n attributes = dict_cars.attrs\n\n # Format transformation\n\n # dump\n # separate attributes\n dict_cars.attrs, custom_attributes = separate_dicts(\n attributes, [SAVING_INFO, ATTRIBUTES]\n )\n # save\n save_dict(custom_attributes, attributes_file_name)\n dict_cars_file_name = os.path.join(tile_path_name, CARSDICT_FILE)\n with open(dict_cars_file_name, \"wb\") as handle:\n pickle.dump(dict_cars, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n # Retrieve attrs\n dict_cars.attrs = saved_dict_cars_attrs\n\n\ndef fill_dataset(\n dataset,\n saving_info=None,\n window=None,\n profile=None,\n attributes=None,\n overlaps=None,\n):\n \"\"\"\n From a full xarray dataset, fill info properly.\n User can fill with saving information (containing CarsDataset id),\n window of current tile and its overlaps,\n rasterio profile of full data, and attributes associated to data\n\n :param dataset: dataset to fill\n :type dataset: xarray_dataset\n :param saving_info: created by Orchestrator.get_saving_infos\n :type saving_info: dict\n :param window:\n :type window: dict\n :param profile:\n :type profile: dict\n :param attributes:\n :type attributes: dict\n\n \"\"\"\n\n if attributes is not None:\n dataset.attrs[ATTRIBUTES] = attributes\n\n if saving_info is not None:\n dataset.attrs[SAVING_INFO] = saving_info\n\n if window is not None:\n dataset.attrs[WINDOW] = window\n\n if overlaps is not None:\n dataset.attrs[OVERLAPS] = overlaps\n\n if profile is not None:\n dataset.attrs[PROFILE] = profile\n\n\ndef fill_dataframe(dataframe, saving_info=None, attributes=None):\n \"\"\"\n From a full pandas dataframe, fill info properly.\n User can fill with saving information (containing CarsDataset id),\n and attributes associated to data\n\n\n :param dataframe: dataframe to fill\n :type dataframe: pandas dataframe\n :param saving_info: created by Orchestrator.get_saving_infos\n :type saving_info: dict\n :param attributes:\n :type attributes: dict\n\n \"\"\"\n\n if attributes is not None:\n dataframe.attrs[ATTRIBUTES] = attributes\n\n if saving_info is not None:\n dataframe.attrs[SAVING_INFO] = saving_info\n\n\ndef fill_dict(data_dict, saving_info=None, attributes=None):\n \"\"\"\n From a fulldict, fill info properly.\n User can fill with saving information (containing CarsDataset id),\n and attributes associated to data\n\n\n :param data_dict: dictionnary to fill\n :type data_dict: Dict\n :param saving_info: created by Orchestrator.get_saving_infos\n :type saving_info: dict\n :param attributes:\n :type attributes: dict\n\n \"\"\"\n\n # TODO only use CarsDict\n\n if isinstance(data_dict, dict):\n if attributes is not None:\n data_dict[ATTRIBUTES] = attributes\n\n if saving_info is not None:\n data_dict[SAVING_INFO] = saving_info\n\n elif isinstance(data_dict, cars_dict.CarsDict):\n if attributes is not None:\n data_dict.attrs[ATTRIBUTES] = attributes\n\n if saving_info is not None:\n data_dict.attrs[SAVING_INFO] = saving_info\n\n\ndef save_all_dataframe(\n dataframe, file_name, save_points_cloud_by_pair=False, overwrite=True\n):\n 
\"\"\"\n Save DataFrame to csv and laz format. The content of dataframe is merged to\n the content of existing saved Dataframe, if overwrite==False\n The option save_points_cloud_by_pair separate the dataframe\n by pair\n :param file_name: file name to save data to\n :type file_name: str\n :param overwrite: overwrite file if exists\n :type overwrite: bool\n\n \"\"\"\n # generate filename if attributes have xstart and ystart settings\n if (\n \"attributes\" in dataframe.attrs\n and \"xmin\" in dataframe.attrs[\"attributes\"]\n ):\n file_name = os.path.dirname(file_name)\n file_name = os.path.join(\n file_name,\n (\n str(dataframe.attrs[\"attributes\"][\"xmin\"])\n + \"_\"\n + str(dataframe.attrs[\"attributes\"][\"ymax\"])\n ),\n )\n elif \"saving_info\" in dataframe.attrs:\n file_name = os.path.dirname(file_name)\n file_name = os.path.join(\n file_name,\n (\n str(dataframe.attrs[\"saving_info\"][\"cars_ds_col\"])\n + \"_\"\n + str(dataframe.attrs[\"saving_info\"][\"cars_ds_row\"])\n ),\n )\n\n if not save_points_cloud_by_pair:\n save_dataframe(dataframe, file_name, overwrite)\n else:\n pairing_indexes = set(np.array(dataframe[\"global_id\"]).flat)\n source_pc_names = dataframe.attrs[\"attributes\"][\"source_pc_names\"]\n\n for pair_index in pairing_indexes:\n points_indexes = dataframe[\"global_id\"] == pair_index\n file_name_by_pair = (\n file_name + \"_\" + source_pc_names[int(pair_index)]\n )\n save_dataframe(\n dataframe.loc[points_indexes],\n file_name_by_pair,\n overwrite,\n )\n\n\ndef save_dataframe(dataframe, file_name, overwrite=True):\n \"\"\"\n Save dataframe (csv, laz, attr file)\n \"\"\"\n # Save attributes\n attributes_file_name = file_name + \"_attrs.json\"\n save_dict(dataframe.attrs, attributes_file_name)\n\n # Save point cloud to laz format\n if (\n \"attributes\" in dataframe.attrs\n and \"save_points_cloud_as_laz\" in dataframe.attrs[\"attributes\"]\n ):\n if dataframe.attrs[\"attributes\"][\"save_points_cloud_as_laz\"]:\n las_file_name = file_name + \".laz\"\n dataframe_converter.convert_pcl_to_laz(dataframe, las_file_name)\n\n # Save panda dataframe to csv\n if (\n (\n \"attributes\" in dataframe.attrs\n and \"save_points_cloud_as_csv\" in dataframe.attrs[\"attributes\"]\n and dataframe.attrs[\"attributes\"][\"save_points_cloud_as_csv\"]\n )\n or \"attributes\" not in dataframe.attrs\n or \"save_points_cloud_as_csv\" not in dataframe.attrs[\"attributes\"]\n ):\n _, extension = os.path.splitext(file_name)\n if \"csv\" not in extension:\n file_name = file_name + \".csv\"\n if overwrite and os.path.exists(file_name):\n dataframe.to_csv(file_name, index=False)\n else:\n if os.path.exists(file_name):\n # merge files\n existing_dataframe = pandas.read_csv(file_name)\n merged_dataframe = pandas.concat(\n [existing_dataframe, dataframe],\n ignore_index=True,\n sort=False,\n )\n merged_dataframe.to_csv(file_name, index=False)\n else:\n dataframe.to_csv(file_name, index=False)\n\n\ndef save_dataset(\n dataset, file_name, tag, use_windows_and_overlaps=False, descriptor=None\n):\n \"\"\"\n Reconstruct and save data.\n In order to save properly the dataset to corresponding tiff file,\n dataset must have been filled with saving info, profile, window,\n overlaps (if not 0), and rasterio descriptor if already created.\n See fill_dataset.\n\n :param dataset: dataset to save\n :type dataset: xr.Dataset\n :param file_name: file name to save data to\n :type file_name: str\n :param tag: tag to reconstruct\n :type tag: str\n :param use_windows_and_overlaps: use saved window and overlaps\n 
:type use_windows_and_overlaps: bool\n :param descriptor: descriptor to use with rasterio\n :type descriptor: rasterio dataset\n\n \"\"\"\n if dataset is None:\n logging.error(\"Tile is None: not saved \")\n return\n\n overlaps = get_overlaps_dataset(dataset)\n window = get_window_dataset(dataset)\n\n rio_window = None\n overlap = [0, 0, 0, 0]\n if use_windows_and_overlaps:\n if window is None:\n logging.debug(\"User wants to use window but none was set\")\n\n else:\n rio_window = generate_rasterio_window(window)\n\n if overlaps is not None:\n overlap = [\n overlaps[\"up\"],\n overlaps[\"down\"],\n overlaps[\"left\"],\n overlaps[\"right\"],\n ]\n if len(dataset[tag].values.shape) > 2:\n nb_rows, nb_cols = (\n dataset[tag].values.shape[1],\n dataset[tag].values.shape[2],\n )\n\n data = dataset[tag].values[\n :,\n overlap[0] : nb_rows - overlap[1],\n overlap[2] : nb_cols - overlap[3],\n ]\n else:\n nb_rows, nb_cols = (\n dataset[tag].values.shape[0],\n dataset[tag].values.shape[1],\n )\n\n data = dataset[tag].values[\n overlap[0] : nb_rows - overlap[1],\n overlap[2] : nb_cols - overlap[3],\n ]\n\n if tag == cst.EPI_COLOR and \"int\" in descriptor.dtypes[0]:\n # Prepare color data for cast\n data = np.nan_to_num(data, nan=descriptor.nodata)\n data = np.round(data)\n\n profile = get_profile_for_tag_dataset(dataset, tag)\n\n new_profile = profile\n if \"width\" not in new_profile or \"height\" not in new_profile:\n logging.debug(\"CarsDataset doesn't have a profile, default is given\")\n new_profile = DefaultGTiffProfile(count=new_profile[\"count\"])\n new_profile[\"height\"] = data.shape[0]\n new_profile[\"width\"] = data.shape[1]\n new_profile[\"dtype\"] = \"float32\"\n\n bands_description = None\n if tag in (cst.EPI_CLASSIFICATION, cst.RASTER_CLASSIF):\n bands_description = dataset.coords[cst.BAND_CLASSIF].values\n if tag in (cst.EPI_COLOR, cst.POINTS_CLOUD_CLR_KEY_ROOT):\n bands_description = dataset.coords[cst.BAND_IM].values\n if tag == cst.RASTER_SOURCE_PC:\n bands_description = dataset.coords[cst.BAND_SOURCE_PC].values\n if tag in (cst.EPI_FILLING, cst.RASTER_FILLING):\n bands_description = dataset.coords[cst.BAND_FILLING].values\n\n outputs.rasterio_write_georaster(\n file_name,\n data,\n new_profile,\n window=rio_window,\n descriptor=descriptor,\n bands_description=bands_description,\n )\n\n\ndef create_tile_path(col: int, row: int, directory: str) -> str:\n \"\"\"\n Create path of tile, according to its position in CarsDataset grid\n\n :param col: numero of column\n :type col: int\n :param row: numero of row\n :type row: int\n :param directory: path where to save tile\n :type directory: str\n\n :return: full path\n :rtype: str\n\n \"\"\"\n\n tail = \"col_\" + repr(col) + \"_row_\" + repr(row)\n name = os.path.join(directory, tail)\n\n return name\n\n\ndef save_numpy_array(array: np.ndarray, file_name: str):\n \"\"\"\n Save numpy array to file\n\n :param array: array to save\n :type array: np.ndarray\n :param file_name: numero of row\n :type file_name: str\n\n \"\"\"\n\n with open(file_name, \"wb\") as descriptor:\n np.save(descriptor, array)\n\n\ndef load_numpy_array(file_name: str) -> np.ndarray:\n \"\"\"\n Load numpy array from file\n\n :param file_name: numero of row\n :type file_name: str\n\n :return: array\n :rtype: np.ndarray\n\n \"\"\"\n with open(file_name, \"rb\") as descriptor:\n return np.load(descriptor)\n\n\ndef create_none(nb_row: int, nb_col: int):\n \"\"\"\n Create a grid filled with None. 
The created grid is a 2D list:\n    ex: [[None, None], [None, None]]\n\n    :param nb_row: number of rows\n    :param nb_col: number of cols\n    :return: Grid filled with None\n    :rtype: list of list\n    \"\"\"\n    grid = []\n    for _ in range(nb_row):\n        tmp = []\n        for _ in range(nb_col):\n            tmp.append(None)\n        grid.append(tmp)\n    return grid\n\n\ndef overlap_array_to_dict(overlap):\n    \"\"\"\n    Convert list of overlaps to the dict format used in CarsDatasets.\n    Input is : [o_up, o_down, o_left, o_right].\n    Output is : {\"up\": o_up, \"down\": o_down, \"left\": o_left, \"right\": o_right}\n\n    :param overlap: overlaps\n    :type overlap: List\n\n    :return: New overlaps\n    :rtype: Dict\n\n    \"\"\"\n    new_overlap = {\n        \"up\": int(overlap[0]),\n        \"down\": int(overlap[1]),\n        \"left\": int(overlap[2]),\n        \"right\": int(overlap[3]),\n    }\n    return new_overlap\n\n\ndef window_array_to_dict(window, overlap=None):\n    \"\"\"\n    Convert list of window bounds to the dict format used in CarsDatasets.\n    Use overlaps if you want to get the window with overlaps.\n    inputs are :\n\n    - window : [row_min, row_max, col_min, col_max], with pixel format\n    - overlap (optional): [o_row_min, o_row_max, o_col_min, o_col_max]\n\n    outputs are :\n    {\n        \"row_min\" : row_min - o_row_min,\n        \"row_max\" : row_max + o_row_max,\n        \"col_min\" : col_min - o_col_min,\n        \"col_max\" : col_max + o_col_max,\n\n    }\n\n    :param window: window\n    :type window: List\n    :param overlap: overlaps\n    :type overlap: List\n\n    :return: New window\n    :rtype: Dict\n\n    \"\"\"\n\n    new_window = {\n        \"row_min\": int(window[0]),\n        \"row_max\": int(window[1]),\n        \"col_min\": int(window[2]),\n        \"col_max\": int(window[3]),\n    }\n\n    if overlap is not None:\n        new_window[\"row_min\"] -= int(overlap[0])\n        new_window[\"row_max\"] += int(overlap[1])\n        new_window[\"col_min\"] -= int(overlap[2])\n        new_window[\"col_max\"] += int(overlap[3])\n\n    return new_window\n\n\ndef dict_profile_to_rio_profile(dict_profile: Dict) -> Dict:\n    \"\"\"\n    Transform a rasterio profile previously converted to a serializable Dict\n    back into a rasterio profile.\n\n    :param dict_profile: rasterio Profile as serializable Dict\n    :type dict_profile: Dict\n\n    :return: Profile\n    :rtype: Rasterio Profile\n\n    \"\"\"\n\n    rio_profile = copy.copy(dict_profile)\n\n    transform = None\n    if \"transform\" in dict_profile:\n        if dict_profile[\"transform\"] is not None:\n            transform = rio.Affine(\n                dict_profile[\"transform\"][0],\n                dict_profile[\"transform\"][1],\n                dict_profile[\"transform\"][2],\n                dict_profile[\"transform\"][3],\n                dict_profile[\"transform\"][4],\n                dict_profile[\"transform\"][5],\n            )\n    crs = None\n    if \"crs\" in dict_profile:\n        if dict_profile[\"crs\"] is not None:\n            if isinstance(dict_profile[\"crs\"], str):\n                crs = rio.crs.CRS.from_epsg(\n                    dict_profile[\"crs\"].replace(\"EPSG:\", \"\")\n                )\n            else:\n                crs = rio.crs.CRS.from_epsg(dict_profile[\"crs\"])\n\n    rio_profile[\"crs\"] = crs\n    rio_profile[\"transform\"] = transform\n\n    return rio_profile\n\n\ndef rio_profile_to_dict_profile(in_profile: Dict) -> Dict:\n    \"\"\"\n    Transform a rasterio profile into a serializable Dict.\n\n    :param in_profile: rasterio profile to convert\n    :type in_profile: Rasterio Profile\n\n    :return: Profile\n    :rtype: Dict\n\n    \"\"\"\n\n    profile = copy.copy(in_profile)\n\n    profile = {**profile}\n    crs = None\n    if \"crs\" in profile:\n        if profile[\"crs\"] is not None:\n            if isinstance(profile[\"crs\"], str):\n                crs = profile[\"crs\"]\n            else:\n                crs = profile[\"crs\"].to_epsg()\n\n    transform = None\n    if \"transform\" in profile:\n        if profile[\"transform\"] is not None:\n            
transform = list(profile[\"transform\"])[:6]\n\n    profile.update(crs=crs, transform=transform)\n\n    return profile\n\n\ndef save_dict(dictionary, file_path: str, safe_save=False):\n    \"\"\"\n    Save dict to json file\n\n    :param dictionary: dictionary to save\n    :type dictionary: Dict\n    :param file_path: file path to use\n    :type file_path: str\n    :param safe_save: if True, be robust to types\n    :type safe_save: bool\n\n    \"\"\"\n\n    class CustomEncoder(json.JSONEncoder):\n        \"\"\"\n        Custom json encoder\n\n        \"\"\"\n\n        def default(self, o):\n            \"\"\"\n            Converter\n            \"\"\"\n            if isinstance(o, np.integer):\n                return int(o)\n            if isinstance(o, np.floating):\n                return float(o)\n            if isinstance(o, np.ndarray):\n                return o.tolist()\n            return json.JSONEncoder.default(self, o)\n\n    if safe_save:\n        with open(file_path, \"w\", encoding=\"utf8\") as fstream:\n            json.dump(dictionary, fstream, indent=2, cls=CustomEncoder)\n    else:\n        with open(file_path, \"w\", encoding=\"utf8\") as fstream:\n            json.dump(dictionary, fstream, indent=2)\n\n\ndef load_dict(file_path: str) -> Dict:\n    \"\"\"\n    Load dict from json file\n\n    :param file_path: file path to use\n    :type file_path: str\n\n    :return: loaded dictionary\n    :rtype: Dict\n\n    \"\"\"\n\n    with open(file_path, \"r\", encoding=\"utf8\") as fstream:\n        dictionary = json.load(fstream)\n\n    return dictionary\n\n\ndef separate_dicts(dictionary, list_tags):\n    \"\"\"\n    Separate a dict into two, the second one containing the given tags.\n\n    For example, {key1: val1, key2: val2, key3: val3}\n    with list_tags = [key2] will be split into:\n    {key1: val1, key3: val3} and {key2: val2}\n\n    \"\"\"\n\n    dict1 = {}\n    dict2 = {}\n\n    for key in dictionary:\n        if key in list_tags:\n            dict2[key] = dictionary[key]\n        else:\n            dict1[key] = dictionary[key]\n\n    return dict1, dict2\n\n\ndef get_attributes_dataframe(dataframe):\n    \"\"\"\n    Get attributes field in .attrs of dataframe\n\n    :param dataframe: dataframe\n    :type dataframe: pandas dataframe\n    \"\"\"\n\n    return dataframe.attrs.get(ATTRIBUTES, None)\n\n\ndef get_window_dataset(dataset):\n    \"\"\"\n    Get window in dataset\n\n    :param dataset: dataset\n    :type dataset: xr.Dataset\n    \"\"\"\n\n    return dataset.attrs.get(WINDOW, None)\n\n\ndef get_overlaps_dataset(dataset):\n    \"\"\"\n    Get overlaps in dataset\n\n    :param dataset: dataset\n    :type dataset: xr.Dataset\n    \"\"\"\n\n    return dataset.attrs.get(OVERLAPS, None)\n\n\ndef get_profile_rasterio(dataset):\n    \"\"\"\n    Get profile in dataset\n\n    :param dataset: dataset\n    :type dataset: xr.Dataset\n    \"\"\"\n\n    return dataset.attrs.get(PROFILE, None)\n\n\ndef get_attributes(dataset):\n    \"\"\"\n    Get attributes in dataset\n\n    :param dataset: dataset\n    :type dataset: xr.Dataset\n    \"\"\"\n\n    return dataset.attrs.get(ATTRIBUTES, None)\n\n\ndef get_profile_for_tag_dataset(dataset, tag: str) -> Dict:\n    \"\"\"\n    Get profile according to layer to save.\n    This function modifies the current rasterio profile to fix the number of\n    bands of the data associated with the given tag.\n\n    :param dataset: dataset to use\n    :type dataset: xr.Dataset\n    :param tag: tag to use\n    :type tag: str\n\n    :return: Profile\n    :rtype: Rasterio Profile\n\n    \"\"\"\n\n    new_profile = get_profile_rasterio(dataset)\n    if new_profile is None:\n        new_profile = {}\n\n    new_profile[\"count\"] = 1\n    if len(dataset[tag].values.shape) > 2:\n        new_profile[\"count\"] = dataset[tag].values.shape[0]\n\n    return new_profile\n\n\ndef generate_rasterio_window(window: Dict) -> rio.windows.Window:\n    \"\"\"\n    Generate rasterio window to use.\n\n    :param window: window to convert, containing 'row_min',\n        'row_max', 'col_min', 'col_max'\n    :type window: dict\n\n    :return: rasterio window\n    :rtype: 
rio.windows.Window\n\n \"\"\"\n returned_window = None\n\n if window is not None:\n return Window.from_slices(\n (window[\"row_min\"], window[\"row_max\"]),\n (window[\"col_min\"], window[\"col_max\"]),\n )\n\n return returned_window\n","repo_name":"CNES/cars","sub_path":"cars/data_structures/cars_dataset.py","file_name":"cars_dataset.py","file_ext":"py","file_size_in_byte":41511,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"61"} +{"seq_id":"24949496101","text":"import discord\nimport asyncio\nimport requests\nimport json\nfrom discord.ext import commands\n\nclass Trebek(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def trebek(self, ctx):\n api = 'https://jservice.io/api/random'\n response = requests.get(api).json()\n question = response[0]['question']\n answer = response[0]['answer']\n\n await ctx.send(question)\n def is_correct(m):\n return m.author == ctx.message.author and m.channel == ctx.message.channel\n\n try:\n guess = await self.bot.wait_for('message', check=is_correct, timeout=30.0)\n if guess.content == answer:\n await ctx.channel.send('You are right')\n else:\n await ctx.channel.send('Oops. It was {}.'.format(answer))\n except asyncio.TimeoutError:\n return await ctx.channel.send('Sorry, you took too long it was {}.'.format(answer))\n\n","repo_name":"lucas-hopkins/discord_bot","sub_path":"trebek.py","file_name":"trebek.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22935630233","text":"import json\nimport os\nimport pickle\nimport torch\nimport torch.utils.data as tdata\nfrom torchvision import transforms\nimport skimage.io as io\nfrom PIL import Image\nfrom pycocotools.coco import COCO\nfrom coco_caption.pycocoevalcap.eval import COCOEvalCap\nimport nltk\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef coco_eval(results,eval_caption_path):\n eval_json_output_dir = './coco_caption/results/'\n os.makedirs(eval_json_output_dir,exist_ok=True)\n resFile = eval_json_output_dir + 'captions-generate.json'\n json.dump(results,open(resFile,'w',encoding='utf-8'))\n\n annFile = eval_caption_path\n coco = COCO(annFile)\n cocoRes = coco.loadRes(resFile)\n\n cocoEval = COCOEvalCap(coco,cocoRes)\n cocoEval.params['image_id'] = cocoRes.getImgIds()\n cocoEval.evaluate()\n\n cider = 0\n print('---------------Evaluation performance-----------------')\n for metric,score in cocoEval.eval.items():\n print('%s: %.3f'%(metric,score))\n if metric == 'CIDEr':\n cider = score\n return cider\n\ndef coco_eval_specific(results,eval_caption_path,entry_limit=500):\n eval_json_output_dir = './coco/results/'\n os.makedirs(eval_json_output_dir,exist_ok=True)\n resFile = eval_json_output_dir + 'captions-generate.json'\n json.dump(results,open(resFile,'w'))\n\n annFile = eval_caption_path\n coco = COCO(annFile)\n cocoRes = coco.loadRes(resFile)\n\n cocoEval = COCOEvalCap(coco,cocoRes)\n cocoEval.params['image_id'] = cocoRes.getImgIds()\n cocoEval.evaluate()\n\n ans = [{'img_id':eva['image_id'],'CIDEr':eva['CIDEr']} for eva in cocoEval.evalImgs]\n os.makedirs('./Data/Eval_Statics/',exist_ok=True)\n with open(\"./Data/Eval_Statics/CIDEr_Result.txt\",'w') as f:\n entry = \"img_id\" + \" \" + \"CIDEr\" + \"\\n\"\n f.writelines(entry)\n entry_num = 0\n for ans_entry in ans:\n entry = str(ans_entry['img_id']) + \" \" + str(np.round(ans_entry['CIDEr'],2)) + \"\\n\"\n f.writelines(entry)\n entry_num += 1\n if 
entry_num >= entry_limit: break\n        cider_list = [eva['CIDEr'] for eva in cocoEval.evalImgs]\n        cider_list_npy = np.array(cider_list)\n        indices = np.argsort(cider_list_npy)[::-1]\n        f.writelines('best samples:\\n')\n        for idx in indices[:50]:\n            entry = str(ans[idx]['img_id']) + \" \" + str(np.round(ans[idx]['CIDEr'],2)) + \"\\n\"\n            f.writelines(entry)\n        indices = indices[::-1]\n        f.writelines('worst samples:\\n')\n        for idx in indices[:50]:\n            entry = str(ans[idx]['img_id']) + \" \" + str(np.round(ans[idx]['CIDEr'],2)) + \"\\n\"\n            f.writelines(entry)\n\n    f.close()\n\n    ciderScores = [eva['CIDEr'] for eva in cocoEval.evalImgs]\n\n    x = plt.hist(ciderScores,bins=[0,1,2,3,4,5,6,7,8,9,10])\n    print(x)\n    plt.title('Histogram of CIDEr Scores', fontsize=20)\n    plt.xlabel('CIDEr score', fontsize=20)\n    plt.ylabel('result counts', fontsize=20)\n    plt.savefig('ciderHist.png',dpi=500)\n    plt.show()\n","repo_name":"zyj0021200/simpleImageCaptionZoo","sub_path":"COCO_Eval_Utils.py","file_name":"COCO_Eval_Utils.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"}
+{"seq_id":"41557284291","text":"import glob\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport tensorflow as tf\n\nfrom abc import ABC, abstractmethod\n\n\n#----------------------------------------------------------------------------------------------------------------------------------------------------\n\"\"\" ImgLoader class: data_generator method for use with tf.data.Dataset.from_generator \"\"\"\n\nclass BaseImgLoader(ABC):\n    def __init__(self, config: dict, dataset_type: str):\n        # Expects at least two sub-folders within data folder e.g. \"AC\", \"VC\", \"HQ\"\n        img_path = f\"{config['data_path']}/Images\"\n        seg_path = f\"{config['data_path']}/Segmentations\"\n        self.sub_folders = [f for f in os.listdir(img_path) if os.path.isdir(f\"{img_path}/{f}\")]\n        self.seg_folders = [f for f in os.listdir(seg_path) if os.path.isdir(f\"{seg_path}/{f}\")]\n\n        if len(self.sub_folders) == 0:\n            print(\"==================================================\")\n            print(\"Assuming unpaired dataset\")\n            self._img_paths = img_path\n            self._seg_paths = seg_path\n\n        else:\n            self._img_paths = {key: f\"{img_path}/{key}\" for key in self.sub_folders}\n            self._seg_paths = {key: f\"{seg_path}/{key}\" for key in self.seg_folders}\n\n        self._dataset_type = dataset_type\n        self.config = config\n        self.down_sample = config[\"down_sample\"]\n\n        if config[\"times\"] is not None:\n            self._json = json.load(open(f\"{config['data_path']}/{config['times']}\", 'r'))\n\n        else:\n            self._json = None\n\n    def example_images(self):\n        if len(self._ex_segs) > 0:\n            return self._normalise(self._ex_sources), self._normalise(self._ex_targets), self._ex_segs\n\n        else:\n            return self._normalise(self._ex_sources), self._normalise(self._ex_targets)\n\n    def train_val_split(self, seed: int = 5) -> None:\n        # Get unique subject IDs for subject-level train/val split\n        self._unique_ids = []\n\n        for img_id in self._targets:\n            if img_id[0:4] not in self._unique_ids:\n                self._unique_ids.append(img_id[0:4])\n\n        self._unique_ids.sort()\n\n        # Need procedure IDs (as poss. 
>1 per subject) to build ordered index of subjects' images\n        self._subject_imgs = {}\n\n        for img_id in self._targets + self._sources:\n            if img_id[0:6] not in self._subject_imgs.keys():\n                self._subject_imgs[img_id[0:6]] = []\n            if img_id[:-8] not in self._subject_imgs[img_id[0:6]]:\n                self._subject_imgs[img_id[0:6]].append(img_id[:-8])\n\n        for key in self._subject_imgs.keys():\n            self._subject_imgs[key] = sorted(self._subject_imgs[key], key=lambda x: int(x[-3:]))\n\n        if self.config[\"fold\"] > self.config[\"cv_folds\"] - 1:\n            raise ValueError(f\"Fold number {self.config['fold']} of {self.config['cv_folds']} folds\")\n\n        np.random.seed(seed)\n        N = len(self._unique_ids)\n        np.random.shuffle(self._unique_ids)\n\n        # Split into folds by subject\n        if self.config[\"cv_folds\"] > 1:\n            if seed is None:\n                self._unique_ids.sort()\n\n            num_in_fold = N // self.config[\"cv_folds\"]\n\n            if self._dataset_type == \"training\":\n                fold_ids = self._unique_ids[0:self.config[\"fold\"] * num_in_fold] + self._unique_ids[(self.config[\"fold\"] + 1) * num_in_fold:]\n            elif self._dataset_type == \"validation\":\n                fold_ids = self._unique_ids[self.config[\"fold\"] * num_in_fold:(self.config[\"fold\"] + 1) * num_in_fold]\n            else:\n                raise ValueError(\"Select 'training' or 'validation'\")\n\n            self._fold_targets = sorted([img for img in self._targets if img[0:4] in fold_ids])\n            self._fold_sources = sorted([img for img in self._sources if img[0:4] in fold_ids])\n            self._fold_segs = sorted([seg for seg in self._segs if seg[0:4] in fold_ids])\n\n        elif self.config[\"cv_folds\"] == 1:\n            self._fold_targets = self._targets\n            self._fold_sources = self._sources\n            self._fold_segs = self._segs\n\n        else:\n            raise ValueError(\"Number of folds must be > 0\")\n\n        example_idx = np.random.randint(0, len(self._fold_sources), self.config[\"num_examples\"])\n        ex_sources_list = list(np.array([self._fold_sources]).squeeze()[example_idx])\n\n        if len(self.sub_folders) == 0:\n            ex_targets_list = [np.random.choice([t[0:11] + s[-8:] for t in self._fold_targets if s[0:6] in t and t[0:11] + s[-8:] not in s]) for s in ex_sources_list]\n            self._ex_sources = np.stack([np.load(f\"{self._img_paths}/{img}\") for img in ex_sources_list], axis=0)\n            self._ex_targets = np.stack([np.load(f\"{self._img_paths}/{img}\") for img in ex_targets_list], axis=0)\n        else:\n            ex_targets_list = list(np.array([self._fold_targets]).squeeze()[example_idx])\n            self._ex_sources = np.stack([np.load(f\"{self._img_paths[img[6:8]]}/{img}\") for img in ex_sources_list], axis=0)\n            self._ex_targets = np.stack([np.load(f\"{self._img_paths[img[6:8]]}/{img}\") for img in ex_targets_list], axis=0)\n\n        self._ex_sources = self._ex_sources[:, ::self.down_sample, ::self.down_sample, :, np.newaxis].astype(np.float32)\n        self._ex_targets = self._ex_targets[:, ::self.down_sample, ::self.down_sample, :, np.newaxis].astype(np.float32)\n\n        if len(self.config[\"segs\"]) > 0 and len(self.sub_folders) == 0:\n            candidate_segs = [glob.glob(f\"{self._seg_paths}/{img[0:6]}AC*{img[-8:]}\")[0] for img in ex_targets_list]\n            self._ex_segs = np.stack([np.load(seg) for seg in candidate_segs], axis=0)\n            self._ex_segs = self._ex_segs[:, ::self.down_sample, ::self.down_sample, :, np.newaxis].astype(np.float32)\n\n        elif len(self.config[\"segs\"]) > 0 and len(self.sub_folders) > 0:\n            self._ex_segs = np.stack([np.load(f\"{self._seg_paths[img[6:8]]}/{img}\") for img in ex_targets_list], axis=0)\n            self._ex_segs = self._ex_segs[:, ::self.down_sample, ::self.down_sample, :, 
np.newaxis].astype(np.float32)\n\n        else:\n            self._ex_segs = []\n\n        np.random.seed()\n\n        print(f\"{len(self._fold_targets)} of {len(self._targets)} examples in {self._dataset_type} folds\")\n\n    @property\n    def unique_ids(self) -> list:\n        return self._unique_ids\n\n    @property\n    def data(self) -> dict:\n        \"\"\" Return list of all images \"\"\"\n        return {\"targets\": self._targets, \"sources\": self._sources}\n\n    @property\n    def fold_data(self) -> dict:\n        \"\"\" Return list of all images in training or validation fold \"\"\"\n        return {\"targets\": self._fold_targets, \"sources\": self._fold_sources}\n\n    @property\n    def subject_imgs(self):\n        raise NotImplementedError\n\n    def set_normalisation(self, param_1: float = None, param_2: float = None):\n        # Mean -281.528, std = 261.552\n        # Min -500, max = 22451\n        self.norm_type = self.config[\"norm_type\"]\n\n        # Override if custom parameters passed\n        if param_1 is not None and param_2 is not None:\n            self.param_1 = param_1\n            self.param_2 = param_2\n\n        # Otherwise, use parameters provided in config yaml\n        elif self.config[\"norm_param_1\"] is not None and self.config[\"norm_param_2\"] is not None:\n            self.param_1 = self.config[\"norm_param_1\"]\n            self.param_2 = self.config[\"norm_param_2\"]\n\n        # Otherwise, calculate parameters\n        else:\n            # If mean and std of data not available, we get rolling averages\n            if self.norm_type == \"meanstd\" or self.norm_type == \"std\":\n                mean = 0\n                std = 0\n\n                for img in self._targets + self._sources:\n                    im = np.load(f\"{self._img_paths[img[6:8]]}/{img}\")\n                    mean = 0.99 * mean + 0.01 * im.mean()\n                    std = 0.99 * std + 0.01 * im.std()\n\n                self.param_1 = mean\n                self.param_2 = std\n\n            # If min and max not available, we get min and max of whole dataset\n            elif self.norm_type == \"minmax\":\n                min_val = 2048\n                max_val = -2048\n\n                for img in self._targets + self._sources:\n                    im = np.load(f\"{self._img_paths[img[6:8]]}/{img}\")\n                    min_val = np.min([min_val, im.min()])\n                    max_val = np.max([max_val, im.max()])\n\n                self.param_1 = min_val\n                self.param_2 = max_val\n\n            else:\n                raise ValueError(\"Choose meanstd, std or minmax\")\n\n        print(\"==================================================\")\n        print(f\"{self.norm_type} normalisation: mean/min {self.param_1}, std/max {self.param_2}\")\n\n        return self.param_1, self.param_2\n\n    @property\n    def norm_params(self):\n        \"\"\" Return mean/std or min/max parameters \"\"\"\n        return (self.param_1, self.param_2)\n\n    @abstractmethod\n    def img_pairer(self):\n        raise NotImplementedError\n\n    def _normalise(self, img):\n        if self.norm_type == \"meanstd\":\n            return (img - self.param_1) / self.param_2\n        elif self.norm_type == \"std\":\n            return img / self.param_2\n        else:\n            return (img - self.param_1) / (self.param_2 - self.param_1)\n\n    def un_normalise(self, img):\n        if self.norm_type == \"meanstd\":\n            return img * self.param_2 + self.param_1\n        elif self.norm_type == \"std\":\n            return img * self.param_2\n        else:\n            return img * (self.param_2 - self.param_1) + self.param_1\n\n    def data_generator(self):\n        if self._dataset_type == \"training\":\n            np.random.shuffle(self._fold_sources)\n\n        N = len(self._fold_sources)\n        i = 0\n\n        # Pair source and target images\n        while i < N:\n            source_name = self._fold_sources[i]\n            names = self.img_pairer(source_name)\n            target_name = names[\"target\"]\n            source_name = names[\"source\"]\n\n            if len(self.sub_folders) == 0:\n                target = np.load(f\"{self._img_paths}/{target_name}\")\n                source = np.load(f\"{self._img_paths}/{source_name}\")\n            else:\n                target = 
np.load(f\"{self._img_paths[target_name[6:8]]}/{target_name}\")\n source = np.load(f\"{self._img_paths[source_name[6:8]]}/{source_name}\")\n\n target = target[::self.down_sample, ::self.down_sample, :, np.newaxis]\n source = source[::self.down_sample, ::self.down_sample, :, np.newaxis]\n target = self._normalise(target)\n source = self._normalise(source)\n\n if self._json is not None:\n source_time = self._json[names[\"source\"][:-8] + \".nrrd\"]\n target_time = self._json[names[\"target\"][:-8] + \".nrrd\"]\n\n # TODO: allow using different seg channels\n if len(self._fold_segs) > 0:\n if len(self.sub_folders) == 0:\n candidate_segs = glob.glob(f\"{self._seg_paths}/{target_name[0:6]}AC*{target_name[-8:]}\")\n assert len(candidate_segs) == 1, candidate_segs\n seg = np.load(candidate_segs[0]).astype(\"float32\")\n seg = seg[::self.down_sample, ::self.down_sample, :, np.newaxis]\n seg[seg > 1] = 1\n # TODO: return index\n\n else:\n seg = np.load(f\"{self._seg_paths[target_name[6:8]]}/{target_name}\").astype(\"float32\")\n seg = seg[::self.down_sample, ::self.down_sample, :, np.newaxis]\n seg[seg > 1] = 1\n\n if self._json is not None:\n yield (source, target, seg, source_time, target_time)\n else:\n yield (source, target, seg)\n\n else:\n if self._json is not None:\n yield (source, target, source_time, target_time)\n else:\n yield (source, target)\n\n i += 1\n\n#-------------------------------------------------------------------------\n\"\"\" Data loader for one to one source-target pairings \"\"\"\n\nclass PairedLoader(BaseImgLoader):\n def __init__(self, config: dict, dataset_type: str):\n super().__init__(config, dataset_type)\n\n # Expects list of targets and sources e.g. [\"AC\", \"VC\"], [\"HQ\"]\n self._targets = []\n self._sources = []\n self._segs = []\n\n if len(config[\"target\"]) > 0:\n for key in config[\"target\"]:\n self._targets += os.listdir(self._img_paths[key])\n\n elif len(config[\"target\"]) == 0:\n for key in self.sub_folders:\n self._targets += os.listdir(self._img_paths[key])\n\n if len(config[\"source\"]) > 0:\n for key in config[\"source\"]:\n self._sources += os.listdir(self._img_paths[key])\n\n elif len(config[\"source\"]) == 0:\n for key in self.sub_folders:\n self._sources += os.listdir(self._img_paths[key])\n \n if len(config[\"segs\"]) > 0:\n for key in config[\"segs\"]:\n self._segs += os.listdir(self._seg_paths[key])\n\n if len(self._targets) == 0 or len(self._sources) == 0:\n raise FileNotFoundError(f\"No data found: {len(self._targets)} targets, {len(self._sources)} sources\")\n\n print(\"==================================================\")\n print(f\"Data: {len(self._targets)} targets, {len(self._sources)} sources, {len(self._segs)} segmentations\")\n print(f\"Using paired loader for {self._dataset_type}\")\n\n super().train_val_split()\n\n self._subject_targets = {k: [img for img in v if img[6:8] in self.config[\"target\"]] for k, v in self._subject_imgs.items()}\n self._subject_sources = {k: [img for img in v if img[6:8] in self.config[\"source\"]] for k, v in self._subject_imgs.items()}\n \n @property\n def subject_imgs(self):\n \"\"\" Return list of images indexed by procedure \"\"\"\n return {\"targets\": self._subject_targets, \"sources\": self._subject_sources}\n\n def img_pairer(self, source: str) -> dict:\n # TODO: return idx\n # Get potential target candidates matching source (where target and source specified)\n target_candidates = self._subject_targets[source[0:6]]\n assert len(target_candidates) > 0, source\n target_stem = 
target_candidates[np.random.randint(len(target_candidates))]\n        target = f\"{target_stem}_{source[-7:]}\"\n\n        return {\"target\": target, \"source\": source}\n\n#-------------------------------------------------------------------------\n\"\"\" Data loader for unpaired images \"\"\"\n\nclass UnpairedLoader(BaseImgLoader):\n    def __init__(self, config: dict, dataset_type: str):\n        super().__init__(config, dataset_type)\n\n        # Optional list of targets and sources e.g. [\"AC\", \"VC\"], [\"HQ\"]\n        self._targets = []\n        self._sources = []\n        self._segs = []\n\n        if len(config[\"target\"]) > 0:\n            for key in config[\"target\"]:\n                self._targets += [t for t in os.listdir(self._img_paths) if key in t]\n\n        elif len(config[\"target\"]) == 0:\n            self._targets += os.listdir(self._img_paths)\n\n        if len(config[\"source\"]) > 0:\n            for key in config[\"source\"]:\n                self._sources += [s for s in os.listdir(self._img_paths) if key in s]\n\n        elif len(config[\"source\"]) == 0:\n            self._sources += os.listdir(self._img_paths)\n\n        if len(config[\"segs\"]) > 0:\n            self._segs += os.listdir(self._seg_paths)\n\n        print(\"==================================================\")\n        print(f\"Data: {len(self._targets)} targets, {len(self._sources)} sources, {len(self._segs)} segmentations\")\n        print(f\"Using unpaired loader for {self._dataset_type}\")\n\n        super().train_val_split()\n\n        if len(self.config[\"target\"]) > 0:\n            self._subject_targets = {k: [img for img in v if img[6:8] in self.config[\"target\"]] for k, v in self._subject_imgs.items()}\n        else:\n            self._subject_targets = None\n\n    @property\n    def subject_imgs(self):\n        \"\"\" Return list of images indexed by procedure \"\"\"\n        return self._subject_imgs\n\n    def img_pairer(self, source: str, direction: str = None) -> dict:\n        # TODO: add forwards/backwards sampling, return idx\n        if self._subject_targets is None:\n            target_candidates = list(self._subject_imgs[source[0:6]])\n        else:\n            target_candidates = list(self._subject_targets[source[0:6]])\n\n        try:\n            target_candidates.remove(source[0:-8])\n        except ValueError:\n            pass\n\n        assert len(target_candidates) > 0, source\n        target_stem = target_candidates[np.random.randint(len(target_candidates))]\n        target = f\"{target_stem}_{source[-7:]}\"\n\n        return {\"target\": target, \"source\": source}\n\n\n#----------------------------------------------------------------------------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n\n    \"\"\" Routine for visually testing dataloader \"\"\"\n\n    FILE_PATH = \"D:/ProjectImages/SyntheticContrast\"\n    segs = [\"AC\"]\n    # norm_type comes from the config and down_sample is the key read by BaseImgLoader\n    TestLoader = UnpairedLoader({\"data_path\": FILE_PATH, \"target\": [\"AC\", \"VC\"], \"source\": [\"HQ\"], \"segs\": segs, \"times\": \"times.json\", \"norm_type\": \"minmax\", \"down_sample\": 4, \"num_examples\": 4, \"cv_folds\": 3, \"fold\": 2}, dataset_type=\"training\")\n    TestLoader.set_normalisation(param_1=-500, param_2=2500)\n\n    output_types = [\"float32\", \"float32\"]\n\n    if len(segs) > 0:\n        output_types += [\"float32\"]\n\n    # \"times\" is set in the config, so the generator also yields source and\n    # target times (assumed numeric here)\n    output_types += [\"float32\", \"float32\"]\n\n    train_ds = tf.data.Dataset.from_generator(TestLoader.data_generator, output_types=output_types)\n\n    for data in train_ds.batch(4).take(2):\n        # Unpack in the order yielded by data_generator\n        if len(segs) > 0:\n            source, target, seg, source_time, target_time = data\n        else:\n            source, target, source_time, target_time = data\n\n        source = TestLoader.un_normalise(source)\n        target = TestLoader.un_normalise(target)\n\n        plt.subplot(3, 2, 1)\n        plt.imshow(source[0, :, :, 0, 0], cmap=\"gray\", vmin=-150, vmax=250)\n        plt.axis(\"off\")\n        plt.title(source_time[0].numpy())\n        
plt.subplot(3, 2, 2)\n plt.imshow(source[1, :, :, 0, 0], cmap=\"gray\", vmin=-150, vmax=250)\n plt.axis(\"off\")\n plt.title(source_time[1].numpy())\n\n plt.subplot(3, 2, 3)\n plt.imshow(target[0, :, :, 0, 0], cmap=\"gray\", vmin=-150, vmax=250)\n plt.axis(\"off\")\n plt.title(target_time[0].numpy())\n plt.subplot(3, 2, 4)\n plt.imshow(target[1, :, :, 0, 0], cmap=\"gray\", vmin=-150, vmax=250)\n plt.axis(\"off\")\n plt.title(target_time[1].numpy())\n\n if len(segs) > 0:\n plt.subplot(3, 2, 5)\n plt.imshow(seg[0, :, :, 0, 0])\n plt.axis(\"off\")\n plt.subplot(3, 2, 6)\n plt.imshow(seg[1, :, :, 0, 0])\n plt.axis(\"off\")\n\n plt.show()\n\n if len(segs) > 0:\n source, target, seg = TestLoader.example_images()\n\n else:\n source, target = TestLoader.example_images()\n\n source = TestLoader.un_normalise(source)\n target = TestLoader.un_normalise(target)\n \n fig, axs = plt.subplots(target.shape[0], 3)\n\n for i in range(target.shape[0]):\n axs[i, 0].imshow(source[i, :, :, 11, 0], cmap=\"gray\", vmin=-150, vmax=250)\n axs[i, 0].axis(\"off\")\n axs[i, 1].imshow(target[i, :, :, 11, 0], cmap=\"gray\", vmin=-150, vmax=250)\n axs[i, 1].axis(\"off\")\n\n if len(segs) > 0:\n axs[i, 2].imshow(seg[i, :, :, 11, 0])\n axs[i, 2].axis(\"off\")\n \n plt.show()\n","repo_name":"markpinnock/CNN_CT_SISR","sub_path":"superresolution_v02/utils/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":19982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18978624155","text":"import socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nprint(\"Socket created\")\n\nhost = \"localhost\"\nport = 5432\ns.bind((host, port))\nmensage = \"Server: Hello World!\"\n\nwhile 1:\n data, addr = s.recvfrom(4096)\n if not data:\n break\n print(\"Client: \" + data.decode())\n s.sendto(mensage.encode(), addr)\n ","repo_name":"pedro-camara/python-sec","sub_path":"udpserver.py","file_name":"udpserver.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10296355947","text":"from django.urls import path\nfrom .views import home,student_list, student_add, student_detail, student_update,student_delete\n# from django.views.generic import TemplateView\nfrom .views import HomeView, StudentListView # newly added\n\nurlpatterns = [\n # path('', home, name=\"home\"),\n # path('', TemplateView.as_view(template_name= \"fscohort/home.html\"), name=\"home\"),\n path('', HomeView.as_view(), name=\"home\"),\n # path('student_list/', student_list, name=\"list\"), # commented\n path('student_list/', StudentListView.as_view(), name=\"list\"), # new line\n path('student_add/', student_add, name=\"add\"),\n path('detail//', student_detail, name=\"detail\"),\n path('update//', student_update, name=\"update\"),\n path('delete//', student_delete, name=\"delete\"),\n]","repo_name":"emreeken183425/BACKEND-NOTES","sub_path":"django/tekrar/class_crudes/class/fscohort/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15216747925","text":"# Files and Directories\n# To print all files in the given directory\n# import os\n# cwd = os.getcwd() # Get the current working directory (cwd)\n# files = os.listdir(cwd) # Get all the files in that directory\n# print(\"Files in %r: %s\" % (cwd, files))\n\n# 
===================================================================\n# # First Way to Read File\n# file = open('Day24/file.txt')\n# # Reading the Content of File\n# contents = file.read()\n# # printing Content of File\n# print(contents)\n# # Close the file when we are done\n# file.close()\n\n# Second Way to Read File\n# Using the with keyword; in this case there is no need to close the file\n\n# with open('Day24/file.txt') as file:\n#     # reading file\n#     contents = file.read()\n#     print(contents)\n\n# When we want to write to a file, we need to pass a mode\n# Note: w for write\n#       r for read (default)\n#       a for append\n\nwith open('Day24/file.txt', mode=\"w\") as file:\n    # write in file (mode \"w\" truncates the file first; reading is not allowed here)\n    file.write(\"File Writing Done\")\n\n# Using Append Mode Instead of Write Mode\nwith open('Day24/file.txt', mode=\"a\") as file:\n    # write in file\n    file.write(\"File Appending Done\")\n\n# Difference Between Write and Append Mode\n    # When we use write mode it clears all pre-existing data and writes to it\n    # While append mode keeps all pre-existing data and writes to the end of file","repo_name":"AMAN123956/Python-Daily-Learning","sub_path":"Day24/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}