diff --git "a/3579.jsonl" "b/3579.jsonl" new file mode 100644--- /dev/null +++ "b/3579.jsonl" @@ -0,0 +1,1241 @@ +{"seq_id":"33498073747","text":"import ssl\nfrom pprint import pprint\nfrom sys import argv\nfrom socket import socket, AF_INET, SOCK_STREAM,SHUT_RDWR\nport = 443\ncon = True\nhostname = \"www.w3schools.com\"\npath = \"images/picture.jpg\"\nif (len(argv)==2):\n\thostname = argv[1]\n\tpath = \"\"\nif (len(argv)==3):\n\thostname = argv[1]\n\tpath = \"\"\n\tcon = argv[2]\n\tif con == \"True\":\n\t\tcon = True\n\telse:\n\t\tcon = False\nif (len(argv)==4):\n\thostname = argv[1]\n\tcon = argv[2]\n\tpath = argv[3]\n\tif con ==\"True\":\n\t\tcon =True\n\telse:\n\t\tcon = False\ncadir = './certs'\nprint (\"Cadir Folder path:\")\nprint (cadir)\nprint (\"Check Hostname Attribute:\")\ncontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\ncontext.load_verify_locations(cafile=\"./certs/ca-certificates.crt\", capath = cadir)\n#context.load_verify_locations(capath = cadir)\ncontext.verify_mode = ssl.CERT_REQUIRED\ncontext.check_hostname = con\nprint(context.check_hostname)\n\nsock = socket(AF_INET, SOCK_STREAM)\nsock.connect((hostname,port))\ninput(\"After making TCP connection. Press any key to continue ......\")\n\nssock = context.wrap_socket(sock, server_hostname = hostname, do_handshake_on_connect= False)\nssock.do_handshake()\npprint(ssock.getpeercert())\ninput(\"After getpercert. Press any key to continue ......\")\npprint(ssock.cipher())\ninput(\"After cipher. Press any key to continue ......\")\n\nrequest = b\"GET /\" + path.encode('utf-8') + b\" HTTP/1.0\\r\\nHost: \" + hostname.encode('utf-8') + b\"\\r\\n\\r\\n\"\npprint(request.split(b\"\\r\\n\"))\nssock.sendall(request)\n\nresponse = ssock.recv(2048)\nwhile response:\n\tpprint(response.split(b\"\\r\\n\"))\n\tresponse = ssock.recv(2048)\n\nssock.shutdown(SHUT_RDWR)\nssock.close()\n","repo_name":"vatsalagrawal6991/Implementing-Customized-TLS","sub_path":"tls_client.py","file_name":"tls_client.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73896805601","text":"import numpy as np\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import f1_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom scipy.stats import skew\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\n\nscaler = MinMaxScaler()\n\ndef get_data(path):\n columns = ['accx', 'accy', 'accz', 'linx', 'liny', 'linz']\n\n idx2filename = {}\n\n whole_data = []\n\n for index, file in enumerate(os.listdir(path)):\n data = pd.read_csv(path + str(file), names=columns, delimiter=',')\n idx2filename[index] = file\n whole_data.append(data.values[1000:4000, :])\n\n return whole_data, idx2filename\n\ndef RMS(threedata):\n print((threedata[0:1]))\n\ndef featuring(datas): # take mean and std of data samples and plus RMS\n\n mean_features = []\n std_features = []\n skew_features = []\n median_features = []\n final_acc_matrix = []\n final_lin_matrix = []\n\n y_labels = []\n\n for idx, data in enumerate(datas):\n\n one_data_size, num_features = data.shape\n\n num_sample = 30\n one_sample_size = int(one_data_size / 30)\n for num in range(num_sample):\n\n mean_features.append(np.mean(data[num * one_sample_size:(num + 1) * one_sample_size, :], 0))\n std_features.append(np.std(data[num * one_sample_size:(num + 1) * 
one_sample_size, :], 0))\n skew_features.append(skew(data[num * one_sample_size:(num + 1) * one_sample_size, :], axis=0, bias=True))\n median_features.append(np.median(data[num * one_sample_size:(num + 1) * one_sample_size, :], axis = 0))\n y_labels.append(idx)\n\n square_matrix = np.square(data[num * one_sample_size:(num + 1) * one_sample_size, :])\n acc_square_matrix = square_matrix[:, [0, 1, 2]]\n lin_square_matrix = square_matrix[:, [0, 1, 2]]\n\n acc_square_matrix = acc_square_matrix.sum(axis = 1)\n lin_square_matrix = lin_square_matrix.sum(axis = 1)\n\n sqrt_acc_features = np.sqrt(acc_square_matrix)\n sqrt_lin_features = np.sqrt(lin_square_matrix)\n\n final_acc_matrix.append(sqrt_acc_features)\n final_lin_matrix.append(sqrt_lin_features)\n\n\n return mean_features, std_features, skew_features, median_features, final_acc_matrix, final_lin_matrix, y_labels\n\ndef train_test_divide(mean_data, std_data, skew_data, median_data, amp_acc, amp_lin, y_data, ratio):\n num_data = len(mean_data)\n\n mean_data, std_data, skew_data, median_data, amp_acc, amp_lin, y_data = shuffle(mean_data, std_data, skew_data, median_data, amp_acc, amp_lin, y_data)\n\n train_mean_data = mean_data[:int(ratio * num_data)]\n train_std_data = std_data[:int(ratio * num_data)]\n train_skew_data = skew_data[:int(ratio * num_data)]\n train_median_data = median_data[:int(ratio * num_data)]\n train_amp_acc = amp_acc[:int(ratio * num_data)]\n train_amp_lin = amp_lin[:int(ratio * num_data)]\n\n train_label = y_data[:int(ratio * num_data)]\n\n test_mean_data = mean_data[int(ratio * num_data):]\n test_std_data = std_data[int(ratio * num_data):]\n test_skew_data = skew_data[int(ratio * num_data):]\n test_median_data = median_data[int(ratio * num_data):]\n test_amp_acc = amp_acc[int(ratio * num_data):]\n test_amp_lin = amp_lin[int(ratio * num_data):]\n\n test_label = y_data[int(ratio * num_data):]\n\n return train_mean_data, train_std_data, train_skew_data, train_median_data, train_amp_acc, train_amp_lin, train_label, test_mean_data, test_std_data, test_skew_data, test_median_data, test_amp_acc, test_amp_lin, test_label\n\ndef classify(mean_features, std_features, skew_features, median_features, final_acc_matrix, final_lin_matrix, y_labels):\n\n train_mean_data, train_std_data, train_skew_data, train_median_data, train_amp_acc, train_amp_lin, train_label, test_mean_data, test_std_data, test_skew_data, test_median_data, test_amp_acc, test_amp_lin, test_label = train_test_divide(mean_features, std_features, skew_features, median_features, final_acc_matrix, final_lin_matrix, y_labels, 0.6)\n\n train_mean_data = np.array(train_mean_data)\n train_std_data = np.array(train_std_data)\n train_skew_data = np.array(train_skew_data)\n train_median_data = np.array(train_median_data)\n train_amp_acc = np.array(train_amp_acc)\n train_amp_lin = np.array(train_amp_lin)\n\n test_mean_data = np.array(test_mean_data)\n test_std_data = np.array(test_std_data)\n test_skew_data = np.array(test_skew_data)\n test_median_data = np.array(test_median_data)\n test_amp_acc = np.array(test_amp_acc)\n test_amp_lin = np.array(test_amp_lin)\n\n train_data = np.concatenate((train_mean_data, train_std_data, train_skew_data, train_median_data), axis=1)#train_amp_acc, , train_amp_lin , , train_skew_data, train_median_data\n test_data = np.concatenate((test_mean_data, test_std_data, test_skew_data, test_median_data), axis=1)#test_amp_acc , test_amp_lin , test_median_data , test_skew_data, test_median_data\n\n rfc = RandomForestClassifier(n_estimators=1000)\n 
rfc.fit(train_data, train_label)\n train_score = rfc.score(train_data, train_label)\n test_score = rfc.score(test_data, test_label)\n print(\"rfc train score: \", train_score)\n print(\"rfc test score: \", test_score)\n\n p = rfc.predict(test_data)\n f1_score_result = f1_score(test_label, p, average=None).mean()\n print(\"dt F1 score: \", f1_score_result)\n\n return test_score, f1_score_result\n\npath = \"data/\"\n\nwhole_data, idx2filename = get_data(path)\nmean_features, std_features, skew_features, median_features, final_acc_matrix, final_lin_matrix, y_labels = featuring(whole_data)\nnum_iteration = 20\n\ntest_scores = []\nf1_scores = []\n\nfor _ in range(num_iteration):\n test_score, f1_score_result = classify(mean_features, std_features, skew_features, median_features, final_acc_matrix, final_lin_matrix, y_labels)\n test_scores.append(test_score)\n f1_scores.append(f1_score_result)\n\navg_test_score = np.mean(test_scores)\navg_f1_score = np.mean(f1_score_result)\n\nprint('Test: ', avg_test_score)\nprint('Avg: ', avg_f1_score)\n","repo_name":"barrelo89/Gait-IEEE-WF-IoT","sub_path":"RFC.py","file_name":"RFC.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2978641147","text":"\"\"\"\nViews for the Recipe API.\n\"\"\"\nfrom rest_framework import viewsets, mixins, status\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom drf_spectacular.utils import (\n extend_schema_view,\n extend_schema,\n OpenApiParameter,\n OpenApiTypes\n)\n\nfrom core.models import *\nfrom recipe.serializers import *\n\n@extend_schema_view(\n list=extend_schema(\n parameters=[\n OpenApiParameter(\n 'tags',\n OpenApiTypes.STR,\n description='csv of tags'\n ),\n OpenApiParameter(\n 'ingredients',\n OpenApiTypes.STR,\n description='csv of ingredients'\n )\n ]\n )\n)\nclass RecipeViewSet(viewsets.ModelViewSet):\n \"\"\"Defines basic views for the recipe endpoint.\"\"\"\n\n queryset = Recipe.objects.all()\n serializer_class = RecipeDetailSerializer\n authentication_classes = [TokenAuthentication]\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n \"\"\"\n modifies the default behaviour of getting all recipes\n to only recipes of the authenticated user.\n and filtering\n \"\"\"\n tags = self.request.query_params.get('tags')\n ingredients = self.request.query_params.get('ingredients')\n queryset = self.queryset\n\n if tags:\n tag_names = tags.split(',')\n queryset = queryset.filter(tags__name__in=tag_names)\n\n if ingredients:\n ingredient_names = ingredients.split(',')\n queryset = queryset.filter(ingredients__name__in=ingredient_names)\n \n return queryset.filter(\n user=self.request.user\n ).order_by('-id').distinct()\n \n\n def get_serializer_class(self):\n \"\"\"returns the needed serializer by default uses the one with description.\"\"\"\n if self.action == \"list\":\n return RecipeSerializer\n elif self.action == 'upload_image':\n return RecipeImageSerializer\n\n return self.serializer_class\n \n @action(methods=['POST'], detail=True, url_path='upload_image')\n def upload_image(self, request, pk=None):\n \"\"\"uploads an image to a recipe.\"\"\"\n # refer to https://github.com/encode/django-rest-framework/blob/master/rest_framework/generics.py\n # line: 79 to understand how to it returns the correct object.\n recipe = 
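# Editor's sketch: RFC.py above re-runs a manual 60/40 shuffle split 20 times
# and averages the scores (note that its final line averages f1_score_result,
# a single scalar, rather than the accumulated f1_scores list). scikit-learn's
# cross_val_score expresses the same repeated-evaluation idea directly; X and
# y stand for the concatenated feature matrix and labels built by featuring().
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

def mean_cv_accuracy(X, y, folds=5):
    clf = RandomForestClassifier(n_estimators=1000)
    return float(np.mean(cross_val_score(clf, X, y, cv=folds)))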
self.get_object() \n serializer = self.get_serializer(recipe, data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass TagViewSet(\n mixins.ListModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n viewsets.GenericViewSet,\n):\n \"\"\"Manages tags in the database.\"\"\"\n\n serializer_class = TagSerializer\n queryset = Tag.objects.all()\n authentication_classes = [TokenAuthentication]\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n \"\"\"\n modifies the default behaviour of getting all tags\n to only tags of the authenticated user.\n \"\"\"\n return self.queryset.filter(user=self.request.user).order_by(\"-name\")\n\n\nclass IngredientViewSet(\n mixins.ListModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n viewsets.GenericViewSet,\n):\n \"\"\"Manages Ingredients in the database\"\"\"\n\n serializer_class = IngredientSerializer\n queryset = Ingredient.objects.all()\n authentication_classes = [TokenAuthentication]\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n \"\"\"\n modifies the normal behaviour of listing all ingredients\n to only list ingredients related to the user.\n \"\"\"\n return self.queryset.filter(user=self.request.user).order_by(\"-name\")\n","repo_name":"MohammedSaLah-Eldeen/recipe-app-api","sub_path":"app/recipe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72708133283","text":"import json\nimport sys\nfrom utils import save_data\nfrom random import randint\nfrom items.items import Armor, Weapon, armors_path, weapons_path\nfrom avatar.avatar import Avatar, heroes_path, enemies_path\n\ndef print_parts(parts):\n for i in range(len(parts)):\n print(f\"{i}:\", f\"{parts[i]}\")\n\nclass Game:\n hero = None\n boss = None\n menu_items = [\n 'start',\n 'create_hero',\n 'create_boss',\n 'create_weapon',\n 'create_armor',\n 'set_weapon',\n 'set_armor',\n 'set_hero',\n 'set_boss',\n 'set_boss_armor',\n 'set_boss_weapon',\n 'view',\n 'exit'\n ]\n\n def __init__(self):\n with open(heroes_path) as f:\n self.heroes = [Avatar(**i) for i in json.load(f)]\n with open(armors_path) as f:\n self.armors = [Armor(**i) for i in json.load(f)]\n with open(weapons_path) as f:\n self.weapons = [Weapon(**i) for i in json.load(f)]\n with open(enemies_path) as f:\n self.enemies = [Avatar(**i) for i in json.load(f)]\n\n def print_main_menu(self):\n print_parts(self.menu_items)\n\n def exit(self):\n sys.exit(\"EXIT\")\n\n def create_hero(self, name, hp, power):\n self.heroes += [Avatar(name, hp, power)]\n save_data(heroes_path, [i.get_data_for_save() for i in self.heroes])\n\n def create_weapon(self, name, size, durability, power):\n self.weapons += [Weapon(name, size, durability, power)]\n save_data(weapons_path, [i.get_data_for_save() for i in self.weapons])\n\n def create_armor(self, name, size, durability, hp):\n self.armors += [Armor(name, size, durability, hp)]\n save_data(armors_path, [i.get_data_for_save() for i in self.armors])\n\n def set_weapon(self,weapon):\n if self.hero is None:\n return False\n self.hero.set_weapon(weapon)\n return True\n\n def set_armor(self, armor):\n if self.hero is None:\n return False\n self.hero.set_armor(armor)\n 
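# Editor's sketch: exercising the tag query-param filtering of the
# RecipeViewSet above with DRF's test client. The 'recipe:recipe-list' route
# name is an assumption based on a typical DefaultRouter registration; it is
# not shown in the record itself.
from django.urls import reverse
from rest_framework.test import APIClient

def list_recipes_with_tags(user, tag_names):
    client = APIClient()
    client.force_authenticate(user=user)
    return client.get(reverse('recipe:recipe-list'), {'tags': ','.join(tag_names)})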
return True\n\n def set_hero(self, hero):\n self.hero = hero\n\n def create_boss(self, name, hp, power):\n self.enemies += [Avatar(name, hp, power)]\n save_data(enemies_path, [i.get_data_for_save() for i in self.enemies])\n\n def set_boss(self, boss):\n self.boss = boss\n\n def set_boss_weapon(self, boss_weapon):\n if self.boss is None:\n return False\n self.boss.set_weapon(boss_weapon)\n return True\n\n def set_boss_armor(self, boss_armor):\n if self.boss is None:\n return False\n self.boss.set_armor(boss_armor)\n return True\n\n def start(self):\n while self.hero.hp > 0 and self.boss.hp > 0:\n print(\"Choose parts for attack\")\n print_parts(self.boss.body_parts)\n self.hero.attack = int(input(\"part number: \"))\n print(\"Choose parts for defence\")\n print_parts(self.hero.body_parts)\n self.hero.defence = int(input(\"part number: \"))\n attack_part = randint(0, len(self.hero.body_parts)-1)\n self.boss.attack = attack_part\n print(\"boss attack:\", self.hero.body_parts[attack_part])\n defence_part = randint(0, len(self.boss.body_parts)-1)\n self.boss.defence = defence_part\n print(\"boss defence:\", self.hero.body_parts[defence_part])\n self.hero / self.boss\n print(\"hero: \", self.hero.hp)\n print(\"boss: \", self.boss.hp)\n\n def __str__(self):\n return '\\n'.join([str(self.hero) , str(self.boss) ])\n\n def view(self):\n print(self)\n input()\n","repo_name":"Lairion/fight_club","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44813469910","text":"import sys,os,subprocess,datetime\r\nfrom argparse import ArgumentParser\r\ndef main(argv):\r\n print(\"Twittify V1.0, coded by Bijay Regmi [https://github.com/regmibijay]\")\r\n print(\"Converts any video file to mp4 format suitable to Twitter standards using FFMPEG.\")\r\n parser = ArgumentParser()\r\n parser.add_argument(\"-i\", help = \"Input a video file to be converted.\")\r\n parser.add_argument(\"-ss\", help = \"Start time (format 00:00:00)\", nargs=\"?\", default = \"00:00:00\")\r\n parser.add_argument(\"--dur\", help = \"Duration of the video\", nargs = '?',default = \"length\")\r\n parser.add_argument(\"-o\", help=\"Output file name with format (e.g. out.mp4)\")\r\n args = parser.parse_args()\r\n print(\"---\")\r\n if args.i is None:\r\n print (\"[FATAL ERROR] .. Please input a video file to be converted with -i \")\r\n sys.exit()\r\n if args.o is None:\r\n print (\"[FATAL ERROR] .. Please input an output file name with video format (e.g. out.mp4) with -o \")\r\n sys.exit()\r\n if not os.path.isfile(args.i):\r\n print(\"The given input file does not exist. Please make sure specified file name is written correctly.\")\r\n sys.exit()\r\n fargs = \"\"\r\n if args.dur != \"length\":\r\n args.dur = \"-t \" + args.dur\r\n fargs = 'ffmpeg -y -ss ' + args.ss + ' -i ' + args.i + ' '+ args.dur + ' -vcodec libx264 -vf \"pad=ceil(iw/2)*2:ceil(ih/2)*2\" -pix_fmt yuv420p -strict experimental -r 30 -t 2:20 -acodec aac -vb 1024k -minrate 1024k -maxrate 1024k -bufsize 1024k -ar 44100 -ac 2 ' + args.o \r\n if args.dur == \"length\":\r\n fargs = 'ffmpeg -y -ss ' + args.ss + ' -i ' + args.i + ' -vcodec libx264 -vf \"pad=ceil(iw/2)*2:ceil(ih/2)*2\" -pix_fmt yuv420p -strict experimental -r 30 -t 2:20 -acodec aac -vb 1024k -minrate 1024k -maxrate 1024k -bufsize 1024k -ar 44100 -ac 2 ' + args.o\r\n #print (fargs)\r\n error = 0\r\n start = datetime.datetime.now()\r\n print(\"Converting now ... 
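# Editor's sketch: game.py's battle loop evaluates `self.hero / self.boss`,
# which only works because Avatar (defined in avatar/avatar.py, not shown in
# the record) overloads the division operator as an exchange of blows. The
# damage rule below is invented purely to illustrate that pattern.
class Fighter:
    def __init__(self, hp, power):
        self.hp, self.power = hp, power
        self.attack = self.defence = 0  # body-part indices chosen each round

    def __truediv__(self, other):
        if self.attack != other.defence:  # hit lands unless the part was guarded
            other.hp -= self.power
        if other.attack != self.defence:
            self.hp -= other.power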
this might take a while.\")\r\n try:\r\n rc = subprocess.call(fargs, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\r\n except:\r\n print (\"Something went wrong while converting the video.\")\r\n sys.exit()\r\n finally:\r\n end = datetime.datetime.now()\r\n print(\"Script ended. Conversion took \" + str(end - start)) \r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:]) ","repo_name":"regmibijay/tweetify","sub_path":"tweetify.py","file_name":"tweetify.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1393163090","text":"import sqlalchemy\nimport pandas as pd\nimport logging\nfrom sqlalchemy.orm import sessionmaker\n\nfrom . import db_info\n# import db_info as db_info #this file has the dbinfo:server,db,etc.\n#This class is to connect to the database\nclass DbCon:\n connectionLog=logging.getLogger('connectionDB')\n connectionLog.addHandler(logging.FileHandler('connectinDB.log'))\n connectionLog.setLevel(logging.DEBUG)\n \n def __init__(self):\n self.m_sServer = db_info.server\n self.m_sDriver = db_info.driver\n self.m_sDb = db_info.database\n self.m_sPort=db_info.port\n self.m_bConnected = False\n def Connect(self):\n self.connectionLog.debug('begining connection')\n self.create_engine()\n self.connectionLog.debug('created engine')\n #engine = sqlalchemy.create_engine('mssql+pyodbc://{}/{}?driver={}'.format(self.m_sServer, self.m_sDb, driver))\n self.m_oConn = self.m_engine.raw_connection()\n #self.m_oSession = Session(sessionmaker(bind=self.m_engine,autocommit=False))\n Session = sessionmaker(bind=self.m_engine,autocommit=False)\n self.m_oSession = Session()\n self.m_bConnected = True\n self.connectionLog.debug('end connection')\n\n def create_engine(self):\n self.connectionLog.debug('begin create engine')\n s = 'mssql+pyodbc://@' + self.m_sServer + '/' + self.m_sDb + '?trusted_connection=yes&driver='+self.m_sDriver\n self.m_engine = sqlalchemy.create_engine(s)\n def Disconnect(self):\n self.connectionLog.debug('begin disconnect')\n if self.m_bConnected:\n self.m_oConn.cursor().close()\n self.m_oSession.close_all()\n self.m_engine.dispose()\n self.m_oConn.close()\n self.m_bConnected = False\n def ReadSqlQuery(self, sQuery):\n if (self.m_bConnected == False):\n print('Error: db found disconnected and will try to connect again while tryng to run a query')\n self.Connect()\n if (self.m_bConnected == False):\n print('Error: db disconnected while tryng to run a query')\n df = pd.read_sql_query(sQuery,self.m_engine)\n return df\n def insert_df(self, df_to_insert, s_table_name):\n #__init__() got multiple values for argument 'schema'\n self.connectionLog.debug('insert_df')\n df_to_insert.to_sql(s_table_name, con=self.m_engine, if_exists='append', index=False, chunksize=1000) #,\n self.m_oConn.cursor().commit()\n self.connectionLog.debug('insert committed')\n ","repo_name":"HadassaMenucha/Hotel_Reservations_Final_Project","sub_path":"src/Hotel Reservations/sql_dir/database_connect.py","file_name":"database_connect.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28216283018","text":"import binascii\nimport os\n\nfrom utils import view_hex, new_hex\n\n\ndef add_footer(file_name, sync_header):\n \"\"\"\n 添加header/footer到文件结尾,方便replace_hex/extract_hex查找特定字段\n :param file_name: 文件名地址,以十六进制表示,如 b'eb906767'\n :param sync_header: 标识符,以十六进制表示,如 b'6767'\n :return: 返回内容末尾添加标识符的文件\n 
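# Editor's sketch: tweetify.py passes one long command string to
# subprocess.call, which only resolves as intended on Windows. Passing an
# argument list is portable and sidesteps shell quoting; the codec flags are
# trimmed here and the paths are placeholders.
import subprocess

def run_ffmpeg(src, dst):
    cmd = ["ffmpeg", "-y", "-i", src,
           "-vcodec", "libx264", "-pix_fmt", "yuv420p",
           "-acodec", "aac", "-ar", "44100", "-ac", "2", dst]
    return subprocess.call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)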
\"\"\"\n footer = binascii.unhexlify(sync_header) # 将十六进制字符串转化为二进制字符串\n with open(file_name, \"a+b\") as f: # 以追加方式写入文件中\n f.write(footer)\n\n\ndef delete_footer(file_name, sync_header):\n \"\"\"\n 删除最后添加的footer,恢复文件内容\n :param file_name: 文件名地址,以十六进制表示,如 b'eb906767'\n :param sync_header: 标识符,以十六进制表示,如 b'6767'\n :return: 返回删除掉标识符的文件\n \"\"\"\n\n for i in range(len(binascii.unhexlify(sync_header))): # 以二进制字符串的形式从后一个一个删除字符,直到删除末尾的footer\n with open(file_name, \"r+b\") as f:\n f.seek(-1, os.SEEK_END)\n f.truncate()\n\n\nif __name__ == \"__main__\":\n test = b'67671828abcdeffa'\n file_name = '../data/test.dat'\n new_hex.create_new(file_name=file_name, content=test)\n footer = b'6767'\n print(\"before add footer: \")\n view_hex.look_over(file_name=file_name)\n\n add_footer(file_name=file_name, sync_header=footer)\n print(\"after add footer: \")\n view_hex.look_over(file_name=file_name)\n \n delete_footer(file_name=file_name, sync_header=footer)\n print(\"after delete footer: \")\n view_hex.look_over(file_name=file_name)\n","repo_name":"xujinzh/timestamp","sub_path":"utils/repair_hex.py","file_name":"repair_hex.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1559082353","text":"\nfrom flask import Flask, render_template, abort, request, redirect, url_for\nimport model\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef welcome_view():\n return render_template(\"welcome.html\", db=model.db)\n\n\n@app.route(\"/salary/\")\ndef salary_view(index):\n try:\n salary_record = model.db[index]\n return render_template(\"salary.html\",\n salary_record=salary_record,\n index=index,\n max_index=len(model.db)-1)\n except IndexError:\n abort(404)\n\n\n@app.route(\"/add_salary\", methods=[\"GET\", \"POST\"])\ndef add_salary():\n if request.method == \"POST\":\n salary_record = {\"name\": request.form['name'],\n \"salary\": request.form['salary']}\n model.db.append(salary_record)\n model.save_db()\n return redirect(url_for('salary_view', index=len(model.db)-1))\n else:\n return render_template(\"add_salary.html\")\n\n\n@app.route(\"/delete_salary/\", methods=[\"GET\", \"POST\"])\ndef delete_salary(index):\n try:\n if request.method == \"POST\":\n del model.db[index]\n model.save_db()\n return redirect(url_for('welcome_view'))\n else:\n return render_template(\"delete_salary.html\",\n salary_record=model.db[index])\n except IndexError:\n abort(404)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"jamespec/LearningProjects","sub_path":"LearnFlask/learn_flask.py","file_name":"learn_flask.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15111740761","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 4 14:10:51 2019\n\n@author: User\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import OneHotEncoder \n\n\n\n#把資料集做preprocessing 變成dataframe\ndf = pd.read_csv(\"abc.csv\")\n#tmp = df.keys()[8]\ndf1 = df.iloc[:,0:8]\n\nprint(df1)\n\n# One Hot Encodes \none_hot_cols = df1.columns.tolist()\n\n#one_hot_cols.remove('salary')\ndataset_bin_enc = pd.get_dummies(df1, columns=one_hot_cols)\n#print(dataset_bin_enc.head())\n#dataset_bin_enc.head()\n#print(type(df.iloc[:,[8]]))\n#encoder = OneHotEncoder(sparse=False)\n#target_salary = encoder.fit_transform(df.iloc[:,[8]])\n\ndf['salary'] = 
df['salary'].map({'<=50K':1,'>50K':0}).astype(int)\n#print(df['salary'])\n#df.keys()[8]\n#df.info()\n#df_feat.head(6)\n#print(type(df))\n#print(df.DESC)\n\nfrom sklearn.model_selection import train_test_split\n\nX = dataset_bin_enc\ny=df['salary']\nprint(X)\n#將資料分成訓練集和測試集\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.1,random_state=16,stratify=y)\n\n#print(X_train)\n#載入support vector classifier套件\nfrom sklearn.svm import SVC\nmodel = SVC()\n\n#使用support vector classifier建立模型\nmodel.fit(X_train,y_train)\n\n#利用測試組資料測試模型結果\nprediction = model.predict(X_test)\n\n\nfrom sklearn.metrics import classification_report, confusion_matrix\nprint(\"Confusion Matrix:\\n\")\nprint(confusion_matrix(y_test,prediction))\nprint('\\n')\nprint(\"Classification report:\\n\")\nprint(classification_report(y_test,prediction))\n\nfrom sklearn import metrics\n\n#印出accuracy\naccuracy = metrics.accuracy_score(y_test,prediction)\nprint(\"Accuracy: \",accuracy)\n\n#印出precision\nprecision = metrics.precision_score(y_test,prediction,pos_label=3,average=None)\nprint(\"Precision: \",precision)\n\n#印出recall\nrecall = metrics.recall_score(y_test,prediction,pos_label=3,average=None)\nprint(\"Recall:\",recall)\n\n\nfpr, tpr, thresholds = metrics.roc_curve(y_test, prediction, pos_label=2)\nprint(\"AUC: \",metrics.auc(fpr, tpr))\nprint(\"\\n\")\n#AUC = metrics.roc_auc_score(y_test,prediction,average=None)\n\"\"\"from sklearn.model_selection import GridSearchCV\nparam_grid = {'C':[0.1,1,10,100,1000],'gamma':[1,0.1,0.01,0.001,0.0001]}\ngrid = GridSearchCV(SVC(),param_grid,verbose=3)\n\ngrid.fit(X_train,y_train)\ngrid.best_estimator_\ngrid_predictions = grid.predict(X_test)\nprint(confusion_matrix(y_test,grid_predictions))\nprint('\\n')\nprint(classification_report(y_test,grid_predictions))\"\"\"\n","repo_name":"zhichenyang/108DL","sub_path":"DL_Final/.ipynb_checkpoints/k-checkpoint.py","file_name":"k-checkpoint.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5375124492","text":"#!/usr/bin/python\n\n# Imports some Python Date/Time functions\nimport time\nimport datetime\n\n# Imports the phue library\nfrom phue import Bridge\n\n# Create a Hue Bridge object\n# Replace IP address with the IP address of your Hue Bridge \nb = Bridge('192.168.0.71')\n\n# If the app is not registered and the button is not pressed, press the button and call connect() (this only needs to be run a single time)\n#b.connect()\n\n# Imports the PyOTA library\nfrom iota import Iota\nfrom iota import Address\n\n# Function for checking address balance on the IOTA tangle. \ndef checkbalance():\n\n print(\"Checking balance\")\n gb_result = api.get_balances(address)\n balance = gb_result['balances']\n return (balance[0])\n\n# URL to IOTA fullnode used when checking balance\niotaNode = \"https://nodes.thetangle.org:443\"\n\n# Create an IOTA object\napi = Iota(iotaNode, \"\")\n\n# IOTA address to be checked for new light funds \n# IOTA addresses can be created using the IOTA Wallet\naddress = [Address(b'NYZBHOVSMDWWABXSACAJTTWJOQRPVVAWLBSFQVSJSWWBJJLLSQKNZFC9XCRPQSVFQZPBJCJRANNPVMMEZQJRQSVVGZ')]\n\n# Get current address balance at startup and use as baseline for measuring new funds being added. 
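# Editor's sketch: the salary classifier above maps its labels to {0, 1} yet
# computes the ROC curve with pos_label=2, so the resulting AUC never reflects
# a real class. With binary 0/1 labels, roc_auc_score reads AUC directly;
# y_score should be decision_function or probability output rather than the
# hard predictions used in the record.
from sklearn.metrics import roc_auc_score

def binary_auc(y_true, y_score):
    return roc_auc_score(y_true, y_score)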
\ncurrentbalance = checkbalance()\nlastbalance = currentbalance\n\n# Define some variables\nlightbalance = 0\nbalcheckcount = 0\nlightstatus = False\n\n# Assign hue device ID\ndevice_id = 1\n\n# Main loop that executes every 1 second\nwhile True:\n \n # Check for new funds and add to lightbalance when found.\n if balcheckcount == 10:\n currentbalance = checkbalance()\n if currentbalance > lastbalance:\n lightbalance = lightbalance + (currentbalance - lastbalance)\n lastbalance = currentbalance\n balcheckcount = 0\n\n # Manage light balance and light ON/OFF\n if lightbalance > 0:\n if lightstatus == False:\n print(\"light ON\")\n b.set_light(device_id,'on', True) # Turn Hue light ON\n lightstatus=True\n lightbalance = lightbalance -1 \n else:\n if lightstatus == True:\n print(\"light OFF\")\n b.set_light(device_id,'on', False) # Turn Hue light OFF\n lightstatus=False\n \n # Print remaining light balance \n print(datetime.timedelta(seconds=lightbalance))\n\n # Increase balance check counter\n balcheckcount = balcheckcount +1\n\n # Pause for 1 sec.\n time.sleep(1)\n","repo_name":"huggre/pay_the_light_hue","sub_path":"pay_the_light_hue.py","file_name":"pay_the_light_hue.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73592719203","text":"import time\nfrom plistlib import Dict\n\n\n\ndef func(s: dict(type=str, help=\"this is a string\")) -> Dict:\n print(func.__annotations__)\n resList = {}\n length = len(s)\n for i in range(length):\n for j in range(i, length):\n if s[i:j:1] == s[j:i:-1]:\n resList.update({j - i + 1: s[i:j + 1:1]})\n else:\n continue\n\n return resList\n\n\nif __name__ == '__main__':\n start = time.clock()\n print(func(\"dccaccd\"))\n stop = time.clock()\n print(stop - start)\n","repo_name":"gj-hat/Leetcode","sub_path":"5-最长回文串/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"15652137551","text":"from bs4 import BeautifulSoup\nimport urllib\nimport urllib.request\nimport re\nimport json\n\nurl_bu_class_search = \"https://www.bu.edu/phpbin/course-search/search.php?page=w0&pagesize=50&adv=1&nolog=&search_adv_all=&yearsem_adv=2023-SPRG&credits=*&pathway=&hub_match=all&pagesize=-1\"\nreq = urllib.request.Request(url_bu_class_search, headers={'User-Agent': 'Mozilla/5.0'})\nhtml = urllib.request.urlopen(req).read()\n \nsoup = BeautifulSoup(html, \"lxml\")\ncontent = soup.get_text()\ncontent_list = content.split(\"\\n\\n\\n\\n\\n\")\n\nprefix = []\nHUB_dict = {}\nfor content in content_list:\n if \"\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\" in content:\n content = content.replace(\"\\n\\t\\t\\t\\t\\t\\t\\t\\t\", \"\")\n content = content[2:]\n if len(content)==3:\n prefix.append(content)\n# prefix.append(content)\n\n if \"(\"in content:\n index_f = content.index(\"(\")\n index_l = content.index(\")\")\n HUB_dict[content[index_f+1: index_l]] = content[:index_f-1]\n \n \nprefix.append(\"XRG\")\nprefix.append(\"XCC\")\nHUB_dict.pop('XCC')\n\nHUB_fullname = list(HUB_dict.values())\n\nlink_lst = []\nfor link in soup.find_all(\"a\"):\n if \"/phpbin\" in link.get('href'):\n link_lst.append(\"https://www.bu.edu\"+link.get('href'))\n \ncls_info = {}\nfor content in content_list:\n cls = content[:11]\n if cls[:3] in prefix:\n cls_info[cls[:-1]] = {}\n \ncls_info_keys = list(cls_info.keys())\n \nfor content in content_list:\n cls = content[:11]\n if cls[:3] 
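# Editor's sketch: question1.py above compares every (i, j) substring pair,
# roughly O(n^3) including the slice comparisons; expanding around each of the
# 2n-1 centers finds the longest palindrome in O(n^2).
def longest_palindrome(s):
    best = ""
    for center in range(2 * len(s) - 1):
        lo, hi = center // 2, (center + 1) // 2
        while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
            lo, hi = lo - 1, hi + 1
        if hi - lo - 1 > len(best):
            best = s[lo + 1:hi]
    return best

assert longest_palindrome("dccaccd") == "dccaccd"  # the whole input is a palindrome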
in prefix:\n cls_info[cls[:-1]][\"major\"]=cls[4:6]\n \n cls_info[cls[:-1]][\"HUB\"] = []\n for hub in HUB_fullname:\n if hub in content:\n cls_info[cls[:-1]][\"HUB\"].append(hub)\n \n if \"Prereq\" in content:\n cls_info[cls[:-1]][\"Prereq\"] = []\n i = content.index(\"\\n\\n\\n\")\n prereq_line = content[i:]\n for class_name in cls_info_keys:\n name1 = class_name\n name2 = class_name[:3]+class_name[4:6]+class_name[7:]\n name3 = name1.lower()\n name4 = name2.lower()\n name5 = class_name[-6:]\n name6 = class_name[-6:-4]+class_name[-3:]\n name7 = name5.lower()\n name8 = name6.lower()\n if name1 in prereq_line or name2 in prereq_line or name3 in prereq_line or name4 in prereq_line or name5 in prereq_line or name6 in prereq_line or name7 in prereq_line or name8 in prereq_line:\n cls_info[cls[:-1]][\"Prereq\"].append(class_name)\n \n \n\nfor inner_url in link_lst[1:-3]:\n req = urllib.request.Request(inner_url, headers={'User-Agent': 'Mozilla/5.0'})\n html2 = urllib.request.urlopen(req).read()\n soup2 = BeautifulSoup(html2, \"lxml\")\n content = soup2.get_text()\n class_name = content[69:79]\n print(\"class_name\", class_name)\n \n if class_name[-3:].isnumeric():\n cls_info[class_name][\"section\"] = {}\n \n num_of_LEC = content.count(\"LEC\") + content.count(\"IND\")\n \n for num in range(num_of_LEC):\n sect = chr(65+num)+str(1)\n if sect in content:\n index = content.index(sect)\n time_list = content[index:].split()\n time_schdeule = time_list[3]+time_list[4]+time_list[5][:2]\n if len(time_schdeule)<=15 and \"-\" in time_schdeule:\n cls_info[class_name][\"section\"][sect] = time_schdeule\n print(time_schdeule)\n \n\nwith open(\"class_info.json\", \"w\") as fp:\n json.dump(cls_info,fp)\n ","repo_name":"axinio/bostonhacks22","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27947869314","text":"from selenium.webdriver.common.by import By\nfrom appium.webdriver.common.appiumby import AppiumBy\nfrom driver import WebCommon\n\n\nclass Test06Android:\n def setup_method(self):\n self.driver = WebCommon(\"/Users/benazir/QTFProjects/theapp.apk\")\n self.app = self.driver.init_driver()\n\n def verify(self, text):\n self.text = text\n return self.app.find_element(AppiumBy.ACCESSIBILITY_ID, text)\n\n def test_06(self):\n self.app.find_element(By.XPATH, '//android.view.ViewGroup[@content-desc=\"Echo Box\"]').click()\n self.app.implicitly_wait(2)\n self.app.find_element(AppiumBy.ACCESSIBILITY_ID, \"messageInput\").send_keys(\"Hello World\")\n self.app.implicitly_wait(2)\n self.app.find_element(AppiumBy.ACCESSIBILITY_ID, \"messageSaveBtn\").click()\n self.app.implicitly_wait(2)\n\n assert self.verify(\"Hello World\").is_displayed()\n\n def teardown_method(self):\n self.driver.close_driver()\n\n\n\n","repo_name":"Benazir-Ali/mobile_qa_automation_bootcamp","sub_path":"test_06_send_keys.py","file_name":"test_06_send_keys.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24486277843","text":"import random\n\nimport torch\nimport numpy as np\n\n\nclass MemoryPPOLSTM:\n def __init__(self, batch_size, seq_len):\n self.batch_size = batch_size\n self.seq_len = seq_len\n\n self.data = self.init_data()\n\n def init_data(self):\n keys = [\"frames\", \"robot_poses\", \"actions\", \"rewards\", \"frames_prime\", \"robot_poses_prime\", \"probs\",\n \"returns\", 
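# Editor's sketch: the Appium test above leans on implicitly_wait between
# steps; an explicit wait names the exact condition being waited for. The
# accessibility-id locator strategy matches the one used in the record.
from appium.webdriver.common.appiumby import AppiumBy
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_element(driver, accessibility_id, timeout=10):
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((AppiumBy.ACCESSIBILITY_ID, accessibility_id)))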
\"advantages\", \"h_in\", \"c_in\", \"h_out\", \"c_out\"]\n data = {}\n for key in keys:\n data[key] = []\n return data\n\n def reset_data(self):\n self.data = self.init_data()\n\n def put_data(self, transitions):\n # if t_th == 0:\n # 只有t_th = 0 的时候才取得transition中h_in,c_in,h_out,c_out作为这个seq 的 t0, c0\n for i, key in enumerate(self.data.keys()):\n if key == \"done\":\n done_mask = 1 if transitions[i] else 0\n self.data[key].append([done_mask])\n elif key in [\"h_in\", \"c_in\", \"h_out\", \"c_out\"]:\n self.data[key].append(transitions[i].squeeze(1))\n else:\n self.data[key].append(transitions[i])\n\n def make_batch(self, train_device):\n frame_batch = self.data[\"frames\"]\n pose_batch = self.data[\"robot_poses\"]\n a_batch = self.data[\"actions\"]\n r_batch = self.data[\"rewards\"]\n frame_prime_batch = self.data[\"frames_prime\"]\n pos_prime_batch = self.data[\"robot_poses_prime\"]\n probs_batch = self.data[\"probs\"]\n returns_batch = self.data[\"returns\"]\n advantages_batch = self.data[\"advantages\"]\n\n h_in_batch = self.data[\"h_in\"]\n c_in_batch = self.data[\"c_in\"]\n h_out_batch = self.data[\"h_out\"]\n c_out_batch = self.data[\"c_out\"]\n\n frame_batch = torch.tensor(np.array(frame_batch), dtype=torch.float).to(train_device)\n pose_batch = torch.tensor(np.array(pose_batch), dtype=torch.float).to(train_device)\n a_batch = torch.tensor(np.array(a_batch)).to(train_device)\n r_batch = torch.tensor(np.array(r_batch)).to(train_device)\n frame_prime_batch = torch.tensor(np.array(frame_prime_batch), dtype=torch.float).to(train_device)\n pos_prime_batch = torch.tensor(np.array(pos_prime_batch), dtype=torch.float).to(train_device)\n probs_batch = torch.tensor(np.array(probs_batch), dtype=torch.float).to(train_device)\n returns_batch = torch.tensor(np.array(returns_batch), dtype=torch.float).to(train_device)\n advantages_batch = torch.tensor(np.array(advantages_batch), dtype=torch.float).to(train_device)\n h_in_batch = torch.Tensor(h_in_batch).to(train_device)\n c_in_batch = torch.Tensor(c_in_batch).to(train_device)\n h_out_batch = torch.Tensor(h_out_batch).to(train_device)\n c_out_batch = torch.Tensor(c_out_batch).to(train_device)\n\n frame_batch = frame_batch.unsqueeze(2)\n # pose_batch = pose_batch.unsqueeze(2)\n a_batch = a_batch.unsqueeze(2)\n r_batch = r_batch.unsqueeze(2)\n frame_prime_batch = frame_prime_batch.unsqueeze(2)\n # pos_prime_batch = pos_prime_batch.unsqueeze(2)\n probs_batch = probs_batch.unsqueeze(2)\n returns_batch = returns_batch.unsqueeze(2)\n advantages_batch = advantages_batch.unsqueeze(2)\n\n frame_batch = frame_batch.permute(1, 0, 2, 3, 4)\n pose_batch = pose_batch.permute(1, 0, 2)\n a_batch = a_batch.permute(1, 0, 2)\n r_batch = r_batch.permute(1, 0, 2)\n frame_prime_batch = frame_prime_batch.permute(1, 0, 2, 3, 4)\n pos_prime_batch = pos_prime_batch.permute(1, 0, 2)\n probs_batch = probs_batch.permute(1, 0, 2, 3)\n returns_batch = returns_batch.permute(1, 0, 2)\n advantages_batch = advantages_batch.permute(1, 0, 2)\n\n h_in_batch = h_in_batch.permute(1, 0, 2)\n c_in_batch = c_in_batch.permute(1, 0, 2)\n h_out_batch = h_out_batch.permute(1, 0, 2)\n c_out_batch = c_out_batch.permute(1, 0, 2)\n\n return frame_batch, pose_batch, a_batch, r_batch, frame_prime_batch, pos_prime_batch, probs_batch, returns_batch, advantages_batch, \\\n h_in_batch.contiguous(), c_in_batch.contiguous(), h_out_batch.contiguous(), c_out_batch.contiguous()\n\n def __len__(self):\n return len(self.data[\"frames\"])\n\n def is_full_batch(self):\n return len(self.data[\"frames\"]) >= 
self.batch_size\n","repo_name":"zengxyu/rl_find_targets","sub_path":"src/memory/memory_ppo_lstm.py","file_name":"memory_ppo_lstm.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"6838140042","text":"import subprocess\nimport sys\nimport os\n\n\ndef get_number_to_imsi(path=\"simInfos.csv\", sep=';'):\n \"\"\" Reads the file with sim infos and creates a dictionary\n with numbers as key and imsis as values for all the phones in the file.\n\n Args:\n path: The path of the file with the phone infos.\n sep: The separator used in the file.\n ignore: whether to ignore the first line of the file or not.\n\n Returns:\n Number To Imsi dictionary\n \"\"\"\n number_to_imsi = dict()\n with open(path) as f:\n f.readline() # used to ignore header of csv.\n for line in f:\n l = line.rstrip().split(sep)\n if len(l) > 2:\n number_to_imsi.update({l[1]:l[2]}) # Dict are insertion ordered since Python 3.6\n return number_to_imsi\n\ndef get_imsi_to_id():\n \"\"\" Reads the info on plugged in devices and creates a dictionary\n with imsis as key and ADB ids as values.\n\n Returns:\n IMSI To Id dictionary\n \"\"\"\n subprocess.run([\"getPhoneId.bat\"])\n imsi_to_id = dict() # IMSI and corresponding id\n try:\n with open(\"imsiList.txt\") as f:\n for line in f:\n l = line.rstrip().split(';')\n imsi_to_id.update({l[0]:l[1]})\n os.remove(\"imsiList.txt\")\n return imsi_to_id\n except:\n print(\"No devices has been plugged in.\", file=sys.stderr)\n return\n\ndef get_dictionaries():\n \"\"\" Returns the dictionaries needed.\n \"\"\"\n number_to_imsi = get_number_to_imsi()\n imsi_to_id = get_imsi_to_id()\n return number_to_imsi, imsi_to_id\n\ndef check_root(index):\n \"\"\" Adds the APN with the selected parameters for\n the selected phone.\n\n Args:\n\tindex: The list of indexes of the phones to check.\n \"\"\"\n try:\n with open(\"rootList.txt\", 'w') as f:\n pass\n except:\n print(\"rootList.txt not found\", file=sys.stderr)\n # we use range for when the indexes of the phones are skipped\n # ([1, 2, 5] instead of [1, 2, 3] for example)\n for i in range(len(index)):\n num = tuple(number_to_imsi.items())[int(i)][0]\n try:\n id = imsi_to_id[number_to_imsi[num]]\n subprocess.run([\"checkRoot.bat\", id])\n except: # Triggers when a phone is not plugged in.\n with open(\"rootList.txt\", 'a+') as f:\n f.write(\";;false\\n\")\n \nif __name__ == \"__main__\":\n number_to_imsi, imsi_to_id = get_dictionaries()\n check_root(sys.argv[1:])\n","repo_name":"yousefElo/TransatelTestTool-ADB_ID","sub_path":"checkRoot.py","file_name":"checkRoot.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44733103599","text":"import torch\nfrom torch import nn\nfrom torch import optim\nimport numpy as np\nfrom torchvision import datasets, models, transforms\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('data_dir', help = 'Provide data directory', type = str)\nparser.add_argument('--save_dir', help = 'Provide saving directory', type = str)\nparser.add_argument('--arch', help = 'To choose architecture, type alexnet. 
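# Editor's sketch: make_batch above converts each per-timestep list into a
# (T, batch, ...) tensor and then permutes it to batch-first; stacking the
# stored per-step tensors along dim=1 yields that layout in one step. Shapes
# are illustrative, not taken from the record.
import torch

def stack_batch_first(step_tensors):
    # step_tensors: list over T timesteps, each a (batch, feat) tensor
    return torch.stack(step_tensors, dim=1).contiguous()  # -> (batch, T, feat)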
Default is densenet121.', type = str)\nparser.add_argument('--learning_rate', help = 'Learning rate', type = float)\nparser.add_argument('--hidden_units', help = 'Number of hidden units', type = int)\nparser.add_argument('--epochs', help = 'Number of training epochs', type = int)\nparser.add_argument('--gpu', help = \"To choose to train the model on GPU, type cuda\", type = str)\n\nargs = parser.parse_args()\n\ndata_dir = args.data_dir\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'\n\nif args.gpu == 'cuda':\n device = 'cuda'\nelse:\n device = 'cpu'\n\ntrain_transforms = transforms.Compose([transforms.RandomRotation (30),\n transforms.RandomResizedCrop (224),\n transforms.RandomHorizontalFlip (),\n transforms.ToTensor (),\n transforms.Normalize ([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])\n ])\n\nvalid_transforms = transforms.Compose([transforms.Resize (255),\n transforms.CenterCrop (224),\n transforms.ToTensor (),\n transforms.Normalize ([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])\n ])\n\ntest_transforms = transforms.Compose([transforms.Resize (255),\n transforms.CenterCrop (224),\n transforms.ToTensor (),\n transforms.Normalize ([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])\n ])\n\ntrain_data = datasets.ImageFolder(data_dir + '/train', transform = train_transforms)\nvalid_data = datasets.ImageFolder(data_dir + '/valid', transform = valid_transforms)\ntest_data = datasets.ImageFolder(data_dir + '/test', transform = test_transforms)\n\n \ntrainloader = torch.utils.data.DataLoader(train_data, batch_size = 64, shuffle = True)\nvalidloader = torch.utils.data.DataLoader(valid_data, batch_size = 64)\ntestloader = torch.utils.data.DataLoader(test_data, batch_size = 64)\n\n\ndef load_model(arch, hidden_units):\n if arch == 'alexnet':\n model = models.alexnet(pretrained = True)\n for param in model.parameters():\n param.requires_grad = False\n if hidden_units:\n model.classifier = nn.Sequential(nn.Linear(9216, 4096),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(4096, hidden_units),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(hidden_units, 102),\n nn.LogSoftmax(dim=1))\n else:\n model.classifier = nn.Sequential(nn.Linear(9216, 4096),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(4096, 2048),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(2048, 102),\n nn.LogSoftmax(dim=1))\n else:\n arch = 'densenet121'\n model = models.densenet121(pretrained = True)\n for param in model.parameters():\n param.requires_grad = False\n if hidden_units:\n model.classifier = nn.Sequential(nn.Linear(1024, 512),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(512, hidden_units),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(hidden_units, 102),\n nn.LogSoftmax(dim=1))\n else:\n model.classifier = nn.Sequential(nn.Linear(1024, 512),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(512, 256),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(256, 102),\n nn.LogSoftmax(dim=1)) \n \n \n return model, arch\n\nmodel, arch = load_model(args.arch, args.hidden_units)\ncriterion = nn.NLLLoss()\n\nif args.learning_rate:\n optimizer = optim.Adam(model.classifier.parameters(), lr = args.learning_rate)\nelse:\n optimizer = optim.Adam(model.classifier.parameters(), lr = 0.002)\n\nmodel.to(device)\n\nif args.epochs:\n epochs = args.epochs\nelse:\n epochs = 7\n \nsteps = 0\nrunning_loss = 0\nprint_every = 50\nfor epoch in range(epochs):\n for inputs, labels in trainloader:\n steps += 1\n inputs, labels = inputs.to(device), labels.to(device)\n optimizer.zero_grad()\n \n logps = model.forward(inputs)\n loss = 
criterion(logps, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n \n if steps % print_every == 0:\n valid_loss = 0\n accuracy = 0\n model.eval()\n with torch.no_grad():\n for inputs, labels in validloader:\n inputs, labels = inputs.to(device), labels.to(device)\n logps = model.forward(inputs)\n batch_loss = criterion(logps, labels)\n \n valid_loss += batch_loss.item()\n \n ps = torch.exp(logps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n \n print(f\"Epoch {epoch+1}/{epochs}.. \"\n f\"Train loss: {running_loss/print_every:.3f}.. \"\n f\"Valid loss: {valid_loss/len(validloader):.3f}.. \"\n f\"Valid accuracy: {(accuracy/len(validloader))*100:.2f} %\")\n running_loss = 0\n model.train()\n\ntest_loss = 0\naccuracy = 0\nmodel.eval()\nwith torch.no_grad():\n for inputs, labels in testloader:\n inputs, labels = inputs.to(device), labels.to(device)\n logps = model.forward(inputs)\n batch_loss = criterion(logps, labels)\n \n test_loss += batch_loss.item()\n \n ps = torch.exp(logps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n \n print(f\"Test loss: {test_loss/len(testloader):.3f}.. \"\n f\"Test accuracy: {(accuracy/len(testloader))*100:.2f} %\")\n \n \nmodel.to('cpu')\nmodel.class_to_idx = train_data.class_to_idx\ncheckpoint = {'class_to_idx': model.class_to_idx,\n 'arch': arch,\n 'classifier': model.classifier,\n 'state_dict': model.state_dict()\n }\n\nif args.save_dir:\n torch.save(checkpoint, args.save_dir + '/checkpoint.pth')\nelse:\n torch.save(checkpoint, 'checkpoint.pth')","repo_name":"avneeshkhare/udacity","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"4283261558","text":"# python detect_age_video.py --face face_detector --age age_detector\nfrom imutils.video import VideoStream\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\nimport os\nimport json\n\ndef detect_and_predict_age(frame, faceNet, ageNet, gender_net, minConf=0.5):\n\tAGE_BUCKETS = [\"(0-2)\", \"(4-6)\", \"(8-12)\", \"(15-20)\", \"(25-32)\",\n\t\t\"(38-43)\", \"(48-53)\", \"(60-100)\"]\n\n\tresults = []\n\n\t(h, w) = frame.shape[:2]\n\tblob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),\n\t\t(104.0, 177.0, 123.0))\n\n\tfaceNet.setInput(blob)\n\tdetections = faceNet.forward()\n\n\tfor i in range(0, detections.shape[2]):\n\t\tconfidence = detections[0, 0, i, 2]\n\n\t\tif confidence > minConf:\n\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\tface = frame[startY:endY, startX:endX]\n\n\t\t\tif face.shape[0] < 20 or face.shape[1] < 20:\n\t\t\t\tcontinue\n\n\t\t\tfaceBlob = cv2.dnn.blobFromImage(face, 1.0, (227, 227),\n\t\t\t\t(78.4263377603, 87.7689143744, 114.895847746),\n\t\t\t\tswapRB=False)\n\n\t\t\tageNet.setInput(faceBlob)\n\t\t\tpreds = ageNet.forward()\n\t\t\ti = preds[0].argmax()\n\t\t\tage = AGE_BUCKETS[i]\n\t\t\tageConfidence = preds[0][i]\n\t\t\t\n\t\t\tgender_net.setInput(faceBlob)\n\t\t\tgender_preds = gender_net.forward()\n\t\t\tgender_list = ['Male', 'Female']\n\t\t\tgender = gender_list[gender_preds[0].argmax()]\n\t\t\td = {\n\t\t\t\t\"loc\": (startX, startY, endX, endY),\n\t\t\t\t\"age\": (age, 
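# Editor's sketch: reloading the checkpoint that train.py above writes. The
# dict keys mirror the ones it saves with torch.save; recreating the matching
# torchvision base from ckpt['arch'] is an assumption about intended use, not
# code shown in the record.
import torch
from torchvision import models

def load_checkpoint(path):
    ckpt = torch.load(path, map_location="cpu")
    model = (models.densenet121(pretrained=True) if ckpt["arch"] == "densenet121"
             else models.alexnet(pretrained=True))
    model.classifier = ckpt["classifier"]
    model.load_state_dict(ckpt["state_dict"])
    model.class_to_idx = ckpt["class_to_idx"]
    return model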
ageConfidence),\n\t\t\t\t\"gender\": (gender)\n\t\t\t}\n\t\t\tresults.append(d)\n\n\treturn results\n\t\ndef get_fromA(data):\n\tdata = data.replace('(','')\n\tdata = data.replace(')','')\n\tdata0 = int(data.split(\"-\")[1])\n\tdata1 = int(data.split(\"-\")[0])\n\treturn (data0+data1)/2\n\t\ndef change_pic(avg_age, num_pic, path, gender):\n\tif(num_pic==-1):\n\t\t#scelgo una foto a caso\n\t\tnum_pic = 0\n\telse:\n\t\t#foto successiva\n\t\tnum_pic = num_pic +1\n\t#print(num_pic)\n\tind = 0\n\tfor i in PATH:\n\t\tif int(path[ind][\"min\"])<=avg_age and avg_age<=int(path[ind][\"max\"]) and (path[ind][\"gender\"]==gender or path[ind][\"gender\"]==\"U\"):\n\t\t\tbreak\n\t\tind = ind + 1\n\tif(ind<=len(path)):\n\t\ttot_arr = os.listdir(path[ind][\"path\"])\n\t\t#print(len(tot_arr))\n\t\tif len(tot_arr) >= (num_pic+1) :\n\t\t\tpath = path[ind][\"path\"]+\"/\"+tot_arr[num_pic]\n\t\telse:\n\t\t\tnum_pic=0\n\t\t\tpath = path[ind][\"path\"]+\"/\"+tot_arr[num_pic]\n\telse:\n\t\tpath=path['default']\n\t\tnum_pic=0\n\treturn num_pic, path\n\n\nwith open('/home/pi/StimaAGPubblicitaPython/custom.json') as f:\n JSON = json.load(f)\t\nTHRS = JSON[\"THRS\"]\nTHRS_FRAME_RATE = JSON[\"THRS_FRAME_RATE\"]\nSMALL_IMAGE = JSON[\"SMALL_IMAGE\"]\nPATH = JSON[\"PATH\"]\nap = argparse.ArgumentParser()\nap.add_argument(\"-f\", \"--face\", required=True,\n\thelp=\"path to face detector model directory\")\nap.add_argument(\"-a\", \"--age\", required=True,\n\thelp=\"path to age detector model directory\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\thelp=\"minimum probability to filter weak detections\")\nargs = vars(ap.parse_args())\n\nprint(\"[INFO] loading face detector model...\")\nprototxtPath = os.path.sep.join([args[\"face\"], \"deploy.prototxt\"])\nweightsPath = os.path.sep.join([args[\"face\"],\n\t\"res10_300x300_ssd_iter_140000.caffemodel\"])\nfaceNet = cv2.dnn.readNet(prototxtPath, weightsPath)\n\nprint(\"[INFO] loading age detector model...\")\nprototxtPath = os.path.sep.join([args[\"age\"], \"age_deploy.prototxt\"])\nweightsPath = os.path.sep.join([args[\"age\"], \"age_net.caffemodel\"])\nageNet = cv2.dnn.readNet(prototxtPath, weightsPath)\n\ngender_net = cv2.dnn.readNetFromCaffe(\n\t\t'/home/pi/StimaAGPubblicitaPython/data/deploy_gender.prototxt', \n\t\t'/home/pi/StimaAGPubblicitaPython/data/gender_net.caffemodel')\n\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\n#vs = cv2.VideoCapture('/home/pi/StimaAGPubblicitaPython/videoPro.mp4')\ntime.sleep(2.0)\navg_age = 0\nold_avg_age = 0\nold_pic_num = 0\nold_gender=\"M\"\nindex = 0\ninit_frame = cv2.imread(JSON['default'])\nwhile True:\n\tframe = vs.read()\n\t#ret, frame = vs.read()\n\tframe = imutils.resize(frame, width=400)\n\n\tresults = detect_and_predict_age(frame, faceNet, ageNet, gender_net, \n\t\tminConf=args[\"confidence\"])\n\n\tindexFace=1\n\tavg_Frame = 0;\n\tavg_gender_f =0\n\tavg_gender_m =0\n\tnow_gender = \"M\"\n\tfor r in results:\n\t\ttext = \"{}: {:.2f}%, {}\".format(r[\"age\"][0], r[\"age\"][1] * 100,r[\"gender\"][0])\n\t\t(startX, startY, endX, endY) = r[\"loc\"]\n\t\ty = startY - 10 if startY - 10 > 10 else startY + 10\n\t\tcv2.rectangle(frame, (startX, startY), (endX, endY),\n\t\t\t(0, 0, 255), 2)\n\t\tcv2.putText(frame, text, (startX, y),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n\t\tavg_Frame = (avg_Frame+get_fromA(r[\"age\"][0]))/indexFace\n\t\tindexFace=indexFace+1\n\t\tif r[\"gender\"][0]==\"M\" 
:\n\t\t\tavg_gender_m=avg_gender_m+1\n\t\telse:\n\t\t\tavg_gender_f=avg_gender_f+1\n\t\t#avg_age = (avg_age+get_fromA(r[\"age\"][0]))/index\n\t\t#print(str(avg_age))\n\tif avg_gender_f>=avg_gender_m :\n\t\tnow_gender = \"F\"\n\tavg_age = (avg_age +avg_Frame)/2\n\t#print(len(results))\n\tif(len(results)==0):\n\t\tavg_age=0\n\t\told_pic_num=0\n\t\tpath_real=JSON['default']\n\t\t#print(avg_age)\n\telse: \n\t\tif(index%THRS_FRAME_RATE==0):\n\t\t\tpath_real = \"\"\n\t\t\tif((old_avg_age+THRS)>=avg_age and (old_avg_age-THRS)<=avg_age and old_gender==now_gender):\n\t\t\t\told_pic_num, path_real = change_pic(avg_age, old_pic_num, PATH, now_gender)\n\t\t\telse:\n\t\t\t\told_pic_num, path_real = change_pic(avg_age, -1, PATH, now_gender)\n\t\t\told_avg_age = avg_age\n\t\t\told_gender = now_gender\n\t\t\t#print(path_real)\n\t\t\tinit_frame = cv2.imread(path_real)\n\t\t\t#print(old_pic_num)\n\t\t\t#print(old_avg_age)\n\tindex = index+1\n\t#cv2.imshow(\"Frame\", frame)\n\t\n\tcv2.namedWindow(\"window\", cv2.WND_PROP_FULLSCREEN)\n\tcv2.setWindowProperty(\"window\",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\n\tcv2.imshow(\"window\", init_frame)\n\tif(SMALL_IMAGE==1):\n\t\tcv2.imshow(\"Frame\", frame)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\tif key == ord(\"q\"):\n\t\tbreak\n\t\t\ncv2.destroyAllWindows()\n#vs.stop()\n","repo_name":"danisk89/StimaAGPubblicitaPython","sub_path":"detect_age_video.py","file_name":"detect_age_video.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74229440162","text":"import sys\nimport json\nimport argparse\n\nmarkdown = \"\"\ntab = \" \"\nlist_tag = '* '\ninline_code = '`'\ncode_block = '```'\nsubtitle = '## '\nhtag = '#'\n\n\nif sys.version_info < (3, 0):\n raise Exception(\"[ERROR] This program requires Python 3.0 or greater\")\n\n\ndef load_json(file):\n try:\n with open(file, 'r') as f:\n data = f.read()\n return json.loads(data)\n except: # noqa\n print(\"[ERROR] File must be a valid json file\")\n\n\ndef parse_json(json_block, depth, options):\n if isinstance(json_block, dict):\n parse_dict(json_block, depth, options)\n if isinstance(json_block, list):\n parse_list(json_block, depth, options)\n\n\ndef parse_dict(d, depth, options):\n for k in d:\n if k in options['ignore']:\n continue\n if options['keep'] != '':\n if k not in options['keep']:\n continue\n if isinstance(d[k], (dict, list)):\n add_header(k, depth)\n parse_json(d[k], depth + 1, options)\n else:\n add_value(k, d[k], depth)\n\n\ndef parse_list(l, depth, options): # noqa\n for value in l:\n if not isinstance(value, (dict, list)):\n index = l.index(value)\n add_value(index, value, depth)\n else:\n parse_dict(value, depth, options)\n\n\ndef build_header_chain(depth):\n chain = list_tag * (bool(depth)) + htag * (depth + 1) + \\\n ' value ' + (htag * (depth + 1) + '\\n')\n return chain\n\n\ndef build_value_chain(key, value, depth):\n chain = tab * (bool(depth - 1)) + list_tag + \\\n str(key) + \": \" + inline_code + str(value) + inline_code + \"\\n\"\n return chain\n\n\ndef add_header(value, depth):\n chain = build_header_chain(depth)\n global markdown\n markdown += chain.replace('value', value.title())\n\n\ndef add_value(key, value, depth):\n chain = build_value_chain(key, value, depth)\n global markdown\n markdown += chain\n\n\ndef write_out(markdown, output_file):\n with open(output_file, 'w+') as f:\n f.write(markdown)\n\n\ndef convert(input_file, output_file, options):\n json_data = 
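# Editor's sketch: get_fromA in detect_age_video.py above recovers the
# midpoint of an age bucket such as "(25-32)" through index arithmetic on the
# parentheses; a strip/split handles the same inputs and makes the intent
# explicit.
def bucket_midpoint(bucket):
    lo, hi = bucket.strip("()").split("-")
    return (int(lo) + int(hi)) / 2

assert bucket_midpoint("(25-32)") == 28.5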
load_json(input_file)\n depth = 0\n parse_json(json_data, depth, options)\n global markdown\n markdown = markdown.replace('#######', '######')\n write_out(markdown, output_file)\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Json to Markdown converter\",\n usage='%(prog)s -i $INPUTFILENAME [options]', # noqa\n epilog=\"Ca va bien aller!\") # noqa\n parser.add_argument('-i', '--input', help='Input filename', required=True)\n parser.add_argument('-o', '--output', help='Output filename')\n parser.add_argument('-x', '--ignore', help='A list of keys to ignore in a json file')\n parser.add_argument('-k', '--keep', help='A list of keys to convert exclusively in a json file')\n parser.add_argument('-r', '--replace', help='A list of dict to replace keys values. Not implemented')\n args = parser.parse_args()\n\n if args.input is None:\n print('[Error] User must specify input')\n exit\n else:\n input_file = args.input\n\n if args.output is None:\n output_file = f'{args.input[:-4]}md'\n else:\n output_file = args.output\n print(f'[INFO] output: {output_file}')\n\n if args.ignore is not None:\n keys_to_ignore = load_json(args.ignore)\n print(keys_to_ignore)\n else:\n keys_to_ignore = ''\n\n if args.keep is not None:\n keys_to_keep = load_json(args.keep)\n print(keys_to_keep)\n else:\n keys_to_keep = ''\n\n options = dict()\n options['ignore'] = keys_to_ignore\n options['keep'] = keys_to_keep\n print(options)\n\n convert(input_file, output_file, options)\n \"\"\"\n if len(sys.argv) > 1:\n input_file = sys.argv[1]\n output_file = input_file[:-4] + 'md'\n if input_file[-4:] == 'json':\n convert(input_file, output_file)\n else:\n print('Input must be a .json file')\n else:\n print(\"[ERROR] You must specify an input file.\")\n print(\"Usage: \\n python json_to_md.py $JSONFILE\" + '\\n')\n \"\"\"\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"python-geeks/Automation-scripts","sub_path":"json_to_md/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","stars":719,"dataset":"github-code","pt":"54"} +{"seq_id":"45497827427","text":"# -*- coding: utf-8 -*-\nimport asyncio\nimport pytest\nimport logging\n\nfrom bitshares.aio.asset import Asset\n\nlogger = logging.getLogger(\"websockets\")\nlogger.setLevel(logging.DEBUG)\n\nlog = logging.getLogger(\"grapheneapi\")\nlog.setLevel(logging.DEBUG)\n\n\n@pytest.mark.asyncio\nasync def test_parallel_queries(event_loop, bitshares, assets):\n \"\"\"When performing multiple calls at once from different coroutines, responses\n should correctly match with queries.\"\"\"\n\n async def get_asset(asset):\n a = await Asset(asset, blockchain_instance=bitshares)\n assert a[\"symbol\"] == asset\n\n async def get_info():\n await bitshares.info()\n\n for _ in range(0, 40):\n tasks = []\n tasks.append(asyncio.ensure_future(get_asset(\"USD\")))\n tasks.append(asyncio.ensure_future(get_asset(\"GOLD\")))\n tasks.append(asyncio.ensure_future(get_info()))\n await asyncio.gather(*tasks)\n","repo_name":"bitshares/python-bitshares","sub_path":"tests/testnet/aio/test_jsonrpc.py","file_name":"test_jsonrpc.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"54"} +{"seq_id":"8912735352","text":"#!/usr/bin/python3\n\nimport argparse\nimport logging\nimport re\nimport subprocess\nimport sys\n\nfrom reviewer import CommitRef\nfrom reviewer import Reviewer\n\nlogging.basicConfig(stream=sys.stdout, 
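# Editor's sketch: the aio test above wraps every coroutine in
# asyncio.ensure_future before gathering; gather schedules bare coroutines
# itself, so the task bookkeeping can be dropped. Self-contained stand-in:
import asyncio

async def lookup(symbol):
    await asyncio.sleep(0)  # stand-in for a websocket round trip
    return symbol

async def main():
    results = await asyncio.gather(*(lookup(s) for s in ("USD", "GOLD")))
    assert results == ["USD", "GOLD"]

asyncio.run(main())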
level=logging.WARNING)\nlogger = logging.getLogger(__name__)\n\ndef review_change(reviewer, local_sha):\n local_sha = CommitRef(sha=local_sha)\n upstream_sha = reviewer.get_cherry_pick_sha_from_local_sha(local_sha.sha)\n upstream_patch = reviewer.get_commit_from_sha(CommitRef(sha=upstream_sha))\n local_patch = reviewer.get_commit_from_sha(local_sha)\n result = reviewer.compare_diffs(upstream_patch, local_patch)\n\n if reviewer.verbose or reviewer.chatty or len(result):\n logger.info('Reviewing %s (rmt=%s)' % (local_sha.sha, upstream_sha[:11]))\n\n for l in result:\n logger.info(l)\n\n if len(result):\n logger.info('')\n\n return len(result)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Auto review UPSTREAM patches')\n parser.add_argument('--start', help='commit hash to start from',\n required=True)\n parser.add_argument('--prefix', default='UPSTREAM', help='subject prefix')\n parser.add_argument('--verbose', help='print commits', action='store_true')\n parser.add_argument('--chatty', help='print diffs', action='store_true')\n args = parser.parse_args()\n\n if args.verbose or args.chatty:\n logger.setLevel(logging.DEBUG)\n\n proc = subprocess.check_output(\n ['git', 'log', '--oneline', '%s^..' % args.start])\n\n regex = re.compile('([0-9a-f]*) (%s): ' % (args.prefix), flags=re.I)\n ret = 0\n reviewer = Reviewer(args.verbose, args.chatty)\n for l in reversed(proc.decode('UTF-8').split('\\n')):\n this_ret = 0\n m = regex.match(l)\n if m:\n this_ret = review_change(reviewer, m.group(1))\n ret += this_ret\n\n return ret\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"atseanpaul/review-o-matic","sub_path":"review-o-matic.py","file_name":"review-o-matic.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"6122430634","text":"from typing import List\r\nfrom requests_futures.sessions import FuturesSession\r\nfrom concurrent.futures import as_completed\r\nfrom requests import Session\r\nfrom bs4 import BeautifulSoup as BP\r\nfrom uuid import uuid4\r\n\r\n\r\nclass MicholloApi:\r\n FILTER = {'new':'new', 'popular':'popular'}\r\n # new api url : https://app.michollo.com/api/home/popular?limit=10&offset=10&hide_expired=0\r\n API_URL = \"https://app.michollo.com/api/home/{}?limit=10&offset={}&hide_expired=0\"\r\n URL = \"https://michollo.com\"\r\n HEADERS = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 OPR/72.0.3815.459' , 'accept':'*/*', 'accept-language':'es-ES,es;q=0.9,en;q=0.8,ca;q=0.7', 'accept-encoding':'gzip, deflate, br', 'x-requested-with':'XMLHttpRequest'}\r\n INDICES = 100\r\n FILTERS = ['new', 'popular']\r\n\r\n ISO_CODE = 'EURO'\r\n SYMBOL = '€'\r\n SYMBOL_SIDE = 'right'\r\n\r\n def __init__(self):\r\n self._create_session()\r\n\r\n def _create_session(self):\r\n self._session = Session()\r\n self._session.get(self.URL)\r\n\r\n def get_new_items(self, filter='new') -> List:\r\n\r\n items: List = []\r\n if filter.lower() in self.FILTERS:\r\n for i in range(0, self.INDICES, 10):\r\n items.extend(self._get_new_items(filter, offset=i))\r\n return items\r\n \r\n def _get_new_items(self, filter: str, offset: int = 0) -> List:\r\n \r\n response = self._session.get(self.API_URL.format(self.FILTER[filter.lower()], offset), headers=self.HEADERS)\r\n items = []\r\n urls = []\r\n\r\n if response.status_code == 200:\r\n content = response.json()\r\n if content['ok']:\r\n for (i, 
item) in enumerate(content['deals']['results'], 0):\r\n                    try:\r\n                        if item.get(\"status\") == \"expired\":\r\n                            continue\r\n\r\n                        discount_code = self.get_coupons(item)\r\n                        description = BP(item['description'], 'lxml').get_text()\r\n                        items.append({\"title\": item['name'], \"description\": description, \"linked_url\":self.URL+'/'+item.get('slug', ''), \"price\": str(item['sale_price']/100), \"regular_price\": str(item['old_price']/100),\r\n                        \"market\": item[\"store\"][0]['name'], \"id\": str(uuid4()), \"chollo_url\": '', \"index\": i, \"image_url\":item['image_url'].replace('r/190/', ''),\"original_url\":item['image_url'], \"discount\":discount_code, \"currency\": {'iso_code':self.ISO_CODE, 'symbol':self.SYMBOL, 'symbol_side':self.SYMBOL_SIDE}})\r\n                        urls.append(item['offer_url'])\r\n                    except:\r\n                        continue\r\n            urls = self._get_items(urls)\r\n            self.sort_urls(urls, items)\r\n\r\n        return items\r\n    \r\n    def _get_items(self, urls):\r\n\r\n        future_session = FuturesSession(session=self._session, max_workers=4)\r\n        futures = []\r\n        chollos_url = {}\r\n        for (i, url) in enumerate(urls, 0):\r\n            futures.append((future_session.get(url, timeout=5, headers=self.HEADERS), i))\r\n\r\n        for (future, i) in futures:\r\n            try:\r\n                response = future.result()\r\n\r\n                if response.status_code == 200:\r\n                    chollos_url[i] = response.url\r\n                else:\r\n                    chollos_url[i] = response.url\r\n\r\n            except:\r\n                print(\"[x] error with: \" + urls[i])\r\n                chollos_url[i] = urls[i]\r\n\r\n        chollos_url = self.sorted(chollos_url)\r\n        assert len(urls) == len(chollos_url)\r\n        return chollos_url\r\n\r\n    @staticmethod\r\n    def sorted(items):\r\n        _list = []\r\n\r\n        for i in range(len(items)):\r\n            _list.append(items[i])\r\n        return _list\r\n\r\n    @staticmethod\r\n    def get_coupons(item):\r\n        try:\r\n            coupons = item['coupons'][0]['code']\r\n        except:\r\n            return\r\n        return coupons\r\n\r\n    def validate_url(self, url):\r\n        return url.count('https://a.michollo.to') == 1\r\n\r\n    def sort_urls(self, urls, items):\r\n        # _rebase = lambda : [items[i].update({'chollo_url':item}) for (i, item) in enumerate(urls, 0)]\r\n        for (i, url) in enumerate(urls, 0):\r\n            try:\r\n                if self.validate_url(url):\r\n                    items[i].update({'chollo_url':url})\r\n                    items[i].update({'title': '[VERIFICAR ENLACE]' + items[i]['title']})\r\n                else:\r\n                    items[i].update({'chollo_url':url})\r\n            except:\r\n                continue\r\n\r\nif __name__ == \"__main__\":\r\n    from time import time\r\n\r\n    test = MicholloApi()\r\n    execute_time = time()\r\n    print(test.get_new_items(filter='popular')[0])\r\n    print(f\"Execute time {time() - execute_time}\")","repo_name":"MatiasMinoni/test","sub_path":"core/webs/michollo/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43971454182","text":"#-*- coding:utf-8 -*-\r\nfrom django.shortcuts import render_to_response,render\r\nfrom django.http import HttpResponse,HttpResponseRedirect\r\nimport simplejson,sys,os,urllib,re,datetime,time,md5,hashlib,random,calendar,json\r\nimport calendar as cal\r\nfrom conn import crmdb\r\nfrom zz91page import *\r\nfrom sphinxapi import *\r\nfrom settings import searchconfig\r\ndb=crmdb()\r\nreload(sys)\r\nsys.setdefaultencoding('UTF-8')\r\nnowpath=os.path.dirname(__file__)\r\nexecfile(nowpath+\"/func/hr_function.py\")\r\nexecfile(nowpath+\"/func/crmtools.py\")\r\nexecfile(nowpath+\"/func/company_function.py\")\r\nzzc=customer()\r\nzzs=zzhr()\r\n\r\n# retrieve all personnel information\r\ndef hr_list(request):\r\n    
username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n page=request.GET.get('page')\r\n #是否为主管\r\n has_auth=zzc.is_hasauth(user_id=user_id)\r\n if not page:\r\n page=1\r\n searchlist={}\r\n dotype=request.GET.get(\"dotype\")\r\n if dotype:\r\n searchlist['dotype']=dotype\r\n star=request.GET.get(\"star\")\r\n if star:\r\n searchlist['star']=star\r\n else:\r\n star=''\r\n username=request.GET.get(\"username\")\r\n if username:\r\n searchlist['username']=username\r\n else:\r\n username=''\r\n mobile=request.GET.get(\"mobile\")\r\n if mobile:\r\n searchlist['mobile']=mobile\r\n else:\r\n mobile=''\r\n email=request.GET.get(\"email\")\r\n if email:\r\n searchlist['email']=email\r\n else:\r\n email=''\r\n sex=request.GET.get(\"sex\")\r\n if sex:\r\n searchlist['sex']=sex\r\n contactstat=request.GET.get(\"contactstat\")\r\n if contactstat:\r\n searchlist['contactstat']=contactstat\r\n jl1=request.GET.get(\"jl1\")\r\n if jl1:\r\n searchlist['jl1']=jl1\r\n jl2=request.GET.get(\"jl2\")\r\n if jl2:\r\n searchlist['jl2']=jl2\r\n jl3=request.GET.get(\"jl3\")\r\n if jl3:\r\n searchlist['jl3']=jl3\r\n jl4=request.GET.get(\"jl4\")\r\n if jl4:\r\n searchlist['jl4']=jl4\r\n jl5=request.GET.get(\"jl5\")\r\n if jl5:\r\n searchlist['jl5']=jl5\r\n personid=request.GET.get(\"personid\")\r\n if personid:\r\n searchlist['personid']=personid\r\n rpersonid=request.GET.get(\"rpersonid\")\r\n if rpersonid:\r\n searchlist['rpersonid']=rpersonid\r\n orderstr=request.GET.get(\"orderstr\")\r\n if orderstr:\r\n searchlist['orderstr']=orderstr\r\n searchlist['user_id']=user_id\r\n contactstat_list=zzs.getcategorylist(code=\"22\")\r\n jl1_list=zzs.getcategorylist(code=\"17\")\r\n jl2_list=zzs.getcategorylist(code=\"18\")\r\n jl3_list=zzs.getcategorylist(code=\"19\")\r\n jl4_list=zzs.getcategorylist(code=\"20\")\r\n jl5_list=zzs.getcategorylist(code=\"21\")\r\n #获得销售人员列表(selection)\r\n allsalesman=zzc.get_allsalesman(user_id=user_id,renshi=1)\r\n interviewTime1=request.GET.get(\"interviewTime1\")\r\n interviewTime2=request.GET.get(\"interviewTime2\")\r\n if interviewTime1 and interviewTime2:\r\n searchlist['interviewTime1']=interviewTime1\r\n searchlist['interviewTime2']=interviewTime2\r\n else:\r\n interviewTime1=''\r\n interviewTime2=''\r\n gmt_created1=request.GET.get(\"gmt_created1\")\r\n gmt_created2=request.GET.get(\"gmt_created2\")\r\n if gmt_created1 and gmt_created2:\r\n searchlist['gmt_created1']=gmt_created1\r\n searchlist['gmt_created2']=gmt_created2\r\n else:\r\n gmt_created1=''\r\n gmt_created2=''\r\n orderstr=request.GET.get('orderstr')\r\n if orderstr:\r\n searchlist['orderstr']=orderstr\r\n else:\r\n orderstr=''\r\n searchurl=urllib.urlencode(searchlist)\r\n funpage=zz91page()\r\n limitNum=funpage.limitNum(15)\r\n nowpage=funpage.nowpage(int(page))\r\n frompageCount=funpage.frompageCount()\r\n after_range_num = funpage.after_range_num(3)\r\n before_range_num = funpage.before_range_num(6)\r\n userallr=zzs.gethrlist(frompageCount=frompageCount,limitNum=limitNum,searchlist=searchlist)\r\n listcount=userallr['count']\r\n listall=userallr['list']\r\n listcount = funpage.listcount(listcount)\r\n page_listcount=funpage.page_listcount()\r\n firstpage = funpage.firstpage()\r\n lastpage = funpage.lastpage()\r\n page_range = funpage.page_range()\r\n nextpage = funpage.nextpage()\r\n prvpage = funpage.prvpage()\r\n return 
render_to_response('hr/hr_list.html',locals())\r\n#添加人员\r\ndef hr_add(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n renshi_station=zzs.getcategorylist(code=\"15\")\r\n education_list=zzs.getcategorylist(code=\"16\")\r\n return render_to_response('hr/hr_add.html',locals())\r\ndef hr_save(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n username=request.POST.get('username')\r\n mobile=request.POST.get('mobile')\r\n othercontact=request.POST.get('othercontact')\r\n sex=request.POST.get('sex')\r\n email=request.POST.get('email')\r\n education=request.POST.get('education')\r\n worklonger=request.POST.get('worklonger')\r\n laiyuan=request.POST.get('laiyuan')\r\n station=request.POST.get('station')\r\n station2=request.POST.get('station2')\r\n gmt_created=gmt_modified=datetime.datetime.now()\r\n \r\n obj=request.FILES.get('fileField')\r\n timepath=time.strftime('%Y/%m/%d/',time.localtime(time.time()))\r\n tmp = random.randint(100, 999)\r\n nowtime=int(time.time())\r\n time_now=datetime.datetime.now()\r\n resumeUrl=''\r\n if obj:\r\n filename=obj.name\r\n kzname=''\r\n if filename:\r\n arrfile=filename.split(\".\")\r\n kzname=arrfile[len(arrfile)-1]\r\n newpath=nowpath+\"/file/\"+timepath\r\n \r\n imgpath=newpath+str(nowtime)+str(tmp)+\".\"+kzname\r\n if not os.path.isdir(newpath):\r\n os.makedirs(newpath)\r\n f=open(imgpath, 'wb')\r\n for chunk in obj.chunks():\r\n f.write(chunk)\r\n f.close()\r\n resumeUrl=timepath+str(nowtime)+str(tmp)+\".\"+kzname\r\n sql='insert into renshi_user(username,mobile,othercontact,sex,email,education,worklonger,laiyuan,station,station2,gmt_created,gmt_modified,resumeUrl,personid) values(%s, %s, %s,%s, %s, %s, %s, %s, %s, %s,%s,%s,%s,%s)'\r\n result=db.updatetodb(sql,[username,mobile,othercontact,sex,email,education,worklonger,laiyuan,station,station2,gmt_created,gmt_modified,resumeUrl,user_id])\r\n \r\n sql='select id from renshi_user order by id desc '\r\n result1=db.fetchonedb(sql)\r\n last_insert_id=result1['id']\r\n sql='insert into renshi_assign(uid,personid,fdate) values(%s, %s, %s)'\r\n result2=db.updatetodb(sql,[last_insert_id,user_id,time_now])\r\n \r\n return HttpResponseRedirect('list.html')\r\n#修改人员信息\r\ndef hr_mod(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n if request.method==\"POST\":\r\n gmt_modified=datetime.datetime.now()\r\n mobile=request.POST.get('mobile')\r\n username=request.POST.get('username')\r\n othercontact=request.POST.get('othercontact')\r\n sex=request.POST.get('sex')\r\n email=request.POST.get(\"email\")\r\n worklonger=request.POST.get('worklonger')\r\n laiyuan=request.POST.get('laiyuan')\r\n id=request.POST.get('id')\r\n resumeUrl=request.POST.get('resumeUrl')\r\n station2=request.POST.get('station2')\r\n station=request.POST.get('station')\r\n education=request.POST.get('education')\r\n obj=request.FILES.get('fileField')\r\n timepath=time.strftime('%Y/%m/%d/',time.localtime(time.time()))\r\n tmp = random.randint(100, 999)\r\n nowtime=int(time.time())\r\n if obj:\r\n filename=obj.name\r\n kzname=''\r\n if filename:\r\n 
arrfile=filename.split(\".\")\r\n kzname=arrfile[len(arrfile)-1]\r\n newpath=nowpath+\"/file/\"+timepath\r\n \r\n imgpath=newpath+str(nowtime)+str(tmp)+\".\"+kzname\r\n if not os.path.isdir(newpath):\r\n os.makedirs(newpath)\r\n f=open(imgpath, 'wb')\r\n for chunk in obj.chunks():\r\n f.write(chunk)\r\n f.close()\r\n resumeUrl=timepath+str(nowtime)+str(tmp)+\".\"+kzname\r\n if username:\r\n sql='update renshi_user set mobile=%s,username=%s,othercontact=%s,sex=%s,email=%s,worklonger=%s,laiyuan=%s,gmt_modified=%s,resumeUrl=%s,station=%s,station2=%s,education=%s where id=%s'\r\n result=db.updatetodb(sql,[mobile,username,othercontact,sex,email,worklonger,laiyuan,gmt_modified,resumeUrl,station,station2,education,id])\r\n return HttpResponseRedirect('list.html')\r\n else:\r\n id=request.GET.get('id')\r\n resumeUrl=request.GET.get('resumeUrl')\r\n renshi_station=zzs.getcategorylist(code=\"15\")\r\n education_list=zzs.getcategorylist(code=\"16\")\r\n if id:\r\n sql='select * from renshi_user where id=%s'\r\n result=db.fetchonedb(sql,[id])\r\n if result:\r\n if result['sex']=='男':\r\n pass\r\n elif result['sex'] == '女':\r\n del result['sex']\r\n if not result['resumeUrl']:\r\n result['resumeUrl']=None\r\n return render_to_response('hr/hr_mod.html',locals())\r\n if resumeUrl:\r\n newpath=os.path+resumeUrl\r\n with open(newpath) as f: \r\n c = f.read()\r\n return render_to_response('hr/hr_mod.html',locals())\r\n\r\n#批量处理\r\ndef hr_all(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n check_box_list = request.REQUEST.getlist(\"check_box_list\")\r\n topersonid=request.POST.get('topersonid')\r\n value=request.POST.get('dostay',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n fdate=datetime.datetime.now()\r\n if not value:\r\n for id in check_box_list:\r\n sql='delete from renshi_user where id=%s'\r\n result=db.updatetodb(sql,[id])\r\n elif value=='assignto':\r\n for id in check_box_list:\r\n sql=\"select id from renshi_assign where uid=%s\"\r\n result=db.fetchonedb(sql,[id])\r\n if not result:\r\n sql='insert into renshi_assign(personid,uid,fdate) values(%s, %s, %s)'\r\n db.updatetodb(sql,[topersonid,id,fdate])\r\n else:\r\n sql=\"update renshi_assign set personid=%s where id=%s\"\r\n db.updatetodb(sql,[topersonid,result['id']])\r\n bz=\"客户分配\"\r\n sql='insert into renshi_history(bz,uid,personid,fdate) values(%s,%s,%s,%s)'\r\n db.updatetodb(sql,[bz,id,user_id,fdate])\r\n elif value=='tomy':\r\n for id in check_box_list:\r\n sql=\"select id from renshi_assign where uid=%s\"\r\n result=db.fetchonedb(sql,[id])\r\n if not result:\r\n sql='insert into renshi_assign(personid,uid,fdate) values(%s, %s, %s)'\r\n db.updatetodb(sql,[user_id,id,fdate])\r\n else:\r\n sql=\"update renshi_assign set personid=%s where id=%s\"\r\n db.updatetodb(sql,[user_id,result['id']])\r\n bz=\"放到我的客户库\"\r\n sql='insert into renshi_history(bz,uid,personid,fdate) values(%s,%s,%s,%s)'\r\n db.updatetodb(sql,[bz,id,user_id,fdate])\r\n elif value=='gonghai':\r\n for id in check_box_list:\r\n sql='delete from renshi_assign where personid=%s and uid=%s'\r\n result=db.updatetodb(sql,[user_id,id])\r\n bz=\"放入公海\"\r\n sql='insert into renshi_history(bz,uid,personid,fdate) values(%s,%s,%s,%s)'\r\n db.updatetodb(sql,[bz,id,user_id,fdate])\r\n return HttpResponseRedirect('list.html')\r\n \r\n#单独界面显示个人信息\r\ndef hr_usershow(request):\r\n 
username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n contactstat_list=zzs.getcategorylist(code=\"22\")\r\n jl1_list=zzs.getcategorylist(code=\"17\")\r\n jl2_list=zzs.getcategorylist(code=\"18\")\r\n jl3_list=zzs.getcategorylist(code=\"19\")\r\n jl4_list=zzs.getcategorylist(code=\"20\")\r\n jl5_list=zzs.getcategorylist(code=\"21\")\r\n if request.method==\"POST\":\r\n uid=request.GET.get('uid')\r\n contactstat=request.POST.get('contactstat')\r\n selectjl=request.POST.get('selectjl')\r\n if selectjl==\"1\":\r\n code=request.POST.get('jl1')\r\n if selectjl==\"2\":\r\n code=request.POST.get('jl2')\r\n if selectjl==\"3\":\r\n code=request.POST.get('jl3')\r\n if selectjl==\"4\":\r\n code=request.POST.get('jl4')\r\n if selectjl==\"5\":\r\n code=request.POST.get('jl5')\r\n star=request.POST.get('star')\r\n nextteltime=request.POST.get('nextteltime')\r\n bz=request.POST.get('bz')\r\n user_id=request.session.get('user_id',default=None)\r\n fdate=datetime.datetime.now()\r\n gmt_created=datetime.datetime.now()\r\n if uid:\r\n sql='insert into renshi_history(contactstat,code,star,nextteltime,bz,uid,personid,fdate) values(%s,%s,%s,%s,%s,%s,%s,%s)'\r\n result=db.updatetodb(sql,[contactstat,code,star,nextteltime,bz,uid,user_id,fdate])\r\n sql='update renshi_user set star=%s,gmt_created=%s where id=%s'\r\n result=db.updatetodb(sql,[star,gmt_created,uid])\r\n return HttpResponseRedirect('list.html')\r\n else:\r\n id=request.GET.get('uid')\r\n if id:\r\n sql='select id,username,mobile,station,station2,othercontact,sex,email,education,worklonger,laiyuan from renshi_user where id=%s'\r\n result=db.fetchonedb(sql,[id])\r\n if result:\r\n result['station_name']=zzs.getcategorylabel(result['station'])\r\n result['station2_name']=zzs.getcategorylabel(result['station2'])\r\n result['education_name']=zzs.getcategorylabel(result['education'])\r\n return render_to_response('hr/hr_usershow.html',locals())\r\n#操作记录\r\ndef hr_usershow_history(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n page=request.GET.get('page')\r\n if not page:\r\n page=1\r\n searchlist={}\r\n uid=request.GET.get('uid')\r\n if uid:\r\n searchlist['uid']=uid\r\n funpage=zz91page()\r\n limitNum=funpage.limitNum(4)\r\n nowpage=funpage.nowpage(int(page))\r\n frompageCount=funpage.frompageCount()\r\n after_range_num = funpage.after_range_num(3)\r\n before_range_num = funpage.before_range_num(6)\r\n userallr=zzs.getrenshihistory(searchlist=searchlist,frompageCount=frompageCount,limitNum=limitNum)\r\n listall=userallr['list']\r\n listcount=userallr['count']\r\n listcount = funpage.listcount(listcount)\r\n page_listcount=funpage.page_listcount()\r\n firstpage = funpage.firstpage()\r\n lastpage = funpage.lastpage()\r\n page_range = funpage.page_range()\r\n nextpage = funpage.nextpage()\r\n prvpage = funpage.prvpage()\r\n return render_to_response('hr/hr_usershow_history.html',locals())\r\n \r\n#人事基础数据\r\ndef hr_basic(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n searchlist={}\r\n label=request.GET.get('label')\r\n if label:\r\n searchlist['label']=label\r\n 
basiclist=zzs.gethrbasiclist(searchlist=searchlist)\r\n return render_to_response('hr/hr_basic.html',locals())\r\n\r\n#添加人事基础数据\r\ndef hr_basic_add(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n code=request.GET.get('code')\r\n if code:\r\n label=request.POST.get('label')\r\n ord=request.POST.get('ord')\r\n if label:\r\n sql=\"select count(0) as count from renshi_category where code like %s\"\r\n code1=str(db.fetchonedb(sql,[\"\"+code+\"__\"])['count']+1)\r\n code1=code1.zfill(2)\r\n code=code+code1\r\n sql=\"insert into renshi_category(code,label,ord) values(%s, %s, %s)\"\r\n result=db.updatetodb(sql,[code,label,ord])\r\n return HttpResponseRedirect('basic.html')\r\n return render(request,'hr/hr_basic_add.html')\r\n else:\r\n label=request.POST.get('label')\r\n ord=request.POST.get('ord')\r\n if label:\r\n sql=\"select max(left(code,2))+1 from renshi_category\"\r\n code=db.fetchonedb(sql)['max(left(code,2))+1']\r\n sql=\"insert into renshi_category (code,label,ord) values(%s, %s, %s)\"\r\n result=db.updatetodb(sql,[code,label,ord])\r\n return HttpResponseRedirect('basic.html')\r\n return render(request,'hr/hr_basic_add.html')\r\n#修改人事基础数据\r\ndef hr_basic_mod(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n if request.method==\"POST\":\r\n label=request.POST.get('label')\r\n ord=request.POST.get('ord')\r\n id=request.GET.get('id')\r\n if label:\r\n sql='update renshi_category set label=%s,ord=%s where id=%s'\r\n result=db.updatetodb(sql,[label,ord,id])\r\n return HttpResponseRedirect('basic.html')\r\n else:\r\n id=request.GET.get('id')\r\n if id:\r\n sql='select * from renshi_category where id=%s'\r\n result=db.fetchonedb(sql,[id])\r\n return render_to_response('hr/hr_basic_mod.html',locals())\r\n#删除人事基础数据\r\ndef hr_basic_del(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n id=request.GET.get('id')\r\n if id:\r\n sql='delete from renshi_category where id=%s'\r\n result=db.updatetodb(sql,[id])\r\n return HttpResponseRedirect('basic.html')\r\n#保存\r\ndef hr_list_save(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n list={'err':'login'}\r\n return HttpResponse(simplejson.dumps(list, ensure_ascii=False))\r\n hid=request.GET.get(\"hid\")\r\n hvalue=request.GET.get(\"hvalue\")\r\n hfild=request.GET.get(\"hfild\")\r\n sql=\"update renshi_user set \"+hfild+\"=%s where id=%s\"\r\n db.updatetodb(sql,[hvalue,hid])\r\n fdate=datetime.datetime.now()\r\n sql=\"insert into renshi_history(uid,code,personid,fdate) values(%s,%s,%s,%s)\"\r\n db.updatetodb(sql,[hid,hvalue,user_id,fdate])\r\n list={'err':'false'}\r\n return HttpResponse(simplejson.dumps(list, ensure_ascii=False))\r\n \r\n#获取基础类别列表\r\ndef hr_categorylist(request):\r\n username=request.session.get('username',default=None)\r\n user_id=request.session.get('user_id',default=None)\r\n if not username or not user_id:\r\n return HttpResponseRedirect(\"relogin.html\")\r\n code=request.GET.get(\"code\")\r\n list=zzs.getcategorylist(code=code[0:2])\r\n 
return HttpResponse(simplejson.dumps(list, ensure_ascii=False))","repo_name":"cash2one/zzpython","sub_path":"客户关系管理系统/zz91crm/hr.py","file_name":"hr.py","file_ext":"py","file_size_in_byte":20160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30114433096","text":"import telebot\nfrom os import environ\nfrom telebot import types\nimport random\nimport time\nimport jsonpickle\nfrom io import BytesIO\nfrom PIL import Image\nimport requests\nimport numpy as np\n\n#DOC: https://pypi.org/project/pyTelegramBotAPI/\nbot = telebot.TeleBot(environ['TELEGRAM_TOKEN'])\n\nbot_text = \"Bienvenides!\"\n\nstatus = {}\n\ndef save():\n pass\n with open('status.json', 'w') as outfile: \n outfile.write(jsonpickle.encode(status, keys = True))\n\ndef restore():\n pass\n global status\n with open('status.json') as infile:\n status = jsonpickle.decode(infile.read(), keys = True)\n print (\"Restore\")\n\ndef desambiguar(id):\n return [group for group in status if id in status[group].scores]\n\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message):\n\tbot.reply_to(message, bot_text)\n \ndef send_photos(chatid, imgurls):\n list_im = []\n \n for url in imgurls:\n response = requests.get(url)\n img = BytesIO(response.content)\n list_im.append(img)\n \n imgs = [Image.open(i) for i in list_im]\n # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n try:\n imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])\n imgs_comb = Image.fromarray(imgs_comb)\n \n bio = BytesIO()\n bio.name = 'image.jpeg'\n imgs_comb.save(bio, 'JPEG')\n bio.seek(0)\n bot.send_photo(chatid, photo = bio)\n except:\n print(status[-258588711].manos[chatid])\n\nfrom PIL import ImageDraw\ndef send_photos2(chatid, imgurls, votes, authors):\n list_im = []\n \n for url in imgurls:\n response = requests.get(url)\n img = BytesIO(response.content)\n list_im.append(img)\n \n imgs = [Image.open(i) for i in list_im]\n # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n min_shape = (114, 167) #sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n imgs_resized = []\n for i in range(len(imgs)):\n img_resized = imgs[i].resize(min_shape)\n d = ImageDraw.Draw(img_resized)\n d.text((10,10), str(votes[i]), fill=(255,255,0))\n d.text((10,20), authors[i], fill=(255,255,0))\n imgs_resized.append(img_resized)\n \n try:\n imgs_comb = np.hstack([np.asarray(i) for i in imgs_resized])\n imgs_comb = Image.fromarray(imgs_comb)\n \n bio = BytesIO()\n bio.name = 'image.jpeg'\n imgs_comb.save(bio, 'JPEG')\n bio.seek(0)\n bot.send_photo(chatid, photo = bio)\n except:\n print(status[-258588711].manos[chatid])\n\nurls = [\n \"https://cdn.glitch.com/7ce740c2-ba20-4cc4-8e40-661d54b24761%2FDixit\" + str(i) + \".png\" for i in range(1, 17)\n]\nurls[9] = \"https://cdn.glitch.com/7ce740c2-ba20-4cc4-8e40-661d54b24761%2FDixit10.jpg?v=1562703231326\"\n\n@bot.message_handler(func = lambda m: True)\ndef echo_all(message):\n msgtext = message.text\n \n print (message.message_id)\n \n if len(status) == 0: \n restore() #se cayó el bot y hay que volver desde status.json \n group = None\n if message.chat.type == \"private\":\n \n #para votar y mandar definiciones\n #hay que buscar en todos los grupos en cuál está el jugador, que no va poder participar en 2 juegos a la vez\n #el tema de siempre bah\n ag = 
desambiguar(message.from_user.id)\n if len(ag) == 0:\n return\n elif len(ag)== 1:\n group = ag[0]\n else:\n bot.send_message(message.from_user.id, \"There is more than one active group for your user\")\n return\n pass\n else:\n group = message.chat.id\n\n if msgtext == \"/alive\":\n print(\"yes\")\n \n if (msgtext == \"/debug\" and group in status):\n #acá no hay self, usamos status[group]\n print(\"self.scores :\")\n print(status[group].scores) \n print(\"self.users :\")\n print(status[group].users)\n print(\"self.otherimgs :\")\n print(status[group].chosenimgs)\n print(\"self.def_scores :\")\n print(status[group].def_scores)\n print(\"self.phase :\")\n print(status[group].phase)\n print(\"self.grupo :\")\n print(status[group].grupo)\n print(\"self.opciones:\")\n print(status[group].opciones)\n print(\"self.leftToVote:\")\n print(status[group].leftToVote)\n '''print(\"self.usersqueue :\")\n print(status[group].usersqueue)'''\n \n print (status)\n print (group)\n if group not in status:\n print (\"Reset\")\n status[group] = game(group)\n \n if (msgtext == \"/force_end\"):\n status[group] = game(group)\n \n if (msgtext == \"/phase\"):\n bot.send_message(group, str(status[group].phase) + \": \" + phase_number_to_readable(status[group].phase))\n \n #if (msgtext == \"/leave\"):\n # status[group].removeUser(message.from_user.id)\n \n if status[group].phase == -1: #la parte que si hiciera un state de verdad le sacaría el switch (que python ni tiene switch)\n status[group].phasemenos1(message, msgtext)\n elif status[group].phase == 0: \n status[group].phase0(message, msgtext)\n elif status[group].phase == 0.5: \n status[group].phase0_5(message, msgtext)\n elif status[group].phase == 1:\n status[group].phase1(message, msgtext)\n else:\n status[group].phase2(message, msgtext)\n \n save()\n \ndef format(s):\n return s[0].upper() + s[1:].lower()\n\ndef numberKeyboard(n):\n markup = types.ReplyKeyboardMarkup(one_time_keyboard = True, row_width = 100, resize_keyboard = True)\n buttons = [types.KeyboardButton(str(i)) for i in range(1, n + 1)]\n markup.add(*buttons)\n return markup\n \nclass game():\n \n def __init__(self, grupo):\n self.scores = {} #y de paso players con las keys\n self.users = {}\n self.chosenimgs = {} #la definicion para cada jugador\n self.def_scores = {} #los votos en esta ronda para cada jugador (no es necesario pero está bueno, incluso podemos mostrar quién votó que cosa)\n self.phase = -1\n self.grupo = grupo\n #self.usersqueue = set([])\n self.leftToVote = []\n self.manos = {}\n self.storyteller = 0\n self.definition = None\n self.opciones = {}\n \n def getST(self):\n return list(self.users.keys())[self.storyteller % len(self.users)]\n\n def phasemenos1(self, ctx, msgtext):\n if (\"/join\" in msgtext):\n try:\n if ctx.from_user.id in self.users: \n bot.send_message(ctx.from_user.id, \"Ya habías joineado\")\n else:\n bot.send_message(ctx.from_user.id, \"Joineaste\")\n self.scores[ctx.from_user.id] = 0\n self.users[ctx.from_user.id] = ctx.from_user\n except:\n bot.send_message(self.grupo, \"Error, ¿me starteaste ya?\")\n if (\"/startgame\" in msgtext):\n N = len(urls)\n \n mazo = list(range(N))\n random.shuffle(mazo)\n i = 0\n cartasenmano = N//len(self.scores)\n for player in self.scores:\n self.manos[player] = mazo[i * cartasenmano : (i + 1) * cartasenmano]\n i += 1\n bot.send_message(ctx.chat.id, \"Game starteado\")\n self.initphase0()\n \n def initphase0(self):\n self.storyteller += 1 \n \n opciones = [urls[id] for id in self.manos[self.getST()]]\n send_photos(self.getST(), 
opciones)\n print(opciones)\n markup = numberKeyboard(len(opciones))\n bot.send_message(self.getST(), \"Elegí la carta para la próxima ronda\", reply_markup = markup) \n self.phase = 0\n \n def phase0(self, ctx, msgtext):\n if (\"/hurry\" in msgtext):\n s = mentionUser(self.users[self.getST()])\n bot.send_message(self.grupo, s, parse_mode=\"Markdown\")\n if (ctx.from_user.id != self.getST()): return\n if (ctx.chat.type != \"private\"): return\n try:\n self.chosenimgs = {self.getST(): self.manos[self.getST()].pop(int(msgtext) - 1)}\n bot.send_message(self.getST(), \"Elegí la definición para la próxima ronda\")\n self.phase = 0.5\n except:\n bot.send_message(self.getST(), \"Error\")\n \n def phase0_5(self, ctx, msgtext):\n if (\"/hurry\" in msgtext):\n s = mentionUser(self.users[self.getST()])\n bot.send_message(self.grupo, s, parse_mode=\"Markdown\") \n if (ctx.from_user.id != self.getST()): return\n if (ctx.chat.type != \"private\"): return\n if (\" \" in msgtext): \n bot.send_message(self.getST(), \"Una sola palabra plis\")\n return\n self.definition = msgtext\n self.initphase1()\n self.def_scores[ctx.from_user.id] = 0 \n #validar que no tenga espacios\n \n '''def encolarUser(self, user):\n if user.id in self.users: \n bot.send_message(ctx.from_user.id, \"Ya habías joineado\")\n else:\n self.usersqueue.add(user)\n bot.send_message(self.grupo, \"OK, cuando termine la ronda te agrego\")'''\n \n def initphase1(self):\n '''for user in self.usersqueue:\n self.scores[user.id] = 0\n self.users[user.id] = user\n bot.send_message(user.id, \"Joineaste gil\")\n self.usersqueue = set([])'''\n self.phase = 1\n self.def_scores = {}\n for player in self.scores:\n if player == self.getST(): continue\n opciones = [urls[id] for id in self.manos[player]]\n send_photos(player, opciones)\n markup = numberKeyboard(len(opciones))\n bot.send_message(player, \"Mandá una card para \" + self.definition, reply_markup = markup) \n\n def phase1(self, ctx, msgtext):\n\n if (ctx.chat.type != \"private\"): \n '''if (\"/join\" in msgtext):\n self.encolarUser(ctx.from_user)'''\n if (\"/hurry\" in msgtext):\n #bot.send_message(self.grupo, \"Lxs siguientes giles no han enviado sus cards todavía:\")\n s = \"\"\n for player in self.users:\n print (player)\n if player not in self.chosenimgs:\n s += \" \" + mentionUser(self.users[player])\n bot.send_message(self.grupo, s, parse_mode=\"Markdown\")\n if (\"/meaburri\" in msgtext):\n self.initphase2()\n return\n \n if (ctx.from_user.id == self.getST()): return\n \n try:\n self.chosenimgs[ctx.from_user.id] = self.manos[ctx.from_user.id].pop(int(msgtext) - 1) \n #podríamos guardarnos id para forwardear para decir de quien es cada uno pero paja\n self.def_scores[ctx.from_user.id] = 0 \n #eso pisa, podía hacer que solo se pueda una vez\n if len([pl for pl in self.scores if pl not in self.chosenimgs and pl != self.getST()]) == 0: #\"no hay jugadores sin definir\"\n self.initphase2()\n except:\n pass\n \n def initphase2(self):\n self.phase = 2\n for player in self.scores:\n if player == self.getST(): continue\n #muestra keyboard (convendría abstraer)\n self.opciones[player] = [self.chosenimgs[pl] for pl in self.def_scores if pl != player]\n send_photos(player, [urls[i] for i in self.opciones[player]])\n markup = numberKeyboard(len(self.opciones[player]))\n bot.send_message(player, \"Votá una card para \" + self.definition, reply_markup = markup)\n self.leftToVote = list(self.scores.keys())\n self.leftToVote.remove(self.getST())\n \n def phase2(self, ctx, msgtext):\n if (ctx.chat.type != 
\"private\"):\n '''if \"/join\" in msgtext:\n self.encolarUser(ctx.from_user)'''\n if \"/hurry\" in msgtext:\n #bot.send_message(self.grupo, \"Lxs siguientes giles no han votado todavía:\")\n s = \"\"\n for player in self.leftToVote:\n s += mentionUser(self.users[player]) + \" \"\n bot.send_message(self.grupo, s, parse_mode=\"Markdown\") \n if \"/meaburri\" in msgtext:\n self.leftToVote = []\n self.endPhase2()\n return\n\n if (ctx.from_user.id not in self.leftToVote): return\n #chequea que el mensaje sea una de las definiciones (y no la propia), si 2 mandan la misma puede fallar\n try:\n vote = self.opciones[ctx.from_user.id][int(msgtext) - 1]\n votedList = [player for player in self.chosenimgs if self.chosenimgs[player] == vote]\n if len(votedList) == 0 or votedList[0] == ctx.from_user.id:\n bot.send_message(ctx.from_user.id, \"Elegí una card de las que mandaron\")\n return\n except:\n bot.send_message(ctx.from_user.id, \"Elegí una card de las que mandaron\")\n return\n \n self.leftToVote.remove(ctx.from_user.id)\n voted = votedList[0]\n if voted == self.getST():\n self.def_scores[voted] += 1\n #self.scores[ctx.from_user.id] += 3\n bot.send_message(ctx.from_user.id, \"Le pegaste\")\n else:\n self.def_scores[voted] += 1\n bot.send_message(ctx.from_user.id, \"Pifiaste\")\n if (len(self.leftToVote) == 0): #timeoutear también\n self.endPhase2()\n \n \n def endPhase2(self):\n time.sleep(2)\n bot.send_message(self.grupo, 'La card correcta era: ')\n send_photos(self.grupo, [urls[self.chosenimgs[self.getST()]]])\n s = \"Votos recibidos:\\n\"\n \n results = [(player, urls[self.chosenimgs[player]], self.def_scores[player]) for player in self.def_scores]\n results.sort(key = lambda tup: -tup[2]) #sort por la 3er componente - el score\n \n #for res in results:\n # send_photos(self.grupo, [res[1]])\n # s = self.users[res[0]].first_name + ': ' + str(res[2]) + \"\\n\"\n # bot.send_message(self.grupo, s)\n \n send_photos2(self.grupo, [i[1] for i in results], [i[2] for i in results], [self.users[res[0]].first_name for res in results])\n \n # for player in self.def_scores:\n # s += str(player)+': \"'+str(self.otherimgs[player]) + \" - \" + str(self.def_scores[player]) + \"\\n\"\n \n if (self.def_scores[self.getST()] == len(self.scores) - 1):\n bot.send_message(self.grupo, \"Todos eligieron la del storyteller. Son 2 puntos para cada jugador\")\n for player in self.def_scores:\n self.scores[player] += (player != self.getST()) * 2\n \n '''elif (self.def_scores[getST()] == 0):\n bot.send_message(self.grupo, \"Nadie eligió la del storyteller. 
Son 2 puntos para cada jugador\") \n for player in self.def_scores:\n self.scores[player] += (player == self.getST()) * 2\n for player in self.def_scores:\n self.scores[player] += self.def_scores[player]'''\n\n else: \n for player in self.def_scores:\n self.scores[player] += self.def_scores[player]\n \n s = \"Puntajes:\\n\"\n\n for (player, score) in sorted(self.scores.items(), key = lambda kv:(kv[1], kv[0]))[::-1]:\n s += self.users[player].first_name + \": \" + str(score) + \"\\n\"\n bot.send_message(self.grupo, s)\n time.sleep(2)\n \n if len(self.manos[self.getST()]) == 0:\n bot.send_message(self.grupo, \"Game over\")\n player = sorted(self.scores.items(), key = lambda kv:(kv[1], kv[0]))[::-1][0][0]\n bot.send_message(self.grupo, \"Felicitaciones \" + self.users[player].first_name + \" sos el más capo\")\n self.scores = {}\n self.users = {}\n self.chosenimgs = {}\n self.def_scores = {}\n self.phase = -1\n else:\n self.initphase0()\n \n '''def removeUser(self, id):\n if id not in self.scores: return\n bot.send_message(self.grupo, \"You left this game. Use /join to rejoin. You score will be lost\") \n self.scores.pop(id, None)\n self.users.pop(id, None)\n self.chosenimgs.pop(id, None)\n if len([user for user in self.users if user not in self.chosenimgs]) == 0 and self.phase == 2:\n pass\n self.def_scores.pop(id, None)\n self.leftToVote.pop(id, None)\n if len(self.leftToVote) == 0 and self.phase == 2:\n pass\n '''\n #falta que si era el ultimo que faltaba para avanzar se avance\n #habría que buscar también el callback de user left así cuando un user se va del grupo lo rajamos\n \n\t#bot.reply_to(message, message.text)\n \nbot.set_webhook(\"https://{}.glitch.me/{}\".format(environ['PROJECT_NAME'], environ['TELEGRAM_TOKEN']))\n\n# ================= UTILS ================\n\ndef mentionUser(user):\n try:\n return \"@\" + user.username\n except:\n return \"[\" + user.first_name + \"](tg://user?id=\" + str(user.id) + \")\"\n\ndef phase_number_to_readable(phase):\n if phase == -1:\n return \"esperando que joineen y starteen\"\n \n # repartimos las cartas\n # esto es, seteamos ids para cada uno\n # y le mandamos a cada uno la imagen de lo que tiene\n # seteamos primer storyteller\n \n if phase == 0:\n return \"esperando que el storyteller elija su card\"\n \n if phase == 0.5:\n return \"esperando que el storyteller ponga la definición\"\n \n #elige. Le popeamos esa carta de la mano. 
También tiene que mandar una definición (de 1 palabra)\n if phase == 1:\n return \"esperando que todos pongan su card\"\n \n #todos ponen su card (la popeamos de la mano)\n #mostramos las imágenes que se eligieron\n if phase == 2:\n return \"esperando que voten\"\n \n #distribuimos puntos y volvemos a 0 (le mandamos a cada uno la img de lo que tiene de vuelta)\n #O si todos se quedan sin cartas terminamos y volvemos a -1 (gurdándonos quieres eran y con una opcion de restart same players)","repo_name":"Karamchi/Telegram-bots","sub_path":"Dixit/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":16666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2874570297","text":"dr = [-1, 0, 1, 0]\ndc = [0, 1, 0, -1]\nn, m, k = map(int, input().split())\nmap_ = [[False for i in range(n)] for j in range(m)]\nfor _ in range(k):\n i, j, r, c = map(int, input().split())\n for x in range(i, r):\n for y in range(j, c):\n map_[x][y] = True\n\n\n# def dfs(r, c, cnt):\n# for d in range(4):\n# nr = r + dr[d]\n# nc = c + dc[d]\n# if nr >= m or nc >= n or nr < 0 or nc < 0:\n# continue\n# if map_[nr][nc]:\n# continue\n# map_[nr][nc] = True\n# cnt = dfs(nr, nc, cnt)\n# return cnt + 1\n\ndef bfs(node):\n q = [[node[0], node[1]]]\n map_[node[0]][node[1]] = True\n cnt = 1\n while q:\n tmp = q.pop()\n for d in range(4):\n nr = tmp[0] + dr[d]\n nc = tmp[1] + dc[d]\n if nr >= m or nc >= n or nr < 0 or nc < 0:\n continue\n if map_[nr][nc]:\n continue\n map_[nr][nc] = True\n q.append([nr, nc])\n cnt += 1\n return cnt\n\nareas = []\nfor i in range(m):\n for j in range(n):\n if not map_[i][j]:\n areas.append(bfs([i, j]))\nprint(len(areas))\nprint(*sorted(areas))","repo_name":"johyunsub/Algorithm","sub_path":"Python/백준/영역구하기_2583.py","file_name":"영역구하기_2583.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"821253012","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import View\nfrom .forms import UserForm, UserProfileForm, PostForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse\nimport pudb\nfrom django.contrib.auth.decorators import login_required #fancy decorator\nfrom .models import UserProfile, Post\nfrom django.contrib.auth.models import User\n\n\n# Create your views here.\nclass Index(View):\n def get(self, request):\n context = {}\n if request.user.is_authenticated(): # check to see if someone is already logged in \n username = request.user.username\n # username = User.objects.get(username=username)\n message = (\"Hello, \" + username + \" You are logged in\")\n context = {\n 'message': message,\n }\n # return render(request, \"blog/index.html\", context)\n\n # this line gets all the todos that we have in the db\n posts = Post.objects.all().order_by('-updated_at')\n # creates them into a context dict\n context[\"posts\"]=posts\n # context = {\n # 'posts': posts,\n # }\n # send them all to the template\n return render(request, \"blog/index.html\", context)\n\nclass User_Register(View):\n # pu.db\n\t# link to our tamplate page\n template = \"blog/register.html\"\n\n def get(self, request):\n user_form = UserForm()\n profile_form = UserProfileForm()\n context = {\n 'user_form': user_form,\n 'profile_form': profile_form\n }\n # using self is a fancy way to give a variable name to the template ...?\n return render(request, self.template, context)\n\n def 
post(self, request):\n user_form = UserForm(data=request.POST)\n profile_form = UserProfileForm(data=request.POST)\n\n # If the two forms are valid...\n if user_form.is_valid() and profile_form.is_valid():\n # Save the user's form data to the database.\n user = user_form.save()\n # Now sort out the UserProfile instance.\n # Since we need to set the user attribute ourselves, we set commit=False.\n profile = profile_form.save(commit=False)\n profile.user =user\n print (profile.user)\n # Now we save the UserProfile model instance.\n profile.save()\n # return render(request, \"blog/index.html\", {})\n return redirect(\"posts:index\")\n else:\n context = {\n 'user_form': user_form,\n 'profile_form': profile_form\n }\n return render(request, self.template, context)\n\nclass User_Login(View):\n template = \"blog/login.html\"\n\n def post(self, request):\n # Gather the username and password provided by the user.\n # This information is obtained from the login HTML form.\n username = request.POST['username']\n password = request.POST['password']\n\n # Use Django's machinery to attempt to see if the username/password\n # combination is valid - a User object is returned if it is.\n user = authenticate(username=username, password=password)\n\n # If we have a User object, the details are correct.\n # If None (Python's way of representing the absence of a value), no user\n # with matching credentials was found.\n if user:\n # Is the account active? It could have been disabled.\n if user.is_active:\n # If the account is valid and active, we can log the user in.\n # We'll send the user back to the homepage.\n login(request, user)\n # username = request.user.username\n # message = (\"Hello, \" + username + \" You are signed in\")\n # context = {\n # 'message': message,\n # }\n # return render(request, \"blog/index.html\", context)\n return redirect(\"posts:index\")\n\n else:\n # An inactive account was used - no logging in!\n return HttpResponse(\"Your account is disabled.\")\n else:\n # Bad login details were provided. 
So we can't log the user in.\n            print(\"Invalid login details: {0}, {1}\".format(username, password))\n            return HttpResponse(\"Invalid login details supplied.\")\n\n    def get(self, request):\n        # if the user is already signed in \n        if request.user.is_authenticated():\n            # username = request.user.username\n            # # username = User.objects.get(username=username)\n            # message = (\"Hello, \" + username + \" You are already signed in\")\n            # context = {\n            #     'message': message,\n            # }\n            return redirect(\"posts:index\")\n        return render(request, self.template, {})\n\nclass User_Logout(View):\n    # Use the login_required() decorator to ensure only those logged in can access the view.\n    # @login_required\n    def get(self, request):\n        # Since we know the user is logged in, we can now just log them out.\n        logout(request)\n        # Take the user back to the homepage.\n        return redirect(\"posts:index\")\n\nclass Create_Post(View):\n\n    def get(self, request):\n        form = PostForm()\n        context = {\n            \"PostForm\": form }\n        return render(request, \"blog/create.html\", context)\n\n\n\n\n    def post(self, request):\n        if not request.user.is_authenticated():\n            return HttpResponse(\"Forbidden\", status=403)\n\n        form = PostForm(data=request.POST)\n\n        if form.is_valid():\n            # need to save the username the post is attached to\n            user = request.user\n            print (user) \n            post = form.save(commit=False)\n            post.user = user \n            post.save()\n            return redirect(\"posts:index\")\n\n        else:\n            context = {\n                \"PostForm\": form,\n            }\n            return render(request, 'blog/create.html', context)\n        # else:\n        #     return HttpResponseNotAllowed(['GET', 'POST'])\n\n\nclass Edit_Post(View):\n    # def edit(request, post_slug): \n    def get(self, request, post_slug=None):\n        post = Post.objects.get(slug=post_slug)\n        form = PostForm(instance=post)\n        context = {\n            \"post\": post,\n            \"EditForm\": form,\n        }\n        return render(request, \"blog/edit.html\", context)\n\n    def post(self, request, post_slug=None):\n        post = Post.objects.get(slug=post_slug)\n        form = PostForm(data=request.POST, instance=post)\n        if form.is_valid():\n            form.save()\n            return redirect(\"posts:index\")\n        else:\n            context = {\n                \"post\": post,\n                \"EditForm\": form,\n            }\n            return render(request, 'blog/edit.html', context)\n        # else:\n        #     return HttpResponseNotAllowed(['GET', 'POST'])\n\nclass Delete_Post(View):\n    def post(self, request, post_slug=None):\n        post = Post.objects.get(slug=post_slug)\n        post.show = False\n        post.save()\n        return redirect('posts:index')\n\n\n\n","repo_name":"Janteby1/bloggy_v2","sub_path":"projects/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43976406599","text":"from scipy.spatial import Voronoi, voronoi_plot_2d\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\ndef perlin(x,y,seed=0):\n    # permutation table\n    np.random.seed(seed)\n    p = np.arange(256,dtype=int)\n    np.random.shuffle(p)\n    p = np.stack([p,p]).flatten()\n    # coordinates of the top-left\n    xi = x.astype(int)\n    yi = y.astype(int)\n    # internal coordinates\n    xf = x - xi\n    yf = y - yi\n    # fade factors\n    u = fade(xf)\n    v = fade(yf)\n    # noise components\n    n00 = gradient(p[p[xi]+yi],xf,yf)\n    n01 = gradient(p[p[xi]+yi+1],xf,yf-1)\n    n11 = gradient(p[p[xi+1]+yi+1],xf-1,yf-1)\n    n10 = gradient(p[p[xi+1]+yi],xf-1,yf)\n    # combine noises\n    x1 = lerp(n00,n10,u)\n    x2 = lerp(n01,n11,u) # FIX1: I was using n10 instead of n01\n    return lerp(x1,x2,v) # FIX2: I also had to reverse x1 and x2 here\n\ndef 
lerp(a,b,x):\n    \"linear interpolation\"\n    return a + x * (b-a)\n\ndef fade(t):\n    \"6t^5 - 15t^4 + 10t^3\"\n    return 6 * t**5 - 15 * t**4 + 10 * t**3\n\ndef gradient(h,x,y):\n    \"grad converts h to the right gradient vector and return the dot product with (x,y)\"\n    vectors = np.array([[0,1],[0,-1],[1,0],[-1,0]])\n    g = vectors[h%4]\n    return g[:,:,0] * x + g[:,:,1] * y\n\n\ndef Lloyd(V):\n    points = []\n\n    for region in V.regions:\n        L = [x for x in region if x != -1]\n        if L != []:\n            verts = V.vertices[L]\n            point = 0\n            for vert in verts:\n                if vert[0] > 1000:\n                    vert[0] = 1000\n                if vert[0] < 0:\n                    vert[0] = 0\n                if vert[1] < 0:\n                    vert[1] = 0\n                if vert[1] > 1000:\n                    vert[1] = 1000\n                point += vert / len(verts)\n            if type(point) == np.ndarray:\n                points.append(point)\n    return points\n\ndef Lloyds(n=200):\n    points = 1000*np.random.rand(n,2)\n    V = Voronoi(points)\n    for _ in range(100):\n        points = Lloyd(V)\n        V = Voronoi(points)\n    voronoi_plot_2d(Voronoi(points))\n    plt.show()\n\ndef turnImage(img=\"UKMap.jpg\"):\n    img = Image.open(img)\n    img = img.resize((200,200))\n    return img\n\ndef rgb2gray(rgb):\n    return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n\n\n\nif __name__ == '__main__':\n    lin = np.linspace(0,5,200,endpoint=False)\n    x,y = np.meshgrid(lin,lin) # FIX3: I thought I had to invert x and y here but it was a mistake\n    UK = turnImage()\n    UK = UK.convert('L')\n    terrain = perlin(x,y,seed=4)\n    print(terrain.shape)\n    UK = np.array(UK.getdata()).reshape(200,200)==0\n    #UK = rgb2gray(np.ndarray(UK))\n    Lloyds()\n    plt.imshow(UK*terrain,cmap='ocean')\n    plt.show()\n    turnImage()","repo_name":"avivalbeg/The-Island-Boys","sub_path":"Island.py","file_name":"Island.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28433416996","text":"\"\"\"\n2D Item class.\n\"\"\"\nclass Item:\n    \"\"\"\n    Items class for rectangles inserted into sheets\n    \"\"\"\n    def __init__(self, width, height,\n                 CornerPoint: tuple = (0, 0),\n                 rotation: bool = True) -> None:\n        self.width = width\n        self.height = height\n        self.x = CornerPoint[0]\n        self.y = CornerPoint[1]\n        self.area = self.width * self.height\n        self.rotated = False\n        self.id = 0\n\n\n    def __repr__(self):\n        return 'Item(width=%r, height=%r, x=%r, y=%r)' % (self.width, self.height, self.x, self.y)\n\n\n    def rotate(self) -> None:\n        self.width, self.height = self.height, self.width\n        self.rotated = not self.rotated\n","repo_name":"solomon-b/greedypacker","sub_path":"greedypacker/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"54"} +{"seq_id":"37773059471","text":"# import __main__ as main\n# from Helper.TimerLogger import CodeTimeLogging\n# fileName = main.__file__\n# fileName = fileName.split('\\\\')[-1]\n\n# CodeTimeLogging(Flag='F', filename=fileName, Tag='Graphs', Difficult='Medium')\n\n\ndef getDestinantionCity(paths):\n    routes = {val: False for x in paths for val in x}\n    for scr, dst in paths:\n        if scr in routes:\n            routes[scr] = True\n    # print(routes)\n    for location in routes:\n        if not routes[location]:\n            return location\n    return -1\n\n\npaths = [[\"London\", \"New York\"], [\"New York\", \"Lima\"], [\"Lima\", \"Sao Paulo\"]]\npaths = [[\"B\", \"C\"], [\"D\", \"B\"], [\"C\", 
\"A\"]]\nprint(getDestinantionCity(paths))\n","repo_name":"Omkar02/FAANG","sub_path":"LC_yelp_1436_Destination_City.py","file_name":"LC_yelp_1436_Destination_City.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"10557565445","text":"import configparser\nimport json\nimport logging\nimport os.path\nimport shutil\nimport sys\nimport time\nimport typing as t\n\nimport xdg.BaseDirectory\n\nif t.TYPE_CHECKING:\n from . import graphics\n from . import cursors as cursors_\n\nlog = logging.getLogger(__name__)\nstart_time = time.time() # for profiling\n\n# General\nVERSION = \"1.0\"\nAPPNAME = 'pylitaire'\n\n# Paths\nGAMEDIR = os.path.abspath(os.path.dirname(__file__) or '.')\nDATADIR = os.path.join(GAMEDIR, 'data')\nCONFIGDIR = xdg.BaseDirectory.save_config_path(APPNAME)\nWINDOWFILE = os.path.join(CONFIGDIR, 'window.json')\nCONFIGFILE = os.path.join(CONFIGDIR, '{}.conf'.format(APPNAME))\n\n# Graphics\nFPS = 30\nBGCOLOR = (0, 80, 16) # Dark green\nMARGIN = (20, 10) # Board margin and minimum card padding\nSBHEIGHT = 25 # status bar height\nSBCOLOR = (242, 241, 240) # status bar background color\nMIN_SIZE = (320, 192) # Minimum windows size\n\nbackground: t.Optional['graphics.Background'] = None\nslot: t.Optional['graphics.Slot'] = None\ncursors: t.Dict[str, t.Optional['cursors_.Cursor']] = {\n 'default': None,\n 'drag': None,\n 'draggable': None,\n}\n\n# Options\n# Actual defaults are at config/config.template.ini\nfull_screen = False\nwindow_size = (960, 640)\ndebug = False\nprofile = False\nbaize = \"baize-ubuntu\"\ntheme = \"life_and_smooth\"\nslotname = \"slot-gnome\"\ndoubleclicklimit = 400\ngamename = \"klondike\"\n\n\ndef datadirs(dirname):\n \"\"\"List of game relevant data directories.\n\n Useful for finding data files such as themes and images.\n \"\"\"\n return [os.path.join(CONFIGDIR, dirname),\n os.path.join(DATADIR, dirname)]\n\n\ndef load_options(args):\n \"\"\"Load all global options from config file and command line arguments.\"\"\"\n global window_size, full_screen, debug, profile\n global baize, theme, slotname, gamename, doubleclicklimit\n\n # Too lazy for argparse right now\n if args is None:\n args = sys.argv[1:]\n # pre-read debug to configure logging sooner\n if \"--debug\" in args:\n logging.getLogger(__package__).setLevel(logging.DEBUG)\n log.debug(args)\n\n options: t.Dict[str, t.Dict[str, t.Any]] = {'options': dict(\n full_screen=full_screen,\n window_size=window_size,\n debug=debug,\n profile=profile,\n baize=baize,\n theme=theme,\n slotname=slotname,\n doubleclicklimit=doubleclicklimit,\n gamename=gamename,\n )}\n try:\n read_config(CONFIGFILE, options)\n except (IOError, ValueError) as e:\n log.warning(\"Error reading config: %s\", e)\n\n # Override options with command-line arguments\n if \"--fullscreen\" in args: full_screen = True\n if \"--debug\" in args: debug = True\n if \"--profile\" in args: profile = True\n\n # Set the log level\n loglevel = None\n if profile: loglevel = logging.INFO\n if debug: loglevel = logging.DEBUG\n if loglevel:\n logging.getLogger(__package__).setLevel(loglevel)\n\n log.debug(options)\n baize = options[\"options\"][\"baize\"]\n theme = options[\"options\"][\"theme\"]\n slotname = options[\"options\"][\"slotname\"]\n doubleclicklimit = options[\"options\"][\"doubleclicklimit\"]\n gamename = options[\"options\"][\"gamename\"]\n\n try:\n log.debug(\"Loading window size from: %s\", WINDOWFILE)\n with open(WINDOWFILE) as fp:\n # Read in 
2 steps to guarantee a valid (w, h) numeric 2-tuple\n width, height = json.load(fp)\n window_size = (int(width),\n int(height))\n except (IOError, ValueError) as e:\n log.warning(\"Error reading window size, using factory default: %s\", e)\n\n\ndef save_options():\n try:\n log.debug(\"Saving window size to: %s\", WINDOWFILE)\n with open(WINDOWFILE, 'w') as fp:\n json.dump(window_size, fp)\n except IOError as e:\n log.warning(\"Could not write window size: %s\", e)\n\n\ndef read_config(path, options):\n cp = configparser.ConfigParser()\n\n log.debug(\"Loading config from: %s\", CONFIGFILE)\n if not cp.read(path, encoding='utf-8'):\n # Config file does not exist, create one from template and read again\n log.info(\"Config not found, creating one and using default values: %s\", path)\n shutil.copyfile(os.path.join(DATADIR, 'config', 'config.template.ini'), path)\n cp.read(path, encoding='utf-8')\n\n def get_iter(s, o):\n return (_.strip() for _ in cp.get(s, o).split(','))\n\n def get_list(s, o):\n return list(get_iter(s, o))\n\n def get_tuple(s, o):\n return tuple(get_iter(s, o))\n\n # .keys() to avoid 'RuntimeError: dictionary changed size during iteration'\n for section in options.keys():\n if not cp.has_section(section):\n log.warning(\"Section [%s] not found in %s\", section, path)\n continue\n\n # For other sections options list is taken from options dict\n for opt in options[section]:\n if isinstance(options[section][opt], bool): get = cp.getboolean\n elif isinstance(options[section][opt], int): get = cp.getint\n elif isinstance(options[section][opt], float): get = cp.getfloat\n elif isinstance(options[section][opt], list): get = get_list\n elif isinstance(options[section][opt], tuple): get = get_tuple\n else: get = cp.get\n\n try:\n options[section][opt] = get(section, opt)\n\n except configparser.NoOptionError as e:\n log.warning(\"%s in %s\", e, path)\n\n except ValueError as e:\n log.warning(\"%s in '%s' option of %s\", e, opt, path)\n\n\ndef runtime(start=0):\n if not start:\n start = start_time\n return \"{:.0f}\".format(1000 * (time.time() - start))\n","repo_name":"MestreLion/pylitaire","sub_path":"pylitaire/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30712403893","text":"from rest_framework import serializers\nfrom .models import Text, Sentence\n\n\nclass NewTextSerializer(serializers.Serializer):\n id = serializers.IntegerField(read_only=True)\n title = serializers.CharField(required=True, allow_blank=False, max_length=200)\n content = serializers.CharField(required=True, allow_blank=False, trim_whitespace=True, max_length=100000, write_only=True)\n\n def create(self, validated_data):\n return Text.parse_and_create(**validated_data)\n\n\nclass TextSerializer(serializers.ModelSerializer):\n class Meta:\n model = Text\n fields = ['id', 'title', 'created_at']\n\n\nclass SentenceSerializer(serializers.ModelSerializer):\n text = TextSerializer(read_only=True, required=False)\n class Meta:\n model = Sentence\n fields = ['id', 'number', 'content', 'text']\n\n\nclass SimilarSentenceSerializer(serializers.Serializer):\n similarity = serializers.FloatField(read_only=True)\n sentence = SentenceSerializer(read_only=True)\n\n\nclass SimilarTextSerializer(serializers.Serializer):\n text = TextSerializer()\n similar_sentences = SimilarSentenceSerializer(many=True, read_only=True)\n\n\nclass TextDetailSerializer(serializers.ModelSerializer):\n sentences = 
SentenceSerializer(many=True, read_only=True)\n\n class Meta:\n model = Text\n fields = ['id', 'title', 'created_at', 'sentences']\n","repo_name":"rukeba/text-analyzer","sub_path":"sentences/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3579414862","text":"import pydash\nimport pytest\nfrom marshmallow import ValidationError\n\nfrom azure.ai.ml._utils.utils import load_yaml\nfrom azure.ai.ml.entities._load_functions import load_schedule\n\nfrom .._util import _SCHEDULE_TIMEOUT_SECOND\n\n\n@pytest.mark.timeout(_SCHEDULE_TIMEOUT_SECOND)\n@pytest.mark.unittest\n@pytest.mark.pipeline_test\nclass TestScheduleSchema:\n def test_load_cron_schedule_with_file_reference(self):\n test_path = \"./tests/test_configs/schedule/hello_cron_schedule_with_file_reference.yml\"\n schedule = load_schedule(test_path)\n expected_dict = {\n \"name\": \"weekly_retrain_2022_cron_file\",\n \"description\": \"a weekly retrain schedule\",\n \"display_name\": \"weekly retrain schedule\",\n \"trigger\": {\n \"start_time\": \"2022-03-10T10:15:00\",\n \"end_time\": \"2022-06-10T10:15:00\",\n \"time_zone\": \"Pacific Standard Time\",\n \"type\": \"cron\",\n \"expression\": \"15 10 * * 1\",\n },\n \"create_job\": {\n \"display_name\": \"hello_pipeline_abc\",\n \"compute\": \"azureml:cpu-cluster\",\n \"type\": \"pipeline\",\n \"inputs\": {\"hello_string_top_level_input\": {\"path\": \"${{name}}\"}},\n \"jobs\": {\n \"a\": {\n \"inputs\": {\"hello_string\": {\"path\": \"${{parent.inputs.hello_string_top_level_input}}\"}},\n \"component\": {\n \"name\": \"azureml_anonymous\",\n \"version\": \"1\",\n \"is_deterministic\": True,\n \"inputs\": {\"hello_string\": {\"type\": \"string\"}},\n \"type\": \"command\",\n \"command\": \"echo hello ${{inputs.hello_string}}\",\n \"environment\": \"azureml:AzureML-sklearn-1.0-ubuntu20.04-py38-cpu@latest\",\n },\n \"type\": \"command\",\n },\n \"b\": {\n \"component\": {\n \"name\": \"azureml_anonymous\",\n \"version\": \"1\",\n \"is_deterministic\": True,\n \"outputs\": {\"world_output\": {\"type\": \"uri_folder\"}},\n \"type\": \"command\",\n \"command\": 'echo \"world\" >> ${{outputs.world_output}}/world.txt',\n \"environment\": \"azureml:AzureML-sklearn-1.0-ubuntu20.04-py38-cpu@latest\",\n },\n \"type\": \"command\",\n },\n \"c\": {\n \"inputs\": {\"world_input\": {\"path\": \"${{parent.jobs.b.outputs.world_output}}\"}},\n \"component\": {\n \"name\": \"azureml_anonymous\",\n \"version\": \"1\",\n \"is_deterministic\": True,\n \"inputs\": {\"world_input\": {\"type\": \"uri_folder\"}},\n \"type\": \"command\",\n \"command\": \"echo ${{inputs.world_input}}/world.txt\",\n \"environment\": \"azureml:AzureML-sklearn-1.0-ubuntu20.04-py38-cpu@latest\",\n },\n \"type\": \"command\",\n },\n },\n },\n }\n assert schedule._to_dict() == expected_dict\n\n def test_load_cron_schedule_with_job_updates(self):\n test_path = \"./tests/test_configs/schedule/hello_cron_schedule_with_job_updates.yml\"\n yaml_obj = load_yaml(test_path)\n expected_updates = yaml_obj[\"create_job\"]\n expected_updates.pop(\"job\")\n # Workaround the binding shape changes\n expected_updates[\"inputs\"][\"hello_string_top_level_input\"] = {\"path\": \"${{creation_context.trigger_time}}\"}\n scheduled_job = load_schedule(test_path).create_job\n actual_dict = scheduled_job._to_dict()\n for key, val in expected_updates.items():\n assert actual_dict[key] == val\n\n def 
test_load_cron_schedule_with_arm_id(self):\n        test_path = \"./tests/test_configs/schedule/hello_cron_schedule_with_arm_id.yml\"\n        schedule = load_schedule(test_path)\n        expected_dict = {\n            \"name\": \"weekly_retrain_2022_cron_arm\",\n            \"description\": \"a weekly retrain schedule\",\n            \"display_name\": \"weekly retrain schedule\",\n            \"trigger\": {\"time_zone\": \"UTC\", \"type\": \"cron\", \"expression\": \"15 10 * * 1\"},\n            \"create_job\": \"azureml:/subscriptions/d511f82f-71ba-49a4-8233-d7be8a3650f4/resourceGroups/RLTesting/providers/Microsoft.MachineLearningServices/workspaces/AnkitWS/jobs/test_617704734544\",\n        }\n        assert schedule._to_dict() == expected_dict\n\n    def test_load_cron_schedule_with_arm_id_and_updates(self):\n        test_path = \"./tests/test_configs/schedule/hello_cron_schedule_with_arm_id_and_updates.yml\"\n        schedule = load_schedule(test_path)\n        expected_dict = {\n            \"create_job\": {\n                \"experiment_name\": \"schedule_test_exp\",\n                \"id\": \"azureml:/subscriptions/d511f82f-71ba-49a4-8233-d7be8a3650f4/resourceGroups/RLTesting/providers/Microsoft.MachineLearningServices/workspaces/AnkitWS/jobs/test_617704734544\",\n                \"inputs\": {\"hello_string_top_level_input\": {\"path\": \"${{name}}\"}},\n                \"settings\": {\"continue_on_step_failure\": True, \"default_compute\": \"azureml:cpu-cluster\"},\n                \"type\": \"pipeline\",\n            },\n            \"name\": \"weekly_retrain_2022_cron_arm_updates\",\n            \"trigger\": {\n                \"expression\": \"15 10 * * 1\",\n                \"start_time\": \"2022-03-10T10:15:00\",\n                \"time_zone\": \"UTC\",\n                \"type\": \"cron\",\n            },\n        }\n        assert schedule._to_dict() == expected_dict\n\n    def test_load_recurrence_schedule_no_pattern(self):\n        test_path = \"./tests/test_configs/schedule/hello_recurrence_schedule_no_pattern.yml\"\n        schedule = load_schedule(test_path)\n        yaml_obj = load_yaml(test_path)\n        expected_trigger_dict = yaml_obj[\"trigger\"]\n        # Append empty pattern\n        assert schedule not in expected_trigger_dict\n        actual_trigger_dict = schedule._to_dict()[\"trigger\"]\n        # Remove empty key 'schedule': {'hours': [], 'minutes': []}\n        actual_trigger_dict = pydash.omit(actual_trigger_dict, [\"schedule\"])\n        assert actual_trigger_dict == expected_trigger_dict\n\n    def test_load_recurrence_schedule_with_pattern(self):\n        test_path = \"./tests/test_configs/schedule/hello_recurrence_schedule_with_pattern.yml\"\n        schedule = load_schedule(test_path)\n        yaml_obj = load_yaml(test_path)\n        expected_trigger_dict = {\n            \"frequency\": \"week\",\n            \"interval\": 1,\n            \"schedule\": {\"hours\": 10, \"minutes\": 15, \"week_days\": \"monday\"},\n            \"start_time\": \"2022-05-10T10:15:00\",\n            \"time_zone\": \"Pacific Standard Time\",\n            \"type\": \"recurrence\",\n        }\n        assert schedule._to_dict()[\"trigger\"] == expected_trigger_dict\n        expected_updates = yaml_obj[\"create_job\"]\n        expected_updates.pop(\"job\")\n        scheduled_job = load_schedule(test_path).create_job\n        actual_dict = scheduled_job._to_dict()\n        for key, val in expected_updates.items():\n            assert actual_dict[key] == val\n\n    def test_load_invalid_schedule_missing_type(self):\n        test_path = \"./tests/test_configs/schedule/invalid/hello_cron_schedule_with_arm_id_no_type.yml\"\n        with pytest.raises(ValidationError) as e:\n            load_schedule(test_path)\n        assert \"'type' must be specified when scheduling a remote job with updates.\" in 
e.value.messages[0]\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/ml/azure-ai-ml/tests/schedule/unittests/test_schedule_schema.py","file_name":"test_schedule_schema.py","file_ext":"py","file_size_in_byte":7827,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"}
+{"seq_id":"35621866345","text":"#!/usr/bin/env python\n\n\nimport subprocess\n# module that allows you to run system commands\nimport optparse\nimport re\n\ndef argument():\n    # handle command-line arguments\n    parser = optparse.OptionParser()\n    parser.add_option(\"-i\", \"--interface\", dest=\"interface\", help=\"interface for device\")\n    parser.add_option(\"-m\", \"--mac\", dest=\"new_mac\", help=\"new mac address\")\n    (options, arguments) = parser.parse_args()\n    # handle errors\n    if not options.interface:\n        parser.error(\"[*] enter an interface, use --help for more information.\")\n    elif not options.new_mac:\n        parser.error(\"[*] enter a new mac address for the interface, use --help for more information.\")\n    return options\ndef change_mac(interface, new_mac):\n    print(\"[!] changing MAC address of \" + interface + \" to \" + new_mac)\n\n    subprocess.call([\"ifconfig\", interface, \"down\"])\n    # bring the interface down\n\n    subprocess.call([\"ifconfig\", interface, \"hw\", \"ether\", new_mac])\n    # change the interface MAC address\n\n    subprocess.call([\"ifconfig\", interface, \"up\"])\n    # bring the interface back up\n\ndef get_current_mac(interface):\n    # decode the bytes returned by check_output so the regex can search a string\n    ifconfig_results = subprocess.check_output([\"ifconfig\", interface]).decode()\n    # search the output for a MAC address\n    mac_address_search = re.search(r\"\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w\", ifconfig_results)\n    # handle errors\n    if mac_address_search:\n        return mac_address_search.group(0)\n\n    else:\n        print(\"[-] couldn't find any MAC address\")\n\n\n\n\noptions = argument()\n# print the current mac address\ncurrent_mac = get_current_mac(options.interface)\nprint(\"current = \" + str(current_mac))\n\n\n# change to the new mac address and verify the change took effect\nchange_mac(options.interface, options.new_mac)\ncurrent_mac = get_current_mac(options.interface)\nif current_mac == options.new_mac:\n    print(\"MAC address was successfully changed. \" + current_mac)\nelse:\n    print(\"MAC address was not changed\")","repo_name":"adongo1/Mac_changer","sub_path":"mac_changer.py","file_name":"mac_changer.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"23052077505","text":"from api.v1.auth import auth_app\nfrom flask import jsonify, request\nfrom rappi_api import Rappi\nimport os\nimport time\nfrom threading import Thread\nfrom api.v1.utils import get_status, save_status\n\n\ndef login_thread(device_id, action, phone):\n    rappi_interface = Rappi(device_id)\n    st = rappi_interface.login(action, phone)\n\n\n@auth_app.route('/login', methods=['POST'])\ndef login():\n    \"\"\"\n    Register a phone number in Rappi\n    and, depending on the state, return the next required action\n    :return:\n    \"\"\"\n    action = request.form.get('action')\n    device_id = request.form.get('device_id')\n    phone = request.form.get('phone')\n    code = request.form.get('code')\n\n    if not action:\n        return jsonify(error='Missing field: action'), 403\n    if not phone:\n        return jsonify(error='Missing field: phone'), 403\n    if (action == 'sms' or action == 'email') and not code:\n        return jsonify(error='Missing field: code'), 403\n\n    if action == 'init':\n        Thread(target=login_thread, args=(device_id, action, phone)).start()\n        time.sleep(3)\n        return jsonify(next_action='sms')\n\n    elif action == 'sms' or action == 'email':\n        save_status(device_id, action, code)\n        while get_status(device_id)['action'] == action:\n            time.sleep(1)\n        time.sleep(1)\n        return jsonify(next_action=get_status(device_id)['action'])\n\n\ndef check_login_status(device_id):\n    status_path = f'{os.getcwd()}/sessions/{device_id}.status'\n    if os.path.exists(status_path):\n        return True\n    else:\n        return False\n","repo_name":"Danucas/alexa","sub_path":"api/v1/auth/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"74766459680","text":"# Robot\n\nfrom collections import deque\n\nn, m = map(int, input().split())\nmaps = [list(map(int, input().split())) for _ in range(n)]\nvisited = [[[-1] * 4 for _ in range(m)] for _ in range(n)]\n\ns1, s2, s_dir = map(int, input().split())\ns1 -= 1 ; s2 -= 1; s_dir -= 1\ne1, e2, e_dir = map(int, input().split())\ne1 -= 1 ; e2 -= 1; e_dir -= 1\n\nqueue = deque()\nqueue.append((s1, s2, s_dir))\nvisited[s1][s2][s_dir] = 0\n\n# direction order: east, west, south, north\ndx = [0, 0, 1, -1]\ndy = [1, -1, 0, 0]\n\nwhile queue:\n    x, y, dirt = queue.popleft()\n\n    if x == e1 and y == e2 and dirt == e_dir:\n        print(visited[x][y][dirt])\n        break\n    \n    # the robot can move up to 3 cells straight, hence the range 1 to 3\n    for i in range(1, 4):\n        nx = x + (dx[dirt] * i) # go straight in the current facing direction\n        ny = y + (dy[dirt] * i) \n        \n        if not (0 <= nx < n and 0 <= ny < m) or maps[nx][ny]: # stop when out of bounds or the cell is a wall (1)\n            break\n\n        if visited[nx][ny][dirt] == -1:\n            visited[nx][ny][dirt] = visited[x][y][dirt] + 1\n            queue.append((nx, ny, dirt))\n    \n    if dirt > 1:\n        # turn to face east or west\n        for i in range(2):\n            if visited[x][y][i] == -1:\n                visited[x][y][i] = visited[x][y][dirt] + 1\n                queue.append((x, y, i))\n    \n    if dirt <= 1:\n        # turn to face south or north\n        for i in range(2, 4):\n            if visited[x][y][i] == -1:\n                visited[x][y][i] = visited[x][y][dirt] + 1\n                queue.append((x, y, i)) \n","repo_name":"ererink/TIL","sub_path":"Algorithm/BAEKJOON/1726.py","file_name":"1726.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"ko","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} 
+{"seq_id":"14542084901","text":"import requests\r\nfrom requests.adapters import HTTPAdapter\r\n\r\n\r\nclass ListService(object):\r\n\r\n @staticmethod\r\n def find_list_page(page):\r\n session = requests.Session()\r\n session.mount(\"http://\", HTTPAdapter(max_retries=3))\r\n\r\n url = f\"http://challenge-api.luizalabs.com/api/product/?page={page}\"\r\n response = session.get(url)\r\n result = response.json()\r\n return result\r\n\r\n @staticmethod\r\n def find_list_id(id):\r\n session = requests.Session()\r\n session.mount(\"http://\", HTTPAdapter(max_retries=3))\r\n\r\n url = f\"http://challenge-api.luizalabs.com/api/product/{id}/\"\r\n response = session.get(url)\r\n result = response.json()\r\n return result\r\n","repo_name":"michelleNunes/Wishlist","sub_path":"Service/list_service.py","file_name":"list_service.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20933409171","text":"from django.db import models\nfrom accounts.models import UserProfile\nfrom blog.models import BlogPost\n\n# Create your models here.\nclass Category(models.Model):\n category = models.CharField(max_length=50, unique=True)\n def __str__(self):\n return self.category\n\nclass Tag(models.Model):\n tag = models.CharField(max_length=50, unique=True)\n def __str__(self):\n return self.tag\n\nclass Question(models.Model):\n question = models.CharField(max_length=500, blank=False)\n # question_url = models.ImageField(upload_to='question_pics/',blank=True, null=True)\n question_url = models.URLField(blank=True, null=True)\n tag = models.ManyToManyField(Tag)\n choice1 = models.CharField(max_length=150)\n choice2 = models.CharField(max_length=150)\n choice3 = models.CharField(max_length=150)\n choice4 = models.CharField(max_length=150)\n TYPE_CHOICES = (\n ('Multiple Choice', 'Multiple Choice'),\n ('True or False', 'True or False'),\n )\n question_type = models.CharField(max_length=50,choices= TYPE_CHOICES)\n question_level = models.IntegerField(blank=False)\n correct_answer = models.CharField(max_length=50, choices=(\n ('A', 'A'),\n ('B', 'B'),\n ('C', 'C'),\n ('D', 'D'),\n ))\n # blog = models.ForeignKey(BlogPost, on_delete=models.CASCADE, blank=True,null=True)\n detail_explanation = models.TextField(blank=True, null=True)\n # explanation_url = models.ImageField(upload_to='explanation_pics/',blank=True, null=True)\n explanation_url = models.URLField(blank=True, null=True)\n choice1_selected = models.IntegerField(default=0)\n choice2_selected = models.IntegerField(default=0)\n choice3_selected = models.IntegerField(default=0)\n choice4_selected = models.IntegerField(default=0)\n total_amount = models.IntegerField(default=0)\n correct_amount = models.IntegerField(default=0)\n correct_rate = models.FloatField(default=0)\n def __str__(self):\n return self.question\n \n def save(self, *args, **kwargs):\n if self.correct_answer == 'A':\n self.correct_amount = self.choice1_selected\n elif self.correct_answer == 'B':\n self.correct_amount = self.choice2_selected\n elif self.correct_answer == 'C':\n self.correct_amount = self.choice3_selected\n elif self.correct_answer == 'D':\n self.correct_amount = self.choice4_selected\n\n self.total_amount = self.choice1_selected + self.choice2_selected + self.choice3_selected + self.choice4_selected\n if self.total_amount > 0:\n self.correct_rate = round((self.correct_amount / self.total_amount), 4)*100\n super().save(*args, **kwargs)\n\n\nclass Quiz(models.Model):\n quiz_name = 
models.CharField(max_length=50)\n quiz_description = models.TextField(blank=True, null=True)\n quiz_questions = models.ManyToManyField(Question)\n question_amount = models.IntegerField(blank=False, default=10)\n question_score = models.IntegerField(blank=False, default=10)\n quiz_time = models.IntegerField(blank=False, default=300)\n def __str__(self):\n return self.quiz_name\n \n def save(self, *args, **kwargs):\n if self.question_amount > 0:\n self.question_score = 100 / self.question_amount\n super().save(*args, **kwargs)\n\nclass QuizResult(models.Model):\n quiz = models.ForeignKey(Quiz, on_delete=models.PROTECT)\n user = models.ForeignKey(UserProfile, on_delete=models.PROTECT)\n correct_amount = models.IntegerField(blank=False,default=0)\n score = models.IntegerField(blank=False)\n user_answer_time = models.IntegerField(blank=False, default=0)\n def __str__(self):\n return self.quiz.quiz_name + ' - ' + self.user.user.username + ' - ' + str(self.score)\n \nclass QuizResultDetail(models.Model):\n quiz_result = models.ForeignKey(QuizResult, on_delete=models.PROTECT)\n question = models.ForeignKey(Question, on_delete=models.PROTECT)\n user_answer = models.CharField(max_length=50)\n correct = models.BooleanField(default=False)\n def __str__(self):\n return self.quiz_result.quiz.quiz_name","repo_name":"THChen2002/Django-finalproj","sub_path":"finalproj/quiz/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3241324904","text":"# Given an m x n grid of characters board and a string word, return true if word exists in the grid.\n\n# The word can be constructed from letters of sequentially adjacent cells, where adjacent cells are horizontally or vertically neighboring. 
The same letter cell may not be used more than once.\n\n\n# Example 1:\n\n# Input: board = [[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]], word = \"ABCCED\"\n# Output: true\n\nfrom typing import List\n\n\nclass Solution:\n    def exist(self, board: List[List[str]], word: str) -> bool:\n        row, col = len(board), len(board[0])\n        path = set()\n\n        # r = row, c = col, i = index in word \n        def dfs(r, c, i):\n            # base case: every character matched, so the word exists\n            if i == len(word):\n                return True\n            \n            # base case: false if the letter does not match, we are out of bounds, or the cell is already in the path\n            if (r<0 or \n                c<0 or \n                r>=row or \n                c>=col or\n                board[r][c] != word[i] or \n                (r,c) in path\n                ):\n                return False \n\n            # add visited\n            path.add((r, c))\n\n            # check neighbors, if any of them return true then continue, think about starting from last node for dfs \n            res = ( \n                dfs(r+1, c, i+1) or \n                dfs(r, c+1, i+1) or \n                dfs(r-1, c, i+1) or \n                dfs(r, c-1, i+1) \n                ) \n            # remove from path after visit\n            path.remove((r, c))\n            return res\n        \n        # for each starting point, run dfs\n        for i in range(row):\n            for j in range(col):\n                if dfs(i, j, 0): \n                    return True\n        \n        return False\n","repo_name":"Barneybean/LeetCode-Practices","sub_path":"lt79_word_search.py","file_name":"lt79_word_search.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"72001290721","text":"# -*- coding: utf-8 -*-\nimport os\nimport xbmc\nimport xbmcaddon\nimport xbmcvfs\nfrom bs4 import BeautifulSoup\nfrom xml.etree.ElementTree import parse, Element\nfrom videomaker import VideoMaker\nimport launchplugin\nfrom md5_check import get_directory_hash\n\n\ndef print_log(*args):\n    try:\n        prefix = ADDON_ID\n    except NameError:\n        prefix = \"CUSTOM_NOTICE>> \"\n    arg = \"\\n\".join([str(a) for a in args])\n    try:\n        xbmc.log(\"{0} :: {1}\".format(prefix, arg), 2)\n    except NameError:\n        import xbmc\n        xbmc.log(\"{0} :: {1}\".format(prefix, arg), 2)\n    except ImportError:\n        print(\"{0} :: {1}\".format(prefix, arg))\n\n\n# CONSTANTS\nADDON_ID = 'screensaver.customslideshow'\nADDON = xbmcaddon.Addon(id=ADDON_ID)\nCWD = ADDON.getAddonInfo('path').decode(\"utf-8\")\nXBMC_HOME = xbmc.translatePath('special://home/')\nXBMC_USER_DATA = xbmc.translatePath('special://home/userdata')\nADDON_HOME = os.path.join(XBMC_HOME, 'addons', ADDON_ID)\nCACHE_DATA_FOLDER = os.path.join(XBMC_USER_DATA, 'addon_data', ADDON_ID)\nLABELS = {0: 'EVEN_PICTURES', 1: 'ODD_PICTURES'}\nADDON_RESOURCE_SETTING = os.path.join(ADDON_HOME, 'resources', 'settings.xml')\nCACHE_SETTING_FILE = os.path.join(CACHE_DATA_FOLDER, 'settings.xml')\n\nprint_log(\" SCREEN_SAVER_ADDON_INITIALIZED... 
\",\n \"ADDON_ID : {0}\".format(ADDON_ID),\n \"ADDON : {0}\".format(ADDON),\n \"CWD : {0}\".format(CWD),\n \"XBMC_HOME : {0}\".format(XBMC_HOME),\n \"XBMC_USER_DATA : {0}\".format(XBMC_USER_DATA),\n \"ADDON_HOME : {0}\".format(ADDON_HOME),\n \"ADDON_RESOURCE_SETTING : {0}\".format(ADDON_RESOURCE_SETTING),\n \"CACHE_SETTING_FILE : {0}\".format(CACHE_SETTING_FILE)\n )\n\nif not os.path.exists(CACHE_DATA_FOLDER):\n xbmcvfs.mkdir(CACHE_DATA_FOLDER)\n\nif not os.path.exists(CACHE_SETTING_FILE):\n with open(CACHE_SETTING_FILE, \"a+\") as cache_media:\n xml_default_content = \"\"\"\n \n \n \n \n \n \n \n \n \"\"\"\n cache_media.write(xml_default_content)\n print_log(\"create file xml\")\n_image_extension = [\".png\", \".jpeg\", \".jpg\"]\n_video_extension = [\".mp4\", \".mkv\", \".avi\"]\n\n\ndef insert_data(content_list=None, content_location=None, flag=0, target_xml=ADDON_RESOURCE_SETTING):\n print_log(\"insert_data() : initialized\")\n xml_doc = parse(target_xml)\n xml_root = xml_doc.getroot()\n label = LABELS[flag]\n category = xml_root.findall('.//category[@label=\"' + label + '\"]')[0]\n # print_log(\"content_list\", content_list, \"content_location\", content_location, \"flag\", flag, \"target_xml\", target_xml)\n if content_list:\n for content in content_list:\n if content.split('.')[-1] in ['mp4', 'mkv']:\n label = '.'.join(content.split(\".\")[:-1])\n category.append(\n Element(\"setting\",\n {\"label\": label,\n \"type\": \"slider\", \"default\": \"8\",\n \"range\": \"0,30\",\n \"option\": \"int\",\n \"id\": content\n }))\n if content_location:\n if os.path.exists(content_location):\n for content in os.listdir(content_location):\n if content.endswith(\".mp4\") or content.endswith(\".mkv\"):\n label = '.'.join(content.split(\".\")[:-1])\n category.append(\n Element(\"setting\",\n {\"label\": label,\n \"type\": \"slider\", \"default\": \"8\",\n \"range\": \"0,30\",\n \"option\": \"int\",\n \"id\": content\n }))\n xml_doc.write(target_xml, xml_declaration=True)\n print_log(\"insert_data() : xml updated\")\n\n\ndef remove_xml(target_xml=ADDON_RESOURCE_SETTING, label=LABELS[0]):\n print_log(\"remove_xml() : initialized\")\n xml_doc = parse(target_xml)\n xml_root = xml_doc.getroot()\n for category in xml_root:\n if 'label' in category.attrib:\n if category.attrib['label'] == label:\n for elem in list(category):\n category.remove(elem)\n xml_doc.write(target_xml, xml_declaration=True)\n print_log(\"remove_xml() : xml tag successfully cleared\")\n\n\ndef writexml(path, flag=0):\n print_log(\"writexml() : initialized\")\n if os.path.exists(path):\n print_log(path)\n remove_xml(label=LABELS[flag])\n cache_folder = os.path.join(path, \".cache\")\n if not os.path.exists(cache_folder):\n os.mkdir(cache_folder)\n xbmc.executebuiltin(\"ActivateWindow(busydialog)\")\n xbmc.sleep(100)\n converter_obj = VideoMaker()\n source = os.listdir(path)\n print_log(\"source image list\", source, converter_obj.supported_extension)\n for content in source:\n content_extension = content.split(\".\")[-1]\n if content_extension in ['jpg', 'jpeg', 'png']:\n converter_obj.make_video_ffmpeg(content, source_path=path, target_path=cache_folder, duration=8)\n print(\"Process for {} completed\".format(content))\n # print_log(os.listdir(cache_folder), path)\n target_list = os.listdir(cache_folder)\n target_content = ['.'.join(content.split(\".\")[:-1]) for content in target_list]\n source_content = ['.'.join(content.split(\".\")[:-1]) for content in source]\n for file in target_content:\n if file not in source_content:\n for 
elem in target_list:\n if elem.startswith(file):\n os.remove(os.path.join(cache_folder, elem))\n insert_data(content_list=os.listdir(cache_folder), flag=flag)\n insert_data(content_list=os.listdir(path), flag=flag)\n xbmc.sleep(100)\n xbmc.executebuiltin(\"Dialog.Close(busydialog)\")\n with open(os.path.join(ADDON_HOME, \"{}.md5\".format(LABELS[flag])), \"w\") as f:\n f.write(get_directory_hash(path))\n print_log(\"writexml() : progress 100%\")\n\n\ndef readxml(directory_items, target_xml=ADDON_RESOURCE_SETTING):\n print_log(\"readxml() : initialized\")\n with open(target_xml, \"r\") as f:\n content = f.read()\n soup = BeautifulSoup(content, 'html.parser')\n settings = soup.find_all(\"category\")\n for sett in settings:\n setting_tags = sett.find_all(\"setting\")\n if LABELS[0] in sett[\"label\"]: # EVEN_PICTURES\n even_images_from_settings = setting_tags if setting_tags else None\n if even_images_from_settings:\n new_images_in_even_folder_exists = os.listdir(directory_items.get(\"select_media_even\"))\n if len(even_images_from_settings) != len(new_images_in_even_folder_exists):\n writexml(directory_items.get(\"select_media_even\"), flag=0)\n if LABELS[1] in sett[\"label\"]: # ODD_PICTURES\n odd_images_from_settings = setting_tags if setting_tags else None\n if odd_images_from_settings:\n new_images_in_odd_folder_exists = os.listdir(directory_items.get(\"select_media_odd\"))\n if len(odd_images_from_settings) != len(new_images_in_odd_folder_exists):\n writexml(directory_items.get(\"select_media_odd\"), flag=1)\n\n\ndef check_new_path(target_xml=CACHE_SETTING_FILE):\n if os.path.exists(target_xml):\n with open(target_xml, \"r\") as f: # opening xml file\n content = f.read()\n soup = BeautifulSoup(content, 'html.parser')\n settings = soup.find_all(\"setting\")\n # for old python version use below line\n media_dict = dict((sett[\"id\"], sett[\"value\"]) for sett in settings if \"select_media\" in sett[\"id\"])\n # media_dict = {sett[\"id\"]: sett[\"value\"] for sett in settings if \"select_media\" in sett[\"id\"]}\n return media_dict\n else:\n return \"File not found\"\n\n\nclass BaseMonitor(xbmc.Monitor):\n def onSettingsChanged(self):\n return True\n\n\nif __name__ == '__main__':\n print_log(\"VideoScreenSaverService: Startup checks\")\n # Make sure that the settings have been updated correctly\n media_data_path = check_new_path()\n monitor = BaseMonitor()\n # Check if we should start the screen saver video on startup\n launchplugin.main()\n while not monitor.abortRequested():\n xbmc.sleep(500)\n if monitor.abortRequested():\n xbmc.sleep(500)\n os._exit(1)\n\n recursive_path_check = check_new_path()\n print_log(\"path_recursion\", recursive_path_check)\n\n if monitor.onSettingsChanged():\n # print_log(\"Settings Changed\")\n if 'select_media_odd' in media_data_path and 'select_media_odd' in recursive_path_check:\n if not media_data_path['select_media_odd'] == recursive_path_check['select_media_odd']:\n print_log(\"odd_change_trigger\")\n get_odd_path = recursive_path_check.get(\"select_media_odd\")\n writexml(get_odd_path, flag=1)\n\n if 'select_media_even' in media_data_path and 'select_media_even' in recursive_path_check:\n if not media_data_path['select_media_even'] == recursive_path_check['select_media_even']:\n print_log(\"even_change_trigger\")\n get_even_path = recursive_path_check.get(\"select_media_even\")\n writexml(get_even_path, flag=0)\n media_data_path = check_new_path()\n else:\n xbmc.sleep(500)\n if 'select_media_odd' in recursive_path_check:\n get_odd_path = 
recursive_path_check.get(\"select_media_odd\")\n odd_md5 = os.path.join(ADDON_HOME, \"{}.md5\".format(LABELS[1]))\n if os.path.exists(odd_md5):\n with open(odd_md5, \"r\") as f:\n existing_md5 = f.read().strip()\n new_md5 = get_directory_hash(get_odd_path)\n if new_md5 != existing_md5:\n print_log(\"odd_md5_trigger\", existing_md5, new_md5)\n writexml(get_odd_path, flag=1)\n else:\n with open(odd_md5, 'w') as f:\n f.write(get_directory_hash(get_odd_path))\n if 'select_media_even' in recursive_path_check:\n get_even_path = recursive_path_check.get(\"select_media_even\")\n even_md5 = os.path.join(ADDON_HOME, \"{}.md5\".format(LABELS[0]))\n if os.path.exists(even_md5):\n with open(even_md5, \"r\") as f:\n existing_md5 = f.read().strip()\n if get_directory_hash(get_even_path) != existing_md5:\n print_log(\"even_md5_trigger\", existing_md5, even_md5)\n writexml(get_even_path, flag=0)\n else:\n with open(even_md5, 'w') as f:\n f.write(get_directory_hash(get_even_path))\n\n","repo_name":"gahan9/ivscreensaver","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":11601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25934111230","text":"import unittest\nfrom typing import Any, List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n if(not nums):\n return 0\n prev = nums[-1]\n for i in range(len(nums)-2, -1, -1):\n if(nums[i] == prev):\n prev = nums[i]\n nums.pop(i)\n else:\n prev = nums[i]\n return len(nums)\n\n\nclass TestCase(unittest.TestCase):\n sln = Solution()\n\n def test_1(self):\n i = [1, 1, 2]\n o = [1, 2]\n l = 2\n res = self.sln.removeDuplicates(i)\n self.assertEqual(res, l)\n self.assertEqual(i, o)\n\n def test_2(self):\n i = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]\n o = [0, 1, 2, 3, 4]\n l = 5\n res = self.sln.removeDuplicates(i)\n self.assertEqual(res, l)\n self.assertEqual(i, o)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"CAH9487/LeetCode","sub_path":"026_Remove_Duplicates_from_Sorted_Array/26_Remove_Duplicates_from_Sorted_Array.py","file_name":"26_Remove_Duplicates_from_Sorted_Array.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25377708814","text":"import sys\nsys.stdin=open(\"input.txt\",\"r\")\ninput = sys.stdin.readline\n \ndef dfs(cnt, idx):\n if cnt == n:\n vo, co = 0, 0\n\n for j in res:\n if j in vowel:\n vo += 1\n else:\n co += 1\n \n if vo>=1 and co>=2:\n print(\"\".join(res))\n\n return\n \n for i in range(idx, m):\n res.append(word_list[i])\n dfs(cnt+1, i+1)\n res.pop()\n\nn, m = map(int, input().split())\nword_list = sorted(list(map(str, input().split())))\nvowel = ['a', 'e', 'i', 'o', 'u']\nres = []\ndfs(0, 0)\n","repo_name":"noxknow/Python-Coding_test","sub_path":"07. 백트래킹/1759 암호 만들기.py","file_name":"1759 암호 만들기.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8073409691","text":"import numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\ndef wavelength_to_rgb(wavelength, gamma=0.8):\n ''' taken from http://www.noah.org/wiki/Wavelength_to_RGB_in_Python\n This converts a given wavelength of light to an\n approximate RGB color value. 
The wavelength must be given\n    in nanometers in the range from 380 nm through 750 nm\n    (789 THz through 400 THz).\n\n    Based on code by Dan Bruton\n    http://www.physics.sfasu.edu/astro/color/spectra.html\n    Additionally alpha value set to 0.5 outside range\n    '''\n    wavelength = float(wavelength)\n    if wavelength >= 380 and wavelength <= 750:\n        A = 1.\n    else:\n        A=0.5\n    if wavelength < 380:\n        wavelength = 380.\n    if wavelength >750:\n        wavelength = 750.\n    if wavelength >= 380 and wavelength <= 440:\n        attenuation = 0.3 + 0.7 * (wavelength - 380) / (440 - 380)\n        R = ((-(wavelength - 440) / (440 - 380)) * attenuation) ** gamma\n        G = 0.0\n        B = (1.0 * attenuation) ** gamma\n    elif wavelength >= 440 and wavelength <= 490:\n        R = 0.0\n        G = ((wavelength - 440) / (490 - 440)) ** gamma\n        B = 1.0\n    elif wavelength >= 490 and wavelength <= 510:\n        R = 0.0\n        G = 1.0\n        B = (-(wavelength - 510) / (510 - 490)) ** gamma\n    elif wavelength >= 510 and wavelength <= 580:\n        R = ((wavelength - 510) / (580 - 510)) ** gamma\n        G = 1.0\n        B = 0.0\n    elif wavelength >= 580 and wavelength <= 645:\n        R = 1.0\n        G = (-(wavelength - 645) / (645 - 580)) ** gamma\n        B = 0.0\n    elif wavelength >= 645 and wavelength <= 750:\n        attenuation = 0.3 + 0.7 * (750 - wavelength) / (750 - 645)\n        R = (1.0 * attenuation) ** gamma\n        G = 0.0\n        B = 0.0\n    else:\n        R = 0.0\n        G = 0.0\n        B = 0.0\n    return (R,G,B,A)\n\n\n_visible_range_plus = (350,780)\n_norm = plt.Normalize(*_visible_range_plus)\n_wl = np.arange(_visible_range_plus[0],_visible_range_plus[1]+1,2)\n_colorlist = list(zip(_norm(_wl),[wavelength_to_rgb(w) for w in _wl]))\nspectralmap = matplotlib.colors.LinearSegmentedColormap.from_list(\"spectrum\", _colorlist)\n\n\ndef plot_spectrum(x, y=None, lo=400, hi=700, ylabel=None):\n    if y is None:\n        # only intensities were given: treat x as y and synthesize evenly spaced wavelengths over [lo, hi]\n        y = x\n        interval = (hi - lo) // (len(y) - 1)\n        x = np.arange(lo, hi + interval, interval)\n    plt.plot(x, y, color='darkred')\n\n    y_max = max(1, max(y))\n    y_steps = np.linspace(0, y_max, 100)\n    X, Y = np.meshgrid(np.linspace(min(x), max(x), 100), y_steps)\n\n    extent = (min(x), max(x), min(y_steps), y_max)\n\n    plt.imshow(X, clim=_visible_range_plus, extent=extent, cmap=spectralmap, aspect='auto')\n    plt.xlabel('$\\\lambda$ (Wavelength nm)')\n    if ylabel is not None:\n        plt.ylabel(ylabel)\n    plt.fill_between(x, y, y_max, color='w')\n","repo_name":"karaimer/Image-Formation","sub_path":"ivrl_helper.py","file_name":"ivrl_helper.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"44021509697","text":"\nimport collections\n\n\nclass BuildArcs:\n    def __init__(self):\n        pass\n\n    def build_parse_arcs(self, deps, words, postags, netags):\n\n        class Node:\n            def __init__(self, head, relation, word, postag, netag, idx):\n                self.head = head - 1\n                self.relation = relation\n                self.children = collections.defaultdict(list)\n                self.postag = postag\n                self.netag = netag\n                self.word = word\n                self.idx = idx\n                self.children_indexes = set()\n\n        _raw_arcs = [0 for i in range(len(deps))]\n        for dep in deps:\n            _raw_arcs[dep[2] - 1] = dep\n        arcs = []\n        _raw_arcs = [t for t in _raw_arcs if t != 0]\n        for idx, dep in enumerate(_raw_arcs):\n            _node = Node(head=dep[1],\n                         relation=dep[0],\n                         word=words[idx],\n                         postag=postags[idx],\n                         netag=netags[idx],\n                         idx=idx)\n            arcs.append(_node)\n        for idx, arc in enumerate(arcs):\n            if arc.head != -1:\n                arcs[arc.head].children[arc.relation].append(idx)\n                arcs[arc.head].children_indexes.add(idx)\n        return arcs\n\n    def revise_arcs(self, arcs):\n        for idx, arc in enumerate(arcs):\n            
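# re-root copular clauses: promote the 'cop' child to ROOT and demote this token to its direct object\n            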
if 'cop' in arc.children:\n if len(arc.children['cop']) != 1:\n break\n cop_index = arc.children['cop'][0]\n\n arcs[cop_index].head = -1\n arcs[cop_index].relation = 'ROOT'\n\n arcs[cop_index].children['dobj'].append(idx)\n arcs[cop_index].children_indexes.add(idx)\n arcs[idx].children_indexes.remove(cop_index)\n\n del arcs[idx].children['cop']\n\n if 'nsubj' in arc.children:\n sbj_idx = min(arc.children['nsubj'])\n arcs[cop_index].children['nsubj'].append(sbj_idx)\n arcs[sbj_idx].head = cop_index\n arcs[cop_index].children_indexes.add(sbj_idx)\n arcs[idx].children_indexes.remove(sbj_idx)\n\n arcs[idx].children['nsubj'].remove(sbj_idx)\n\n for _type in ['conj', 'ccomp']:\n if _type == arc.relation:\n head_idx = arcs[idx].head\n arcs[cop_index].head = head_idx\n arcs[cop_index].relation = arcs[idx].relation\n arcs[head_idx].children_indexes.remove(idx)\n arcs[head_idx].children_indexes.add(cop_index)\n arcs[head_idx].children[_type].remove(idx)\n arcs[head_idx].children[_type].append(cop_index)\n arcs[idx].relation = 'dobj'\n arcs[idx].head = cop_index\n return arcs\n\n def build_arcs(self, deps, words, postags, netags):\n arcs = self.build_parse_arcs(deps, words, postags, netags)\n arcs = self.revise_arcs(arcs)\n return arcs\n","repo_name":"MRKINKI/oie","sub_path":"main/tool/arcs/build_arcs.py","file_name":"build_arcs.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28128989830","text":"import sys\nfrom threading import Thread\nfrom functools import partial\n\nfrom PyQt5.QtCore import Qt, QDir, QSize\nfrom PyQt5.QtGui import QIcon, QFont, QTextCursor\nfrom PyQt5.QtWidgets import (QWidget, QMainWindow, QAction, qApp, QLineEdit, QLabel, QTextEdit, QListWidgetItem,\n QToolButton, QMenu, QSizePolicy, QPushButton, QApplication, QScrollBar,\n QDesktopWidget, QInputDialog, QFileSystemModel, QListWidget, QSplitter, QHBoxLayout,\n QVBoxLayout, QTreeView)\n\nfrom client import Client\nfrom utils import *\n\nclient = Client()\n\n\ndef _getDevideLine():\n devideLine = QPushButton()\n devideLine.setMaximumWidth(1)\n devideLine.setFocusPolicy(Qt.NoFocus)\n return devideLine\n\n\nclass InputRowWidget(QWidget):\n def __init__(self, parent=None):\n super(InputRowWidget, self).__init__(parent)\n self.hBox = QHBoxLayout()\n\n self.hostTextEdit = QLineEdit()\n self.usernameTextEdit = QLineEdit()\n self.passwordTextEdit = QLineEdit()\n self.portTextEdit = QLineEdit()\n self.connButton = QPushButton('Quickconnect')\n self.moreButton = QToolButton()\n\n self.hostTextEdit.setFixedWidth(120)\n self.usernameTextEdit.setFixedWidth(120)\n self.passwordTextEdit.setFixedWidth(120)\n self.passwordTextEdit.setEchoMode(QLineEdit.Password)\n self.portTextEdit.setMaxLength(5)\n self.portTextEdit.setFixedWidth(80)\n\n self.moreButton.setArrowType(Qt.DownArrow)\n self.moreButton.setPopupMode(QToolButton.InstantPopup)\n\n self.hBox.addWidget(QLabel('Host:', self))\n self.hBox.addWidget(self.hostTextEdit)\n self.hBox.addWidget(QLabel('Username:', self))\n self.hBox.addWidget(self.usernameTextEdit)\n self.hBox.addWidget(QLabel('Password:', self))\n self.hBox.addWidget(self.passwordTextEdit)\n self.hBox.addWidget(QLabel('Port:', self))\n self.hBox.addWidget(self.portTextEdit)\n self.hBox.addWidget(self.connButton)\n self.hBox.addWidget(self.moreButton)\n self.hBox.addStretch(1)\n\n self.setLayout(self.hBox)\n self.setContentsMargins(0, 0, 0, 0)\n\n\nclass LocalArea(QWidget):\n def __init__(self, parent=None):\n 
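# left-hand pane: a local filesystem browser backed by QFileSystemModel\n        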
super(LocalArea, self).__init__(parent)\n\n self.localPathLabel = QLabel(getLinuxCwd())\n self.localFileLable = QLabel('')\n self.localModel = QFileSystemModel()\n self.localModel.setRootPath(QDir.rootPath())\n\n self.localPathLabel.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n self.localFileLable.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n\n localArea = QVBoxLayout()\n sitePath = QHBoxLayout()\n sitePath.addWidget(QLabel('Local site:'))\n sitePath.addWidget(self.localPathLabel, stretch=1)\n sitePath.addStretch(1)\n siteFile = QHBoxLayout()\n siteFile.addWidget(QLabel('Selected file:'))\n siteFile.addWidget(self.localFileLable, stretch=1)\n siteFile.addStretch(1)\n\n treeView = QTreeView()\n treeView.setModel(self.localModel)\n for pos, width in enumerate((280, 100, 100, 70)):\n treeView.setColumnWidth(pos, width)\n treeView.clicked.connect(self._localTreeViewClicked)\n\n localPath = self.localPathLabel.text()\n for i in range(len(localPath)):\n if localPath[i] == '/':\n treeView.expand(self.localModel.index(localPath[:i]))\n treeView.expand(self.localModel.index(localPath))\n\n buttonBox = QHBoxLayout()\n self.uploadButton = QPushButton('Upload')\n buttonBox.addStretch(1)\n buttonBox.addWidget(self.uploadButton)\n\n localArea.addLayout(sitePath)\n localArea.addLayout(siteFile)\n localArea.addWidget(treeView)\n localArea.addLayout(buttonBox)\n self.setLayout(localArea)\n\n def _localTreeViewClicked(self, indexItem):\n localPathOrFile = self.localModel.filePath(indexItem)\n if self.localModel.isDir(indexItem):\n self.localPathLabel.setText(localPathOrFile)\n else:\n self.localFileLable.setText(localPathOrFile)\n localPathOrFile = re.findall(r'([\\s\\S]+)/[\\s\\S]+?', localPathOrFile)[0]\n self.localPathLabel.setText(localPathOrFile)\n os.chdir(localPathOrFile)\n\n\nclass ServerArea(QWidget):\n def __init__(self, parent=None):\n super(ServerArea, self).__init__(parent)\n self.serverPathLabel = QLabel('/')\n self.serverFileLable = QLabel('')\n\n self.serverPathLabel.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n self.serverFileLable.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n\n serverArea = QVBoxLayout()\n\n sitePath = QHBoxLayout()\n sitePath.addWidget(QLabel('Remote site:'))\n sitePath.addWidget(self.serverPathLabel, stretch=1)\n sitePath.addStretch(1)\n siteFile = QHBoxLayout()\n siteFile.addWidget(QLabel('Selected file:'))\n siteFile.addWidget(self.serverFileLable, stretch=1)\n siteFile.addStretch(1)\n\n self.fileListWidget = QListWidget()\n self.fileListWidget.setModelColumn(4)\n\n buttonBox1 = QHBoxLayout()\n self.downloadButton = QPushButton('Download')\n self.refreshButton = QPushButton('Refresh')\n self.deleteButton = QPushButton('Delete file')\n buttonBox1.addStretch(1)\n buttonBox1.addWidget(self.downloadButton)\n buttonBox1.addWidget(self.refreshButton)\n buttonBox1.addWidget(self.deleteButton)\n\n buttonBox2 = QHBoxLayout()\n self.mkdirButton = QPushButton('New Folder')\n self.rmdirButton = QPushButton('Remove folder')\n self.renameButton = QPushButton('Rename')\n buttonBox2.addStretch(1)\n buttonBox2.addWidget(self.mkdirButton)\n buttonBox2.addWidget(self.rmdirButton)\n buttonBox2.addWidget(self.renameButton)\n\n serverArea.addLayout(sitePath)\n serverArea.addLayout(siteFile)\n serverArea.addWidget(self.fileListWidget)\n serverArea.addLayout(buttonBox1)\n serverArea.addLayout(buttonBox2)\n\n self.setLayout(serverArea)\n\n def setFileList(self, fileList):\n self.fileListWidget.blockSignals(True)\n self.fileListWidget.clear()\n 
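# re-enable item signals now that the stale entries are gone\n        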
self.fileListWidget.blockSignals(False)\n        for file in fileList:\n            item, widget = self._getFileListItem(file['name'], file['isDir'], file['size'], file['time'])\n            self.fileListWidget.addItem(item)\n            self.fileListWidget.setItemWidget(item, widget)\n\n    @staticmethod\n    def _getFileListItem(name, isDir, size, modifyTime):\n        widget = QWidget()\n        layout = QHBoxLayout()\n\n        nameLabel = QLabel(name)\n        nameLabel.setObjectName('name')\n        layout.addWidget(nameLabel, stretch=1)\n        layout.addWidget(_getDevideLine())\n\n        isDirLabel = QLabel(isDir)\n        isDirLabel.setObjectName('isDir')\n        layout.addWidget(isDirLabel, stretch=1)\n        layout.addWidget(_getDevideLine())\n\n        sizeLabel = QLabel(size)\n        sizeLabel.setObjectName('size')\n        layout.addWidget(sizeLabel, stretch=1)\n        layout.addWidget(_getDevideLine())\n\n        modifyTimeLabel = QLabel(modifyTime)\n        modifyTimeLabel.setObjectName('modifyTime')\n        layout.addWidget(modifyTimeLabel, stretch=1)\n\n        widget.setLayout(layout)\n\n        item = QListWidgetItem()\n        item.setSizeHint(QSize(0, 43))\n\n        return item, widget\n\n\nclass MainWindow(QMainWindow):\n    def __init__(self):\n        super().__init__()\n\n        self.inputRow = InputRowWidget(self)\n        self.localArea = LocalArea(self)\n        self.serverArea = ServerArea(self)\n\n        self.logEditText = QTextEdit()\n\n        self.serverFileList = []\n\n        self.isConnect = False\n\n        self.initUI()\n\n        client.setWindow(self)\n\n    def saveHistory(self):\n        with open(\"history.ini\", \"w+\") as fp:\n            fp.write(\"{},{},{},{}\\n\".format(\n                self.inputRow.hostTextEdit.text(),\n                self.inputRow.usernameTextEdit.text(),\n                self.inputRow.passwordTextEdit.text(),\n                self.inputRow.portTextEdit.text(),\n            ))\n\n    @staticmethod\n    def readHistory():\n        records = []\n        with open(\"history.ini\", \"r\") as fp:\n            record = fp.readline().strip()\n            while record:\n                fields = record.split(',')\n                # skip malformed lines; always read the next line so the loop cannot get stuck\n                if len(fields) == 4:\n                    records.append(fields)\n                record = fp.readline().strip()\n        return records\n\n    @staticmethod\n    def clearHistory():\n        with open(\"history.ini\", \"w\"):\n            pass\n\n    def loadHistory(self, connInfo):\n        self.inputRow.hostTextEdit.setText(connInfo[0])\n        self.inputRow.usernameTextEdit.setText(connInfo[1])\n        self.inputRow.passwordTextEdit.setText(connInfo[2])\n        self.inputRow.portTextEdit.setText(connInfo[3])\n\n    def clearInput(self):\n        self.inputRow.hostTextEdit.clear()\n        self.inputRow.usernameTextEdit.clear()\n        self.inputRow.passwordTextEdit.clear()\n        self.inputRow.portTextEdit.clear()\n\n    @staticmethod\n    def _setClientMode(mode):\n        if mode != 'PORT' and mode != 'PASV':\n            return\n        client.mode = mode\n\n    def initToolbar(self):\n        toolbar = self.addToolBar('FTP client')\n        toolbar.setMovable(False)\n\n        refreshAction = QAction(QIcon('res/refresh.png'), '&Refresh', self)\n        refreshAction.setStatusTip('Refresh')\n        refreshAction.triggered.connect(self._refreshServerFileList)\n        toolbar.addAction(refreshAction)\n\n        portAction = QAction(QIcon('res/port.png'), '&Port', self)\n        portAction.setStatusTip('Active mode')\n        portAction.triggered.connect(partial(self._setClientMode, 'PORT'))\n        toolbar.addAction(portAction)\n\n        pasvAction = QAction(QIcon('res/pasv.png'), '&Pasv', self)\n        pasvAction.setStatusTip('Passive mode')\n        pasvAction.triggered.connect(partial(self._setClientMode, 'PASV'))\n        toolbar.addAction(pasvAction)\n\n        systAction = QAction(QIcon('res/system.png'), '&System', self)\n        systAction.setStatusTip('System info')\n        systAction.triggered.connect(lambda: Thread(target=self.clientSyst).start())\n        toolbar.addAction(systAction)\n\n        terminateAction = QAction(QIcon('res/terminate.png'), '&Terminate', 
self)\n        terminateAction.setStatusTip('Terminate')\n        terminateAction.triggered.connect(lambda: Thread(target=self.clientSyst).start())\n        toolbar.addAction(terminateAction)\n\n        disconnectAction = QAction(QIcon('res/disconnect.png'), '&Disconnect', self)\n        disconnectAction.setStatusTip('Disconnect')\n        disconnectAction.triggered.connect(lambda: Thread(target=self.clientDisconnect).start())\n        toolbar.addAction(disconnectAction)\n\n        exitAction = QAction(QIcon('res/exit.png'), '&Exit', self)\n        exitAction.setStatusTip('Exit')\n        exitAction.triggered.connect(qApp.quit)\n        toolbar.addAction(exitAction)\n\n    def _moveCenter(self):\n        qr = self.frameGeometry()\n        cp = QDesktopWidget().availableGeometry().center()\n        qr.moveCenter(cp)\n        self.move(qr.topLeft())\n\n    def initInputRow(self):\n        menu = QMenu()\n        clearInputAction = QAction('Clear input', menu)\n        clearInputAction.triggered.connect(self.clearInput)\n        clearHistoryAction = QAction('Clear history', menu)\n        clearHistoryAction.triggered.connect(self.clearHistory)\n        menu.addActions([clearInputAction, clearHistoryAction])\n        recordActions = []\n        records = self.readHistory()\n        for record in records:\n            recordAction = QAction('{}@{}'.format(record[1], record[0]), menu)\n            recordAction.triggered.connect(partial(self.loadHistory, record))\n            recordActions.append(recordAction)\n        menu.addSeparator()\n        menu.addActions(recordActions)\n\n        self.inputRow.moreButton.setMenu(menu)\n        self.inputRow.setMaximumHeight(50)\n\n    def initLogArea(self):\n        self.logEditText.setAlignment(Qt.AlignLeft | Qt.AlignTop)\n        self.logEditText.setStyleSheet(\"font-size: 14px; font-family: Segoe UI; padding: 10px\")\n        self.logEditText.setMinimumHeight(120)\n        self.logEditText.setText(\"Log output:\")\n        self.logEditText.setReadOnly(True)\n        self.logEditText.setMaximumHeight(200)\n        self.logEditText.ensureCursorVisible()\n        scrollBar = QScrollBar()\n        self.logEditText.setVerticalScrollBar(scrollBar)\n        return self.logEditText\n\n    def echo(self, title, content):\n        self.logEditText.append(str(title) + ': ' + str(content))\n        cursor = self.logEditText.textCursor()\n        cursor.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)\n        self.logEditText.setTextCursor(cursor)\n\n    def threadClientConnect(self):\n        status, msg = client.connect()\n        if not status:\n            self.echo('Status', msg)\n            return\n        self.echo('Status', 'Connection established, waiting for welcome message...')\n        self.echo('Status', msg)\n        self.threadClientLogin()\n\n    def threadClientLogin(self):\n        status, msg = client.login()\n        if not status:\n            self.echo('Wrong', 'Login failed!')\n            return\n        self.isConnect = True\n        self.echo('Status', 'Logged in')\n        status, msg = client.send_syst()\n        self.echo('Status', msg)\n        self._refreshServerFileList()\n\n    def connectServer(self):\n        if self.isConnect:\n            try:\n                msg = client.quit()\n                self.echo('Status', msg)\n            except Exception as err:\n                self.echo('Error', err)\n\n        if not ipPattern.fullmatch(self.inputRow.hostTextEdit.text()):\n            self.echo('Wrong', 'Invalid host!')\n            return\n        if not self.inputRow.portTextEdit.text().isdigit():\n            self.echo('Wrong', 'Invalid port!')\n            return\n        client.setInfo({\n            'host': self.inputRow.hostTextEdit.text(),\n            'username': self.inputRow.usernameTextEdit.text(),\n            'password': self.inputRow.passwordTextEdit.text(),\n            'port': int(self.inputRow.portTextEdit.text())\n        })\n        # Thread(target=self.threadClientConnect).start()\n        self.threadClientConnect()\n\n    def _refreshServerFileList(self):\n        if not self.isConnect:\n            return\n        self.serverFileList = [{\n            'name': 'Name',\n            'isDir': 'Type',\n            'size': 'Size',\n            'time': 
'Modify time'\n        }]\n        if self.serverArea.serverPathLabel.text() != '/':\n            self.serverFileList.append({\n                'name': '..',\n                'isDir': 'Directory',\n                'size': '',\n                'time': ''\n            })\n        self.echo('Status', 'Retrieving directory listing of \"{}\"...'.format(self.serverArea.serverPathLabel.text()))\n        fileInfoList = client.listFiles()\n        self.echo('Status', 'Directory listing of \"{}\" successful'.format(self.serverArea.serverPathLabel.text()))\n        for fil in fileInfoList:\n            try:\n                findResult = \\\n                    re.findall(r\"[\\S\\s]*?([0-9]+) ([A-Za-z]+ [0-9][0-9] [0-9][0-9]:[0-9][0-9]) ([\\S\\s]+)\", fil)[0]\n                newFile = {\n                    'isDir': 'Directory' if fil.startswith('d') else 'File',\n                    'name': findResult[2],\n                    'time': findResult[1],\n                    'size': convert(findResult[0])\n                }\n                self.serverFileList.append(newFile)\n            except Exception as err:\n                print(err)\n                print(fil)\n        self.serverArea.setFileList(self.serverFileList)\n\n    def clientDisconnect(self):\n        self.serverArea.fileListWidget.blockSignals(True)\n        self.serverArea.fileListWidget.clear()\n        self.serverArea.fileListWidget.blockSignals(False)\n        self.serverArea.serverPathLabel.setText('/')\n        self.serverArea.serverFileLable.setText('')\n        try:\n            if self.isConnect:\n                self.isConnect = False\n                msg = client.quit()\n                self.echo('Status', msg)\n                client.reset()\n        finally:\n            self.echo('Status', 'Disconnected from server')\n\n    def clientUpload(self):\n        filename = self.localArea.localFileLable.text()\n        if not self.isConnect:\n            self.echo('Wrong', \"No server connection\")\n            return\n        if filename == '':\n            self.echo('Wrong', \"No local file selected\")\n            return\n        status = client.upload(filename)\n        if status:\n            self.echo('Status', 'Begin to upload \\'{}\\''.format(filename))\n\n    def initButtons(self):\n        self.inputRow.connButton.clicked.connect(self.connectServer)\n        self.localArea.uploadButton.clicked.connect(lambda: self.clientUpload())\n        self.serverArea.downloadButton.clicked.connect(lambda: Thread(target=self.clientDownload).start())\n        self.serverArea.mkdirButton.clicked.connect(self.clientMkdir)\n        self.serverArea.refreshButton.clicked.connect(self._refreshServerFileList)\n        self.serverArea.rmdirButton.clicked.connect(self._preRemoveDir)\n        self.serverArea.renameButton.clicked.connect(self._preRenameDir)\n        self.serverArea.deleteButton.clicked.connect(self.clientDelete)\n\n        self.serverArea.fileListWidget.clicked.connect(self._selectServerFile)\n        self.serverArea.fileListWidget.doubleClicked.connect(self.clientChangeDir)\n\n    def clientDownload(self):\n        filename = self.serverArea.serverFileLable.text()\n        if not self.isConnect:\n            self.echo('Wrong', \"No server connection\")\n            return\n        if filename == '':\n            self.echo('Wrong', \"No server file selected\")\n            return\n        status = client.download(filename)\n        if status:\n            self.echo('Status', 'Begin to download \\'{}\\''.format(filename))\n\n    def clientSyst(self):\n        if not self.isConnect:\n            self.echo('Wrong', \"No server connection\")\n            return\n        status, msg = client.send_syst()\n        if status:\n            self.echo('Status', msg)\n        else:\n            self.echo('Error', 'Get system info failed')\n\n    def clientDelete(self):\n        if not self.isConnect:\n            self.echo('Wrong', \"No server connection\")\n            return\n        filename = self.serverArea.serverFileLable.text()\n        if filename == '':\n            self.echo('Wrong', \"No server file selected\")\n            return\n        status = client.deleteFile(filename)\n        if status:\n            self.echo('Status', 'Delete \"{}\" successfully'.format(filename))\n            self._refreshServerFileList()\n        else:\n            self.echo('Error', 'Delete \"{}\" failed'.format(filename))\n\n    def clientMkdir(self):\n        if not 
self.isConnect:\n            self.echo('Wrong', \"No server connection\")\n            return\n        value, ok = QInputDialog.getText(self, \"New Folder\", \"Input the name of the new folder:\", QLineEdit.Normal)\n        if not value or not ok:\n            return\n        status = client.makeDir(value)\n        if status:\n            self.echo('Status', 'New Directory \\'{}\\' created'.format(value))\n            self._refreshServerFileList()\n        else:\n            self.echo('Wrong', 'Fail to create folder \\'{}\\''.format(value))\n\n    def clientChangeDir(self):\n        if not self.isConnect:\n            self.echo('Wrong', \"No server connection\")\n            return\n        currentItem = self.serverArea.fileListWidget.currentItem()\n        currentWidget = self.serverArea.fileListWidget.itemWidget(currentItem)\n        name = currentWidget.findChild(QLabel, 'name').text()\n        isDir = currentWidget.findChild(QLabel, 'isDir').text()\n        if isDir != 'Directory':\n            return\n        status = client.changeWrokDir(name)\n        if status:\n            self.echo('Status', 'Change work directory succeeded')\n            serverPath = client.printWorkDir()\n            self.serverArea.serverPathLabel.setText(serverPath)\n            self._refreshServerFileList()\n        else:\n            self.echo('Status', 'Change work directory failed')\n\n    def _selectServerFile(self):\n        if not self.isConnect:\n            self.echo('Wrong', \"No server connection\")\n            return\n        currentItem = self.serverArea.fileListWidget.currentItem()\n        currentWidget = self.serverArea.fileListWidget.itemWidget(currentItem)\n        name = currentWidget.findChild(QLabel, 'name').text()\n        isDir = currentWidget.findChild(QLabel, 'isDir').text()\n        if isDir != 'File':\n            return\n        serverPath = self.serverArea.serverPathLabel.text()\n        if serverPath == '/':\n            serverPath = ''\n        self.serverArea.serverFileLable.setText(serverPath + '/' + name)\n\n    def _preRemoveDir(self):\n        if not self.isConnect:\n            self.echo('Wrong', \"No server connection\")\n            return\n        currentItem = self.serverArea.fileListWidget.currentItem()\n        currentWidget = self.serverArea.fileListWidget.itemWidget(currentItem)\n        name = currentWidget.findChild(QLabel, 'name').text()\n        isDir = currentWidget.findChild(QLabel, 'isDir').text()\n        if isDir != 'Directory':\n            self.echo(\"Wrong\", \"No directory selected\")\n            return\n        status = client.removeDir(name)\n        if status:\n            self.echo(\"Status\", \"Remove folder successfully\")\n            self._refreshServerFileList()\n        else:\n            self.echo(\"Status\", \"Remove folder failed\")\n\n    def _preRenameDir(self):\n        if not self.isConnect:\n            self.echo('Wrong', \"No server connection\")\n            return\n        currentItem = self.serverArea.fileListWidget.currentItem()\n        currentWidget = self.serverArea.fileListWidget.itemWidget(currentItem)\n        name = currentWidget.findChild(QLabel, 'name').text()\n        isDir = currentWidget.findChild(QLabel, 'isDir').text()\n        if isDir != 'Directory':\n            self.echo(\"Wrong\", \"No directory selected\")\n            return\n        value, ok = QInputDialog.getText(self, \"Rename\", \"Input new name for folder {}:\".format(name), QLineEdit.Normal)\n        if not value or not ok:\n            return\n        self.echo('Status', 'Renaming \"{}\" to \"{}\"'.format(name, value))\n        status = client.rename(name, value)\n        if status:\n            self.echo('Status', 'Rename succeeded')\n            self._refreshServerFileList()\n\n    def initUI(self):\n        self.statusBar()\n        self.initToolbar()\n        self.initInputRow()\n        self.initButtons()\n\n        mainVBox = QVBoxLayout()\n        mainVBox.addWidget(self.inputRow)\n        mainVBox.addWidget(self.initLogArea())\n\n        hSpliter = QSplitter(Qt.Horizontal)\n        hSpliter.addWidget(self.localArea)\n        hSpliter.addWidget(_getDevideLine())\n        hSpliter.addWidget(self.serverArea)\n\n        mainVBox.addWidget(hSpliter)\n\n        widget = QWidget()\n        
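# QMainWindow requires a central widget; wrap the main layout in a plain QWidget\n        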
self.setCentralWidget(widget)\n widget.setLayout(mainVBox)\n\n self.resize(1200, 800)\n self._moveCenter()\n self.setWindowTitle('FTP-Client')\n self.setFont(QFont('Segoe UI', 10))\n self.setWindowIcon(QIcon('./res/icon.png'))\n self.show()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MainWindow()\n sys.exit(app.exec_())\n","repo_name":"yorhaha/ftp-server-client","sub_path":"client/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27467265905","text":"# !/usr/bin/env python3\n# coding=utf-8\nfrom __future__ import annotations\n\nimport json\nimport time\nfrom typing import Any, Generator\n\nimport ftfy\nimport openai\n\nimport ebooklib\nfrom ebooklib import epub\nfrom bs4 import BeautifulSoup\n\n\ndef chapter_to_str(chapter):\n soup = BeautifulSoup(chapter.get_body_content(), \"html.parser\")\n text = [para.get_text() for para in soup.find_all(\"p\")]\n return \"\\n\".join(text)\n\n\ndef clip_chunk(raw_text: str) -> str:\n text = raw_text.strip()\n len_text = len(text)\n if len_text < 1:\n return text\n\n start = 0\n end = len_text\n\n # remove partial sentences at the beginning\n while start < end:\n if text[start] in {\".\", \"!\", \"?\"}:\n start += 1\n break\n start += 1\n\n # remove partial sentences at the end\n while end > start:\n if text[end - 1] in {\".\", \"!\", \"?\"}:\n break\n end -= 1\n\n return text[start:end].strip()\n\n\ndef read_epub(file_name: str) -> str:\n book = epub.read_epub(file_name)\n\n started = False\n text = \"\"\n for item in book.get_items():\n if item.get_type() not in {ebooklib.ITEM_DOCUMENT}:\n continue\n\n if not started:\n if item.id == \"intro\":\n started = True\n else:\n continue\n\n chapter = chapter_to_str(item)\n text += chapter\n return text\n\n\ndef generate_chunks(file_name: str, chunk_size: int = 3000, overlap: int = 400) -> Generator[str, None, None]:\n if file_name.endswith(\".epub\"):\n text = read_epub(file_name)\n elif file_name.endswith(\".txt\"):\n with open(file_name, mode=\"r\", encoding=\"utf-8\") as f:\n text = f.read()\n len_text = len(text)\n chunk_overlap_start = overlap\n chunk_overlap_end = overlap\n for i in range(0, len_text - chunk_size, chunk_size):\n chunk_start = max(0, i - chunk_overlap_start)\n chunk_end = min(len_text, i + chunk_size + chunk_overlap_end)\n text_chunk = text[chunk_start:chunk_end]\n yield clip_chunk(text_chunk)\n\n\ndef main() -> None:\n file_name = r\"D:\\Dropbox\\Bücher\\efficiency\\Tiago Forte - Building a Second Brain _ A Proven Method to Organize Your Digital Life and Unlock Your Creative Potential (2022, \" \\\n r\"Atria Books) - libgen.li.epub\"\n\n file_name = r\"resources/summary_02.txt\"\n\n summary_file = \"resources/summary.txt\"\n with open(\"resources/config_summarize.json\", mode=\"r\") as f:\n config = json.load(f)\n\n openai.api_key = config[\"openai_secret\"]\n openai.organization = config[\"organization_id\"]\n parameters: dict[str, Any] = config[\"parameters\"]\n\n messages = parameters.pop(\"messages\")\n history = list()\n chunk_size = 3000\n summary_size = chunk_size // 10\n\n chunks = list(generate_chunks(file_name, chunk_size=chunk_size))\n summary = \"\"\n for n, chunk in enumerate(chunks):\n print(f\"chunk {n+1:d} of {len(chunks):d}\")\n text = f\"Text passage:\\n{chunk}\"\n if n >= 1:\n text = f\"Preface:\\n{summary:s}\\n\\n\" + text\n instruction = f\"{text:s}\\n\\nSummarize the interesting, novel, or unique aspects 
from the above text passage in about {summary_size:d} characters.\"\n if n >= 1:\n instruction += \" Write the summary as a natural continuation of the preface above. Don't start with the exact same words. When necessary, use terms from the preface \" \\\n \"instead of introducing new ones.\"\n\n print(instruction)\n history.append({\n \"role\": \"user\",\n \"content\": instruction,\n })\n\n # parameters: https://platform.openai.com/docs/api-reference/chat/create\n response = openai.ChatCompletion.create(\n **parameters,\n messages=messages + history,\n )\n\n history.clear()\n\n first_choice = response[\"choices\"][0]\n reply: str = first_choice[\"message\"][\"content\"]\n finish_reason = first_choice[\"finish_reason\"]\n\n summary = ftfy.fix_encoding(reply)\n try:\n output = f\"summary: {summary:s} (fr: {str(finish_reason):s})\"\n except TypeError as e:\n raise e\n print(output)\n\n with open(summary_file, mode=\"a\", encoding=\"utf-8\") as f:\n # f.write(instruction + \"\\n\\n\")\n f.write(summary + \"\\n\\n\")\n\n print(\"sleeping for 1 second...\")\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wehnsdaefflae/chatGPTAPI","sub_path":"summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1017041970","text":"n = int(input('Digite um numero: '))\nc = n\nf = 1\nprint(f'Calculando {n}! ', end='')\nwhile c > 0:#SEMPRE VAI PRECISAR DO CONTADOR\n print(f'{c}', end='')\n print(' x 'if c > 1 else ' = ', end='')\n f *= c\n c = c - 1#c -= 1\nprint(f'{f}')\n'''n = int(input('Digite um numero: '))\nf = 1\nprint(f'{n}!', end=' ')\nfor c in range(n, 0, -1):\n f = f * c #f *= c\n print(f'{c}', end=' ')\n print(f'x' if c > 1 else '=', end=' ')\nprint(f'{f}')'''\n\n\n\n\n","repo_name":"antoniocarlosdiniz8/NovosDesafios","sub_path":"Desafio060.py","file_name":"Desafio060.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3149977945","text":"\"\"\"\n\nFunctions to perform data treatments\nauthor : Les Loustiques\ncreated on : 04/03/2022\n\n==========\n\nTable of contents :\n\n | :meth:`replace_string `\n | :meth:`check_NAF `\n | :meth:`clean_data ` merge_one_hot_encoding\n | :meth:`merge_one_hot_encoding ` \n \n TO DO\n \n------------------------------------------------------------------\n\nLoad only this module :\n\n >>> import sys\n >>> sys.path.append('Scripts')\n >>> import func as fc\n \n------------------------------------------------------------------\n\n\"\"\"\n\nimport sys\nimport time\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport unidecode\nfrom joblib import load, dump\nfrom tqdm.notebook import tqdm\n\nimport category_encoders as ce\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\nfrom sklearn.ensemble import RandomForestRegressor\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nsys.path.append('../Sources')\n\nfrom params_data import params, paires\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\ndef replace_string(df, col_name, params) :\n \"\"\"\n Replace wrong strings in a column of a dataframe using default params\n \n :param df: dataframe\n :param col_name: string name of the column to treat\n :param params: dict 
of params\n :return df: dataframe treated\n \"\"\"\n \n # On retire les accents et les espaces inutils (début et fin) et on fixe la casse en majuscule dans la colonne\n tmp = [unidecode.unidecode(elt).strip().upper() for elt in df[col_name]]\n # On récupère les individus mal orthographiés et leur correction\n string_false = params['dict_ville'][col_name]['false']\n string_true = params['dict_ville'][col_name]['true']\n # Pour chaque erreur, on applique la correction\n for i in range(len(string_false)) :\n ind_false = np.where(np.array(tmp) == string_false[i])[0].tolist()\n for j in ind_false :\n tmp[j] = string_true[i]\n # On met à jour la colonne corrigée dans le dataframe\n df[col_name] = tmp\n \n return(df)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\ndef check_NAF(df, col_name):\n \"\"\"\n Check the structure of a column content\n \n :param df: dataframe\n :param col_name: string name of the column to treat\n \"\"\"\n \n # On génère une liste de chiffres et de lettres sous format caractère\n num_str = [str(i) for i in range(10)]\n alphabet = [chr(i).upper() for i in range(ord('a'), ord('z') + 1)]\n # On initialise une variable de comptage qui compte le nombre d'incohérences\n count_false = 0\n # On calcule le nombre d'incohérences selon des conditions\n for i in range(len(df[col_name].unique())):\n elt = df[col_name][i].upper()\n if len([i for i in elt[0:4] if i in num_str]) < 4:\n count_false += 1\n elif elt[4] not in alphabet: \n count_false += 1\n # On affiche le bilan général du nombre d'incohérences\n if (count_false == 0):\n print('Aucune donnée incohérente')\n else:\n print(f'Il y a {count_false} donnée(s) incohérente(s)')\n \n#-------------------------------------------------------------------------------------------------------------------------------------------\ndef clean_data(df_clt, df_job, new_obs_job=False, new_obs_clt=False):\n \"\"\"\n Pipeline to clean tatami's data\n \n :param df_clt: dataframe of client data\n :param df_job: dataframe of jobbeur data\n :new_obs_job: string choice to treat new observation from a jobbeur\n :new_obs_clt: string choice to treat new observation from a clt\n :return df_clt: dataframe of client data cleaned\n :return df_job: dataframe of jobbeur data cleaned\n \"\"\"\n \n time_total = time.time()\n print('------------------- NETTOYAGE COMPLET DES DONNEES -------------------\\n')\n time.sleep(0.1)\n \n print('Suppression des colonnes de la table client ...', end=' ')\n start_time = time.time()\n df_clt = df_clt.drop(params['client']['col_to_del'], axis=1)\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n time.sleep(0.1)\n \n if not new_obs_clt :\n print('Suppression des lignes de la table client ...', end=' ')\n start_time = time.time()\n df_clt = df_clt.iloc[:-2]\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n time.sleep(0.1)\n \n print('Suppression des colonnes de la table jobbeur ...', end=' ')\n start_time = time.time()\n df_job = df_job.drop(params['jobbeur']['col_to_del'], axis=1)\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n time.sleep(0.1)\n \n if not new_obs_job :\n print('Suppression des lignes de la table jobbeur ...', end=' ')\n start_time = time.time()\n df_job = df_job.iloc[:-1]\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n time.sleep(0.1)\n \n print('Nettoyage des doublons dans les variables client et correction ...', end=' ')\n start_time = 
time.time()\n df_clt = replace_string(df_clt, 'Métier du poste', params['client'])\n df_clt = replace_string(df_clt, 'Localisation du poste', params['client'])\n if 'Poste avec du déplacement (en %) si 75 ramené a 100%' in df_clt.columns :\n ind = np.where(df_clt['Poste avec du déplacement (en %) si 75 ramené a 100%'] == 75)[0]\n df_clt.loc[ind, 'Poste avec du déplacement (en %) si 75 ramené a 100%'] = 100\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n time.sleep(0.1)\n \n print('Nettoyage des doublons dans les variables jobbeur ...', end=' ')\n start_time = time.time()\n df_job = replace_string(df_job, 'VILLE', params['jobbeur'])\n df_job = replace_string(df_job, 'Dernier poste occupé (ou actuel)', params['jobbeur'])\n df_job = replace_string(df_job, 'Mission recherchée : Exemple n°1 de poste (métier + secteur)', params['jobbeur'])\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n time.sleep(0.1)\n \n print('Complétion des données dans les variables jobbeur ...', end=' ')\n start_time = time.time()\n df_job['Vos compétences 2'] = df_job['Vos compétences 2'].replace(np.nan, 'Non renseigné')\n df_job['Vos compétences 3'] = df_job['Vos compétences 3'].replace(np.nan, 'Non renseigné')\n df_job['CODE POSTAL'] = df_job['CODE POSTAL'].astype('str')\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n time.sleep(0.1)\n \n print('\\nOpération complète terminée en {}s'.format(round(time.time() - time_total, 3)))\n \n return(df_clt, df_job)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\ndef merge(df_1, df_2):\n \"\"\"\n Computes Merge algorithms on two dataframes \n \n :param df_1: first dataframe\n :param df_2: second dataframe\n :return col_del: list of column names deleted after encoding\n :return df_dummies: dataframe with OneHotEncoded columns\n \"\"\"\n\n # On fait une copie des df d'entrée\n df1 = df_1.copy()\n df2 = df_2.copy() \n # On place une clé commune à nos deux tables\n df1['key'] = 1\n df2['key'] = 1\n # On fait fait le produit cartésien de toutes les lignes suivant la clé\n merge_df = pd.merge(df1, df2, on ='key').drop('key', axis=1) \n \n return(merge_df)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\ndef labellisation(df_merge, verbose=True):\n \"\"\"\n Labellises merged dataframe according to business rules \n \n :param df_merge: dataframe\n :param verbose: boolean choice of progression bar visualisation \n :return df_merge: dataframe with labelled rows\n \"\"\"\n \n df_for_label = df_merge.copy()\n df_for_label['y'] = 0\n \n print('{} \\033[1met\\033[0m {}'.format('niveau de rémunération', 'Niveau de rémunération mensuelle brute souhaitée'))\n ind = df_for_label[df_for_label['niveau de rémunération'] > df_for_label['Niveau de rémunération mensuelle brute souhaitée']].index.values\n df_for_label.loc[ind, 'y'] += 1\n\n for paire in paires :\n print('{} \\033[1met\\033[0m {}'.format(paires[paire]['item'][0], paires[paire]['item'][1]))\n if paires[paire]['correction?'] == 1 :\n for elt_to_corr in paires[paire]['corr'] :\n ind_to_corr = np.where(df_for_label[paires[paire]['item'][1]] == elt_to_corr[1])\n df_for_label[paires[paire]['item'][1]].iloc[ind_to_corr] = elt_to_corr[0]\n list_pair_1 = df_for_label[paires[paire]['item'][0]].unique()\n if verbose : \n loop = tqdm(list_pair_1)\n else :\n loop = list_pair_1\n for elt_pair_1 in loop 
:\n if (elt_pair_1 in df_for_label[paires[paire]['item'][0]].values) & (elt_pair_1 in df_for_label[paires[paire]['item'][1]].values) :\n ind_rows = np.where((df_for_label[paires[paire]['item'][0]] == elt_pair_1) & (df_for_label[paires[paire]['item'][1]] == elt_pair_1))[0]\n df_for_label.loc[ind_rows, 'y'] += paires[paire]['poids']\n df_merge['y'] = df_for_label['y']\n \n return(df_merge)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\ndef shaping_for_model(df_merge, load_enc=True, code_filename='encoder', save_enc=True) :\n \"\"\"\n TO DO\n \"\"\"\n \n X = df_merge.iloc[:, :-1]\n y = df_merge['y'].values\n \n print('Encodage des colonnes ...', end=' ')\n start_time = time.time()\n if load_enc :\n encoder = load('Sources/'+code_filename)\n X_encoded = encoder.transform(X)\n else :\n encoder = ce.OneHotEncoder(use_cat_names=True)\n X_encoded = encoder.fit_transform(X)\n if save_enc :\n with open(\"Sources/encoder\", \"wb\") as f :\n pickle.dump(encoder, f)\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n \n print('Normalisation de la variable réponse ...', end=' ')\n start_time = time.time()\n scaler = MinMaxScaler()\n y_scaled = scaler.fit_transform(y.reshape(-1, 1)).ravel()\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n \n return(X_encoded, y_scaled)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\ndef model_construction(X, y, model) :\n \"\"\"\n TO DO\n \"\"\"\n \n print(\"\\nEntraînement du modèle ...\", end=' ')\n start_time = time.time()\n model_trained = model\n model_trained.fit(X, y)\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n time.sleep(0.1)\n print(\"\\nR² obtenu : {}\".format(round(r2_score(model_trained.predict(X), y), 2))) \n \n return(model_trained, X, y)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\ndef pipeline_modelisation(df_clt, df_job, load_enc=True, code_filename='encoder', save_enc=True, \n model=RandomForestRegressor(n_jobs=-1), save_model=True, verbose=False) :\n \"\"\"\n TO DO\n \"\"\"\n \n df_clt_clean, df_job_clean = clean_data(df_clt, df_job)\n\n print('\\n------------------- ADAPTATION POUR MODELISATION --------------------\\n')\n time.sleep(0.1)\n time_total = time.time()\n print('Merge des deux dataframes ...', end=' ')\n start_time = time.time()\n df_merge = merge(df_clt_clean, df_job_clean)\n print('fini en {}s '.format(round(time.time() - start_time, 3)))\n time.sleep(0.1)\n\n print('\\nLabellisation des données :')\n start_time = time.time()\n df_merge = labellisation(df_merge, verbose = False)\n print('Fini en {}s '.format(round(time.time() - start_time, 3)))\n time.sleep(0.1)\n\n print('\\nOneHotEncoding et Standardisation variable réponse :')\n time.sleep(0.1)\n X, y = shaping_for_model(df_merge)\n\n print('\\nOpération complète terminée en {}s'.format(round(time.time() - time_total, 3)))\n time.sleep(0.1)\n\n print('\\n---------------------- ENTRAINEMENT DU MODELE -----------------------')\n time.sleep(0.1)\n \n time_total = time.time()\n model_trained, X, y = model_construction(X, y, model=model)\n if save_model :\n filename = 'Sources/model.sav'\n pickle.dump(model_trained, open(filename, 'wb'))\n print('Modèle sauvegardé dans {}.'.format(filename))\n time.sleep(0.1)\n\n print('\\nOpération 
complète terminée en {}s'.format(round(time.time() - time_total, 3))) \n \n return(model_trained, X, y)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\ndef matching_new_job(new_job, df_clt) :\n \"\"\"\n TO DO\n \"\"\"\n \n df_clt_clean, df_job_clean = clean_data(df_clt, new_job, new_obs_job=True)\n df_merge = merge(df_clt_clean, df_job_clean)\n \n loaded_model = pickle.load(open('Sources/model.sav', 'rb'))\n encoder = load('Sources/encoder')\n X_encoded = encoder.transform(df_merge)\n matchs = loaded_model.predict(X_encoded)\n df_matchs = pd.DataFrame(np.array([X_encoded.index, matchs]).transpose(), columns=['index', 'matching'])\n ind_best_match = df_matchs.sort_values(by='matching', ascending=False)['index'].values\n df_best_match = encoder.inverse_transform(X_encoded.loc[ind_best_match])\n res = df_clt.loc[ind_best_match]\n res['match_value'] = df_matchs.loc[ind_best_match, 'matching']\n return(res)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\ndef matching_new_clt(new_clt, df_job) :\n \"\"\"\n TO DO\n \"\"\"\n \n df_clt_clean, df_job_clean = clean_data(new_clt, df_job, new_obs_clt=True)\n df_merge = merge(df_clt_clean, df_job_clean)\n \n loaded_model = pickle.load(open('Sources/model.sav', 'rb'))\n encoder = load('Sources/encoder')\n X_encoded = encoder.transform(df_merge)\n matchs = loaded_model.predict(X_encoded)\n df_matchs = pd.DataFrame(np.array([X_encoded.index, matchs]).transpose(), columns=['index', 'matching'])\n ind_best_match = df_matchs.sort_values(by='matching', ascending=False)['index'].values\n df_best_match = encoder.inverse_transform(X_encoded.loc[ind_best_match])\n res = df_job.loc[ind_best_match]\n res['match_value'] = df_matchs.loc[ind_best_match, 'matching']\n return(res)\n","repo_name":"PyaFrost/data_challenge_aqui","sub_path":"Scripts/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":14876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8184194649","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport os\nimport re\nimport string\nimport sys\nimport logging\nfrom config import YARA_RULE_PATH\n\ntry:\n import yara # pip yara-python\n has_yara = True\nexcept ImportError:\n yara = None\n has_yara = False\n\nyara_rules = None\n\n\"\"\"\nScan a dictionary of YARA rule files to determine\nwhich are valid for compilation.\n\nArguments:\n\tyara_files: path to folder containing rules\n\"\"\"\ndef yara_rule_check(yara_files):\n result = dict()\n for yara_id in yara_files:\n fname = yara_files[yara_id]\n try:\n yara.compile(filepath=fname)\n result[yara_id] = fname\n except yara.SyntaxError:\n logging.warning('Syntax Error found in YARA file: {}'.format(fname))\n return result\n\n\"\"\"\nlist file\n\"\"\"\ndef list_files(path):\n lsdir = os.listdir(path)\n dirs = [i for i in lsdir if os.path.isdir(os.path.join(path, i))]\n if dirs:\n for i in dirs:\n list_files(os.path.join(path, i))\n files = [i for i in lsdir if os.path.isfile(os.path.join(path,i))]\n return files\n\n\"\"\"\nImport a folder of YARA rule files\n\nArguments:\n\tyara_path: path to folder containing rules\nResults:\n\trules: a yara.Rules structure of available YARA rules\n\"\"\"\ndef yara_import_rules(yara_path):\n yara_files = {}\n \n logging.info('Loading YARA rules from folder: {}'.format(yara_path))\n files = 
list_files(yara_path)\n\n    # Gather all rule files into yara_files\n    for file_name in files:\n        file_extension = os.path.splitext(file_name)[1]\n        if '.yar' in file_extension:\n            yara_files[file_name.split(os.sep)[-1]] = os.path.join(yara_path, file_name)\n\n    # Check the rules so that only compilable ones are kept\n    yara_files = yara_rule_check(yara_files)\n    rules = ''\n    # Compile the rules\n    if yara_files:\n        try:\n            rules = yara.compile(filepaths=yara_files)\n            logging.info('YARA rules loaded. Total files imported: %d' % (len(yara_files)))\n        except yara.SyntaxError:\n            logging.error('YARA rules disabled, rule format error.')\n\n    return rules\n\n\"\"\"\nScan a given file to see if it matches a given set of YARA rules\n\nArguments:\n\tfile_path: full path to a file to scan\n\trules: a yara.Rules structure of available YARA rules\nResults:\n\tresults: a string value that's either null (no hits)\n\t\t\t or formatted with hit results\n\"\"\"\ndef yara_filescan(file_path, rules):\n    if not rules:\n        return '','',''\n    \n    if os.path.isdir(file_path):\n        logging.debug('file: {} is dir \\n'.format(file_path))\n        return '','',''\n    \n    matchrules = ''\n    score = 0\n    desc = ''\n    results = ''\n\n    try:\n        matches = rules.match(file_path)\n    except yara.Error: # If can't open file\n        logging.debug('YARA can\\'t open file: {}'.format(file_path))\n        return '','',''\n\n    # A rule matched: assemble the hit information\n    if matches:\n        matchrules = '\\t[YARA: {}]'.format(', '.join(str(x) for x in matches))\n        if 'description' in matches[0].meta:\n            desc = matches[0].meta['description']\n        for i in matches:\n            if ('severity' in i.meta) and (i.meta['severity'] > score):\n                score = i.meta['severity']\n                desc = i.meta['description']\n    else:\n        logging.debug('YARA not match file: {}'.format(file_path))\n\n    return matchrules,score,desc\n\n\"\"\"\nCompile and load the global YARA rule set from the rule files under YARA_RULE_PATH\n\"\"\"\ndef yara_init_rule():\n    global yara_rules\n    logging.info('YARA init_rule starting')\n    \n    if not has_yara:\n        logging.error('yara module is not available')\n        return\n    yara_rules = yara_import_rules(YARA_RULE_PATH)\n    if yara_rules:\n        logging.info('YARA init_rule succeeded!')\n    else:\n        logging.error('YARA init_rule failed!')\n\n\"\"\"\nScan a single file with the compiled global YARA rules\n\nArguments:\n\tfilename: path of the file to scan\nResults:\n\tret: 0 on a rule hit, -1 otherwise\n\trettxt: list holding one dict with the file, ret, matchrules, score and description\n\"\"\"\n# Scan a file\ndef yara_process_file(filename):\n    global yara_rules\n    yara_hits = ''\n    score = 0\n    desc = ''\n    ret = 0\n    \n    if os.path.exists(filename) and yara_rules:\n        yara_hits,score,desc = yara_filescan(filename, yara_rules)\n        if len(yara_hits) == 0:\n            ret = -1\n            logging.debug('Processing yara match failed!')\n        else:\n            logging.debug(yara_hits)\n    else:\n        logging.debug('file {} does not exist or rules init failed'.format(filename))\n        ret = -1\n    \n    rettxt = [{'file':filename, 'ret':ret, 'matchrules':yara_hits, 'score':score, 'description':desc}]\n    return ret, rettxt\n\n# Match rules directly against a content string\ndef yara_process_match(report):\n    global yara_rules\n    \n    if yara_rules:\n        try:\n            matches = yara_rules.match(data=report)\n        except yara.Error: \n            logging.warning('YARA cannot match report')\n            return -1,''\n    else:\n        logging.warning('YARA rule init failed, cannot match')\n        return -1,''\n\n    if matches:\n        matchrules = []\n        desc = 
''\n score = 0\n max_serverity = 0\n\n if 'description' in matches[0].meta:\n desc = matches[0].meta['description']\n\n for i in matches:\n if 'severity' in i.meta:\n score += i.meta['severity']\n if i.meta['severity'] > max_serverity:\n max_serverity = i.meta['severity']\n desc = i.meta['description']\n matchrules.insert(0, str(i))\n else:\n matchrules.append(str(i))\n else:\n matchrules.append(str(i))\n else:\n logging.debug('YARA not match file')\n return -1,''\n \n rettxt = {'errcode':0, 'rulename':matchrules, 'score':score, 'desc':desc}\n return 0,rettxt\n","repo_name":"ddytime20/Project","sub_path":"vm/bin/yara_rule.py","file_name":"yara_rule.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35661366360","text":"from flask import Flask, jsonify, redirect, url_for, request\nimport time\nimport uuid\nimport pymongo\nfrom prometheus_flask_exporter import PrometheusMetrics\nimport os\n\nmongoClient = pymongo.MongoClient(\"mongodb://mongo:27017\")\nmongoCollection = mongoClient[\"cs2304\"][\"blabber\"]\n\n'''\ndb_secret = os.getenv('MONGO_SECRET')\nif os.getenv('MONGO_CONFIGURED') == 'true' and db_secret is not None:\n print(\"using secret-provided mongo config\")\n mongoClient = pymongo.MongoClient(\"mongodb://mongo:27017\")\n mongoCollection = mongoClient[\"cs2304\"][\"blabber\"]\n'''\n\napp = Flask(__name__)\n\nmetrics = PrometheusMetrics(app)\n\n#Add a new Blab\n@app.route('/blabs', methods=['POST'])\n@metrics.counter('blabs_created', 'Number of Blabs created')\ndef newBlab():\n content = request.json\n newBlab = {\"id\": str(uuid.uuid4()), \n \"postTime\": int(time.time()),\n \"author\": content.get(\"author\"),\n \"message\": content.get(\"message\")}\n mongoCollection.insert_one(newBlab)\n newBlab.pop(\"_id\")\n return jsonify(newBlab), 201, {'Content-Type': 'application/json'}\n\n#Get blabs since\n@app.route('/blabs', methods=['GET'])\ndef getBlabs():\n sinceTime = request.args.get(\"createdSince\")\n retBlabs = []\n for item in mongoCollection.find():\n blab = item.copy()\n if blab.get(\"postTime\") >= int(sinceTime):\n blab.pop(\"_id\")\n retBlabs.append(blab)\n return jsonify(retBlabs), 200, {'Content-Type': 'application/json'}\n\n#Delete a Blab by id\n@app.route('/blabs/', methods=['DELETE'])\ndef removeBlab(id):\n result = mongoCollection.delete_one({\"id\": id})\n if result.deleted_count > 0:\n return \"200: Blab deleted successfully\", 200\n msg = \"404: Blab not found: \"+str(id)\n return msg, 404\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\")","repo_name":"matt-davison/docker-python-app","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3407825204","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# AMIU copyleft 2021\n# Roberto Marzocchi\n\n'''\nLo script importa in automatico alcuni elementi su una lista di piazzole date in un file CSV\n'''\n\nimport os, sys, re # ,shutil,glob\nimport inspect, os.path\n\nimport csv\n\nimport psycopg2\n\nimport datetime\n\nfrom credenziali import *\n\n\n#import requests\n\nimport logging\n\n\n\n#LOG\n\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\npath = os.path.dirname(os.path.abspath(filename))\n\n#path=os.path.dirname(sys.argv[0]) \n#tmpfolder=tempfile.gettempdir() # get the current temporary 
directory\nlogfile='{}/log/import_legno_plastica.log'.format(path)\n#if os.path.exists(logfile):\n# os.remove(logfile)\n\nlogging.basicConfig(\n handlers=[logging.FileHandler(filename=logfile, encoding='utf-8', mode='w')],\n format='%(asctime)s\\t%(levelname)s\\t%(message)s',\n #filemode='w', # overwrite or append\n #filename=logfile,\n level=logging.INFO)\n\n\n\n\n\ndef main():\n\n logging.info('Lettura file CSV')\n\n \n id_piazzola=[]\n civico=[]\n riferimento=[]\n \n with open('input/piazzole_imballaggi.csv', mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=';')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n logging.info(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n #logging.debug(len(row))\n if len(row):\n #logging.debug(line_count)\n #logging.debug(len(row))\n id_piazzola.append(int(row['ID_Piazzola']))\n civico.append(row['Civico'])\n riferimento.append(row['Riferimento'])\n line_count += 1\n #logging.debug(id_piazzola)\n logging.info('Lette {} righe nel file CSV'.format(len(id_piazzola)))\n logging.info('I dati sono inseriti su SIT e nel campo \"modificato_da\" abbiamo inserito il valore \"Importato da script\" ')\n\n \n\n # # carico i mezzi sul DB PostgreSQL\n logging.info('Connessione al db')\n conn = psycopg2.connect(dbname=db,\n port=port,\n user=user,\n password=pwd,\n host=host)\n\n curr = conn.cursor()\n conn.autocommit = True\n\n\n # num_giorno=datetime.datetime.today().weekday()\n # giorno=datetime.datetime.today().strftime('%A')\n # giorno_file=datetime.datetime.today().strftime('%Y%m%d')\n # logging.debug('Il giorno della settimana è {} o meglio {}'.format(num_giorno, giorno))\n \n # if num_giorno==0:\n # num=3\n # elif num_giorno in (5,6):\n # num=0\n # logging.info('Oggi è {0}, lo script non gira'.format(giorno))\n # exit()\n # else:\n # num=1\n \n # query='''select distinct p.cod_percorso , p.descrizione, s.descrizione as servizio, u.descrizione as ut\n # from util.sys_history h\n # inner join elem.percorsi p \n # on h.id_percorso = p.id_percorso \n # inner join elem.percorsi_ut pu \n # on pu.cod_percorso =p.cod_percorso \n # inner join elem.servizi s \n # on s.id_servizio =p.id_servizio\n # inner join topo.ut u \n # on u.id_ut = pu.id_ut \n # where h.datetime > (current_date - INTEGER '{0}') \n # and h.datetime < current_date \n # and h.\"type\" = 'PERCORSO' \n # and h.action = 'UPDATE_ELEM'\n # and pu.responsabile = 'S'\n # order by ut, servizio'''.format(num)\n \n\n i=0\n while i < len(id_piazzola):\n query='''select e.id_asta, e.id_cliente, e.posizione, e.privato, e.numero_civico_old, \n ep.id_utenzapap, e.numero_civico, e.lettera_civico, e.colore_civico, e.note, e.riferimento, \n ep.id_elemento_privato, \n ep.descrizione \n from elem.elementi e\n join elem.elementi_privati ep \n on e.x_id_elemento_privato = ep.id_elemento_privato\n left join utenze.utenze u on \n ep.id_utenzapap = u.id_utenza \n where e.id_piazzola = {}\n group by e.id_asta, e.id_cliente, e.posizione, e.privato, e.numero_civico_old, \n ep.id_utenzapap, e.numero_civico, e.lettera_civico, e.colore_civico, e.note, e.riferimento, \n ep.id_elemento_privato, \n ep.descrizione, u.data_cessazione\n order by u.data_cessazione, ep.id_utenzapap desc\n '''.format(id_piazzola[i])\n try:\n curr.execute(query)\n parametri_elemento=curr.fetchall()\n except Exception as e:\n logging.error(e)\n if len(parametri_elemento) > 1:\n logging.warning('La piazzola {} contiene più di un elemento privato'.format(id_piazzola[i]))\n if len(parametri_elemento) < 
1:\n logging.warning('La piazzola {} non contiene elementi privati. Si consiglia di controllare l\\'esattezza delle informazioni su SIT'.format(id_piazzola[i]))\n c=0\n for vv in parametri_elemento: \n #modificato_da\n #data_ultima_modifica\n #freq_stimata = 3\n if c==0:\n if vv[1] != None:\n id_cliente= vv[1]\n else:\n id_cliente = -1\n \n if vv[2] != None:\n posizione= vv[2]\n else:\n posizione = 0\n \n #if vv[5] != None:\n # id_utenza= vv[5]\n #else:\n # id_utenza = -1\n \n if riferimento[i] != None:\n rif = riferimento[i]\n else: \n if vv[10] != None:\n rif= vv[10]\n else: \n rif = 'nd'\n \n #posizione, privato, numero_civico_old\n # id_utenza, numero_civico, lettera_civico, colore_civico, \n # note\n logging.debug(len(vv))\n if civico[i] != None and vv[4] != None:\n if vv[4].lower() != civico[i].lower():\n logging.info('Piazzola {} - incongruenza civici con file excel:\\n - numero civico letto = {}\\n - numero civico csv = {}'.format(id_piazzola[i],vv[4],civico[i]))\n else:\n logging.info('Piazzola {} incongruenza / assenza civici su file excel :\\n - numero civico letto = {}\\n - numero civico csv = {}'.format(id_piazzola[i],vv[4],civico[i]))\n \n\n \n #campi= ''' id_asta, id_cliente, posizione, privato, id_utenza, id_piazzola, modificato_da, data_ultima_modifica, freq_stimata, riferimento'''\n #valori= '''{0},{1},{2},1,{3},{4}, 'Importato da script SIT', now(), 3 '''.format(vv[0], id_cliente, posizione, id_utenza, id_piazzola[i])\n \n for tipo in (170, 178):\n insert_query= ''' INSERT INTO elem.elementi (tipo_elemento, id_asta, id_cliente, posizione, privato, \n id_piazzola, peso_reale, peso_stimato, percent_riempimento, modificato_da, data_ultima_modifica, freq_stimata, riferimento)\n VALUES (%s, %s, %s, %s, 1, %s, 0, 0, 90, 'Importato da script', now(), 3 , %s )'''\n logging.debug(insert_query)\n curr2 = conn.cursor()\n curr2.execute(insert_query, (tipo, vv[0], id_cliente, posizione, id_piazzola[i], rif))\n curr2.close()\n\n\n upd= 'UPDATE elem.elementi SET'\n cond = 'WHERE id_piazzola = {} and tipo_elemento in (170, 178)'. 
format(id_piazzola[i])\n # numero_civico_old\n if vv[4] != None:\n update_query='''UPDATE elem.elementi SET numero_civico_old = %s \n WHERE id_piazzola = %s and tipo_elemento in (170, 178)'''\n logging.debug(update_query)\n curr3 = conn.cursor()\n curr3.execute(update_query, (vv[4], id_piazzola[i]))\n curr3.close()\n else:\n if civico[i] != None:\n update_query='''UPDATE elem.elementi SET numero_civico_old = %s \n WHERE id_piazzola = %s and tipo_elemento in (170, 178)'''\n curr3 = conn.cursor()\n curr3.execute(update_query, (civico[i], id_piazzola[i]))\n curr3.close()\n\n\n if vv[5] != None:\n update_query='''UPDATE elem.elementi SET id_utenza = %s \n WHERE id_piazzola = %s and tipo_elemento in (170, 178)'''\n logging.debug(update_query)\n curr3 = conn.cursor()\n curr3.execute(update_query, (vv[5], id_piazzola[i]))\n curr3.close()\n\n # numero_civico, 6\n if vv[6] != None:\n update_query='''UPDATE elem.elementi SET numero_civico = %s \n WHERE id_piazzola = %s and tipo_elemento in (170, 178)'''\n logging.debug(update_query)\n curr3 = conn.cursor()\n curr3.execute(update_query, (vv[6], id_piazzola[i]))\n curr3.close()\n # lettera_civico, 7\n if vv[7] != None:\n update_query='''UPDATE elem.elementi SET lettera_civico = %s\n WHERE id_piazzola = %s and tipo_elemento in (170, 178)'''\n logging.debug(update_query)\n curr3 = conn.cursor()\n curr3.execute(update_query, (vv[7], id_piazzola[i]))\n curr3.close()\n # colore_civico, 8\n if vv[8] != None:\n update_query='''UPDATE elem.elementi SET colore_civico = %s \n WHERE id_piazzola = %s and tipo_elemento in (170, 178)'''\n logging.debug(update_query)\n curr3 = conn.cursor()\n curr3.execute(update_query, (vv[8], id_piazzola[i]))\n curr3.close()\n # note, 9\n if vv[9] != None:\n update_query='''UPDATE elem.elementi SET note = %s \n WHERE id_piazzola = %s and tipo_elemento in (170, 178)'''\n logging.debug(update_query)\n curr3 = conn.cursor()\n curr3.execute(update_query, (vv[9], id_piazzola[i]))\n curr3.close()\n \n #check per non fare doppia importazione nel caso in cui in precedenza ci fosse più di un cliente\n c+=1\n \n i+=1\n\n curr.close()\n conn.close()\n\n \n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"amiugete/script_sit_amiu","sub_path":"import_elementi.py","file_name":"import_elementi.py","file_ext":"py","file_size_in_byte":10868,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10380294446","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nurlpatterns = [\n path('View_Lab_report_user/',views.View_Lab_report_user, name='View_Lab_report_user'),\n path('View_Prescription_user/',views.View_Prescription_user, name='View_Prescription_user'),\n path('',views.PatientHome, name='PatientHome'),\n path('updatePatientExtraForm_c/',views.updatePatientExtraForm_c, name='updatePatientExtraForm_c'),\n path('Patient_Signin',views.Patient_Signin, name='Patient_Signin'),\n path('Patient_Signup',views.Patient_Signup, name='Patient_Signup'),\n path('Patient_done',views.Patient_done, name='Patient_done'),\n path('Patient_Prescription',views.Patient_Prescription, name='Patient_Prescription'),\n path('Patient_Report',views.Patient_Report, name='Patient_Report'),\n path('updatePatientExtraForm/',views.updatePatientExtraForm, name='updatePatientExtraForm'),\n] \n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"Paliwalmahesh/Health_in_hand","sub_path":"hospital_system/Patient_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"27613343666","text":"# Code by Lukas WinklerPrins\n# lukas_wp@berkeley.edu\n# Last modified Dec 2 2020\n\nfrom dgs import *\nimport os, glob\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\nimport numpy as np\nplt.style.use('fivethirtyeight')\n\ndef run_tomales_gs():\n files = glob.glob('tomales/crops/*.jpeg')\n resolution = 1 # Doesn't seem to work if I make it 10/158\n maxscale = 3\n verbose = 1\n x = -1 # Gonna use this as a tuning parameter...\n\n actual_resolution = 10/158\n\n sieving_bins = [0.05, 0.063, 0.09, 0.125, 0.18, 0.25, 0.355, 0.5, 0.71, 1, 1.4, 2, 2.8, 4, 5.6, 8, 11.2, 16]\n # in mm 0.5 represents finer\n with open('test_sieves.csv') as csvfile:\n csvreader = csv.reader(csvfile,quoting=csv.QUOTE_NONNUMERIC)\n for row in csvreader:\n total = sum(row)\n # new_row = [i/total for i in row]\n # new_row = np.cumsum(new_row)\n new_row = np.cumsum(row)/total\n plt.plot(sieving_bins,new_row,'r',alpha=0.7)\n i = np.where(new_row > 0.5)\n\n for f in tqdm(files):\n filename = f + '_percentiles.csv'\n data_out = dgs(f, resolution, maxscale, verbose, x)\n\n with open(filename,'w') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(data_out['percentile_values']*actual_resolution)\n csvwriter.writerow(data_out['percentiles'])\n\n filename = f + '_bins.csv'\n\n with open(filename,'w') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(data_out['grain size bins']*actual_resolution)\n csvwriter.writerow(data_out['grain size frequencies'])\n\n # plt.plot(data_out['grain size bins']*actual_resolution,data_out['grain size frequencies'],'b',alpha=0.7)\n plt.plot(data_out['percentile_values']*actual_resolution,data_out['percentiles'],'b',alpha=0.7)\n\n plt.show()\n\n\nif __name__ == '__main__':\n\n # all images in data folder, with plot\n run_tomales_gs()\n","repo_name":"ltwp/tb_grain_photo","sub_path":"tomales_pydgs.py","file_name":"tomales_pydgs.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14575747625","text":"import os\nimport sys\nimport yaml\nimport pytest\nfrom maestro.input import read_roles, read_groups\nfrom maestro.playbooks import gen_concerto, gen_individual_playbook, 
gen_all_groups_playbook\n\n\norchestra = \\\n\"\"\"\ndatabases:\n sql: 1\n mongo: 1\n\ncomputing: 7\n\"\"\"\n\ngroups = read_groups(yaml.safe_load(orchestra))\n\ninstruments = \\\n\"\"\"\ndatabases:\n create_server:\n image: cirros\n external_network: public\n flavor: m1.nano\n\nsql:\n create_server:\n image: cirros\n flavor: m1.medium\n username: l337\n docker:\n\ncomputing:\n docker:\n username: JorgeJesus\n\"\"\"\n\ngroups = read_roles(yaml.safe_load(instruments), groups)\n\nexpected_databases_playbook = \\\n\"\"\"- import_playbook: mongo.yml\n\n- import_playbook: sql.yml\n\"\"\"\n\nexpected_sql_playbook = \\\n\"\"\"- hosts: sql\n gather_facts: yes\n remote_user: l337\n\n tasks:\n\n - name: Execute role \\'docker\\'\n include_role:\n name: docker\n\"\"\"\n\nexpected_mongo_playbook = \\\n\"\"\"- hosts: mongo\n gather_facts: yes\n remote_user: dummy\n\n tasks:\n\"\"\"\n\nexpected_computing_playbook = \\\n\"\"\"- hosts: computing\n gather_facts: yes\n remote_user: dummy\n\n tasks:\n\n - name: Execute role \\'docker\\'\n include_role:\n name: docker\n vars:\n username: JorgeJesus\n\"\"\"\n\nexpected_intermezzo = \\\n\"\"\"- import_playbook: group/databases.yml\n\n- import_playbook: group/computing.yml\"\"\"\n\nexpected_concerto = \\\n\"\"\"# Play 1: Create all servers\n- hosts: localhost\n gather_facts: no\n vars:\n provider: openstack\n\n tasks:\n\n - name: Setup image for servers of group 'computing'\n include_role:\n name: setup_image\n defaults_from: \"{{ provider }}.yml\"\n\n - name: Create servers of group 'computing'\n include_role:\n name: create_server\n defaults_from: \"{{ provider }}.yml\"\n with_items:\n - computing-001\n - computing-002\n - computing-003\n - computing-004\n - computing-005\n - computing-006\n - computing-007\n loop_control:\n loop_var: server\n\n - name: Setup image for servers of group 'mongo'\n include_role:\n name: setup_image\n defaults_from: \"{{ provider }}.yml\"\n\n - name: Create servers of group 'mongo'\n include_role:\n name: create_server\n defaults_from: \"{{ provider }}.yml\"\n vars:\n image: cirros\n external_network: public\n flavor: m1.nano\n with_items:\n - databases-mongo-001\n loop_control:\n loop_var: server\n\n - name: Setup image for servers of group 'sql'\n include_role:\n name: setup_image\n defaults_from: \"{{ provider }}.yml\"\n\n - name: Create servers of group 'sql'\n include_role:\n name: create_server\n defaults_from: \"{{ provider }}.yml\"\n vars:\n username: l337\n flavor: m1.medium\n image: cirros\n external_network: public\n with_items:\n - databases-sql-001\n loop_control:\n loop_var: server\n\n - name: Refresh in-memory openstack cache\n meta: refresh_inventory\n\"\"\"\n\ndef test_gen_individual_playbook():\n\n databases_playbook = gen_individual_playbook(groups[\"databases\"], \"dummy\")\n sql_playbook = gen_individual_playbook(groups[\"sql\"], \"dummy\")\n mongo_playbook = gen_individual_playbook(groups[\"mongo\"], \"dummy\")\n computing_playbook = gen_individual_playbook(groups[\"computing\"], \"dummy\")\n\n assert databases_playbook == expected_databases_playbook\n assert sql_playbook == expected_sql_playbook\n assert mongo_playbook == expected_mongo_playbook\n assert computing_playbook == expected_computing_playbook\n\ndef test_gen_intermezzo():\n intermezzo = gen_all_groups_playbook(groups)\n assert intermezzo == expected_intermezzo\n\ndef test_gen_concerto():\n concerto = gen_concerto(groups, \"openstack\")\n assert concerto == 
expected_concerto\n","repo_name":"ppintosilva/maestro","sub_path":"maestro/tests/test_gen_playbooks.py","file_name":"test_gen_playbooks.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"4792855034","text":"import os\nimport sys\n\n\ndef fix_ldflag(f):\n if not f.startswith('-lrte_'):\n return f\n return '-l:lib' + f[2:] + '.a'\n\n\ndef fix_libs_private(line):\n if not line.startswith('Libs.private'):\n return line\n ldflags = [fix_ldflag(flag) for flag in line.split()]\n return ' '.join(ldflags) + '\\n'\n\n\ndef process_pc_file(filepath):\n print('Processing', filepath)\n with open(filepath) as src:\n lines = src.readlines()\n with open(filepath, 'w') as dst:\n dst.writelines([fix_libs_private(line) for line in lines])\n\n\nif 'MESON_BUILD_ROOT' not in os.environ:\n print('This script must be called from a meson build environment')\n sys.exit(1)\nfor root, dirs, files in os.walk(os.environ['MESON_BUILD_ROOT']):\n pc_files = [f for f in files if f.endswith('.pc')]\n for f in pc_files:\n process_pc_file(os.path.join(root, f))\n","repo_name":"F-Stack/f-stack","sub_path":"dpdk/buildtools/pkg-config/set-static-linker-flags.py","file_name":"set-static-linker-flags.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":3600,"dataset":"github-code","pt":"54"} +{"seq_id":"30096652074","text":"import logging\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, Depends, UploadFile, File\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom starlette import status\n\nfrom app.api.dependencies.db import get_db\nfrom app.models.schema.bse_client_code import InBseClientCodeSchema, BseClientCodeSchema\nfrom app.models.schema.bse_client_code_screen_two import (\n BseClientCodeScreenTwo,\n BseClientCodeScreenThree,\n)\nfrom app.service.bse_client_code import BseClientCodeService\n\nrouter = APIRouter()\nlogger = logging.getLogger(__name__)\n\n\n@router.post(\"/\", status_code=status.HTTP_201_CREATED)\nasync def create_bse_client_code(\n payload: InBseClientCodeSchema, db: AsyncSession = Depends(get_db)\n):\n \"\"\" api to save bse client code data. \"\"\"\n bse_user_account_service = BseClientCodeService(db)\n bse_user_account = await bse_user_account_service.create(payload)\n return bse_user_account\n\n\n@router.patch(\"/\", status_code=status.HTTP_200_OK)\nasync def update_bse_user_account(\n payload: BseClientCodeSchema, db: AsyncSession = Depends(get_db)\n):\n \"\"\" api to update bse client code data. \"\"\"\n bse_user_account_service = BseClientCodeService(db)\n await bse_user_account_service.update(payload)\n\n\n@router.get(\"/{uuid}\", status_code=status.HTTP_200_OK)\nasync def get_bse_user_account(uuid: UUID, db: AsyncSession = Depends(get_db)):\n \"\"\" api to fetch bse client code data by id. \"\"\"\n bse_user_account_service = BseClientCodeService(db)\n bse_user_account = await bse_user_account_service.get_by_id(uuid)\n return bse_user_account\n\n\n@router.get(\"/\", status_code=status.HTTP_200_OK)\nasync def get_bse_user_account(db: AsyncSession = Depends(get_db)):\n \"\"\" api to fetch all bse client code data. 
\"\"\"\n bse_user_account_service = BseClientCodeService(db)\n bse_user_account = await bse_user_account_service.get_all()\n return bse_user_account\n\n\n@router.delete(\"/{uuid}\", status_code=status.HTTP_200_OK)\nasync def delete_bse_user_account(uuid: UUID, db: AsyncSession = Depends(get_db)):\n \"\"\" api to delete bse client code data by id. \"\"\"\n bse_user_account_service = BseClientCodeService(db)\n await bse_user_account_service.delete(uuid)\n\n\n@router.post(\"/upload/signature\")\nasync def create_upload_file(\n user_id: UUID, image: UploadFile = File(...), db: AsyncSession = Depends(get_db)\n):\n \"\"\" api to upload signature. \"\"\"\n bse_user_account_service = BseClientCodeService(db)\n return await bse_user_account_service.upload_file(user_id, image)\n\n\n@router.get(\"/screen/two/{user_id}\", status_code=status.HTTP_200_OK)\nasync def get_screen_two_data(user_id: UUID, db: AsyncSession = Depends(get_db)):\n \"\"\" api to fetch second screen data by user id. \"\"\"\n bse_user_account_service = BseClientCodeService(db)\n return await bse_user_account_service.get_screen_two(user_id)\n\n\n@router.get(\"/screen/three/{user_id}\", status_code=status.HTTP_200_OK)\nasync def get_screen_three_data(user_id: UUID, db: AsyncSession = Depends(get_db)):\n \"\"\" api to fetch third screen data. \"\"\"\n bse_user_account_service = BseClientCodeService(db)\n return await bse_user_account_service.get_screen_three(user_id)\n\n\n@router.patch(\"/screen/two/{user_id}\", status_code=status.HTTP_200_OK)\nasync def update_screen_two_data(\n payload: BseClientCodeScreenTwo, db: AsyncSession = Depends(get_db)\n):\n \"\"\" api to update second screen data. \"\"\"\n bse_user_account_service = BseClientCodeService(db)\n await bse_user_account_service.update_screen_two(payload)\n\n\n@router.patch(\"/screen/three/{user_id}\", status_code=status.HTTP_200_OK)\nasync def update_screen_three_data(\n payload: BseClientCodeScreenThree, db: AsyncSession = Depends(get_db)\n):\n \"\"\" api to update third screen data. \"\"\"\n bse_user_account_service = BseClientCodeService(db)\n return await bse_user_account_service.update_screen_three(payload)\n\n\n@router.delete(\"/by/user/id/\", status_code=status.HTTP_200_OK)\nasync def delete_bse_client_code_by_user_id(\n user_id: UUID, db: AsyncSession = Depends(get_db)\n):\n \"\"\" api to delete bse client code data. \"\"\"\n bse_client_code_service = BseClientCodeService(db)\n return await bse_client_code_service.delete(user_id)\n","repo_name":"NaveenBalram/FliberAPI","sub_path":"FliberAPI/app/api/routes/bse_client_code.py","file_name":"bse_client_code.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15842912349","text":"\"\"\"\npep3110.py\nCreated by Peng Xiao on 2018-07-23. 
xiaoquwl@gmail.com\n\"\"\"\n\nfrom __future__ import print_function\n\n\ndef compute(a, b):\n try:\n return a / b\n except (TypeError, ZeroDivisionError, Exception) as e:\n print('has error')\n print(type(e))\n print(e)\n\nif __name__ == \"__main__\":\n compute(1, 0)\n","repo_name":"xiaopeng163/python3-new-feature","sub_path":"pep3110.py","file_name":"pep3110.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23049833986","text":"from decimal import Decimal\nimport settings\nfrom trading_bot import bitso_client, okx_client\n\n\ndef get_buy_order_price(max_price, ref_price, spread=None, greedy_mood=True):\n if spread is None:\n spread = settings.MIN_SPREAD\n max_price = max_price * Decimal(1 - spread / 100)\n if ref_price > max_price or not greedy_mood:\n return max_price\n return ref_price + settings.ORDER_PRICE_DELTA\n\n\ndef get_sell_order_price(min_price, ref_price, spread=None, greedy_mood=True):\n if spread is None:\n spread = settings.MIN_SPREAD\n min_price = min_price * Decimal(1 + spread / 100)\n if ref_price < min_price or not greedy_mood:\n return min_price\n return ref_price - settings.ORDER_PRICE_DELTA\n\n\ndef get_order_value(max_balance, price, max_order_value=20_000.00, side=\"buy\"):\n # Setting order value\n MAX_ORDER_VALUE = Decimal(str(max_order_value))\n\n if side == \"buy\":\n if max_balance > MAX_ORDER_VALUE:\n return MAX_ORDER_VALUE\n else:\n return max_balance\n\n order_value = max_balance * price\n\n if order_value > MAX_ORDER_VALUE:\n return MAX_ORDER_VALUE\n\n return order_value\n\n\ndef get_external_price(market, ask=True):\n source = settings.PRICE_SOURCE_RULES[market]\n if source == settings.BITSO:\n client = bitso_client\n elif source == settings.OKX:\n client = okx_client\n else:\n raise NotImplementedError(f\"Source price rule not defined for {market} market\")\n if ask:\n return client.get_ask_price(market=market)\n return client.get_bid_price(market=market)\n","repo_name":"cpleonardo/simple-market-maker","sub_path":"trading_bot/price_source.py","file_name":"price_source.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"10928174811","text":"#-*- coding: UTF-8 -*-\n\nclass Solution(object):\n def hIndex(self, citations):\n \"\"\"\n :type citations: List[int]\n :rtype: int\n \"\"\"\n if not citations:\n return 0\n citations.sort(reverse=True)\n res = max(min(citations[i], i+1) for i in range(len(citations)))\n return res\n\n\nif __name__ == '__main__':\n solu = Solution()\n citations = [3,0,6,1,5]\n citations2 = [3, 3, 5, 8, 25]\n res = solu.hIndex(citations2)\n print(res)","repo_name":"Macielyoung/LeetCode","sub_path":"274. 
H-Index/H-Index.py","file_name":"H-Index.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36098112135","text":"__author__ = 'Si Yi Wu'\n\nimport boto.ec2\nimport os\n\nAWS_KEY_ID = 'AKIAJWKFMXPDMTABNXUA'\nAWS_SECRET_ACCESS_KEY = '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'\nKEY_NAME = 'greenLight'\nSECURITY_GROUP_NAME = 'csc326-group26'\n\n\n#establish connection to amazon\nawsConnection = boto.ec2.connect_to_region (\"us-east-1\",aws_access_key_id=AWS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\nprint (\"connection successfully established.\\n\")\n\n\n\n#delete any previously created key pairs with the key name\nawsConnection.delete_key_pair(KEY_NAME, dry_run=False)\nos.system(\"rm -f %s\" % './' + KEY_NAME+'.pem')\nprint (\"key_pair deleted successfully.\\n\")\n\n\n#delete any existing security group with same name\nawsConnection.delete_security_group(name=SECURITY_GROUP_NAME)\nprint (\"security group deleted successfully.\\n\")\n","repo_name":"ceciliawu/mini-search-engine","sub_path":"lab2_group_26/DeleteAwsKeyPair.py","file_name":"DeleteAwsKeyPair.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21787591902","text":"#coding:utf-8\n#\nimport re\nimport requests\n\n# Read the source HTML file\nf = open('source.html','r',encoding='utf-8')\nhtml = f.read()\nf.close()\n\n# Match the image attributes\nzhiye_div = re.findall('
<div class=\"zhiye\">(.*?)</div>
',html,re.S)[0]\n\npic_url = re.findall('', methods=['PUT'])\ndef update_comment(id):\n    comment = Comment.query.get(id)\n    if not comment:\n        return jsonify({'error_code': 400, 'result': 'not ok'}), 200\n    if request.json.get('text'):\n        comment.text = request.json.get('text')\n    db.session.commit()\n    comment = Comment.query.get(id)\n    information = response_builder(comment, Comment)\n    return jsonify({'error_code': 200, 'result': information}), 200\n\n\n@mod.route('/<int:id>', methods=['GET'])\ndef get_comment(id):\n    comment = Comment.query.get(id)\n    if not comment:\n        return jsonify({'error_code': 400, 'result': 'not ok'}), 200  # comment with `id` doesn't exist\n    information = response_builder(comment, Comment)\n    return jsonify({'error_code': 200, 'result': information}), 200\n\n\n@mod.route('/', methods=['GET'])\ndef get_all_comments():\n    comments = []\n    for comment in Comment.query.filter_by(is_deleted=0):\n        information = response_builder(comment, Comment)\n        comments.append(information)\n    return jsonify({'error_code': 200, 'result': comments}), 200\n\n\n@mod.route('/<int:id>', methods=['DELETE'])\ndef delete_comment(id):\n    comment = Comment.query.get(id)\n    if not comment:\n        return jsonify({'error_code': 400, 'result': 'not ok'}), 200  # comment with `id` doesn't exist\n    db.session.delete(comment)\n    db.session.commit()\n    return jsonify({'error_code': 200}), 200","repo_name":"ClingApp/cling_api","sub_path":"app/api/comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23440411161","text":"import random\nimport json\n\nclass PointOffset:\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n    @staticmethod\n    def rounds():\n        return [PointOffset(-1, -1), PointOffset(0, -1), PointOffset(1, -1),\n                PointOffset(-1, 0), PointOffset(1, 0),\n                PointOffset(-1, 1), PointOffset(0, 1), PointOffset(1, 1)]\n\nclass Cell:\n    def __init__(self, isOpen, isFlag, isMine):\n        self.isOpen = isOpen\n        self.isFlag = isFlag \n        self.isMine = isMine \n    \n    def __repr__(self):\n        return json.dumps(self.getPublicInfo())\n\n    def getPublicInfo(self):\n        result = {\n            'open': int(self.isOpen),\n            'flag': int(self.isFlag)\n        }\n        if self.isOpen:\n            result['mine'] = int(self.isMine)\n        return result\n\nclass Field:\n    def __init__(self, width, height, mine):\n        self.cells = list()\n        self.width = width \n        self.height = height \n        self.mine = mine\n        for i in range(self.width * self.height):\n            cellMine = True if i < mine else False\n            self.cells.append(Cell(False, False, cellMine))\n        random.shuffle(self.cells)\n    \n    def __repr__(self):\n        return json.dumps(self.getPublicInfo())\n    \n    def getPublicInfo(self):\n        rest = self.mine\n        publicCells = list()\n        for i, cell in enumerate(self.cells):\n            cellInfo = cell.getPublicInfo()\n            # Boom! An opened non-mine cell gets its neighbour-mine count\n            if cellInfo[\"open\"] == 1 and cellInfo[\"mine\"] == 0:\n                cellInfo[\"number\"] = self.roundNum(\n                    i % self.width, int(i / self.width)\n                )\n            # A flag is planted: one less mine to account for\n            if cellInfo[\"open\"] == 0 and cellInfo[\"flag\"] == 1:\n                rest = rest - 1\n            publicCells.append(cellInfo)\n\n        status = \"continue\"\n        if self.isOver():\n            status = \"over\"\n        elif self.isClear():\n            status = \"cleared\"\n\n        result = {\n            \"cells\": publicCells,\n            \"width\": self.width,\n            \"height\": self.height,\n            \"mine\": self.mine,\n            \"status\": status,\n            \"rest\": rest\n        }\n\n        return result\n\n    def cell(self, x, y):\n        if 0 > x or 0 > y or self.width <= x or self.height <= y:\n            return None\n        return self.cells[y * self.width + x]\n    \n    def open(self, x, y):\n        
cell = self.cell(x, y)\n        if cell is None:\n            return \n        # Do nothing if the cell is already open\n        if cell.isOpen:\n            return\n        cell.isOpen = True\n        if not cell.isMine:\n            if self.roundNum(x, y) == 0:\n                for offset in PointOffset.rounds():\n                    self.open(x + offset.x, y + offset.y)\n\n    def flag(self, x, y, isFlag):\n        cell = self.cell(x, y)\n        if cell is None:\n            return \n        cell.isFlag = isFlag\n\n    def roundNum(self, x, y):\n        round = 0\n        for offset in PointOffset.rounds():\n            if self.isMine(x + offset.x, y + offset.y):\n                round += 1\n        return round\n\n    # Returns True if the specified cell exists and is a mine","repo_name":"moeka802/minesweeper","sub_path":"bg-app/ms_model.py","file_name":"ms_model.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73052609440","text":"__author__ = 'inamoto21'\n\n\ndef binary_sum(S, start, stop):\n\n    \"\"\"\n    :param S: sequence\n    :param start: start\n    :param stop: end\n    :return: the sum of the slice of the sequence\n    \"\"\"\n\n    if start >= stop:\n        return 0\n    elif start == stop - 1:\n        return S[start]\n    else:\n        mid = (start + stop) // 2\n        return binary_sum(S, start, mid) + binary_sum(S, mid, stop)\n\n\ndef main():\n    seq = range(3, 10, 3)\n    print(binary_sum(seq, 0, len(seq)))\n\n\nif __name__ == '__main__':\n    main()","repo_name":"GeorgiosKyritsis/data_structures_and_algorithms_in_python","sub_path":"04.Recursion/Binary Sum.py","file_name":"Binary Sum.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43663072581","text":"import os\nimport torch\nimport torch.nn as nn\n\nfrom .DistanceMetric import DistanceMetric\nfrom .FeatureExtractor import FeatureExtractor\n\nclass DTN(nn.Module):\n    '''\n    Args:\n        hid_dim: dimension for hidden vectors\n        m: # of images augmented in each batch (default=0, not hall)\n        distance_type: what kind of distance metric is used\n    '''\n    def __init__(self, hid_dim=64, hall=True, distance_type='cosine'):\n        super().__init__()\n        self.hid_dim = hid_dim\n        self.hall = hall\n        self.distance_type = distance_type\n\n        self.distance = DistanceMetric(distance_type, hid_dim)\n        self.encoder = FeatureExtractor(fc_dim=hid_dim) # (bs, hid_dim)\n        self.hallucinator = DTNHallucinator(hid_dim)\n\n        self.apply(init_weights)\n\n        # for m in self.children():\n        #     init_weights(m)\n\n    def forward(self, shot, query, ref1, ref2, num_way):\n        # Input: shot = (shot*way, 3, 84, 84)\n        # Input: query = (query, )\n        # Input: ref1, ref2 = (H, 3, 84, 84)\n        # Output: (query, way)\n\n        num_shot = shot.size(0) // num_way \n        num_query = query.size(0)\n        proto_s = self.encoder(shot) # (shot*num_way, hid_dim)\n        proto_q = self.encoder(query) # (query, hid_dim)\n        proto_s = proto_s.reshape(num_shot, num_way, -1)\n\n        \n        # Randomly select a data as seed\n        # Hallucinate it and concat with original shot \n        if self.hall:\n            ind = torch.randperm(num_shot)\n            support_feat = proto_s[ind].view(1, num_way, -1).detach() # (way, hid_dim)\n            ref_feat1 = self.encoder(ref1) # (H, hid_dim)\n            ref_feat2 = self.encoder(ref2) \n            imaginary_data = self.hallucinator(support_feat,\n                                            ref_feat1, ref_feat2) # (H, way, hid_dim)\n\n            proto_s = torch.cat((proto_s, imaginary_data), dim=0)\n        \n        # Average the prototypes within shot => (num_way, hid_dim)\n        proto_s = proto_s.mean(dim=0)\n        logits = self.distance(proto_q, proto_s)\n\n        return logits\n\n\nclass DTNHallucinator(nn.Module):\n    def __init__(self, hid_dim):\n        super().__init__()\n        self.diversify = AddDiversity(hid_dim)\n        
self.generator = nn.Sequential(\n            nn.Linear(hid_dim*2, hid_dim),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Dropout(0.5)\n        )\n\n    def forward(self, support_feat, ref_feat1, ref_feat2):\n        # support_feat: (1, way, hid_dim)\n        # ref_feat1, ref_feat2: (H, hid_dim)\n        # This will create H new features\n        num_way = support_feat.size(1)\n        ref_feat1 = ref_feat1.unsqueeze(1).expand(-1, num_way, -1)\n        ref_feat2 = ref_feat2.unsqueeze(1).expand(-1, num_way, -1)\n\n        diversified = self.diversify(support_feat, ref_feat1, ref_feat2)\n        rebuild = self.generator(diversified)\n        #rebuild = self.l2_norm(rebuild)\n\n        return rebuild # (H, way, hid_dim)\n\n    def l2_norm(self, input):\n        input_size = input.size()\n        buffer = torch.pow(input, 2)\n\n        norm = torch.sum(buffer, 1).add_(1e-10)\n        norm = torch.sqrt(norm)\n\n        _output = torch.div(input, norm.view(-1, 1).expand_as(input))\n\n        output = _output.view(input_size)\n\n        return output\n\n\nclass AddDiversity(nn.Module):\n    def __init__(self, hid_dim):\n        super().__init__()\n        self.encode = nn.Sequential(\n            nn.Linear(hid_dim, hid_dim*2),\n            nn.LeakyReLU(0.2, inplace=True)\n        ) \n        self.dropout = nn.Dropout(0.5)\n\n    def forward(self, A, B1, B2):\n        A, B1, B2 = self.encode(A), self.encode(B1), self.encode(B2)\n        out = A + (B1 - B2)\n        out = self.dropout(out)\n        return out\n\n\ndef init_weights(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1:\n        nn.init.normal_(m.weight.data, 0.0, 0.02)\n    elif classname.find('BatchNorm') != -1:\n        nn.init.normal_(m.weight.data, 1.0, 0.02)\n        nn.init.constant_(m.bias.data, 0)\n\nif __name__ == '__main__':\n    from torchsummary import summary\n    # Input: shots = (shot*way, 3, 84, 84)\n    # Input: query = (query, 3, 84, 84)\n    # Input: ref1, ref2 = (H, 3, 84, 84)\n    # Output: (query, way)\n    shot = 1\n    query = 15\n    way = 5\n    H = 100\n    hid_dim = 64\n    shot = torch.rand((shot*way, 3, 84, 84)).cuda()\n    query = torch.rand((query, 3, 84, 84)).cuda()\n    ref_feat1 = torch.rand((H, 3, 84, 84)).cuda()\n    ref_feat2 = torch.rand((H, 3, 84, 84)).cuda()\n\n\n    model = DTN(64).cuda()\n    logits = model(shot, query, ref_feat1, ref_feat2, way)\n    print(logits.shape)\n    #print(model)","repo_name":"pavlion/NTU-DLCV-Fall2020","sub_path":"hw4/src/models/DTN.py","file_name":"DTN.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"31010762556","text":"'''\nCreated on 10 Dec 2012\n\n@author: musselle\n'''\nimport os \nfrom os.path import join as joinp\nimport sys \n\nimport glob\nimport socket\nimport re\n\nfrom utils import get_path_prefix\nfrom utils.preprocess import Preprocessor, ConfigClass\nfrom utils.cluster import ClusterClass\nimport cPickle as pkl\n\nfrom utils.database import Popgen_db\n\nstarting_dir = os.getcwd()\nc = ConfigClass()\n\n# Work out where data is stored on this machine\nprefix = get_path_prefix()\n\n#==============================================================================\n''' Filter and Clean SCRIPT FOR ALL READS IN Gazelles-Zebras RAD-data'''\n#===============================================================================\n\nc.root = 'gazelles-zebras'\n\nc.db_name = 'gz_allg-allz.db'\n\ndb_path = joinp(prefix, c.root) \n\ndb = Popgen_db(joinp(db_path, c.db_name), recbyname=True, new=True)\n\n# Testing\ntesting = False \nif testing: \n    #testfile = 'testset_10m.fastq.bgzf'\n    testfile = 'testset_500.fastq.bgzf'\n\n#===============================================================================\n# Setup 
Configuration\n#===============================================================================\n\n# Set paths \nif testing:\n c.data_inpath = joinp(prefix, c.root, 'testset')\nelse:\n c.data_inpath = joinp(prefix, c.root, 'raw-data') \nc.barcode_inpath = joinp(prefix, c.root , 'barcodes')\nc.filtered_outpath = joinp(prefix, c.root , 'processed-data')\nc.tag_processed_outpath = joinp(prefix, c.root, 'processed-data')\nc.tag_split_outpath = joinp(prefix, c.root, 'processed-data', 'per-species')\nc.clusters_outpath = joinp(prefix, c.root, 'clusters')\nc.cdhit_path = os.path.expanduser(\"~/bin/cd-hit-v4.6.1/\")\n\n\n# Set interim file suffixes\nc.filtered_files_postfix = '-pass'\nc.tag_processed_files_postfix = '-clean'\n\n# MIDtags\nc.cutsite = 'TGCAGG'\nc.max_edit_dist = 2\n \n# FILTERING\n# Whether to log reads that fail the filtering \nc.log_fails = False\n\n#===============================================================================\n# Update/input samples-datafiles info in Database \n#===============================================================================\n \n# Testing \nif testing:\n L8_barcode_files = glob.glob(joinp(c.barcode_inpath, '*[8].txt')) \n datafiles = glob.glob(joinp(c.data_inpath, testfile))\n db.add_barcodes_datafiles(L8_barcode_files, datafiles, datafile_type='raw_mixed')\nelse:\n# os.chdir(c.barcode_inpath)\n L6_barcode_files = glob.glob(joinp(c.barcode_inpath, '*[6].txt')) \n L8_barcode_files = glob.glob(joinp(c.barcode_inpath, '*[8].txt')) \n L6_datafiles = glob.glob(joinp(c.data_inpath, 'lane6*bgzf'))\n L8_datafiles = glob.glob(joinp(c.data_inpath, 'lane8*bgzf'))\n\n # Associate subsets of the data files list to their respective barcode files. \n db.add_barcodes_datafiles(L6_barcode_files, L6_datafiles, datafile_type='raw_mixed')\n db.add_barcodes_datafiles(L8_barcode_files, L8_datafiles, datafile_type='raw_mixed')\n\n# Define Preprocessing Class and set inputs\nPreprocess = Preprocessor(c)\n\nif testing:\n Preprocess.set_input_files(data_inpath=c.data_inpath, file_pattern=testfile)\nelse:\n Preprocess.set_input_files(data_inpath=c.data_inpath, file_pattern='lane*bgzf')\n \nPreprocess.db = db # Pass database reference to Preprocessor Object\n\n\n#===============================================================================\n# Setup Filtering Parameters\n#===============================================================================\np = {'filtering' : {'propN': 0.1,\n 'phred': 25,\n 'cutsite_edit_dist' : 2,\n 'overhang_edit_dist' : 0},\n 'cleaning' : {'max_edit_dist' : 1 }}\n\n# Insert into filter_parameters table\nc.filterparam_id = db.insert_binary(p, col='params', table='filtering_parameters')\n\nPreprocess.filter_functions = [\n Preprocess.make_propN_filter(p['filtering']['propN']),\n Preprocess.make_phred_filter(p['filtering']['phred']),\n Preprocess.make_cutsite_filter(max_edit_dist=p['filtering']['cutsite_edit_dist']),\n Preprocess.make_overhang_filter('TCGAGG', 'GG', p['filtering']['overhang_edit_dist'])\n ]\n\n#===============================================================================\n# Run Filtering\n#===============================================================================\nPreprocess.filter_reads_pipeline()\n\n#===============================================================================\n# Process and Correct MID tag \n#===============================================================================\nPreprocess.process_MIDtag(max_edit_dist = p['cleaning']['max_edit_dist'])\nPreprocess.cleanup_files('filtered') # Remove 
filtered intermediate files \n\n# Store or pass on config file to clustering section\n# Pickle config \npkl.dump(c, open(joinp(prefix, c.root, 'config.pkl'), 'wb'))\n","repo_name":"MrKriss/popGen","sub_path":"archive/runscripts/filter_clean_script.py","file_name":"filter_clean_script.py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1279768955","text":"'''\n@Author: your name\n@Date: 2019-11-28 15:40:29\n@LastEditTime: 2019-11-28 17:01:11\n@LastEditors: Please set LastEditors\n@Description: In User Settings Edit\n@FilePath: \\RealTimeDspFIA\\Ex4-15.py\n'''\n# Band-pass filter a noisy sine wave; lfilter needs a digital design, so butter() is called with analog=False\n\nfrom scipy import signal\nimport matplotlib.pyplot\nimport numpy\nimport scipy\n\nN = 400\nFs = 1000\n\n# Generate the sample index sequence 0 to N-2\nn = numpy.arange(0, N-1, 1, dtype=int)\n# print(n)\n# Generate a random sequence with unit variance\ndelta = numpy.sqrt(3)\nxn = numpy.random.uniform(-1*delta, delta, size=N-1)\n\n# Generate a sine sequence\n# Normalized angular frequency 2πf/fs\nomega = 0.8*numpy.pi\n\n# Generate the sine wave sequence\nsn = numpy.sin(omega*n)\n\nxn = xn+30*sn\n# xn = sn\n# Passband edge frequencies, float array\nWp = numpy.array([140, 160],dtype=float)/(Fs/2)\n# Stopband edge frequencies, float array\nWs = numpy.array([130, 170],dtype=float)/(Fs/2)\n# Passband ripple (dB)\nRp = 3\n# Stopband attenuation (dB)\nRs = 40\n# Compute the filter order\nNn,Wn = scipy.signal.buttord(Wp, Ws, Rp, Rs)\n# Digital IIR band-pass filter\nb,a = scipy.signal.butter(Nn, Wn, btype='band',analog=False)\n# Filter the noisy signal\ny = scipy.signal.lfilter(b, a, xn)\n\n# Name the figure with the figure function\nmatplotlib.pyplot.figure('例 4.15 时域信号')\n# Arrange the subplots with the subplot function\nmatplotlib.pyplot.subplot(221)\n# Plot the noisy input with the plot function\nmatplotlib.pyplot.plot(n,xn)\n# Arrange the subplots with the subplot function\nmatplotlib.pyplot.subplot(222)\n# Plot the filtered output with the plot function\nmatplotlib.pyplot.plot(n,y)\n\nXk = numpy.fft.fft(xn)\nYk = numpy.fft.fft(y)\n# Arrange the subplots with the subplot function\nmatplotlib.pyplot.subplot(223)\n# Plot the input spectrum (dB)\nmatplotlib.pyplot.plot(n,20*numpy.log10(abs(Xk)))\n# Arrange the subplots with the subplot function\nmatplotlib.pyplot.subplot(224)\n# Plot the output spectrum (dB)\nmatplotlib.pyplot.plot(n,20*numpy.log10(abs(Yk)))\n# Display with the show function\nmatplotlib.pyplot.show()\n\n# EOF","repo_name":"combawyp/RealTimeDspFIA","sub_path":"Ex4-15.py","file_name":"Ex4-15.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6873400563","text":"def solution(K, numbers, up_down):\n    left = 1\n    right = K\n    for num, word in zip(numbers, up_down):\n        if word == \"UP\":\n            left = max(num, left) # if the answer is above the entered number, store the larger of left and num in left\n\n        elif word == \"DOWN\":\n            right = min(num, right) # if the answer is below the entered number, store the smaller of right and num in right\n\n        elif word == \"RIGHT\":\n            return 1\n    return right - left - 1 # return the count of numbers that can still be the answer\n\n\n# The code below prints the test-case outputs.\nK1 = 10\nnumbers1 = [4, 9, 6]\nup_down1 = [\"UP\", \"DOWN\", \"UP\"]\nret1 = solution(K1, numbers1, up_down1)\n\n# Press the [Run] button to see the output.\nprint(\"solution 함수의 반환 값은\", ret1, \"입니다.\")\n\nK2 = 10\nnumbers2 = [2, 1, 6]\nup_down2 = [\"UP\", \"UP\", \"DOWN\"]\nret2 = solution(K2, numbers2, up_down2)\n\n# Press the [Run] button to see the output.\nprint(\"solution 함수의 반환 값은\", ret2, \"입니다.\")\n\nK3 = 100\nnumbers3 = [97, 98]\nup_down3 = [\"UP\", \"RIGHT\"]\nret3 = solution(K3, numbers3, up_down3)\n\n# Press the [Run] button to see the output.\nprint(\"solution 함수의 반환 값은\", ret3, \"입니다.\")","repo_name":"saevyeokvyeol/python_algorithm_study","sub_path":"yuda/COS1/0825_03.py","file_name":"0825_03.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"18405067478","text":"from dash import Dash, html, dcc\r\nimport pandas as pd\r\nimport pymysql\r\nimport datetime\r\nfrom dash.dependencies import Output, Input\r\n\r\nconn = pymysql.connect(\r\n host=\"127.0.0.1\",\r\n user=\"song1\",\r\n password=\"1q2w3e4r\",\r\n db=\"yangsong2\",\r\n charset=\"utf8\",\r\n autocommit=True,\r\n cursorclass=pymysql.cursors.DictCursor,\r\n)\r\n\r\nexternal_stylesheets = [\r\n {\r\n \"href\": \"https://fonts.googleapis.com/css2?\" \"family=Lato:wght@400;700&display=swap\",\r\n \"rel\": \"stylesheet\",\r\n },\r\n]\r\n\r\napp = Dash(__name__, external_stylesheets=external_stylesheets)\r\napp.title = \"양송이 데이터\"\r\n\r\n\r\ndef serve_layout():\r\n cur = conn.cursor()\r\n cur.execute(\"select stamp from yangsong2.yang1 limit 1\")\r\n result = cur.fetchone()\r\n min_date = result[\"stamp\"].date()\r\n cur.execute(\"select stamp from yangsong2.yang1 order by id desc limit 1\")\r\n result = cur.fetchone()\r\n max_date = result[\"stamp\"].date()\r\n return html.Div(\r\n children=[\r\n html.Div(\r\n children=[\r\n html.P(\r\n children=\"📈\",\r\n className=\"header_emoji\",\r\n ),\r\n html.H1(\r\n children=\"양송이 데이터\",\r\n className=\"header_title\",\r\n ),\r\n html.P(\r\n children=\"이산화탄소, 온도, 습도\",\r\n className=\"header_description\",\r\n ),\r\n ],\r\n className=\"header\",\r\n ),\r\n html.Div(\r\n children=[\r\n html.Div(\r\n children=[\r\n html.Div(children=\"데이터\", className=\"menu-title\"),\r\n dcc.Dropdown(\r\n id=\"type-filter\",\r\n options=[\r\n {\"label\": \"이산화탄소\", \"value\": \"co2\"},\r\n {\"label\": \"온도\", \"value\": \"temperature\"},\r\n {\"label\": \"습도\", \"value\": \"humidity\"},\r\n ],\r\n value=\"co2\",\r\n clearable=False,\r\n searchable=False,\r\n className=\"dropdown\",\r\n ),\r\n ],\r\n ),\r\n html.Div(\r\n children=[\r\n html.Div(children=\"날짜\", className=\"menu-title\"),\r\n dcc.DatePickerSingle(\r\n id=\"date-single\",\r\n min_date_allowed=min_date,\r\n max_date_allowed=max_date,\r\n initial_visible_month=max_date,\r\n date=max_date,\r\n ),\r\n ]\r\n ),\r\n dcc.Interval(id=\"interval-component\", interval=60000, n_intervals=0),\r\n ],\r\n className=\"menu\",\r\n ),\r\n html.Div(\r\n children=[\r\n html.Div(\r\n dcc.Graph(\r\n id=\"data-chart\",\r\n config={\"displayModeBar\": False},\r\n ),\r\n className=\"card\",\r\n ),\r\n ],\r\n className=\"wrapper\",\r\n ),\r\n ]\r\n )\r\n\r\n\r\napp.layout = serve_layout\r\n\r\n\r\n@app.callback(\r\n [Output(\"data-chart\", \"figure\")],\r\n [\r\n Input(\"type-filter\", \"value\"),\r\n Input(\"date-single\", \"date\"),\r\n Input(\"interval-component\", \"n_intervals\"),\r\n ],\r\n)\r\ndef update_chart(data_type, date, n_intervals):\r\n cur = conn.cursor()\r\n if data_type == \"co2\":\r\n sql = f\"select date_format(stamp,'%Y-%m-%d %H:%i') as stamp, ROUND(AVG(CO2), 2) as CO2 from yangsong2.yang1 WHERE DATE_FORMAT(stamp, '%Y-%m-%d')='{date}' GROUP BY 1\"\r\n cur.execute(sql)\r\n dfdf = pd.DataFrame(cur.fetchall())\r\n if dfdf.empty:\r\n x = []\r\n y = []\r\n else:\r\n x = pd.to_datetime(dfdf.stamp)\r\n y = dfdf.CO2\r\n data_chart_figure = {\r\n \"data\": [\r\n {\r\n \"x\": x,\r\n \"y\": y,\r\n \"line\": {\"shape\": \"spline\"},\r\n \"type\": \"lines\",\r\n \"hovertemplate\": \"%{x} 이산화탄소 : %{y}\" \"\",\r\n },\r\n ],\r\n \"layout\": {\r\n \"title\": {\r\n \"text\": \"이산화탄소 데이터(ppm)\",\r\n \"x\": 0.5,\r\n \"xanchor\": \"center\",\r\n },\r\n \"xaxis\": {\"tickformat\": \"%H시 %M분\"},\r\n \"yaxis\": {\r\n \"ticksuffix\": \"ppm\",\r\n },\r\n \"colorway\": [\"#119dff\"],\r\n },\r\n }\r\n elif 
data_type == \"temperature\":\r\n sql = f\"select date_format(stamp,'%Y-%m-%d %H:%i') as stamp, ROUND(AVG(temperature), 1) as temperature from yangsong2.yang1 WHERE DATE_FORMAT(stamp, '%Y-%m-%d')='{date}' GROUP BY 1\"\r\n cur.execute(sql)\r\n res = cur.fetchall()\r\n dfdf = pd.DataFrame(res)\r\n if dfdf.empty:\r\n x = []\r\n y = []\r\n else:\r\n x = pd.to_datetime(dfdf.stamp)\r\n y = dfdf.temperature\r\n data_chart_figure = {\r\n \"data\": [\r\n {\r\n \"x\": x,\r\n \"y\": y,\r\n \"line\": {\"shape\": \"spline\"},\r\n \"type\": \"lines\",\r\n \"hovertemplate\": \"%{x} 온도 : %{y}\" \"\",\r\n },\r\n ],\r\n \"layout\": {\r\n \"title\": {\r\n \"text\": \"온도 데이터(℃)\",\r\n \"x\": 0.5,\r\n \"xanchor\": \"center\",\r\n },\r\n \"xaxis\": {\"tickformat\": \"%H시 %M분\"},\r\n \"yaxis\": {\"ticksuffix\": \"℃\", \"range\": [-10, 50]},\r\n \"colorway\": [\"#17B897\"],\r\n },\r\n }\r\n elif data_type == \"humidity\":\r\n sql = f\"select date_format(stamp,'%Y-%m-%d %H:%i') as stamp, ROUND(AVG(humidity), 1) as humidity from yangsong2.yang1 WHERE DATE_FORMAT(stamp, '%Y-%m-%d')='{date}' GROUP BY 1\"\r\n cur.execute(sql)\r\n dfdf = pd.DataFrame(cur.fetchall())\r\n if dfdf.empty:\r\n x = []\r\n y = []\r\n else:\r\n x = pd.to_datetime(dfdf.stamp)\r\n y = dfdf.humidity\r\n data_chart_figure = {\r\n \"data\": [\r\n {\r\n \"x\": x,\r\n \"y\": y,\r\n \"line\": {\"shape\": \"spline\"},\r\n \"type\": \"lines\",\r\n \"hovertemplate\": \"%{x} 습도 : %{y}\" \"\",\r\n },\r\n ],\r\n \"layout\": {\r\n \"title\": {\r\n \"text\": \"습도 데이터(%)\",\r\n \"x\": 0.5,\r\n \"xanchor\": \"center\",\r\n },\r\n \"xaxis\": {\"tickformat\": \"%H시 %M분\"},\r\n \"yaxis\": {\"range\": [0, 100], \"ticksuffix\": \"%\"},\r\n \"colorway\": [\"#E12D39\"],\r\n },\r\n }\r\n\r\n return [data_chart_figure]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run_server(host=\"0.0.0.0\", port=\"80\", debug=True)","repo_name":"AntonSangho/yangsong-Arduino-Rasberrypi-Data","sub_path":"python/yangsong2/dash_test.py","file_name":"dash_test.py","file_ext":"py","file_size_in_byte":7641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24425992189","text":"from pyspark.sql import SparkSession\nfrom pyspark import SparkContext\nfrom pyspark.sql.functions import col, explode\nfrom pyspark.ml.tuning import CrossValidator\n\nfrom model import Model, Tune, Utils\n\nspark = SparkSession.builder.appName('Recommendation').getOrCreate()\nsc = SparkContext\n\nmovies = spark.read.csv('data/movies.csv', header=True)\nratings = spark.read.csv('data/ratings.csv', header=True)\n\n# ratings.show()\n\n#ratings.printSchema()\n\nratings = ratings.withColumn('userId', col('userId').cast('integer')).\\\n withColumn('movieId', col('movieId').cast('integer')).\\\n withColumn('rating', col('rating').cast('float')).\\\n drop('timestamp')\n\n# ratings.show()\n\n\nsparsity = Utils.sparsity(ratings)\n\n# type(als)\n\nmodel = Model(ratings)\n\ntrain, test, als = model.build_model()\n\n\nranks = [10, 50, 100, 150]\nregparams = [.01, .05, .1, .15]\n\nevaluator, param_grid = Tune.tune(als, ranks, regparams, 'rmse', 'ratings', 'prediction')\n\ncv = CrossValidator(estimator=als,\n estimatorParamMaps=param_grid,\n evaluator=evaluator,\n numFolds=5)\n\nprint(cv)\n\nmodel = cv.fit(train)\n\n\nbest_model = Model.best_model(model)\n\nprediction = Model.predict(best_model, test, evaluator)\n\nModel.save_model(best_model)\n\nn_recommendations = best_model.recommendForAllUsers(10)\nn_recommendations.limit(10).show()\n\nn_recommendations = 
n_recommendations.withColumn('rec_exp', explode('recommendations')) \\\n    .select('userId', col('rec_exp.movieId'), col(\"rec_exp.rating\"))\n\nn_recommendations.limit(10).show()\n\nn_recommendations.join(movies, on='movieId').filter('userId = 100').show()\n\nratings.join(movies, on='movieId').filter('userId = 100').sort('rating', ascending=False).limit(10).show()","repo_name":"ChamathKB/Recommendation-Engine","sub_path":"model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73171477923","text":"\n'''def calc():\n\n    print(\"5 + 5 = 10\\\\n\\\\n\")\n\nwhile True:\n\n    answer = input(\"Do you want to continue [Y/N] \")\n\n    if answer == 'Y' or answer == 'y':\n        print(\"I am going to continue\")\n        calc()\n        \n    elif answer == 'N' or answer == 'n':\n        print (\"I am going to exit.\")\n        break\n    else:\n        print(\"Invalid input. Try again.\")\n        continue'''\n\n\ndef validate_input(given_num1, given_num2):\n    while True:\n        if given_num1.isnumeric():\n            return True, float(given_num1)\n        else:\n            print(\"The given input is of type \", type(given_num1))\n            # re-prompt so the loop can eventually terminate\n            given_num1 = input(\"\\nPlease enter a valid Float/Integer: \")\n    ","repo_name":"Ishwarya-arch/basic_calculator","sub_path":"while_loop2.py","file_name":"while_loop2.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32570771631","text":"import os\nimport subprocess\nfrom pathlib import Path\nfrom unittest.mock import AsyncMock, MagicMock, ANY\n\nimport pytest\nfrom PySide6.QtWidgets import QDialog\n\nfrom randovania.gui.main_window import MainWindow\nfrom randovania.interface_common.options import Options\nfrom randovania.interface_common.preset_manager import PresetManager\nfrom randovania.layout.generator_parameters import GeneratorParameters\nfrom randovania.layout.permalink import Permalink\n\n\ndef create_window(options: Options | MagicMock,\n                  preset_manager: PresetManager) -> MainWindow:\n    return MainWindow(options, preset_manager, MagicMock(), False)\n\n\n@pytest.fixture(name=\"default_main_window\")\ndef _default_main_window(skip_qtbot, preset_manager, mocker) -> MainWindow:\n    mocker.patch(\"randovania.gui.lib.theme.set_dark_theme\")\n    window = create_window(Options(MagicMock()), preset_manager)\n    skip_qtbot.addWidget(window)\n    return window\n\n\ndef test_drop_random_event(default_main_window: MainWindow,\n                           ):\n    # Creating a window should not fail\n    pass\n\n\n@pytest.mark.parametrize([\"url\", \"should_accept\"], [\n    (\"something/game.iso\", False),\n    (\"other/game.rdvgame\", True),\n    (\"boss/custom.rdvpreset\", True),\n])\ndef test_dragEnterEvent(default_main_window: MainWindow, url, should_accept):\n    mock_url = MagicMock()\n    mock_url.toLocalFile.return_value = url\n    event = MagicMock()\n    event.mimeData.return_value.urls.return_value = [mock_url]\n\n    # Run\n    default_main_window.dragEnterEvent(event)\n\n    # Assert\n    if should_accept:\n        event.acceptProposedAction.assert_called_once_with()\n    else:\n        event.acceptProposedAction.assert_not_called()\n\n\ndef test_drop_event_layout(default_main_window, mocker):\n    mock_url = MagicMock()\n    mock_url.toLocalFile.return_value = \"/my/path.rdvgame\"\n\n    event = MagicMock()\n    event.mimeData.return_value.urls.return_value = [mock_url]\n    mock_from_file: MagicMock = mocker.patch(\"randovania.layout.layout_description.LayoutDescription.from_file\")\n\n    default_main_window.open_game_details = 
MagicMock()\n\n    # Run\n    default_main_window.dropEvent(event)\n\n    # Assert\n    mock_from_file.assert_called_once_with(Path(\"/my/path.rdvgame\"))\n    default_main_window.open_game_details.assert_called_once_with(mock_from_file.return_value)\n\n\nasync def test_drop_event_preset(default_main_window):\n    await default_main_window._initialize_post_show_body()\n\n    mock_url = MagicMock()\n    mock_url.toLocalFile.return_value = \"/my/path.rdvpreset\"\n    event = MagicMock()\n    event.mimeData.return_value.urls.return_value = [mock_url]\n    default_main_window.generate_seed_tab.import_preset_file = MagicMock()\n\n    # Run\n    default_main_window.dropEvent(event)\n\n    # Assert\n    default_main_window.generate_seed_tab.import_preset_file.assert_called_once_with(Path(\"/my/path.rdvpreset\"))\n    assert default_main_window.main_tab_widget.currentWidget() == default_main_window.tab_create_seed\n\n\nasync def test_browse_racetime(default_main_window, mocker):\n    mock_new_dialog = mocker.patch(\"randovania.gui.dialog.racetime_browser_dialog.RacetimeBrowserDialog\")\n    mock_execute_dialog = mocker.patch(\"randovania.gui.lib.async_dialog.execute_dialog\", new_callable=AsyncMock,\n                                       return_value=QDialog.Accepted)\n    dialog = mock_new_dialog.return_value\n    dialog.refresh = AsyncMock(return_value=True)\n    default_main_window.generate_seed_from_permalink = AsyncMock()\n\n    # Run\n    await default_main_window._browse_racetime()\n\n    # Assert\n    mock_new_dialog.assert_called_once_with()\n    dialog.refresh.assert_awaited_once_with()\n    mock_execute_dialog.assert_awaited_once_with(dialog)\n    default_main_window.generate_seed_from_permalink.assert_awaited_once_with(dialog.permalink)\n\n\nasync def test_generate_seed_from_permalink(default_main_window, mocker):\n    permalink = MagicMock(spec=Permalink)\n    permalink.seed_hash = None\n    permalink.parameters = MagicMock(spec=GeneratorParameters)\n    mock_generate_layout: MagicMock = mocker.patch(\"randovania.interface_common.simplified_patcher.generate_layout\",\n                                                   autospec=True)\n    default_main_window.open_game_details = MagicMock()\n    mock_open_for_background_task = mocker.patch(\n        \"randovania.gui.dialog.background_process_dialog.BackgroundProcessDialog.open_for_background_task\",\n        new_callable=AsyncMock,\n        side_effect=lambda a, b: a(MagicMock())\n    )\n\n    # Run\n    await default_main_window.generate_seed_from_permalink(permalink)\n\n    # Assert\n    mock_open_for_background_task.assert_awaited_once()\n    mock_generate_layout.assert_called_once_with(progress_update=ANY,\n                                                 parameters=permalink.parameters,\n                                                 options=default_main_window._options)\n    default_main_window.open_game_details.assert_called_once_with(mock_generate_layout.return_value)\n\n\n@pytest.mark.parametrize(\"os_type\", [\"Windows\", \"Darwin\", \"Linux\"])\n@pytest.mark.parametrize(\"throw_exception\", [True, False])\ndef test_on_menu_action_previously_generated_games(default_main_window, mocker, os_type, throw_exception, monkeypatch):\n    mock_start_file = MagicMock()\n    mock_subprocess_run = MagicMock()\n    monkeypatch.setattr(os, \"startfile\", mock_start_file, raising=False)\n    monkeypatch.setattr(subprocess, \"run\", mock_subprocess_run, raising=False)\n    mocker.patch(\"platform.system\", return_value=os_type)\n    mock_message_box = mocker.patch(\"PySide6.QtWidgets.QMessageBox\")\n\n    # Run\n    if throw_exception:\n        if os_type == \"Windows\":\n            mock_start_file.side_effect = OSError()\n        else:\n            mock_subprocess_run.side_effect = OSError()\n\n    default_main_window._on_menu_action_previously_generated_games()\n\n    # Assert\n    if throw_exception:\n        
mock_message_box.return_value.show.assert_called_once()\n    else:\n        if os_type == \"Windows\":\n            mock_start_file.assert_called_once()\n            mock_message_box.return_value.show.assert_not_called()\n        else:\n            mock_subprocess_run.assert_called_once()\n            mock_message_box.return_value.show.assert_not_called()\n","repo_name":"vgm5/randovania","sub_path":"test/gui/test_main_window.py","file_name":"test_main_window.py","file_ext":"py","file_size_in_byte":6222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"6461454088","text":"# Plot the histogram\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimg = cv2.imread('/home/polya/mine/picture/test3.jpg',1)\nimg=cv2.resize(img,None,fx=0.4,fy=0.4,interpolation=cv2.INTER_AREA)\n\n# # # Method 1: display using matplotlib\n# plt.hist(img.ravel(),256,[0,256]);\n# plt.show()\n\n# # Method 2: show the gray levels of the three channels separately\n# color = ('b','g','r')\n# # When iterating a list or array where both the index and the element are needed,\n# # the built-in enumerate function is the more direct, elegant approach.\n# #enumerate wraps the array or list into an indexed sequence,\n# # making it easier to get the index and its content at the same time\n# Use a for loop to display the three colors\n# for i,col in enumerate(color):\n#     histr = cv2.calcHist([img],[i],None,[256],[0,256])\n#     plt.plot(histr,color = col)\n#     plt.xlim([0,256])\n# plt.show()\n\n\n# Use a mask\ncolor = ('b','g','r')\nmask = np.zeros(img.shape[:2], np.uint8)\nmask[100:300, 100:400] = 255\nmasked_img = cv2.bitwise_and(img,img,mask = mask)\n# Calculate histogram with mask and without mask\n# Check third argument for mask\nplt.subplot(221), plt.imshow(img, 'gray')\nplt.subplot(222), plt.imshow(mask, 'gray')\nplt.subplot(223), plt.imshow(masked_img, 'gray')\n# Compare the difference between using the mask and not using it\nhist_full = cv2.calcHist([img],[0],None,[256],[0,256])\nhist_mask = cv2.calcHist([img],[0],mask,[256],[0,256])\nplt.subplot(224),plt.plot(hist_full), plt.plot(hist_mask)\n#plt.xlim([0,256])\nplt.show()\n\ncv2.waitKey()\ncv2.destroyAllWindows()","repo_name":"polya-xue/Opencv_image_processing","sub_path":"B1histogram_paint/B1histogram_paint.py","file_name":"B1histogram_paint.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27796234070","text":"import sys\nimport random\nimport pygame\n\n# initialize the pygame module\npygame.init()\n\n# set width and height\nwidth = 600\nheight = 600\n\n# initialize the screen\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption('Snake Game by DB')\n\n# initialize the speed\nclock = pygame.time.Clock()\nspeed = 15\n\n# the colors\nwhite = (255, 255, 255)\nyellow = (255, 255, 102)\nblack = (0, 0, 0)\nred = (213, 50, 80)\ngreen = (0, 255, 0)\nblue = (50, 153, 213)\n\n\n# define the main function\ndef Main():\n    # the snake parameters\n    snake_x = width / 2\n    snake_y = height / 2\n    snake_x_change = 0\n    snake_y_change = 0\n    snake_list = [[snake_x, snake_y]]\n    snake_len = len(snake_list)\n    snake_block = 10\n    snake = Snake()\n\n    # initialize the food\n    food_x = round(random.randrange(0, width - snake_block) / 10.0) * 10.0\n    food_y = round(random.randrange(0, height - snake_block) / 10.0) * 10.0\n\n    # initialize the game state\n    game_state = True\n\n    # initialize the direction\n    direction = None\n\n    # score\n    score = 0\n\n    while True:\n        while game_state:\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    sys.exit()\n                if event.type == pygame.KEYDOWN:\n                    if (event.key == pygame.K_LEFT) & (direction != \"right\"):\n                        snake_x_change = - 10\n                        snake_y_change = 0\n                        direction = \"left\"\n                    elif (event.key == pygame.K_RIGHT) & (direction != \"left\"):\n                        snake_x_change = 10\n                        snake_y_change = 0\n                        
direction = \"right\"\n elif (event.key == pygame.K_UP) & (direction != \"down\"):\n snake_x_change = 0\n snake_y_change = -10\n direction = \"up\"\n elif (event.key == pygame.K_DOWN) & (direction != \"up\"):\n snake_x_change = 0\n snake_y_change = 10\n direction = \"down\"\n\n snake_x += snake_x_change\n snake_y += snake_y_change\n snake_list.append(list((snake_x, snake_y)))\n if len(snake_list) > snake_len:\n del snake_list[0]\n\n if snake_x >= width or snake_x < 0 or snake_y >= height or snake_y < 0:\n game_state = False\n\n if [snake_x, snake_y] in snake_list[:-1]:\n game_state = False\n screen.fill(black)\n snake.display(snake_list, snake_block)\n pygame.draw.rect(screen, red, [food_x, food_y, snake_block, snake_block])\n text_display(score)\n pygame.display.update()\n if snake_x == food_x and snake_y == food_y:\n food_x = round(random.randrange(0, width - snake_block) / 10.0) * 10.0\n food_y = round(random.randrange(0, height - snake_block) / 10.0) * 10.0\n snake_len += 1\n score += 1\n clock.tick(speed)\n print(score)\n sys.exit()\n\n\ndef text_display(x):\n pygame.font.init()\n my_font = pygame.font.Font(None, 30)\n text_image = my_font.render(\"Score:{}\".format(x), True, white)\n screen.blit(text_image, (20, 20))\n\n\nclass Snake(object):\n def __init__(self):\n # the snake parameter\n pass\n\n def display(self, snake_list_1, snake_block_1):\n for i in snake_list_1:\n pygame.draw.rect(screen, blue, [i[0], i[1], snake_block_1, snake_block_1])\n\n\nif __name__ == '__main__':\n Main()\n","repo_name":"fengluodb/a-simple-snake-game","sub_path":"snake1.py","file_name":"snake1.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26151183465","text":"import requests\n\nfrom flask import current_app, flash, session\n\nfrom gerritclient import client\nfrom gerritclient import error as client_error\n\n\ndef get_gerrit_url():\n return session.get('gerrit_url') or current_app.config.get('GERRIT_URL')\n\n\ndef get_connection():\n return client.connect(get_gerrit_url(),\n auth_type=session.get('auth_type'),\n username=session.get('username'),\n password=session.get('password'))\n\n\ndef get_version(url=None):\n version = None\n gerrit_url = url or get_gerrit_url()\n try:\n version = client.get_client(\n 'server',\n connection=client.connect(gerrit_url)\n ).get_version()\n except (requests.ConnectionError, client_error.HTTPError) as error:\n current_app.logger.error(error)\n flash(\"Can't establish connection with Gerrit server at '{0}'. 
\"\n \"See logs for more details\".format(gerrit_url), category='error')\n return version\n","repo_name":"tivaliy/gerrit-quick-viewer","sub_path":"gerritviewer/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42077573130","text":"import cv2\nimport numpy as np\nimport os\n\n# path to the text file containing paths to the positive samples\n# positive_samples_file = './annotations.txt'\npositive_samples_file = './data/positive.txt'\n\n# path to the directory containing negative samples\nnegative_samples_dir = './data/negative/'\n\n# output path for the .vec file\noutput_file = 'samples.vec'\n\n# size of the sample images\nsample_size = (50, 50)\n\n# read the paths of positive samples from the text file\nwith open(positive_samples_file, 'r') as f:\n positive_samples_paths = f.readlines()\n\n# remove any leading or trailing whitespace characters from the paths\n# positive_samples_paths = [path.strip() for path in positive_samples_paths]\n\n# initialize an empty list to store the positive samples\npositive_samples = []\n\n# read and resize each positive sample image and append it to the list\nfor path in positive_samples_paths:\n image_path, xml_path = path.strip().split()\n # print(image_path)\n image_name = os.path.basename(image_path)\n # print(image_name)\n img = cv2.imread(os.path.join('./data/positive/', image_name))\n img_resized = cv2.resize(img, sample_size)\n positive_samples.append(img_resized)\n\n# create an array to store the positive samples\npositive_samples_array = np.array(positive_samples)\n\n# create an array to store the labels for the positive samples (1 for positive)\npositive_labels = np.ones(positive_samples_array.shape[0], np.int32)\n\n# create an array to store the negative samples\nnegative_samples = []\n\n# read and resize each negative sample image and append it to the list\nfor filename in os.listdir(negative_samples_dir):\n if filename.endswith('.jpg'):\n img = cv2.imread(os.path.join(negative_samples_dir, filename))\n img_resized = cv2.resize(img, sample_size)\n negative_samples.append(img_resized)\n\n# create an array to store the negative samples\nnegative_samples_array = np.array(negative_samples)\n\n# create an array to store the labels for the negative samples (0 for negative)\nnegative_labels = np.zeros(negative_samples_array.shape[0], np.int32)\n\n# concatenate the positive and negative samples and labels\nsamples_array = np.concatenate((positive_samples_array, negative_samples_array), axis=0)\nlabels = np.concatenate((positive_labels, negative_labels), axis=0)\n\n# create the .vec file using the cv2.imwrite function\nwith open(output_file, 'wb') as f:\n f.write(np.array([0, 0, 0, 0], np.int32).tobytes())\n f.write(np.array([len(samples_array)], np.int32).tobytes())\n for i in range(len(samples_array)):\n img = samples_array[i]\n img_bytes = img.tobytes()\n label_bytes = np.array([labels[i]], np.int32).tobytes()\n f.write(label_bytes)\n f.write(np.array([sample_size[1], sample_size[0]], np.int32).tobytes())\n f.write(img_bytes)\n","repo_name":"Pravat-Lama/Nepali-Number-Plate-Recognition-System","sub_path":"src/create_vec.py","file_name":"create_vec.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14010215417","text":"#!python3\n# __init__.py\n\n# Corban Swain , 2020\n\nimport time\nimport numpy as np\nimport pandas 
as pd\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom scipy.signal import peak_widths, find_peaks\nfrom matplotlib.gridspec import GridSpec\nimport c_swain_python_utils as csutils\n\n\ndef gen_info_plot(x, y, wavelength, power):\n    peak_idx = np.argmax(y)\n    peak_val = y[peak_idx]\n    y_norm = y / peak_val\n\n    width_result = peak_widths(y_norm, [peak_idx])\n    peak_width_height = width_result[1][0]\n    peak_left = np.interp(width_result[2], np.arange(x.size), x)[0]\n    peak_right = np.interp(width_result[3], np.arange(x.size), x)[0]\n    peak_width = abs(peak_right - peak_left)\n    peak_wavelength = np.average([peak_left, peak_right])\n    peak_delta = peak_wavelength - wavelength\n\n    window_size = 50\n    upper_lim = wavelength + window_size / 2\n    lower_lim = wavelength - window_size / 2\n\n    fig = plt.figure(figsize=(7, 4))\n    ax = fig.add_subplot(position=(0.03, 0.18, 0.57, 0.77))\n    ax.plot(x, y_norm, lw=2.5, color='C3')\n    ax.set_ylim([-0.02, 1.02])\n    ax_lw = 1.5\n    sns.despine(ax=ax,\n                top=True, right=True, left=True, bottom=False,\n                offset=ax_lw * 0)\n    ax.spines['bottom'].set_linewidth(ax_lw)\n    ax.set_yticks([])\n    ax.set_xticks(np.arange(650, 1100, 10))\n    ax.set_xticks(np.arange(650, 1100, 2), minor=True)\n    ax.set_xlim([lower_lim, upper_lim])\n    ax.set_xlabel('$\\\\lambda_{measured}$, nm')\n    ax.minorticks_on()\n    ax.grid(True, 'major', 'x', lw=ax_lw)\n    ax.grid(True, 'minor', 'x', lw=ax_lw / 2, ls=':')\n    ax.tick_params(axis='x',\n                   bottom=True,\n                   direction='out',\n                   width=ax_lw,\n                   length=8)\n    ax.annotate('', (peak_wavelength, 1.03), (peak_wavelength, 1.03 + 1e-3),\n                arrowprops=dict(headwidth=7,\n                                headlength=4,\n                                lw=ax_lw,\n                                color='C3'),\n                annotation_clip=False)\n    ax.annotate('', (peak_right, 0.5), (peak_right + window_size / 15, 0.5),\n                arrowprops=dict(arrowstyle='->', lw=ax_lw))\n    ax.annotate(f'{peak_width:.1f} nm',\n                (peak_left, 0.5), (peak_left - window_size / 15, 0.5),\n                arrowprops=dict(arrowstyle='->', lw=ax_lw),\n                va='center',\n                ha='right',\n                backgroundcolor='w')\n\n    ax.annotate(f'Expected Wavelength: {wavelength:6.1f} nm\\\\n'\n                f'Peak Wavelength: {peak_wavelength:6.1f} nm\\\\n'\n                f'Peak Delta: {peak_delta:6.1f} nm\\\\n'\n                f'Peak FWHM: {peak_width:6.1f} nm\\\\n'\n                f'Measured Max Power: {power:6.1f} mW',\n                xy=(0.95, 0.95),\n                xycoords='figure fraction',\n                fontfamily='Input',\n                va='top',\n                ha='right')\n\n    return peak_delta, peak_width\n\n\n\ndef main():\n    data_dir = 'data'\n    specta_filename = 'spectra_set_combined.csv'\n    power_filename = '201028_power_through_fiber.csv'\n    power_norm_filename = '201028_power_at laser.csv'\n\n    spectra_df = pd.read_csv(os.path.join(data_dir, specta_filename),\n                             index_col=0)\n\n    power_df = pd.read_csv(os.path.join(data_dir, power_filename),\n                           index_col=0,\n                           header=None,\n                           names=['wavelength', 'power']) * 1e3\n\n    power_norm_df = pd.read_csv(os.path.join(data_dir, power_norm_filename),\n                                index_col=0,\n                                header=None,\n                                names=['wavelength', 'power']) * 1e3\n\n    correction_df = (power_norm_df / power_df).dropna()\n    correction_x = correction_df.index.values\n    correction_y = correction_df.values.flatten()\n\n    power_x = power_df.index.values\n\n    correction_y_full = np.interp(power_x, correction_x, correction_y)\n    power = power_df.values.flatten() * correction_y_full\n    power_dict = dict(zip(power_x, power))\n\n    spectra_x = spectra_df.index.values\n    spectra_y_names = spectra_df.columns.values.astype(int)\n    spectra_y = spectra_df.values\n\n    batches = []\n    for i in range(spectra_y_names.size):\n        batches.append(dict(x=spectra_x,\n                            y=spectra_y[:, i],\n                            
wavelength=spectra_y_names[i],\n power=power_dict[spectra_y_names[i]]))\n\n fig = plt.figure(num=1, figsize=(7, 4))\n\n results = [gen_info_plot(**b) for b in batches]\n peak_deltas, _peak_widths = tuple(np.array(x) for x in zip(*results))\n\n\n gs = GridSpec(3, 1, figure=fig,\n bottom=0.17,\n top=0.965,\n right=0.95,\n left=0.1,\n hspace=0.26)\n\n ax1 = fig.add_subplot(gs[0])\n ax1.plot(spectra_y_names, peak_deltas, color='black', lw=1.5, marker='.',\n clip_on=False)\n ax1.fill_between(spectra_y_names, peak_deltas, 0, where=peak_deltas >= 0,\n fc='C0', interpolate=True, alpha=0.7)\n ax1.fill_between(spectra_y_names, peak_deltas, 0, where=peak_deltas <= 0,\n fc='C3', interpolate=True, alpha=0.7)\n ax1.set_yticks([-10, 0])\n ax1.set_ylim([-12, 2])\n ax1.set_ylabel(r'$\\Delta\\lambda_{peak}$, nm')\n\n ax2 = fig.add_subplot(gs[1])\n ax2.plot(spectra_y_names, _peak_widths, color='black', lw=1.5, marker='.',\n clip_on=False)\n ax2.fill_between(spectra_y_names, _peak_widths, 0, where=_peak_widths >= 0,\n fc='black', interpolate=True, alpha=0.2)\n ax2.set_yticks([0, 6, 12])\n ax2.set_ylim([0, 12])\n ax2.set_ylabel('FWHM, nm')\n\n ax3 = fig.add_subplot(gs[2])\n ax3.plot(power_x, power / 1000, color='black', lw=1.5, marker='.')\n ax3.plot(power_x[-1], power[-1] / 1000, color='black', lw=1.5, marker='.',\n clip_on=False)\n ax3.fill_between(power_x, power / 1000, 0, where=power >= 0,\n fc='black', interpolate=True, alpha=0.2)\n ax3.set_yticks([0, 0.8, 1.6])\n ax3.set_ylim([8e-2, 2])\n ax3.set_yscale('log')\n ax3.set_xlabel('Expected $\\lambda_{peak}$, nm')\n ax3.set_ylabel('Max Power, W')\n ax_lw = 1\n\n axs = [ax1, ax2, ax3]\n\n for i, ax in enumerate(axs):\n sns.despine(ax=ax,\n top=True, right=True, left=False, bottom=False,\n offset=ax_lw * 0)\n ax.spines['bottom'].set_linewidth(ax_lw)\n ax.set_xticks(np.arange(690, 1045, 50))\n ax.set_xticks(np.arange(690, 1045, 10), minor=True)\n ax.minorticks_on()\n ax.grid(True, 'major', 'both', lw=ax_lw, zorder=0)\n ax.grid(True, 'minor', 'both', lw=ax_lw / 2, ls=':', zorder=0)\n ax.tick_params(axis='x',\n bottom=True,\n direction='out',\n width=ax_lw,\n length=5,\n labelbottom=i == 2)\n\n ax.set_xlim([690, 1040.1])\n\n fig.align_ylabels(axs)\n\n csutils.save_figures(\n '201028_mai_tia_laser_measurements_cswain',\n add_filename_timestamp=False,\n stamp_kwargs=dict(stamp_str='MaiTai Measurements from 20.10.28 | Fig. 
'\n                                  '#%n | Generated on %d by Corban S.',\n                          fontfamily='Input',\n                          fontstyle='italic'))\n\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()","repo_name":"CorbanSwain/Laser-Spec-Plots","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21508399358","text":"from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError\nfrom django.db import IntegrityError\nfrom django.core.paginator import Paginator, EmptyPage\nfrom django.http import HttpRequest, HttpResponse\nfrom http import HTTPStatus\nfrom .jsonUtils import CustomJSONEncoder\nfrom ..simulation.Core import SimulationException\nfrom ..bmgtModels import BMGTTransaction\n\nimport regex as re\nimport json\n\n\n__BATCH_QUERY_SIZE = 40\n\n\ndef get_batch_size(listObj):\n    return min(len(listObj), __BATCH_QUERY_SIZE)\n\n\ndef request_error_handler(func):\n    \"\"\"\n    API level exception handling\n    \"\"\"\n\n    def wrapped(request, *args, **kwargs) -> HttpResponse:\n        try:\n            return func(request, *args, **kwargs)\n\n        except json.JSONDecodeError as e:\n            resp = AppResponse()\n            resp.reject(e.args[0])\n\n        except ObjectDoesNotExist as e:\n            resp = AppResponse()\n            resp.reject(e.args[0])\n\n        except MultipleObjectsReturned as e:\n            resp = AppResponse()\n            resp.reject(e.args[0])\n\n        except KeyError as e:\n            resp = AppResponse()\n            resp.reject(f'Key missing: {e.args[0]}')\n\n        except IntegrityError as e:\n            resp = AppResponse()\n            resp.reject(e.args[0])\n\n        except ValidationError as e:\n            resp = AppResponse()\n            resp.reject(e.args[0])\n\n        except NotImplementedError as e:\n            resp = AppResponse()\n            resp.reject(e.args[0])\n\n        except SimulationException as e:\n            resp = AppResponse()\n            resp.reject(e.args[0])\n\n        except ValueError as e:\n            resp = AppResponse()\n            resp.reject(e.args[0])\n        \n        except Exception as e:\n            # resp = HttpResponse()\n            # resp.reject(e.args[0])\n            # resp.status_code = HTTPStatus.INTERNAL_SERVER_ERROR\n\n            # if settings.DEBUG:\n            raise\n            # else:\n            #     resp = HttpResponse()\n            #     resp.status_code = HTTPStatus.INTERNAL_SERVER_ERROR\n            #     resp.write(\"Internal server error!\")\n            \n        return resp\n\n    return wrapped\n\n\ndef password_valid(password: str) -> bool:\n    \"\"\"\n    password strength validation\n    \"\"\"\n\n    leng_valid = len(password) >= 8 and len(password) <= 20\n    has_char = bool(re.search(pattern=r'\\\\w', string=password))\n    has_num = bool(re.search(pattern=r'\\\\d', string=password))\n    return leng_valid and has_char and has_num\n\n\ndef create_pager_params(page: int, size: int, asc: int, order: str) -> dict:\n    \"\"\"\n    convert get parameters to pagination parameters for paginated query\n    \"\"\"\n\n    params = {}\n    params['page'] = page\n    params['size'] = size\n    params['asc'] = asc\n    params['order'] = order\n    return params\n\n\ndef pager_params_from_request(request: HttpRequest) -> dict:\n    \"\"\"\n    convert get parameters to pagination parameters for paginated query\n    \"\"\"\n\n    params = {}\n    params['page'] = request.GET.get('page', None)\n    params['size'] = request.GET.get('size', None)\n    params['asc'] = request.GET.get('asc', '1')\n    params['order'] = request.GET.get('order', 'id')\n    if not params['page'] or not params['size']:\n        raise KeyError(\"missing pagination parameters\")\n    params['page'] = int(params['page'])\n    params['size'] = int(params['size'])\n    # cast to int so that asc='0' is falsy and yields a descending order\n    params['asc'] = int(params['asc'])\n    if not params['size'] > 0:\n        raise ValueError(\"invalid page size\")\n    return params\n\n\ndef generic_paginated_query(dbModel, 
pager_params, **kwargs) -> HttpResponse:\n \"\"\"\n generic paginated query on one table\n pass in a model class and a request object \n kwargs: filter conditions\n \"\"\"\n try:\n resp = AppResponse()\n\n obj_set = dbModel.objects.filter(**kwargs)\n obj_set = obj_set.order_by(\n pager_params['order'] if pager_params['asc'] else '-'+pager_params['order'])\n pager = Paginator(obj_set, pager_params['size'])\n page = pager_params['page']\n\n if page > pager.num_pages or page < 1:\n resp.reject(\"Page not found!\")\n else:\n resp.resolve({\n \"page\": page,\n \"totalPage\": pager.num_pages,\n \"data\":pager.page(page).object_list,\n })\n \n except EmptyPage:\n resp.reject(\"Page empty!\")\n except KeyError:\n resp.reject(\"Missing pagination parameters!\")\n\n return resp\n\n\ndef __log_event(request: HttpRequest, status_code: int):\n try:\n user = request.user or None\n ip = request.META.get('REMOTE_ADDR')[:BMGTTransaction.IP_MAX_LENGTH]\n device = request.META.get('HTTP_USER_AGENT')[:BMGTTransaction.DEVICE_MAX_LENGTH]\n path = request.path\n met = request.method\n new_record = BMGTTransaction(user = user, ip = ip, device= device, method=met, path=path, status_code=status_code)\n new_record.save()\n except Exception as e:\n raise e\n \n\ndef logger(func):\n def wrapper(request: HttpRequest, **kwargs):\n try:\n response = func(request, **kwargs)\n status_code = response.status_code\n __log_event(request, status_code)\n return response\n except Exception as e:\n raise e\n return wrapper\n\n\nclass AppResponse(HttpResponse):\n def __init__(self, status: int = HTTPStatus.OK, reject = None, resolve= None) -> None:\n super().__init__(status=status)\n if reject and resolve:\n raise ValueError(\"reject and resolve cannot be both non-null\")\n \n if reject:\n self.reject(reject)\n elif resolve:\n self.resolve(resolve)\n\n def reject(self, errorMsg: str):\n self.flush()\n self.write(json.dumps({\n 'errorMsg': errorMsg,\n }, cls=CustomJSONEncoder\n ))\n\n def resolve(self, data):\n self.flush()\n self.write(json.dumps({\n 'data': data\n }, cls=CustomJSONEncoder\n ))","repo_name":"Chacoon3/sim_server_django","sub_path":"bmgt435_elp/utils/apiUtils.py","file_name":"apiUtils.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"880941359","text":"import json\nimport re\nfrom configparser import ConfigParser\n\nfrom unittest import mock\n\nfrom lxml import html\nimport pytest\nimport flask\nfrom sqlalchemy import desc, and_\n\n\nfrom copr_common.enums import ActionTypeEnum, ActionPriorityEnum\nfrom coprs import app, cache, models\n\nfrom coprs.helpers import generate_repo_name\nfrom coprs.logic.coprs_logic import CoprsLogic, CoprDirsLogic\nfrom coprs.logic.actions_logic import ActionsLogic\n\nfrom commands.create_chroot import create_chroot_function\n\nfrom tests.coprs_test_case import CoprsTestCase, TransactionDecorator\nfrom tests.request_test_api import parse_web_form_error\n\n\nclass TestMonitor(CoprsTestCase):\n\n @pytest.mark.usefixtures(\"f_db\", \"f_users\", \"f_mock_chroots\", \"f_db\")\n def test_regression_monitor_no_copr_returned(self):\n # https://bugzilla.redhat.com/show_bug.cgi?id=1165284\n copr_name = u\"temp\"\n\n # trying to get monitor page for non-existing project\n url_monitor = \"/coprs/{}/{}/monitor/\".format(self.u1.name, copr_name)\n\n res = self.tc.get(url_monitor)\n assert res.status_code == 404\n\n # https://github.com/PyCQA/pylint/issues/3793\n # pylint: 
disable=assigning-non-slot\n flask.g.user = self.u1\n tmp_copr = CoprsLogic.add(\n self.u1, name=copr_name,\n selected_chroots=[\"fedora-rawhide-i386\"],\n )\n self.db.session.commit()\n\n res = self.tc.get(url_monitor)\n assert res.status_code == 200\n\n CoprsLogic.delete_unsafe(self.u1, tmp_copr)\n self.db.session.commit()\n\n res = self.tc.get(url_monitor)\n assert res.status_code == 404\n\n\nclass TestCoprsShow(CoprsTestCase):\n\n def test_show_no_entries(self):\n assert b\"No projects...\" in self.tc.get(\"/\").data\n\n def test_show_more_entries(self, f_users, f_coprs, f_db):\n r = self.tc.get(\"/\")\n assert r.data.count(b'') == 3\n\n\nclass TestCoprsOwned(CoprsTestCase):\n\n @TransactionDecorator(\"u3\")\n def test_owned_none(self, f_users, f_coprs, f_db):\n self.db.session.add(self.u3)\n r = self.test_client.get(\"/coprs/{0}/\".format(self.u3.name))\n assert b\"No projects...\" in r.data\n\n @TransactionDecorator(\"u1\")\n def test_owned_one(self, f_users, f_coprs, f_db):\n self.db.session.add(self.u1)\n r = self.test_client.get(\"/coprs/{0}/\".format(self.u1.name))\n assert r.data.count(b'') == 1\n\n\nclass TestCoprNew(CoprsTestCase):\n success_string = \"New project has been created successfully.\"\n\n @TransactionDecorator(\"u1\")\n def test_copr_new_normal(self, f_users, f_mock_chroots, f_db):\n r = self.test_client.post(\n \"/coprs/{0}/new/\".format(self.u1.name),\n data={\"name\": \"foo\",\n \"chroots\": [\"fedora-rawhide-i386\"],\n \"arches\": [\"i386\"]},\n follow_redirects=True)\n\n assert self.models.Copr.query\\\n .order_by(desc(models.Copr.created_on))\\\n .filter(self.models.Copr.name == \"foo\").first()\n assert self.success_string.encode(\"utf-8\") in r.data\n\n # make sure no initial build was submitted\n assert self.models.Build.query.first() is None\n # one createrepo action generated\n actions = ActionsLogic.get_many().all()\n assert len(actions) == 2\n for action in actions:\n if action.action_type == ActionTypeEnum(\"createrepo\"):\n assert json.loads(actions[0].data)[\"devel\"] is False\n\n @TransactionDecorator(\"u1\")\n @pytest.mark.usefixtures(\"f_users\", \"f_mock_chroots\", \"f_db\")\n def test_copr_new_ACR_OFF(self):\n r = self.test_client.post(\n \"/coprs/{0}/new/\".format(self.u1.name),\n data={\n \"name\": \"foo\",\n \"chroots\": [\"fedora-rawhide-i386\"],\n \"arches\": [\"i386\"],\n \"disable_createrepo\": True,\n },\n follow_redirects=True)\n\n assert self.models.Copr.query\\\n .order_by(desc(models.Copr.created_on))\\\n .filter(self.models.Copr.name == \"foo\").first()\n assert self.success_string.encode(\"utf-8\") in r.data\n\n # make sure no initial build was submitted\n assert self.models.Build.query.first() is None\n\n actions = ActionsLogic.get_many().filter_by(action_type=3).order_by('id').all()\n assert {True, False} == {json.loads(action.data)[\"devel\"]\n for action in actions}\n\n @TransactionDecorator(\"u3\")\n def test_copr_new_exists_for_another_user(self, f_users, f_coprs,\n f_mock_chroots, f_db):\n\n self.db.session.add(self.c1)\n foocoprs = len(self.models.Copr.query\n .order_by(desc(models.Copr.created_on))\n .filter(self.models.Copr.name == self.c1.name).all())\n assert foocoprs > 0\n\n r = self.test_client.post(\n \"/coprs/{0}/new/\".format(self.u3.name),\n data={\"name\": self.c1.name,\n \"chroots\": [\"fedora-rawhide-i386\"]},\n follow_redirects=True)\n\n self.db.session.add(self.c1)\n\n assert len(self.models.Copr.query\n .order_by(desc(models.Copr.created_on))\n .filter(self.models.Copr.name == self.c1.name).all()) == 
foocoprs + 1\n assert self.success_string.encode(\"utf-8\") in r.data\n\n @TransactionDecorator(\"u1\")\n def test_copr_new_exists_for_this_user(self, f_users, f_coprs,\n f_mock_chroots, f_db):\n self.db.session.add(self.c1)\n foocoprs = len(self.models.Copr.query\n .order_by(desc(models.Copr.created_on))\n .filter(self.models.Copr.name == self.c1.name).all())\n assert foocoprs > 0\n\n r = self.test_client.post(\n \"/coprs/{0}/new/\".format(self.u1.name),\n data={\"name\": self.c1.name,\n \"fedora-rawhide-i386\": \"y\"},\n follow_redirects=True)\n\n self.db.session.add(self.c1)\n assert len(self.models.Copr.query\n .order_by(desc(models.Copr.created_on))\n .filter(self.models.Copr.name == self.c1.name).all()) == foocoprs\n assert b\"You already have a project named\" in r.data\n\n @TransactionDecorator(\"u1\")\n def test_copr_new_with_initial_pkgs(self, f_users, f_mock_chroots, f_db):\n r = self.test_client.post(\"/coprs/{0}/new/\".format(self.u1.name),\n data={\"name\": \"foo\",\n \"chroots\": [\"fedora-rawhide-i386\"],\n \"initial_pkgs\": [\"http://a/f.src.rpm\",\n \"http://a/b.src.rpm\"],\n \"build_enable_net\": True,\n },\n follow_redirects=True)\n\n copr = self.models.Copr.query\\\n .order_by(desc(models.Copr.created_on))\\\n .filter(self.models.Copr.name == \"foo\").first()\n assert copr\n assert self.success_string.encode(\"utf-8\") in r.data\n\n assert self.models.Build.query.first().copr == copr\n assert self.models.Build.query.first().enable_net is True\n assert copr.build_count == 1\n assert b\"Initial packages were successfully submitted\" in r.data\n\n\n @TransactionDecorator(\"u1\")\n def test_copr_new_with_initial_pkgs_disabled_net(self, f_users, f_mock_chroots, f_db):\n r = self.test_client.post(\"/coprs/{0}/new/\".format(self.u1.name),\n data={\"name\": \"foo\",\n \"chroots\": [\"fedora-rawhide-i386\"],\n \"initial_pkgs\": [\"http://a/f.src.rpm\",\n \"http://a/b.src.rpm\"],\n \"build_enable_net\": None\n },\n follow_redirects=True)\n\n copr = self.models.Copr.query\\\n .order_by(desc(models.Copr.created_on))\\\n .filter(self.models.Copr.name == \"foo\").first()\n assert copr\n assert self.success_string.encode(\"utf-8\") in r.data\n\n assert self.models.Build.query.first().copr == copr\n assert self.models.Build.query.first().enable_net is False\n assert copr.build_count == 1\n assert b\"Initial packages were successfully submitted\" in r.data\n\n @TransactionDecorator(\"u1\")\n def test_copr_new_is_allowed_even_if_deleted_has_same_name(\n self, f_users, f_coprs, f_mock_chroots, f_db):\n\n self.db.session.add(self.c1)\n self.db.session.add(self.c1_dir)\n self.c1.deleted = True\n self.c1.user = self.u1\n CoprDirsLogic.delete_all_by_copr(self.c1)\n self.db.session.commit()\n\n self.db.session.add(self.c1)\n r = self.test_client.post(\"/coprs/{0}/new/\".format(self.u1.name),\n data={\"name\": self.c1.name,\n \"chroots\": [\"fedora-rawhide-i386\"],\n \"arches\": [\"i386\"]},\n follow_redirects=True)\n\n self.c1 = self.db.session.merge(self.c1)\n self.u1 = self.db.session.merge(self.u1)\n assert len(self.models.Copr.query\n .order_by(desc(models.Copr.created_on))\n .filter(self.models.Copr.name == self.c1.name)\n .filter(self.models.Copr.user == self.u1)\n .all()) == 2\n assert self.success_string.encode(\"utf-8\") in r.data\n\n @TransactionDecorator(\"u1\")\n @pytest.mark.usefixtures(\"f_users\", \"f_mock_chroots\", \"f_db\")\n def test_copr_new_contains_isolation(self):\n r = self.test_client.post(\"/coprs/{0}/new/\".format(self.u1.name),\n data={\"name\": \"foo\",\n 
\"chroots\": [\"fedora-rawhide-i386\"],\n \"arches\": [\"i386\"],\n \"isolation\": \"simple\"},\n follow_redirects=True)\n assert r.status_code == 200\n copr = self.models.Copr.query \\\n .order_by(desc(models.Copr.created_on)) \\\n .filter(self.models.Copr.name == \"foo\").first()\n assert copr.isolation == \"simple\"\n\n\nclass TestCoprDetail(CoprsTestCase):\n\n def test_copr_detail_not_found(self):\n r = self.tc.get(\"/coprs/foo/bar/\")\n assert r.status_code == 404\n\n def test_copr_detail_normal(self, f_users, f_coprs, f_db):\n r = self.tc.get(\"/coprs/{0}/{1}/\".format(self.u1.name, self.c1.name))\n assert r.status_code == 200\n assert self.c1.name.encode(\"utf-8\") in r.data\n\n def test_copr_detail_contains_builds(self, f_users, f_coprs,\n f_mock_chroots, f_builds, f_db):\n r = self.tc.get(\n \"/coprs/{0}/{1}/builds/\".format(self.u1.name, self.c1.name))\n assert r.data.count(b'' not in r.data\n\n def test_copr_detail_contains_permissions_table(self, f_users, f_coprs,\n f_copr_permissions, f_db):\n\n r = self.tc.get(\n \"/coprs/{0}/{1}/permissions/\".format(self.u2.name, self.c3.name))\n assert b'' in r.data\n assert '{0}'.format(self.u3.name).encode(\"utf-8\") in r.data\n assert '{0}'.format(self.u1.name).encode(\"utf-8\") in r.data\n\n @TransactionDecorator(\"u2\")\n def test_detail_has_correct_permissions_form(self, f_users, f_coprs,\n f_copr_permissions, f_db):\n\n self.db.session.add_all([self.u2, self.c3])\n r = self.test_client.get(\n \"/coprs/{0}/{1}/permissions/\".format(self.u2.name, self.c3.name))\n\n assert r.data.count(b\"nothing\") == 2\n assert b'' in r.data\n\n def test_copr_detail_doesnt_show_cancel_build_for_anonymous(self, f_users, f_coprs, f_builds, f_db):\n r = self.tc.get(\n \"/coprs/{0}/{1}/build/{2}/\".format(self.u2.name, self.c2.name, self.c2.builds[0].id))\n assert b\"/cancel_build/\" not in r.data\n\n @TransactionDecorator(\"u1\")\n def test_copr_detail_doesnt_allow_non_submitter_to_cancel_build(\n self, f_users, f_coprs, f_mock_chroots, f_builds, f_db):\n self.u1.admin = False\n self.db.session.add_all([self.u1, self.u2, self.c2])\n r = self.test_client.get(\n \"/coprs/{0}/{1}/build/{2}/\".format(self.u2.name, self.c2.name, self.c2.builds[0].id))\n assert b\"/cancel_build/\" not in r.data\n\n @TransactionDecorator(\"u2\")\n def test_copr_detail_allows_submitter_to_cancel_build(\n self, f_users, f_coprs, f_mock_chroots, f_builds, f_db):\n\n self.db.session.add_all([self.u2, self.c2])\n build_id = self.c2.builds[0].id\n r = self.test_client.get(\n \"/coprs/{0}/{1}/build/{2}/\".format(self.u2.name, self.c2.name, build_id))\n\n # The button exists!\n assert b\"/cancel_build/\" in r.data\n\n # And now cancel the build.\n self.web_ui.cancel_build(self.c2.name, build_id)\n build = models.Build.query.get(build_id)\n assert build.state == \"canceled\"\n\n\n def test_codeblock_html_in_project_description(self, f_users, f_coprs):\n r = self.tc.get(\"/coprs/{0}/{1}/\".format(self.u1.name, self.c1.name))\n lines = ['
# code snippet',\n 'def foo():',\n ' bar()',\n ' return 1',\n '
',\n '
']\n removed_code = ['']\n\n generated_html = r.data.decode(\"utf-8\")\n for line in lines:\n assert line in generated_html\n for line in removed_code:\n assert line not in generated_html\n\n\nclass TestCoprEdit(CoprsTestCase):\n\n @TransactionDecorator(\"u1\")\n def test_edit_prefills_id(self, f_users, f_coprs, f_db):\n self.db.session.add_all([self.u1, self.c1])\n r = self.test_client.get(\n \"/coprs/{0}/{1}/edit/\".format(self.u1.name, self.c1.name))\n # TODO: use some kind of html parsing library to look\n # for the hidden input, this ties us\n # to the precise format of the tag\n assert (''\n .format(self.c1.id).encode(\"utf-8\") in r.data)\n\n\nclass TestCoprUpdate(CoprsTestCase):\n\n @TransactionDecorator(\"u1\")\n def test_update_no_changes(self, f_users, f_coprs, f_mock_chroots, f_db):\n self.db.session.add_all([self.u1, self.c1])\n r = self.test_client.post(\"/coprs/{0}/{1}/update/\"\n .format(self.u1.name, self.c1.name),\n data={\"name\": self.c1.name,\n \"chroots\": [\"fedora-18-x86_64\"],\n \"id\": self.c1.id},\n follow_redirects=True)\n\n assert b\"Project has been updated successfully\" in r.data\n\n @TransactionDecorator(\"u1\")\n def test_copr_admin_can_update(self, f_users, f_coprs,\n f_copr_permissions, f_mock_chroots, f_db):\n\n self.db.session.add_all([self.u2, self.c3])\n r = self.test_client.post(\"/coprs/{0}/{1}/update/\"\n .format(self.u2.name, self.c3.name),\n data={\"name\": self.c3.name,\n \"chroots\": [\"fedora-rawhide-i386\"],\n \"id\": self.c3.id},\n follow_redirects=True)\n\n assert b\"Project has been updated successfully\" in r.data\n\n @TransactionDecorator(\"u1\")\n def test_update_multiple_chroots(self, f_users, f_coprs,\n f_copr_permissions, f_mock_chroots, f_db):\n\n self.db.session.add_all(\n [self.u1, self.c1, self.mc1, self.mc2, self.mc3])\n r = self.test_client.post(\"/coprs/{0}/{1}/update/\"\n .format(self.u1.name, self.c1.name),\n data={\"name\": self.c1.name,\n \"chroots\": [\n self.mc2.name,\n self.mc3.name,\n ],\n \"id\": self.c1.id},\n follow_redirects=True)\n\n assert b\"Project has been updated successfully\" in r.data\n self.c1 = self.db.session.merge(self.c1)\n self.mc1 = self.db.session.merge(self.mc1)\n self.mc2 = self.db.session.merge(self.mc2)\n self.mc3 = self.db.session.merge(self.mc3)\n\n mock_chroots = (self.models.MockChroot.query\n .join(self.models.CoprChroot)\n .filter(self.models.CoprChroot.copr_id ==\n self.c1.id).all())\n\n mock_chroots_names = map(lambda x: x.name, mock_chroots)\n assert self.mc2.name in mock_chroots_names\n assert self.mc3.name in mock_chroots_names\n assert self.mc1.name not in mock_chroots_names\n\n @TransactionDecorator(\"u2\")\n def test_update_deletes_multiple_chroots(self, f_users, f_coprs,\n f_copr_permissions,\n f_mock_chroots, f_db):\n\n # https://fedorahosted.org/copr/ticket/42\n self.db.session.add_all([self.u2, self.c2, self.mc1])\n # add one more mock_chroot, so that we can remove two\n self.db.session.add(self.models.CoprChroot(copr_id=self.c2.id, mock_chroot=self.mc1))\n self.db.session.commit()\n\n r = self.test_client.post(\"/coprs/{0}/{1}/update/\"\n .format(self.u2.name, self.c2.name),\n data={\"name\": self.c2.name,\n \"chroots\": [self.mc1.name],\n \"id\": self.c2.id},\n follow_redirects=True)\n\n assert b\"Project has been updated successfully\" in r.data\n self.c2 = self.db.session.merge(self.c2)\n self.mc1 = self.db.session.merge(self.mc1)\n mock_chroots = (self.models.MockChroot.query\n .join(self.models.CoprChroot)\n .filter(and_(self.models.CoprChroot.copr_id ==\n self.c2.id,\n 
self.models.CoprChroot.deleted.is_(False)))\n .all())\n\n assert len(mock_chroots) == 1\n\n @TransactionDecorator(\"u1\")\n @pytest.mark.usefixtures(\"f_users\", \"f_coprs\", \"f_mock_chroots\", \"f_db\")\n def test_changed_ACR_produces_action(self):\n\n self.db.session.add_all(\n [self.u1, self.c1, self.mc1, self.mc2, self.mc3])\n\n username = self.u1.name\n coprname = self.c1.name\n copr_id = self.c1.id\n chroot = self.mc1.name\n chroots = {self.mc1.name, self.mc2.name, self.mc3.name}\n\n # 1. Ensure ACR is enabled\n self.db.session.commit()\n c1_actual = CoprsLogic.get(self.u1.name, self.c1.name).one()\n assert c1_actual.auto_createrepo\n assert len(ActionsLogic.get_many().all()) == 0\n\n # 2. Disabling ACR (generates createrepo action in devel/\n self.test_client.post(\n \"/coprs/{0}/{1}/update/\".format(username, coprname),\n data={\"name\": coprname, \"chroots\": [chroot], \"id\": copr_id,\n \"disable_createrepo\": True},\n follow_redirects=True\n )\n self.db.session.commit()\n c1_actual = CoprsLogic.get(username, coprname).one()\n assert not c1_actual.auto_createrepo\n assert len(ActionsLogic.get_many().all()) == 1\n action = ActionsLogic.get_many().one()\n handled_ids = {action.id}\n\n expected_data = {\n \"ownername\": \"user1\",\n \"projectname\": \"foocopr\",\n \"project_dirnames\": [\"foocopr\"],\n \"chroots\": [\"fedora-18-x86_64\"],\n \"appstream\": True,\n \"devel\": True,\n }\n\n assert json.loads(action.data) == expected_data\n\n # 3. Re-enable ACR, and enable two new chroots\n self.test_client.post(\n \"/coprs/{0}/{1}/update/\".format(username, coprname),\n data={\"name\": coprname, \"chroots\": list(chroots), \"id\": copr_id,\n \"disable_createrepo\": \"false\"},\n follow_redirects=True\n )\n self.db.session.commit()\n c1_actual = CoprsLogic.get(username, coprname).one()\n assert c1_actual.auto_createrepo\n actions = ActionsLogic.get_many().all()\n assert len(actions) == 3\n\n expected_chroots = chroots\n\n for action in ActionsLogic.get_many():\n if action.id in handled_ids:\n continue\n expected_data[\"devel\"] = False\n # TODO: the form re-sets appstream to False for None value\n expected_data[\"appstream\"] = False\n data = json.loads(action.data)\n expected_data[\"chroots\"] = data[\"chroots\"]\n for chroot in data[\"chroots\"]:\n assert chroot in expected_chroots\n expected_chroots.remove(chroot)\n assert data == expected_data\n # createrepo was created in all the three chroots\n assert len(expected_chroots) == 0\n\n\nclass TestCoprApplyForPermissions(CoprsTestCase):\n\n @TransactionDecorator(\"u2\")\n def test_apply(self, f_users, f_coprs, f_db):\n self.db.session.add_all([self.u1, self.u2, self.c1])\n r = self.test_client.post(\"/coprs/{0}/{1}/permissions_applier_change/\"\n .format(self.u1.name, self.c1.name),\n data={\"copr_builder\": \"1\"},\n follow_redirects=True)\n\n assert b\"Successfully updated\" in r.data\n\n self.u1 = self.db.session.merge(self.u1)\n self.u2 = self.db.session.merge(self.u2)\n self.c1 = self.db.session.merge(self.c1)\n new_perm = (self.models.CoprPermission.query\n .filter(self.models.CoprPermission.user_id == self.u2.id)\n .filter(self.models.CoprPermission.copr_id == self.c1.id)\n .first())\n\n assert new_perm.copr_builder == 1\n assert new_perm.copr_admin == 0\n\n @TransactionDecorator(\"u1\")\n def test_apply_doesnt_lower_other_values_from_admin_to_request(\n self, f_users, f_coprs, f_copr_permissions, f_db):\n\n self.db.session.add_all([self.u1, self.u2, self.cp1, self.c2])\n r = 
self.test_client.post(\"/coprs/{0}/{1}/permissions_applier_change/\"\n .format(self.u2.name, self.c2.name),\n data={\"copr_builder\": 1, \"copr_admin\": \"1\"},\n follow_redirects=True)\n assert b\"Successfully updated\" in r.data\n\n self.u1 = self.db.session.merge(self.u1)\n self.c2 = self.db.session.merge(self.c2)\n new_perm = (self.models.CoprPermission.query\n .filter(self.models.CoprPermission.user_id == self.u1.id)\n .filter(self.models.CoprPermission.copr_id == self.c2.id)\n .first())\n\n assert new_perm.copr_builder == 2\n assert new_perm.copr_admin == 1\n\n\nclass TestCoprUpdatePermissions(CoprsTestCase):\n\n @TransactionDecorator(\"u2\")\n def test_cancel_permission(self, f_users, f_coprs,\n f_copr_permissions, f_db):\n\n self.db.session.add_all([self.u2, self.c2])\n r = self.test_client.post(\"/coprs/{0}/{1}/update_permissions/\"\n .format(self.u2.name, self.c2.name),\n data={\"copr_builder_1\": \"0\"},\n follow_redirects=True)\n\n # very volatile, but will fail fast if something changes\n check_string = (\n ''\n )\n assert check_string.encode(\"utf-8\") not in r.data\n\n @TransactionDecorator(\"u2\")\n def test_update_more_permissions(self, f_users, f_coprs,\n f_copr_permissions, f_db):\n\n self.db.session.add_all([self.u2, self.c3])\n self.test_client.post(\"/coprs/{0}/{1}/update_permissions/\"\n .format(self.u2.name, self.c3.name),\n data={\"copr_builder_1\": \"2\",\n \"copr_admin_1\": \"1\",\n \"copr_admin_3\": \"2\"},\n follow_redirects=True)\n\n self.u1 = self.db.session.merge(self.u1)\n self.u3 = self.db.session.merge(self.u3)\n self.c3 = self.db.session.merge(self.c3)\n\n u1_c3_perms = (self.models.CoprPermission.query\n .filter(self.models.CoprPermission.copr_id ==\n self.c3.id)\n .filter(self.models.CoprPermission.user_id ==\n self.u1.id)\n .first())\n\n assert (u1_c3_perms.copr_builder ==\n self.helpers.PermissionEnum(\"approved\"))\n assert (u1_c3_perms.copr_admin ==\n self.helpers.PermissionEnum(\"request\"))\n\n u3_c3_perms = (self.models.CoprPermission.query\n .filter(self.models.CoprPermission.copr_id ==\n self.c3.id)\n .filter(self.models.CoprPermission.user_id ==\n self.u3.id)\n .first())\n assert (u3_c3_perms.copr_builder ==\n self.helpers.PermissionEnum(\"nothing\"))\n assert (u3_c3_perms.copr_admin ==\n self.helpers.PermissionEnum(\"approved\"))\n\n @TransactionDecorator(\"u1\")\n def test_copr_admin_can_update_permissions(self, f_users, f_coprs,\n f_copr_permissions, f_db):\n\n self.db.session.add_all([self.u2, self.c3])\n r = self.test_client.post(\"/coprs/{0}/{1}/update_permissions/\"\n .format(self.u2.name, self.c3.name),\n data={\"copr_builder_1\": \"2\",\n \"copr_admin_3\": \"2\"},\n follow_redirects=True)\n\n assert b\"Project permissions were updated\" in r.data\n\n @TransactionDecorator(\"u1\")\n def test_copr_admin_can_give_up_his_permissions(self, f_users, f_coprs,\n f_copr_permissions, f_db):\n # if admin is giving up his permission and there are more permissions for\n # this copr, then if the admin is altered first, he won\"t be permitted\n # to alter the other permissions and the whole update would fail\n self.db.session.add_all([self.u2, self.c3, self.cp2, self.cp3])\n # mock out the order of CoprPermission objects, so that we are sure\n # the admin is the first one and therefore this fails if\n # the view doesn\"t reorder the permissions\n\n # flexmock(self.models.Copr, copr_permissions=[self.cp3, self.cp2])\n r = self.test_client.post(\"/coprs/{0}/{1}/update_permissions/\"\n .format(self.u2.name, self.c3.name),\n data={\"copr_admin_1\": 
\"1\",\n \"copr_admin_3\": \"1\"},\n follow_redirects=True)\n\n self.u1 = self.db.session.merge(self.u1)\n self.c3 = self.db.session.merge(self.c3)\n perm = (self.models.CoprPermission.query\n .filter(self.models.CoprPermission.user_id == self.u1.id)\n .filter(self.models.CoprPermission.copr_id == self.c3.id)\n .first())\n\n assert perm.copr_admin == 1\n assert b\"Project permissions were updated\" in r.data\n\n\nclass TestCoprDelete(CoprsTestCase):\n\n @TransactionDecorator(\"u1\")\n def test_delete(self, f_users, f_coprs, f_db):\n self.db.session.add_all([self.u1, self.c1])\n r = self.test_client.post(\"/coprs/{0}/{1}/delete/\"\n .format(self.u1.name, self.c1.name),\n data={\"verify\": \"yes\"},\n follow_redirects=True)\n\n assert b\"Project has been deleted successfully\" in r.data\n self.db.session.add(self.c1)\n assert self.models.Action.query.first().id == self.c1.id\n assert self.models.Copr.query\\\n .order_by(desc(models.Copr.created_on))\\\n .filter(self.models.Copr.id == self.c1.id).first().deleted\n\n @TransactionDecorator(\"u1\")\n def test_copr_delete_does_not_delete_if_verify_filled_wrongly(\n self, f_users, f_coprs, f_db):\n\n self.db.session.add_all([self.u1, self.c1])\n r = self.test_client.post(\"/coprs/{0}/{1}/delete/\"\n .format(self.u1.name, self.c1.name),\n data={\"verify\": \"no\"},\n follow_redirects=True)\n\n assert b\"Project has been deleted successfully\" not in r.data\n assert not self.models.Action.query.first()\n assert self.models.Copr.query\\\n .order_by(desc(models.Copr.created_on))\\\n .filter(self.models.Copr.id == self.c1.id).first()\n\n @TransactionDecorator(\"u2\")\n def test_non_user_cant_delete(self, f_users, f_coprs, f_db):\n self.db.session.add_all([self.u1, self.u2, self.c1])\n r = self.test_client.post(\"/coprs/{0}/{1}/delete/\"\n .format(self.u1.name, self.c1.name),\n data={\"verify\": \"yes\"},\n follow_redirects=True)\n self.c1 = self.db.session.merge(self.c1)\n assert b\"Project has been deleted successfully\" not in r.data\n assert not self.models.Action.query.first()\n assert self.models.Copr.query\\\n .order_by(desc(models.Copr.created_on))\\\n .filter(self.models.Copr.id == self.c1.id).first()\n\n\nclass TestCoprRepoGeneration(CoprsTestCase):\n\n @pytest.fixture\n def f_not_finished_builds(self):\n \"\"\" Custom builds are used in order not to break the default ones \"\"\"\n self.b8 = self.models.Build(\n copr=self.c1, user=self.u1, submitted_on=11)\n self.mc1 = self.models.MockChroot(\n os_release=\"fedora\", os_version=\"18\", arch=\"x86_64\")\n self.cc1 = self.models.CoprChroot(mock_chroot=self.mc1, copr=self.c1)\n\n # assign with chroot\n self.db.session.add(\n self.models.BuildChroot(\n build=self.b8,\n mock_chroot=self.mc1\n )\n )\n\n self.db.session.add_all([self.b8, self.mc1, self.cc1])\n\n def test_fail_on_nonexistent_copr(self):\n r = self.tc.get(\n \"/coprs/bogus-user/bogus-nonexistent-repo/repo/fedora-18-x86_64/\")\n assert r.status_code == 404\n assert b\"does not exist\" in r.data\n\n def test_works_on_older_builds(self, f_users, f_coprs, f_mock_chroots,\n f_custom_builds, f_db):\n orig = app.config[\"ENFORCE_PROTOCOL_FOR_BACKEND_URL\"]\n app.config[\"ENFORCE_PROTOCOL_FOR_BACKEND_URL\"] = \"https\"\n r = self.tc.get(\n \"/coprs/{0}/{1}/repo/fedora-18/\"\n .format(self.u1.name, self.c1.name))\n\n assert r.status_code == 200\n assert b\"baseurl=https://\" in r.data\n app.config[\"ENFORCE_PROTOCOL_FOR_BACKEND_URL\"] = orig\n\n def test_repofile_multilib(self, f_users, f_coprs, f_mock_chroots,\n f_mock_chroots_many, 
f_custom_builds, f_db):\n\n r_non_ml_chroot = self.tc.get(\n \"/coprs/{0}/{1}/repo/fedora-18/some.repo?arch=x86_64\".format(\n self.u1.name, self.c1.name))\n\n for f_version in range(19, 24):\n for arch in ['x86_64', 'i386']:\n # with disabled multilib there's no change between fedora repos,\n # no matter what the version or architecture is\n r_ml_chroot = self.tc.get(\n \"/coprs/{0}/{1}/repo/fedora-{2}/some.repo?arch={3}\".format(\n self.u1.name, self.c1.name, f_version, arch))\n assert r_ml_chroot.data == r_non_ml_chroot.data\n\n self.c1.multilib = True\n self.db.session.commit()\n\n cache.clear() # f18 repofile is cached\n\n # The project is now multilib, but f18 chroot doesn't have i386\n # countepart in c1\n\n r_non_ml_chroot = self.tc.get(\n \"/coprs/{0}/{1}/repo/fedora-18/some.repo?arch=x86_64\".format(\n self.u1.name, self.c1.name))\n\n r_ml_first_chroot = self.tc.get(\n \"/coprs/{0}/{1}/repo/fedora-19/some.repo?arch=x86_64\".format(\n self.u1.name, self.c1.name))\n\n for f_version in range(19, 24):\n # All the Fedora 19..23 chroots have both i386 and x86_64 enabled in\n # c1, so all the repofiles need to be the same.\n r_ml_chroot = self.tc.get(\n \"/coprs/{0}/{1}/repo/fedora-{2}/some.repo?arch=x86_64\".format(\n self.u1.name, self.c1.name, f_version))\n assert r_ml_chroot.data == r_ml_first_chroot.data\n assert r_ml_chroot.data != r_non_ml_chroot.data\n\n # and the non-ml variants need to match non-ml chroot f18\n # (this also checks that we don't cache 'some.repo' requests with\n # 'some.repo&arch=...')\n r_non_ml_repofile = self.tc.get(\n \"/coprs/{0}/{1}/repo/fedora-{2}/some.repo\".format(\n self.u1.name, self.c1.name, f_version))\n assert r_non_ml_repofile.data == r_non_ml_chroot.data\n\n def parse_repofile(string):\n lines = string.split('\\n')\n def get_params(name, lines):\n return [x.split('=')[1] for x in lines\n if re.match(r'^{}=.*'.format(name), x)]\n return (\n [x.strip('[]') for x in lines if re.match(r'^\\[.*\\]$', x)],\n get_params('baseurl', lines),\n get_params('gpgkey', lines),\n get_params('name', lines),\n get_params('cost', lines),\n )\n\n non_ml_repofile = r_non_ml_chroot.data.decode('utf-8')\n ml_repofile = r_ml_first_chroot.data.decode('utf-8')\n\n repoids, baseurls, gpgkeys, _, costs = parse_repofile(non_ml_repofile)\n assert len(repoids) == len(baseurls) == len(gpgkeys) == 1\n assert len(costs) == 0\n\n normal_gpgkey = gpgkeys[0]\n normal_repoid = repoids[0]\n normal_baseurl = baseurls[0]\n\n repoids, baseurls, gpgkeys, names, costs = parse_repofile(ml_repofile)\n assert len(repoids) == len(baseurls) == len(gpgkeys) == 2\n assert len(costs) == 1\n assert costs[0] == '1100'\n\n assert normal_repoid == repoids[0]\n assert normal_repoid + ':ml' == repoids[1]\n assert 'x86_64' not in names[0]\n assert '(i386)' not in names[0]\n assert '(i386)' in names[1]\n assert gpgkeys[0] == gpgkeys[1] == normal_gpgkey\n assert normal_baseurl == baseurls[0]\n assert normal_baseurl.rsplit('-', 1)[0] == baseurls[1].rsplit('-', 1)[0]\n\n def test_repofile_copr_runtime_deps(self, f_users, f_coprs, f_mock_chroots):\n \"\"\"\n Test that a repofile for a project that has runtime dependencies was\n generated correctly.\n \"\"\"\n _side_effects = (f_users, f_coprs, f_mock_chroots)\n\n repofile = self.tc.get(\n \"/coprs/{0}/{1}/repo/fedora-18/some.repo?arch=x86_64\".format(\n self.u2.name, self.c3.name))\n\n config = ConfigParser()\n config.read_string(repofile.data.decode(\"utf-8\"))\n\n name1 = \"Copr localhost/user2/barcopr runtime dependency #1 - user1/foocopr\"\n name2 = 
\"Copr localhost/user2/barcopr external runtime dependency #1 - https_url_to_external_repo\"\n\n assert len(config.sections()) == 3\n assert name1 in config.get(config.sections()[1], \"name\")\n assert name2 in config.get(config.sections()[2], \"name\")\n assert \"{0}:{1}\".format(self.u1.name, self.c1.name) in config.sections()[1]\n\n url = \"https://url.to/external/repo\"\n repo_id = \"coprdep:{0}\".format(generate_repo_name(url))\n assert repo_id == config.sections()[2]\n assert config.get(repo_id, \"baseurl\") == url\n\n def test_repofile_group_copr_runtime_deps(self, f_users, f_coprs,\n f_mock_chroots, f_group_copr,\n f_group_copr_dependent):\n \"\"\"\n Test that repofiles for a project that has runtime dependency on\n a group project and a group project with runtime dependency were\n generated correctly.\n \"\"\"\n _side_effects = (f_users, f_coprs, f_mock_chroots, f_group_copr,\n f_group_copr_dependent)\n\n repofile = self.tc.get(\n \"/coprs/{0}/{1}/repo/fedora-18/some.repo?arch=x86_64\".format(\n self.u2.name, self.c_gd.name))\n\n config = ConfigParser()\n config.read_string(repofile.data.decode(\"utf-8\"))\n\n name = (\n \"Copr localhost/user2/depcopr runtime dependency #1 - \"\n \"@group1/groupcopr\"\n )\n\n assert len(config.sections()) == 2\n assert name in config.get(config.sections()[1], \"name\")\n\n repofile = self.tc.get(\n \"/coprs/g/{0}/{1}/repo/fedora-18/some.repo?arch=x86_64\".format(\n self.g1.name, self.gc2.name))\n\n config = ConfigParser()\n config.read_string(repofile.data.decode(\"utf-8\"))\n\n name = (\n \"Copr localhost/@group1/groupcopr2 runtime dependency #1 - \"\n \"@group1/groupcopr1\"\n )\n\n assert len(config.sections()) == 2\n assert name in config.get(config.sections()[1], \"name\")\n\n\n def test_repofile_transitive_runtime_deps(self, f_users,\n f_copr_transitive_dependency):\n \"\"\"\n Test that a repofile for a project that has multiple transitive\n runtime dependencies was generated correctly.\n \"\"\"\n _side_effects = (f_users, f_copr_transitive_dependency)\n\n repofile = self.tc.get(\n \"/coprs/{0}/{1}/repo/fedora-18/some.repo?arch=x86_64\".format(\n self.u2.name, self.c_td1.name))\n\n config = ConfigParser()\n config.read_string(repofile.data.decode(\"utf-8\"))\n\n warning = (\n \"# This repository is configured to have a runtime dependency on \"\n \"a Copr project user2/nonexisting but that doesn't exist.\"\n )\n assert warning in repofile.data.decode(\"utf-8\")\n\n assert len(config.sections()) == 4\n assert \"coprdep:localhost:{0}:{1}\".format(self.u2.name, self.c_td2.name) in config.sections()\n assert \"coprdep:localhost:{0}:{1}\".format(self.u2.name, self.c_td3.name) in config.sections()\n\n url = \"http://some.url/\"\n repo_id = \"coprdep:{0}\".format(generate_repo_name(url))\n assert repo_id == config.sections()[3]\n assert config.get(repo_id, \"baseurl\") == url\n\n\nclass TestSearch(CoprsTestCase):\n\n @mock.patch(\"coprs.views.coprs_ns.coprs_general.render_template\")\n def test_search_basic(self, mc_render_template, f_users, f_db):\n # mc_flask.render_template.return_value = mock.MagicMock()\n # self.prefix = u\"prefix_{}_\".format(int(time.time()))\n self.prefix = u\"prefix\"\n self.s_coprs = []\n\n for x in range(5):\n self.s_coprs.append(models.Copr(name=self.prefix + str(x), user=self.u1))\n\n for x in range(7):\n self.s_coprs.append(models.Copr(name=self.prefix + str(x), user=self.u2))\n\n self.db.session.add_all(self.s_coprs)\n self.db.session.commit()\n\n mc_render_template.side_effect = lambda *args, **kwargs: 
flask.render_template(*args, **kwargs)\n\n # self.tc.get(\"/coprs/fulltext/?fulltext={}\".format(self.prefix))\n # qargs, qkwargs = mc_render_template.call_args\n # assert qkwargs[\"paginator\"].total_count == 5+7\n #\n # self.tc.get(\"/coprs/fulltext/?fulltext={}\".format(\"user1/prefix\"))\n # qargs, qkwargs = mc_render_template.call_args\n # assert qkwargs[\"paginator\"].total_count == 5\n #\n # self.tc.get(\"/coprs/fulltext/?fulltext={}\".format(\"user1\"))\n # qargs, qkwargs = mc_render_template.call_args\n # assert qkwargs[\"paginator\"].total_count == 5\n #\n # self.tc.get(\"/coprs/fulltext/?fulltext={}\".format(\"user1/\"))\n # qargs, qkwargs = mc_render_template.call_args\n # assert qkwargs[\"paginator\"].total_count == 5\n\n @pytest.mark.usefixtures(\"f_users\", \"f_coprs\", \"f_group_copr\", \"f_builds\",\n \"f_db\")\n def test_search_by_attributes(self):\n # We will be searching a lot, so let's make a small helper for that\n def search(url):\n response = self.tc.get(url)\n tree = html.fromstring(response.data)\n results = [x.find(\".//h3\") for x in\n tree.xpath(\"//a[@class='list-group-item']\")]\n return [x.text for x in results if x is not None]\n\n # Search by username\n results = search(\"/coprs/fulltext/?ownername=user2\")\n assert len(results) == 2\n\n # Search by packagename\n results = search(\"/coprs/fulltext/?packagename=world\")\n assert len(results) == 3\n\n # Search by multiple attributes at once\n params = \"?ownername=user2&projectname=foo&packagename=world\"\n results = search(\"/coprs/fulltext/\" + params)\n assert len(results) == 1\n\n # Make sure all found results contain the searched username\n # and projectname\n for result in results:\n assert \"user2\" in result\n assert \"foo\" in result\n\n\nclass TestRepo(CoprsTestCase):\n def test_repo_renders_http(self, f_users, f_coprs, f_mock_chroots, f_db):\n url = \"/coprs/{user}/{copr}/repo/{chroot}/{user}-{copr}-{chroot}.repo\".format(\n user = self.u1.username,\n copr = self.c1.name,\n chroot = \"{}-{}\".format(self.mc1.os_release, self.mc1.os_version),\n )\n app.config[\"REPO_NO_SSL\"] = True\n app.config[\"ENFORCE_PROTOCOL_FOR_BACKEND_URL\"] = \"https\"\n with app.app_context():\n res = self.tc.get(url)\n assert res.status_code == 200\n assert 'baseurl=http://' in res.data.decode('utf-8')\n\n def test_chroot_alias(self, f_users, f_coprs, f_mock_chroots, f_db):\n # Test a chroot alias feature on a real-world example (RhBug: 1756632)\n\n mc_kwargs = dict(os_version=\"8\", arch=\"x86_64\", is_active=True,\n distgit_branch=models.DistGitBranch(name=\"bar\"))\n mc_epel = models.MockChroot(os_release=\"epel\", **mc_kwargs)\n mc_rhelbeta = models.MockChroot(os_release=\"rhelbeta\", **mc_kwargs)\n\n cc_epel = models.CoprChroot(mock_chroot=mc_epel)\n cc_rhelbeta = models.CoprChroot(mock_chroot=mc_rhelbeta)\n\n self.c1.copr_chroots = [cc_epel, cc_rhelbeta]\n self.db.session.commit()\n\n app.config[\"BACKEND_BASE_URL\"] = \"https://foo\"\n\n kwargs = dict(user = self.u1.username, copr = self.c1.name)\n url = \"/coprs/{user}/{copr}/repo/{chroot}/\"\n\n # Both chroots enabled, without alias\n r1 = self.tc.get(url.format(chroot=\"epel-8\", **kwargs))\n r2 = self.tc.get(url.format(chroot=\"rhelbeta-8\", **kwargs))\n assert \"baseurl=https://foo/results/user1/foocopr/epel-8-$basearch/\" in r1.data.decode(\"utf-8\")\n assert \"baseurl=https://foo/results/user1/foocopr/rhelbeta-8-$basearch/\" in r2.data.decode(\"utf-8\")\n\n # Both chroots enabled, alias defined\n app.config[\"CHROOT_NAME_RELEASE_ALIAS\"] = {\"epel-8\": 
\"rhelbeta-8\"}\n r1 = self.tc.get(url.format(chroot=\"epel-8\", **kwargs))\n r2 = self.tc.get(url.format(chroot=\"rhelbeta-8\", **kwargs))\n assert \"baseurl=https://foo/results/user1/foocopr/epel-8-$basearch/\" in r1.data.decode(\"utf-8\")\n assert \"baseurl=https://foo/results/user1/foocopr/rhelbeta-8-$basearch/\" in r2.data.decode(\"utf-8\")\n\n # Only one chroot enabled, alias defined\n self.c1.copr_chroots = [cc_rhelbeta]\n self.db.session.commit()\n cache.clear()\n r1 = self.tc.get(url.format(chroot=\"epel-8\", **kwargs))\n r2 = self.tc.get(url.format(chroot=\"rhelbeta-8\", **kwargs))\n assert \"baseurl=https://foo/results/user1/foocopr/rhelbeta-8-$basearch/\" in r1.data.decode(\"utf-8\")\n assert \"baseurl=https://foo/results/user1/foocopr/rhelbeta-8-$basearch/\" in r2.data.decode(\"utf-8\")\n\n\nclass TestCoprActionsGeneration(CoprsTestCase):\n\n @TransactionDecorator(\"u1\")\n def test_createrepo_priority(self, f_users, f_mock_chroots, f_db):\n # When creating a project the initial createrepo action should be prioritized\n self.test_client.post(\"/coprs/{0}/new/\".format(self.u1.name),\n data={\"name\": \"foo\",\n \"chroots\": [\"fedora-rawhide-i386\"],\n \"arches\": [\"i386\"]})\n\n copr = CoprsLogic.get(self.u1.username, \"foo\").one()\n actions = ActionsLogic.get_many(ActionTypeEnum(\"createrepo\")).all()\n assert len(actions) == 1\n assert actions[0].priority == ActionPriorityEnum(\"highest\")\n\n # User-requested createrepo actions should have normal priority\n self.test_client.post(\"/coprs/id/{0}/createrepo/\".format(copr.id), data={})\n actions = ActionsLogic.get_many(ActionTypeEnum(\"createrepo\")).all()\n assert len(actions) == 2\n assert actions[1].priority == 0\n\n @TransactionDecorator(\"u1\")\n @pytest.mark.usefixtures(\"f_users\", \"f_users_api\", \"f_mock_chroots\", \"f_db\")\n def test_createrepo_on_reenable(self):\n self.api3.new_project(\"test\", [\"fedora-rawhide-i386\",\n \"fedora-17-x86_64\"])\n # disable fedora-17, but enable fedora-18\n self.api3.modify_project(\"test\", chroots=[\"fedora-rawhide-i386\",\n \"fedora-18-x86_64\"])\n # re-enable fedora-17\n self.api3.modify_project(\"test\", chroots=[\"fedora-rawhide-i386\",\n \"fedora-17-x86_64\",\n \"fedora-18-x86_64\"])\n\n actions = self.models.Action.query.all()\n assert [ActionTypeEnum(a)\n for a in [\"createrepo\", \"gen_gpg_key\", \"createrepo\",\n \"createrepo\"]] \\\n == [a.action_type for a in actions]\n\n actions.pop(1) # we don't care about gpg here\n template = {\n \"ownername\": \"user1\",\n \"projectname\": \"test\",\n \"project_dirnames\": [\"test\"],\n \"appstream\": False,\n \"devel\": False,\n }\n def _expected(action, chroots):\n template[\"chroots\"] = chroots\n assert json.loads(action.data) == template\n _expected(actions[0], [\"fedora-17-x86_64\", \"fedora-rawhide-i386\"])\n _expected(actions[1], [\"fedora-18-x86_64\"])\n _expected(actions[2], [\"fedora-17-x86_64\"])\n\n @pytest.mark.usefixtures(\"f_u1_ts_client\", \"f_mock_chroots\", \"f_db\")\n def test_fedora_review_project(self):\n create_chroot_function([\"fedora-rawhide-x86_64\"])\n route = \"/coprs/{0}/new-fedora-review/\".format(self.transaction_username)\n resp = self.test_client.post(\n route,\n data={\"name\": \"test-fedora-review\"},\n follow_redirects=False,\n )\n assert \"user1/test-fedora-review/add_build\" in resp.headers[\"Location\"]\n copr = self.models.Copr.query.get(1)\n assert copr.full_name == \"user1/test-fedora-review\"\n assert len(copr.active_chroots) == 1\n assert copr.active_chroots[0].name == 
\"fedora-rawhide-x86_64\"\n assert \"Fedora Review tool\" in copr.description\n assert \"You should ask the project owner\" in copr.instructions\n assert copr.fedora_review\n assert copr.unlisted_on_hp\n\n # re-request\n resp = self.test_client.post(\n route,\n data={\"name\": \"test-fedora-review\"},\n follow_redirects=True,\n )\n assert resp.status_code == 200 # error!\n error = parse_web_form_error(resp.data, variant=\"b\")\n assert error == \"Error in project config\"\n","repo_name":"fedora-copr/copr","sub_path":"frontend/coprs_frontend/tests/test_views/test_coprs_ns/test_coprs_general.py","file_name":"test_coprs_general.py","file_ext":"py","file_size_in_byte":49845,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"54"} +{"seq_id":"72220892640","text":"\"\"\"\nEcho server with threading\n\nCreate a socket echo server which handles each connection in a separate Thread\n\"\"\"\nfrom socket import *\nimport threading\n\n\ndef listen_for_new_client(connection, address):\n try:\n print('Connected', address)\n while True:\n with connection:\n client_msg = connection.recv(1024).decode()\n print(f'{threading.current_thread().getName()} info. recived: {client_msg}')\n if client_msg == 'quit':\n connection.send(client_msg.encode())\n connection.close()\n break\n print(addr, ': ', client_msg)\n connection.send(client_msg.encode())\n finally:\n connection.close()\n\n\nHOST = '127.0.0.1'\nPORT = 8000\n\ntcp_socket = socket(AF_INET, SOCK_STREAM)\ntry:\n tcp_socket.bind((HOST, PORT))\n tcp_socket.listen(2)\n while True:\n conn, addr = tcp_socket.accept()\n threading.Thread(target=listen_for_new_client, args=(conn, addr)).start()\nfinally:\n tcp_socket.close()\n","repo_name":"DanyloSamoylov/PY20092021","sub_path":"lesson35/4_35_2_server.py","file_name":"4_35_2_server.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21510072538","text":"import cupy as cp\nimport numpy as np\n\n# Type pairs that we support\nTYPE_PAIRS = {\"float\": cp.float32, \"double\": cp.float64}\n\n# Cuda kernels for the flt function to use.\n# Based on the original C code by Franklin Antonio, available at\n# https://github.com/UCBerkeleySETI/dedopplerperf/blob/main/CudaTaylor5demo.cu\n# It does one round of the Taylor tree algorithm, calculating the sums of length-2^(x+1) paths\n# from the sums of length-2^x paths.\nCODE = r\"\"\"\ntemplate\n__global__ void taylor(const T* A, T* B, int kmin, int kmax, int set_size, int n_time, int n_freq) {\n int tid = blockIdx.x * blockDim.x + threadIdx.x;\n int k = kmin + tid;\n bool worker = (k >= kmin) && (k < kmax) && set_size <= n_time;\n if (!worker) {\n return;\n }\n for (int j = 0; j < n_time; j += set_size) {\n for (int j0 = set_size - 1; j0 >= 0; j0--) {\n int j1 = j0 / 2;\n int j2 = j1 + set_size / 2;\n int j3 = (j0 + 1) / 2;\n if (k + j3 < kmax) {\n B[(j + j0) * n_freq + k] = A[(j + j1) * n_freq + k] + A[(j + j2) * n_freq + k + j3];\n }\n }\n }\n}\n\"\"\"\nC_TYPES = TYPE_PAIRS.keys()\nNAME_EXPS = [f\"taylor<{t}>\" for t in C_TYPES]\nMODULE = cp.RawModule(code=CODE, options=(\"-std=c++11\",), name_expressions=NAME_EXPS)\nKERNELS = {}\nfor c_type, name_exp in zip(C_TYPES, NAME_EXPS):\n KERNELS[c_type] = MODULE.get_function(name_exp)\n\n\ndef flt(array, n_time):\n \"\"\"\n Taylor-tree-sum the data in array.\n\n array should be a 1-dimensional cupy array. 
If reshaped into two dimensions, the\n data would be indexed so that array[time][freq] stores the data at a particular time\n and frequency. So, the same way h5 files are typically stored.\n\n n_time is the number of timesteps in the data.\n\n The algorithm uses one scratch buffer, and in each step of the loop, it calculates\n sums from one buffer and puts the output in the other. Thus, the drift sums we are looking\n for may end up either in the original buffer, or in the scratch buffer. This method\n returns whichever buffer is the one to use, and we leave the other one for cupy to clean up.\n \"\"\"\n taylor_kernel = None\n for c_type, py_type in TYPE_PAIRS.items():\n if py_type == array.dtype:\n taylor_kernel = KERNELS[c_type]\n break\n else:\n raise RuntimeError(\n f\"we have no GPU taylor kernel for the numerical type: {array.dtype}\"\n )\n\n assert len(array) % n_time == 0\n n_freq = len(array) // n_time\n buf = cp.zeros_like(array)\n\n # Cuda params\n block_size = 1024\n grid_size = (n_freq + block_size - 1) // block_size\n\n set_size = 2\n while set_size <= n_time:\n taylor_kernel(\n (grid_size,),\n (block_size,),\n (array, buf, 0, n_freq, set_size, n_time, n_freq),\n )\n array, buf = buf, array\n set_size *= 2\n\n return array\n","repo_name":"cejkys/turboSETI","sub_path":"turbo_seti/find_doppler/kernels/_taylor_tree/_core_cuda.py","file_name":"_core_cuda.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13800871548","text":"\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\n\r\n\r\ndef submit():\r\n email1 = email1_entry.get()\r\n email2 = email2_entry.get()\r\n\r\n if email1 == email2 and \"@\" in email1:\r\n frame2.tkraise()\r\n else:\r\n # Emails do not match or do not contain '@' sign\r\n messagebox.showerror(\"Error\", \"Emails do not match or are invalid- retype this\")\r\n\r\nroot = tk.Tk()\r\nroot.geometry(\"400x300\")\r\n\r\nframe1 = tk.Frame(root)\r\nframe2 = tk.Frame(root)\r\n\r\nfor frame in (frame1, frame2):\r\n frame.grid(row=0, column=0, sticky=\"nsew\")\r\n\r\n# Frame 1\r\ntk.Label(frame1, text=\"Email one\").pack()\r\nemail1_entry = tk.Entry(frame1)\r\nemail1_entry.pack()\r\nemail1_label = tk.Label(frame1, text=\"Enter your email here\")\r\nemail1_label.pack()\r\n\r\ntk.Label(frame1, text=\"Email two\").pack()\r\nemail2_entry = tk.Entry(frame1)\r\nemail2_entry.pack()\r\nemail2_label = tk.Label(frame1, text=\"Enter your email here again\")\r\nemail2_label.pack()\r\n\r\nsubmit_button = tk.Button(frame1, text=\"Submit\", command=submit, background=\"yellow\", font=(\"Comic Sans MS\", 12, \"bold\"))\r\nsubmit_button.pack()\r\n\r\n# Frame 2 (Empty frame for now, you can add content to it as needed)\r\ntk.Label(frame2, text=\"Submission Successful!\").pack()\r\n\r\nframe1.tkraise()\r\n","repo_name":"rey797/LocationLikes","sub_path":"Untitled-1.py","file_name":"Untitled-1.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25350211694","text":"import os\nimport glob\nimport torch\nimport wandb\nimport numpy as np\nimport pytorch_lightning as pl\nimport argparse\nimport time\nfrom im2mesh import config, data\n\nfrom collections import OrderedDict\n\nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\nimport pytorch3d\nfrom pytorch3d.structures import Meshes, Pointclouds\nfrom pytorch3d.renderer import (\n PerspectiveCameras,\n 
RasterizationSettings,\n    MeshRasterizer,\n)\n\n# Arguments\nparser = argparse.ArgumentParser(\n    description='Validation function on within-distribution poses (ZJU training and testing).'\n)\nparser.add_argument('config', type=str, help='Path to config file.')\nparser.add_argument('--novel-pose', action='store_true', help='Test on novel poses.')\nparser.add_argument('--novel-pose-view', type=str, default=None, help='Novel view to use for rendering novel poses. Specify this argument if you only want to render a specific view of novel poses.')\nparser.add_argument('--novel-view', action='store_true', help='Test on novel views of all training poses.')\nparser.add_argument('--multi-gpu', action='store_true', help='Test on multiple GPUs.')\nparser.add_argument('--num-workers', type=int, default=4,\n                    help='Number of workers to use for val/test loaders.')\nparser.add_argument('--run-name', type=str, default='',\n                    help='Run name for Wandb logging.')\n\nif __name__ == '__main__':\n    args = parser.parse_args()\n    cfg = config.load_config(args.config, 'configs/default.yaml')\n    num_workers = args.num_workers\n\n    # Novel-view synthesis on training poses: evaluate every 30th frame\n    if args.novel_view and not args.novel_pose:\n        cfg['data']['val_subsampling_rate'] = 30\n\n    # View-synthesis (can be either training or testing views) on novel poses\n    if args.novel_pose_view is not None:\n        assert (args.novel_pose)\n        cfg['data']['test_subsampling_rate'] = 1\n        cfg['data']['test_views'] = [args.novel_pose_view]\n\n    # Shorthands\n    out_dir = cfg['training']['out_dir']\n    batch_size = cfg['training']['batch_size']\n\n    # Dataloaders\n    train_dataset = config.get_dataset('train', cfg)\n    val_dataset = config.get_dataset('test' if args.novel_pose else 'val', cfg)\n\n    val_loader = torch.utils.data.DataLoader(\n        val_dataset, batch_size=1, num_workers=args.num_workers, shuffle=False\n    )\n\n    # Create PyTorch Lightning model\n    model = config.get_model(cfg, dataset=train_dataset, val_size=len(val_loader))\n\n    # Create logger\n    latest_wandb_path = glob.glob(os.path.join(out_dir, 'wandb', 'latest-run', 'run-*.wandb'))\n    if len(latest_wandb_path) == 1:\n        run_id = os.path.basename(latest_wandb_path[0]).split('.')[0][4:]\n    else:\n        run_id = None\n\n    if len(args.run_name) > 0:\n        run_name = args.run_name\n    else:\n        run_name = None\n\n    kwargs = {'settings': wandb.Settings(start_method='fork')}\n    logger = pl.loggers.WandbLogger(name=run_name,\n                                    project='arah',\n                                    id=run_id,\n                                    save_dir=out_dir,\n                                    config=cfg,\n                                    **kwargs)\n\n    # Create PyTorch Lightning trainer\n    checkpoint_path = os.path.join(out_dir, 'checkpoints/last.ckpt')\n    if not os.path.exists(checkpoint_path):\n        raise FileNotFoundError('No checkpoint is found!')\n\n    if args.multi_gpu:\n        trainer = pl.Trainer(logger=logger,\n                             default_root_dir=out_dir,\n                             accelerator='gpu',\n                             strategy='ddp',\n                             devices=[0, 1, 2, 3],\n                             num_sanity_val_steps=0)\n    else:\n        trainer = pl.Trainer(logger=logger,\n                             default_root_dir=out_dir,\n                             accelerator='gpu',\n                             devices=[0],\n                             num_sanity_val_steps=0)\n\n    trainer.validate(model=model, dataloaders=val_loader, ckpt_path=checkpoint_path, verbose=True)\n","repo_name":"taconite/arah-release","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"54"} +{"seq_id":"71626246561","text":"def binarySearch(arr,k):\n    r = len(arr) - 1\n    l = 0\n    while (l <= r):\n        m = (l+r)//2  # determine the midpoint\n        if k == arr[m]: return m\n        elif (k < arr[m]) : r = m - 1\n        else: l = m + 1\n    return -1\n\nif __name__ == "__main__":\n    arr = [3, 4, 5, 6, 12, 17, 18, 24, 30, 33]\n    \n    result = binarySearch(arr,17)\n    if result != -1:\n        print ("The element is at position %d" % result)\n    else:\n        print ("The element does not exist in the array")\n","repo_name":"ivandumas/Algoritmos","sub_path":"searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21410055718","text":"'''\r\nCreated on 21 March 2017\r\n\r\n@author: Naitra\r\n'''\r\nfrom numpy.random import multivariate_normal\r\nfrom numpy import zeros, sqrt\r\n\r\n\r\nclass BrownianMotion(object):\r\n    '''\r\n    classdocs\r\n    '''\r\n\r\n    def __init__(self, covariance, mean=None):\r\n        '''\r\n        Constructor\r\n        '''\r\n        self.Covariance = covariance\r\n        self.Dimension = len(self.Covariance[0])\r\n        self.Mean = zeros(self.Dimension) if mean is None else mean\r\n    \r\n    def Path(self, timeline, nb_simulation):\r\n        '''\r\n        Create nb_simulation paths of the Brownian motion evaluated at timeline\r\n        '''\r\n        result = multivariate_normal(self.Mean, self.Covariance, (nb_simulation, len(timeline)))\r\n        # Brownian increments scale with sqrt(dt), not dt\r\n        result[:,0,:] *= sqrt(timeline[0])\r\n        for k in range(1, len(timeline)):\r\n            result[:, k, :] = sqrt(timeline[k]-timeline[k-1])*result[:, k, :] + result[:, k-1, :]\r\n        return result\r\n    \r\n    \r\n    ","repo_name":"NsanDev/MasterProject","sub_path":"StochasticProcess/Multidimensional/BrownianMotion.py","file_name":"BrownianMotion.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23277405228","text":"from flask import Blueprint, jsonify, request\nfrom flask_login import login_required, current_user\nfrom app.models import db, Follow\n\nfollow_routes = Blueprint('follows', __name__)\n\ndef validation_errors_to_error_messages(validation_errors):\n    \"\"\"\n    Simple function that turns the WTForms validation errors into a simple list\n    \"\"\"\n    errorMessages = []\n    for field in validation_errors:\n        for error in validation_errors[field]:\n            errorMessages.append(f'{field} : {error}')\n    return errorMessages\n\n@follow_routes.route('')\ndef get_follows():\n    follows = Follow.query.all()\n\n    return [follow.to_dict() for follow in follows]\n\n@follow_routes.route('/<int:followedId>', methods=['POST'])\n@login_required\ndef follow_user(followedId):\n    followerId = current_user.id\n    follow = Follow(followerId=followerId, followedId=followedId)\n    db.session.add(follow)\n    db.session.commit()\n    return follow.to_dict()\n\n@follow_routes.route('/<int:followId>', methods=['DELETE'])\n@login_required\ndef unfollow_user(followId):\n    follow = Follow.query.get(followId)\n    db.session.delete(follow)\n    db.session.commit()\n    return {'message': 'Successfully unfollowed.'}\n","repo_name":"gabriellaguerre/group-tumblr-clone","sub_path":"app/api/follow_routes.py","file_name":"follow_routes.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73638715043","text":"from bisect import bisect_left, bisect_right\nfrom typing import List\n\nclass Solution:\n    def hIndex(self, citations: List[int]) -> int:\n        citations.sort()\n        countingPrefixSum = []\n        hindex = 0\n        length = len(citations)  # the length is equal to the total number of citations\n        for value in range(min(length, max(citations)) + 1):\n            left_pos = bisect_left(citations, value)\n            right_pos = bisect_right(citations, value)\n            for pos in range(left_pos, right_pos + 1):\n                if length - pos == value:\n                    hindex 
= value\n\n return hindex","repo_name":"Haymanot-Demis/A2SV-Problems","sub_path":"h-index.py","file_name":"h-index.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24573971907","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 7 01:44:32 2021\r\n\r\n@author: narah\r\n\"\"\"\r\n\r\ndef fibonacci(x):\r\n count=2\r\n s=0\r\n a=0\r\n #print(a)\r\n #print(\"\\n\")\r\n b=1\r\n while count= 10\n\n\ndef test_valid_data_entry(app):\n book_input = Book(\n title=\"Test book title\",\n isbn=\"Test12345\",\n author=\"Test author\",\n price=\"123\",\n published=datetime.datetime.utcnow(),\n publisher=\"Test company\"\n )\n book_input.save()\n\n # verify book is successfully saved and able to be retrieved and their values match\n book_saved = Book.objects(title=book_input.title).first()\n assert book_saved.title == book_input.title\n assert book_saved.isbn == book_input.isbn\n assert book_saved.author == book_input.author\n assert book_saved.price == book_input.price\n assert book_saved.published.strftime(\"%m/%d/%Y, %H:%M:%S\") == book_input.published.strftime(\"%m/%d/%Y, %H:%M:%S\")\n assert book_saved.publisher == book_input.publisher\n\n # verify book can be retrieved by isbn\n book_saved = Book.objects(isbn=book_input.isbn).first()\n assert book_saved.title == book_input.title\n assert book_saved.isbn == book_input.isbn\n assert book_saved.author == book_input.author\n assert book_saved.price == book_input.price\n assert book_saved.published.strftime(\"%m/%d/%Y, %H:%M:%S\") == book_input.published.strftime(\"%m/%d/%Y, %H:%M:%S\")\n assert book_saved.publisher == book_input.publisher\n\n # verify the book can be ordered by a customer\n sample_customer = Customer.objects.first()\n\n order = Order(\n customer_name=\"{} {}\".format(sample_customer.first_name, sample_customer.last_name),\n books=[book_saved.id],\n shipping_address=sample_customer.address,\n total_price=book_saved.price,\n order_status=\"processing\",\n order_date=datetime.datetime.utcnow()\n ).save()\n sample_customer.orders.append(order.id)\n sample_customer.save()\n\n # Look for customer by order. 
Customer ID should match\n    order_customer = Customer.objects(orders=order.id).first()\n    assert order_customer.id == sample_customer.id\n\n\ndef test_invalid_data_entry(app):\n    invalid_book = Book(\n        price="123",\n        published=datetime.datetime.utcnow(),\n        publisher="Test company"\n    )\n    with pytest.raises(ValidationError):\n        invalid_book.save()\n    invalid_customer = Customer (\n        phone="123",\n        customer_since=datetime.datetime.utcnow(),\n        orders=[]\n    )\n    with pytest.raises(ValidationError):\n        invalid_customer.save()\n","repo_name":"pacedillo/CMPE272AirBnB","sub_path":"tests/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26097977895","text":"import socket\n\nclass Client:\n    def __init__(self, settings: dict):\n\n        self.host = settings["host"]\n        self.port = settings["port"]\n        self.connect_type = settings["connect_type"]\n\n    def send(self, event: str, sender: str):\n        if self.connect_type == "UDP":\n\n            # Encodes the data\n            data = f"{sender};{event}"\n\n            # Sends the package via UDP\n            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n            s.sendto(bytes(data, "ascii"), (self.host, self.port))\n\n        elif self.connect_type == "TCP":\n\n            # Encodes the data\n            data = f"{sender};{event}"\n\n            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n                s.connect((self.host, self.port))\n                s.sendall(bytes(data, "ascii"))\n\n\n\n\n\n","repo_name":"Kalbra/mc-signal","sub_path":"src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12603657391","text":"import numpy as np\nfrom itertools import product\nfrom operator import itemgetter\nimport image\nfrom tqdm import trange\n\ndef change_axis_system(M):\n    N = np.array([M[1], M[0], M[2]]).T \n    return np.array([N[1], N[0], N[2]]).T\n\ndef bilinear_interpolation(img, x, y):\n    l, m = int(x), int(y)\n    a, b = x-l, y-m\n\n    try:\n        return (1-b)*(1-a)*img[l,m] + (1-b)*a*img[l+1,m] +\\\n            b*(1-a)*img[l,m+1] + b*a*img[l+1,m+1]\n    except IndexError:\n        return [0, 0, 0] if len(img.shape) == 3 else 0\n\ndef apply(img, T):\n    ''' applies a transform matrix T to a given image '''\n    T = change_axis_system(T)\n    if len(img.shape) == 2:\n        h, w = img.shape\n        result = np.zeros_like(img)\n    else:\n        h, w, c = img.shape\n        result = np.zeros_like(img)\n    J = np.array([(x,y,1) for x,y in product(range(h), range(w))]).T\n\n    Tinv = np.linalg.pinv(T)\n    I = np.dot(Tinv, J)\n    I = I[:-1] / I[-1]\n\n    for (yi, xi), (yj, xj) in zip(I.T, J[:-1].T):\n        if ((0 <= round(xi) < w) and (0 <= round(yi) < h)):\n            result[yj, xj] = bilinear_interpolation(img, yi, xi)\n\n    return result\n\n\n\n\ndef translation(points1, points2):\n    ''' finds a transformation matrix for len(points) = 1 '''\n    [(x1, y1)] = points1\n    [(x2, y2)] = points2\n    T = np.eye(3)\n    T[0, 2] = x2 - x1  # tx\n    T[1, 2] = y2 - y1  # ty\n    return T\n\ndef rigid(points1, points2):\n    ''' finds a transformation matrix for len(points) = 2 '''\n    [(x11, y11), (x12, y12)] = points1\n    [(x21, y21), (x22, y22)] = points2\n    # solve for [a, b, tx, ty] with x' = a*x - b*y + tx and y' = b*x + a*y + ty\n    A = np.array([\n        [x11, -y11, 1, 0],\n        [x12, -y12, 1, 0],\n        [y11, x11, 0, 1],\n        [y12, x12, 0, 1]\n    ])\n\n    B = np.array([[x21, x22, y21, y22]]).T\n    X = np.linalg.solve(A, B)\n    x = X.reshape(-1)\n\n    T = np.eye(3)\n    T[0] = np.array([x[0], -x[1], x[2]])\n    T[1] = np.array([x[1], x[0], x[3]])\n    return T\n\ndef affine(points1, points2):\n    ''' finds a transformation matrix for len(points) = 3 '''\n    [(x11, y11), (x12, y12), (x13, y13)] = points1\n    [(x21, y21), (x22, y22), (x23, y23)] = points2\n\n    A = np.array([\n        [x11, y11, 1, 0, 0, 0],\n        [x12, y12, 1, 0, 0, 0],\n        [x13, y13, 1, 0, 0, 0],\n        [0, 0, 0, x11, y11, 1],\n        [0, 0, 0, x12, y12, 1],\n        [0, 0, 0, x13, y13, 1]\n    ])\n    B = np.array([[x21, x22, x23, y21, y22, y23]]).T\n    X = np.linalg.solve(A, B)\n    x = X.reshape(-1)\n\n    T = np.eye(3)\n    T[0] = np.array([x[0], x[1], x[2]])\n    T[1] = np.array([x[3], x[4], x[5]])\n\n    return T\n\ndef projective(points1, points2):\n    ''' finds a transformation matrix for len(points) = 4 '''\n    [(x11, y11), (x12, y12), (x13, y13), (x14, y14)] = points1\n    [(x21, y21), (x22, y22), (x23, y23), (x24, y24)] = points2\n\n    A = np.array([\n        [x11, y11, 1, 0, 0, 0, -x11*x21, -y11*x21],\n        [x12, y12, 1, 0, 0, 0, -x12*x22, -y12*x22],\n        [x13, y13, 1, 0, 0, 0, -x13*x23, -y13*x23],\n        [x14, y14, 1, 0, 0, 0, -x14*x24, -y14*x24],\n        [0, 0, 0, x11, y11, 1, -x11*y21, -y11*y21],\n        [0, 0, 0, x12, y12, 1, -x12*y22, -y12*y22],\n        [0, 0, 0, x13, y13, 1, -x13*y23, -y13*y23],\n        [0, 0, 0, x14, y14, 1, -x14*y24, -y14*y24],\n\n    ])\n    B = np.array([[x21, x22, x23, x24, y21, y22, y23, y24]]).T\n    X = np.linalg.solve(A, B)\n    x = X.reshape(-1)\n\n    T = np.eye(3)\n    T[0] = np.array([x[0], x[1], x[2]])\n    T[1] = np.array([x[3], x[4], x[5]])\n    T[2] = np.array([x[6], x[7], 1])\n\n    return T\n\n\ndef compute_agreement(T, points1, points2, threshold=10):\n    \n    points1 = np.array([[point[0], point[1], 1] for point in points1]).T\n    points2 = np.array([[point[0], point[1], 1] for point in points2]).T\n    \n    T = change_axis_system(T)\n    transformed1 = np.dot(T, points1)\n    transformed1 = transformed1[:-1] / transformed1[-1]\n\n    #print (points2[:-1].shape)\n    #print (transformed1.shape)\n    distance = np.array([np.linalg.norm(p1-p2) for p1, p2 in zip(transformed1.T, points2[:-1].T) ])\n    return sum(distance < threshold)\n\ndef ransac(points1, features1, points2, features2, round_count=1000):\n    ''' computes the transformation matrix from img1 to img2 using ransac algorithm '''\n    matches = image.match(features1, features2)\n    match_count = len(matches)\n    print (match_count)\n    hypotheses = []\n    points1 = np.array([[p.pt[0], p.pt[1] ] for p in points1])\n    points2 = np.array([[p.pt[0], p.pt[1] ] for p in points2])\n    for r in trange(round_count):\n        try:\n            indices = np.random.choice(range(match_count), 4, replace=False)\n            print (indices)\n            sample_matches = [matches[i] for i in indices]\n            sample_points1, sample_points2 = zip(*[[points1[first], points2[second]] for first, second, _ in sample_matches])\n\n            T = projective(sample_points1, sample_points2)\n            votes = compute_agreement(T, points1, points2)\n        \n        except np.linalg.LinAlgError:\n            pass\n\n        else:\n            hypotheses.append((T, sample_points1, sample_points2, votes))\n    return max(hypotheses, key=itemgetter(3))","repo_name":"saurabhmathur96/image-stitcher","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72051566881","text":"from xml.dom.minidom import parse\nimport xml.dom.minidom\n\nDOMtree = xml.dom.minidom.parse(\"files_xml/student.xml\")\ncollection = DOMtree.documentElement\nif collection.hasAttribute(\"shelf\"):\n    print(\"Root element: %s\" % collection.getAttribute(\"shelf\"))\n\nstudents = collection.getElementsByTagName(\"student\")\n\nfor student in students:\n    print(\"{:*^17}\".format(\"Student\"))\n    if 
student.hasAttribute(\"id\"):\n        print(\"ID: %s\" % student.getAttribute(\"id\"))\n    name = student.getElementsByTagName('name')[0]\n    print(\"Name: %s\" % name.childNodes[0].data)\n    date = student.getElementsByTagName('date')[0]\n    print(\"Date of birth: %s\" % date.childNodes[0].data)","repo_name":"nguyenanh2222/PythonCSC","sub_path":"pakage_Chuong_Json/read_xml_dom.py","file_name":"read_xml_dom.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8282532979","text":"import pandas as pd\nimport os\n\npath = 'Diagramm/'  # CO2 data; for electricity prices, please change to 'Daten/SP/'\nfiles = os.listdir(path)\n\n# Enter the start date:\nStart_day = 15\nStart_month = 4\nStart_year = 2019\n\n# Enter the end date:\n# The end date is inclusive\nEnd_day = 20\nEnd_month = 4\nEnd_year = 2019\n\nall_values = []\n\nfor year in range(Start_year, End_year + 1, 1):\n    for month in range(Start_month, End_month + 1, 1):\n        for day in range(Start_day, End_day + 1, 1):\n            if len(str(month)) == 1:\n                month_string = '0' + str(month)\n            else:\n                month_string = str(month)\n            if len(str(day)) == 1:\n                day_string = '0' + str(day)\n            else:\n                day_string = str(day)\n            Date = 'SP_' + day_string + '_' + month_string + '_' + str(year)  # for electricity prices, please change 'CO2_' to 'SP_'\n            for filename in files:\n                if filename.startswith(Date):\n                    df = pd.read_csv(path + filename)\n                    all_values += df.values.tolist()\nmaximum = max(all_values)\nminimum = min(all_values)\n\nprint('Maximum:' + str(maximum))\nprint('Minimum:' + str(minimum))\n\n","repo_name":"OlaPronobis/FleetChargingManagement","sub_path":"prognose/Min_Max.py","file_name":"Min_Max.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"42040195640","text":"import os\nimport sys\nimport subprocess\n\nfrom PyQt5.QtCore import QThread, pyqtSignal, QObject\n\nfrom Settings import Settings\n\nclass Worker(QObject):\n\n    finished = pyqtSignal(int)\n    output = pyqtSignal(str)\n    \n    def __init__(self, cmds):\n        super(Worker, self).__init__()\n        self.cmds = cmds\n\n    def doWork(self):\n        try:\n            for cmd in self.cmds:\n                p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n                for line in iter(p.stdout.readline, b''):\n                    self.output.emit((str(line.strip(), 'utf-8')))\n                p.communicate()\n                retcode = p.returncode\n                if (retcode != 0): break\n        except Exception as e:\n            print(e)\n            retcode = 1\n\n        # Command execution done, now inform the main thread with the output\n        self.finished.emit(retcode)\n\nclass DeviceConnection(QObject):\n\n    def __init__(self, parent, configuration, workarea):\n        super(DeviceConnection, self).__init__(parent)\n        \n        self.deviceName = Settings.getElement(configuration, 'device_name')\n        self.repositoryName = Settings.getElement(configuration, 'repository_name')\n        self.repositoryUrl = Settings.getElement(configuration, 'repository_url')\n        self.buildCmd = Settings.getElement(configuration, 'build_cmd')\n        self.programCmd = Settings.getElement(configuration, 'program_cmd')\n        self.reposWorkDir = Settings.getElement(configuration, 'repository_work_dir')\n        self.masterBranch = Settings.getElement(configuration, 'master_branch')\n        self.buildParams = Settings.getListElement(configuration, 'build_params')\n\n        self.workareaDirectory = workarea\n\n        self.repositorySuffix = \"work\"\n        \n        self.currentCommitHash = None\n\n        self.workerThread = None\n        self.workerObject = None\n\n    
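    # A minimal wiring sketch for the Worker class above (an editor's illustration;
    # the thread, worker, and command list here are hypothetical, mirroring what
    # runCommands() further below does). Worker streams each line of subprocess
    # output through the `output` signal so the GUI thread never blocks on I/O:
    #
    #     thread = QThread()
    #     worker = Worker(["echo hello"])
    #     worker.moveToThread(thread)
    #     thread.started.connect(worker.doWork)
    #     worker.output.connect(print)           # one stdout line per emission
    #     worker.finished.connect(thread.quit)   # the int retcode arrives here
    #     thread.start()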
def name(self):\n return self.deviceName\n\n def getBuildParameters(self):\n return self.buildParams\n\n def str(self):\n textElements = [\"Repository : %s\" % self.repositoryName,\n \"Location : %s\" % self.getRepositoryPath(),\n \"Build : %s\" % self.buildCmd,\n \"Program : %s\" % self.programCmd]\n text = '\\n'.join(textElements)\n return text\n\n def send_os_command(self, command):\n p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n stdout_data, stderr_data = p.communicate()\n retcode = p.returncode\n if (retcode == 0 or stdout_data):\n raw_data = str(stdout_data, 'utf-8')\n raw_data_list = raw_data.split('\\n')\n return (retcode, raw_data_list)\n else:\n if stderr_data:\n return (retcode, [str(stderr_data, 'utf-8')])\n else:\n return (retcode, [\"None\"])\n\n def getRepositoryPath(self):\n reposFullname = self.getRepositoryFullname()\n reposPath = os.path.join(self.workareaDirectory, reposFullname)\n return reposPath\n\n def getRepositoryFullname(self):\n reposFullname = \"%s.%s\" % (self.repositoryName, self.repositorySuffix)\n return reposFullname\n\n def removeRepository(self, finishedSlot, appendLineSlot):\n reposFullname = self.getRepositoryFullname()\n\n cmds = [\"cd %s && rm -rf %s\" % (self.workareaDirectory, reposFullname)]\n\n self.runCommands(cmds, finishedSlot, appendLineSlot)\n \n def updateRepository(self, finishedSlot, appendLineSlot):\n # If repository does not exist, create one, if it does, just get latest (remember to update submodule)\n reposPath = self.getRepositoryPath()\n reposFullname = self.getRepositoryFullname()\n\n if (os.path.exists(reposPath)):\n cmds = [\"cd %s && git checkout %s && git pull && git submodule update && git checkout \" % (reposPath, self.masterBranch)]\n else:\n cmds = [\"cd %s && git clone --recurse-submodules %s %s\" % (self.workareaDirectory, self.repositoryUrl, reposFullname)]\n\n self.runCommands(cmds, finishedSlot, appendLineSlot)\n\n def getActiveBranch(self):\n reposPath = self.getRepositoryPath()\n \n if (os.path.exists(reposPath)): \n cmd = \"cd %s && git branch -a | grep \\* | cut -d ' ' -f2\" % reposPath\n retcode, output = self.send_os_command(cmd)\n if (retcode == 0):\n return output\n\n def getCurrentLabels(self):\n reposPath = self.getRepositoryPath()\n \n if (os.path.exists(reposPath)): \n cmd = \"cd %s && git tag\" % reposPath\n retcode, output = self.send_os_command(cmd)\n if (retcode == 0):\n labels = []\n labels.append('master')\n for line in output:\n if (len(line) == 0): continue\n labels.append(line.strip())\n return labels\n\n return []\n\n def getCurrentBranches(self):\n reposPath = self.getRepositoryPath()\n \n if (os.path.exists(reposPath)): \n cmd = \"cd %s && git branch -a\" % reposPath\n retcode, output = self.send_os_command(cmd)\n if (retcode == 0):\n branches = []\n for line in output:\n line = line[2:].strip()\n if (len(line) == 0): continue\n branches.append(line)\n return branches\n\n return []\n \n def buildImage(self, checkout, buildParamsValues, finishedSlot, appendLineSlot): \n reposWorkPath = os.path.join(self.getRepositoryPath(), self.reposWorkDir)\n\n if (buildParamsValues != None):\n buildParamsList = []\n\n for idx, value in enumerate(buildParamsValues):\n buildParam = self.buildParams[idx]\n paramCommand = buildParam['command']\n\n if (value == ''): continue\n \n if (paramCommand != ''):\n buildParamsList.append(\"%s %s\" % (paramCommand, value))\n else:\n buildParamsList.append(\"%s\" % value)\n \n buildParamsStr = ' '.join(buildParamsList)\n\n 
else:\n buildParamsStr = ''\n\n cmds = []\n cmds.append(\"cd %s && git checkout %s && git submodule update\" % (reposWorkPath, checkout))\n cmds.append(\"cd %s && %s %s\" % (reposWorkPath, self.buildCmd, buildParamsStr))\n\n self.runCommands(cmds, finishedSlot, appendLineSlot)\n \n def programImage(self, finishedSlot, appendLineSlot):\n reposWorkPath = os.path.join(self.getRepositoryPath(), self.reposWorkDir)\n\n cmds = [\"cd %s && %s\" % (reposWorkPath, self.programCmd)]\n self.runCommands(cmds, finishedSlot, appendLineSlot)\n\n def runCommands(self, cmds, finishedSlot = None, outputSlot = None):\n self.workerThread = QThread()\n self.workerObject = Worker(cmds)\n self.workerObject.moveToThread(self.workerThread)\n self.workerThread.started.connect(self.workerObject.doWork)\n if (finishedSlot): self.workerObject.finished.connect(finishedSlot)\n if (outputSlot): self.workerObject.output.connect(outputSlot)\n self.workerObject.finished.connect(self.commandDone)\n self.workerThread.start()\n\n def commandDone(self):\n self.workerThread.quit()\n self.workerThread.wait()\n self.workerThread = None\n self.workerObject = None\n \n def cancelCommand(self):\n # TODO find a way to stop the thread\n try:\n self.workerThread.quit()\n except Exception:\n pass\n","repo_name":"idokasher/programmer","sub_path":"DeviceConnection.py","file_name":"DeviceConnection.py","file_ext":"py","file_size_in_byte":7819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"2449921645","text":"'''Build a list of my expected profits, squared.\nWe use a loop, assuming the number of solvable exercises is at most 100.'''\nimport functools\nmy_money_list = []\nfor i in range(100):\n my_money_list.append(i**2)\nprint(my_money_list)\n\n'''How do we write the code above in a single line? There is something called a \"list comprehension\"'''\n\nmy_money_list = [x**2 for x in range(100)]\nprint(my_money_list)\n\nsentence = \"אביגל בר חיים המהממת\"\nwords = sentence.split()\nsecret = [word[0] for word in words if word != \"חיים\"]\nprint(secret)\n\nclass Question:\n\n def __init__(self):\n self.a = 0\n\n def func(self):\n print(self)\n\ndef main():\n A = Question()\n A.func()\n\nmain()\n\ndef intersections(list_1, list_2):\n return list(set(list_1).intersection(list_2))\n #return set([x.intersection(y) for x in list_1 for y in list_2])\n\ndef intersection_1(list_1, list_2):\n return set([i for i in list_1 if i in [j for j in list_2]])\n\nprint(intersections([1, 2, 3, 4], [8, 3, 9]))\nprint(intersections([5, 5, 6, 6, 7, 7], [1, 5, 9, 5, 6]))\n\nprint(intersection_1([1, 2, 3, 4], [8, 3, 9]))\nprint(intersection_1([5, 5, 6, 6, 7, 7], [1, 5, 9, 5, 6]))\n\ndef is_prime(number):\n return set([True if number%i == 0 else False for i in range(2, number)])=={False}\n\nprint(is_prime(42))\nprint(is_prime(43))\n\ndef is_funny(string):\n for char in string:\n if char != 'h' and char != 'a':\n return False\n return True\n\ndef is_funny_2(string_1):\n return set([False if char_1 != 'h' and char_1 != 'a' else True for char_1 in string_1])=={True}\n\nprint(is_funny(\"hahahahahah\"))\n\nprint(is_funny_2('hahahah'))","repo_name":"ERAN1202/python_digital_net4u","sub_path":"Python_Advance/list_comprehension.py","file_name":"list_comprehension.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"he","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"71052255841","text":"import unittest\n\nfrom bust.components import nextbus_grabber\nfrom 
bust.components.nextbus_client import NextBusClient\nfrom bust.utils import xml_extractor\n\n\nclass XMLAttributesValuesExtractorTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n nextbus_client = NextBusClient()\n cls.test_xml = nextbus_client._query_route_stops('actransit', '51B')\n\n def test_extract_values(self):\n tag_attributes = {'route' : ['tag', 'title', 'color']}\n extractor = xml_extractor.XMLAttributesValuesExtractor(\n self.test_xml, tag_attributes)\n extracted_values = extractor.extract_values()\n self.assertEqual(\n {'tag' : ['51B'], 'title' : ['51B'], 'color' : ['49b869']},\n extracted_values,\n )\n\n def test_move_parsing_root(self):\n tag_attributes = {'stop' : ['tag', 'title', 'stopId']}\n extractor = xml_extractor.XMLAttributesValuesExtractor(\n self.test_xml, tag_attributes)\n move_parsing_root_down_one_level = [0]\n extractor.set_parsing_root(move_parsing_root_down_one_level)\n extracted_values = extractor.extract_values()\n self.assertIn('0306650', extracted_values['tag'])\n self.assertIn('University Av & Shattuck Av', extracted_values['title'])\n self.assertIn('50444', extracted_values['stopId'])\n\n def test_attribute_filters(self):\n tag_attributes = {'stop' : ['tag', 'title', 'stopId']}\n attribute_filter = {'tag' : '0306650', 'stopId' : '50444'}\n extractor = xml_extractor.XMLAttributesValuesExtractor(\n self.test_xml, tag_attributes)\n move_parsing_root_down_one_level = [0]\n extractor.set_parsing_root(move_parsing_root_down_one_level)\n extractor.set_attributes_filter(attribute_filter)\n extracted_values = extractor.extract_values()\n self.assertIn('University Av & Shattuck Av', extracted_values['title'])\n self.assertEqual(1, len(extracted_values['tag']))\n\nclass NextBusXMLExtractorTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n nextbus_client = NextBusClient()\n cls.test_xml = nextbus_client._query_route_stops('actransit', '51B')\n\n def test_get_stop_direction_data(self):\n stop_direction_data = \\\n xml_extractor.NextBusDirectionsExtractor.get_stop_direction_data(self.test_xml)\n self.assertEqual('North', stop_direction_data['0306650']['direction_name'])\n self.assertEqual('To Berkeley Marina', stop_direction_data['0306650']['direction'])\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ATRAN2/bust","sub_path":"backend/tests/utils/test_xml_extractor.py","file_name":"test_xml_extractor.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19468967853","text":"\n\nimport os\nimport sys\nimport subprocess\n\n\n \n\nprint (\"##################################################################################################################\")\nprint (\"# Welcome to DiscoAttack #\")\nprint (\"This tool is a prototype for Discovering, Testing the vulnerabilities, and analysing of the packets.\\nThe Intention of this tool is only and only educational.\")\nprint (\"Thank you for using this tool.\")\nprint (\"##################################################################################################################\\n\")\n\n# Main services.\nprint (\"##################################################################################################################\")\nprint (\"Services: discoverNetwork, listenNetwork, attackNetwork, exit\")\nprint (\"##################################################################################################################\\n\")\n\n\n# User input for selecting the main 
service such as discoverNetwork, attackNetwork, or exit.\nnetworkInput = input(\"Please specify a service to start: \")\n\n# This function controls the user input regarding network discovery/ discoverNetwork.\n# It contains all nmap scanning functions.\ndef discoverNetwork():\n # Outputs the selected service.\n print (networkInput, \"selected. \\n\")\n print (\"Searching for Nmap...\")\n\n # Try block check if Nmap is installed on the system or not, if installed it shows the version of Nmap.\n # Except block warns for the absence of Nmap, and offers the user to install it. If the user decides not to install Nmap, the programme gets terminated.\n try:\n # pipe output to /dev/null for silence\n null = open(\"/dev/null\", \"w\")\n subprocess.Popen(\"nmap\", stdout=null, stderr=null)\n null.close()\n os.system(\"nmap --version\")\n print (\"Nmap found successfully!\\n\")\n except OSError:\n installNmap = input(\"Nmap not found, would you like to install it (y/n): \")\n if installNmap in [\"y\", \"Y\", \"yes\", \"Yes\", \"YES\"]:\n os.system(\"sudo apt install nmap\")\n print (\"Nmap successfully installed!\\n\")\n else:\n sys.exit(\"Nmap was not installed, DiscoAttack terminated!\") \n\n # Sub-services of discoverNetwork.\n print (\"##################################################################################################################\")\n print (\"Scan type: pingScan, osScan, advancedScan, portScan, udpScan, synackScan, exit\")\n print (\"##################################################################################################################\\n\")\n\n # The user defines the type of scan they want to perform by typing form the list of discoverNetwork sub-services. \n userInput = input(\"Please specify the type of scanning: \")\n\n \n def pingScan():\n # This function performs a ping scan, sends the output to outputs repo in .csv format.\n print (userInput, \"selected.\\n\")\n subprocess.call('bashscript/pingScan.sh')\n\n\n def osScan():\n # This function performs an OS scan, the output is send to the outputs repo.\n print (userInput, \"selected.\\n\")\n subprocess.call('bashscript/osScan.sh')\n\n \n def advancedScan():\n # This function preforms an Advanced scan, the output is send to the outputs repo.\n print (userInput, \"selected.\\n\")\n subprocess.call('bashscript/advancedScan.sh') \n\n\n def portScan():\n # This function performs a Port scan, the output is send to the outputs repo.\n print (userInput, \"selected.\\n\")\n subprocess.call('bashscript/portScan.sh') \n\n\n def udpScan():\n # This function performs a UDP scan, the output is send to the outputs repo.\n print (userInput, \"selected.\\n\")\n subprocess.call('bashscript/udpScan.sh')\n\n\n def synackScan():\n # This function performs a SYN/ACK scan function, the output is send to the outputs repo.\n print (userInput, \"selected.\\n\")\n subprocess.call('bashscript/synackScan.sh')\n\n\n # Check the input for discoverNetwork sub-services and calls a function if matches with the input.\n if userInput == \"pingScan\":\n pingScan()\n elif userInput == \"osScan\":\n osScan()\n elif userInput == \"advancedScan\":\n advancedScan()\n elif userInput == \"portScan\":\n portScan()\n elif userInput == \"udpScan\":\n udpScan()\n elif userInput == \"synackScan\":\n synackScan()\n elif userInput == \"exit\":\n sys.exit(\"Terminated by user!\")\n else:\n os.system(\"clear\")\n print (\"Invalid scan type! \\n\")\n discoverNetwork()\n\n\n\n# This function is for listening to a specific network. 
It asks for a host IP address and starts capturing packets with tcpdump.\ndef listenNetwork():\n print (networkInput, \"selected. \\n\")\n print (\"Searching for tcpdump...\") \n\n # Try block check if tcpdump, is installed on the system or not, if installed it shows the version of Nmap.\n # Except block warns for the absence of tcpdump, and offers the user to install it. If the user decides not to install tcpdump, the programme gets terminated. \n try:\n # pipe output to /dev/null for silence\n null = open(\"/dev/null\", \"w\")\n subprocess.Popen(\"tcpdump\", stdout=null, stderr=null)\n null.close()\n os.system(\"tcpdump --version\")\n print (\"tcpdump found successfully!\\n\")\n except OSError:\n installTcpDump = input(\"tcpdump not found, would you like to install it (y/n): \")\n if installTcpDump in [\"y\", \"Y\", \"yes\", \"Yes\", \"YES\"]:\n os.system(\"sudo apt install tcpdump\")\n print (\"tcpdump successfully installed!\\n\")\n os.system(\"tcpdump --version\")\n print (\"\\n\")\n else:\n sys.exit(\"tcpdump was not installed, DiscoAttack terminated!\") \n\n\n\n # Sub-services of listen network.\n print (\"##################################################################################################################\")\n print (\"Sub-services: captureTraffic, exit\")\n print (\"##################################################################################################################\\n\")\n\n captureInput = input (\"Please enter an input: \")\n\n def captureTraffic():\n # This function performs a UDP scan, the output is send to the outputs repo.\n print (captureInput, \"selected.\\n\")\n subprocess.call('bashscript/captureNetwork.sh')\n\n\n\n\n if captureInput == \"captureTraffic\":\n captureTraffic()\n elif captureInput == \"exit\":\n sys.exit(\"Terminated by user!\")\n else:\n os.system(\"clear\")\n print (\"Invalid scan type! \\n\")\n listenNetwork()\n\n\n\n# This function attacks to specific network, It asks the user to enter an IP address and performs DDOS/DOS attack.\n# hping3 is used for performing attacks.\ndef attackNetwork():\n print (networkInput, \"selected. \\n\")\n print (\"Searching for hping3...\")\n\n # Try block check if hping3 is installed on the system or not, if installed it shows the version of hping3.\n # Except block warns for the absence of hping3, and offers the user to install it. If the user decides not to install hping3, the programme gets terminated.\n try:\n # pipe output to /dev/null for silence\n null = open(\"/dev/null\", \"w\")\n subprocess.Popen(\"hping3\", stdout=null, stderr=null)\n null.close()\n os.system(\"hping3 --version\")\n print (\"hping3 found successfully!\\n\")\n except OSError:\n installNmap = input(\"hping3 not found, would you like to install it (y/n): \")\n if installNmap in [\"y\", \"Y\", \"yes\", \"Yes\", \"YES\"]:\n os.system(\"sudo apt install hping3\")\n print (\"hping3 successfully installed!\\n\")\n else:\n sys.exit(\"hping3 was not installed, DiscoAttack terminated!\")\n\n\n # Sub-services of attack network.\n print (\"##################################################################################################################\")\n print (\"Sub-services: floodAttack, exit\")\n print (\"##################################################################################################################\\n\") \n\n attackInput = input(\"Please specify a sub-service you want to use: \") \n \n def floodAttack():\n # This function performs a flood attack.\n print (attackInput, \"selected. 
\\n\")\n subprocess.call('bashscript/floodAttack.sh')\n\n\n\n if attackInput == \"floodAttack\":\n floodAttack()\n elif attackInput == \"exit\":\n sys.exit(\"Terminated by user!\")\n else:\n sys.exit(\"Wrong input, DiscoAttack terminated!\")\n \n\n\n# Controls the main services: checks whether the user wants to scan a network, attack it, or exit.\nif networkInput == \"discoverNetwork\":\n discoverNetwork()\nelif networkInput == \"listenNetwork\":\n listenNetwork()\nelif networkInput == \"attackNetwork\":\n attackNetwork()\nelif networkInput == \"exit\":\n sys.exit(\"Terminated by user!\")\nelse:\n sys.exit(\"Wrong input, DiscoAttack terminated!\")\n \n","repo_name":"Dark-Dragon001/DiscoAttack","sub_path":"DiscoAttack.py","file_name":"DiscoAttack.py","file_ext":"py","file_size_in_byte":8554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"45388336421","text":"#!/usr/bin/python\n\nDOCUMENTATION = '''\n---\nmodule: ec2_type\nshort_description: change ec2 instance type\n'''\n\nEXAMPLES = '''\n- name: Change an EC2 instance type\n ec2_type:\n region: 'eu-west-1'\n id: 'i-32587dy938'\n profile: 'production_23'\n type: 'r4.2xlarge'\n register: result\n'''\n\nfrom ansible.module_utils.basic import *\nimport boto.ec2\n\n\ndef cng_inst_type(data):\n id = data['id']\n type = data['type']\n profile = data['profile']\n region = data['region']\n change_instance_type(id,type,region,profile)\n\n result = {\"status\": \"SUCCESS\"}\n return False, True, result\n\n\ndef change_instance_type(id,type='r4.2xlarge',region='eu-west-1',profile='production_84'):\n ec2_conn=boto.ec2.connect_to_region(region,profile_name=profile)\n ec2_conn.modify_instance_attribute(id,'instanceType',type)\n\n\ndef main():\n\n fields = {\n \"region\": {\"required\": True, \"type\": \"str\"},\n \"id\": {\"required\": True, \"type\": \"str\"},\n \"profile\": {\"required\": True, \"type\": 'str'},\n \"type\": {\"default\": 'r4.2xlarge', \"type\": 'str'}\n }\n\n\n module = AnsibleModule(argument_spec=fields)\n is_error, has_changed, result = cng_inst_type(module.params)\n\n if not is_error:\n module.exit_json(changed=has_changed, meta=result)\n else:\n module.fail_json(msg=\"Error changing type\", meta=result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"cohenjo/DB_Ansible","sub_path":"library/ec2_type.py","file_name":"ec2_type.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"37172573335","text":"import flet\nfrom flet import (\n BorderSide,\n ButtonStyle,\n ElevatedButton,\n Page,\n RoundedRectangleBorder,\n colors,\n)\n\n\ndef main(page: Page):\n page.padding = 50\n page.add(\n ElevatedButton(\n \"Styled button 1\",\n style=ButtonStyle(\n color={\n \"hovered\": colors.WHITE,\n \"focused\": colors.BLUE,\n \"\": colors.BLACK,\n },\n bgcolor={\"focused\": colors.PINK_200, \"\": colors.YELLOW},\n padding={\"hovered\": 20},\n overlay_color=colors.TRANSPARENT,\n elevation={\"pressed\": 0, \"\": 1},\n animation_duration=500,\n side={\n \"\": BorderSide(1, colors.BLUE),\n \"hovered\": BorderSide(2, colors.BLUE),\n },\n shape={\n \"hovered\": RoundedRectangleBorder(radius=20),\n \"\": RoundedRectangleBorder(radius=2),\n },\n ),\n )\n 
)\n\n\nflet.app(target=main)\n","repo_name":"flet-dev/examples","sub_path":"python/controls/elevated-buttons/styled-button.py","file_name":"styled-button.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":296,"dataset":"github-code","pt":"54"}
+{"seq_id":"37728849195","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/1/18 14:01\n# @Author : LiJian\n# @Site : \n# @File : ExtractionStayPoint.py\n# @Software: PyCharm\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy.linalg as la\nfrom scipy.spatial.distance import pdist\nfrom scipy.spatial.distance import squareform\nfrom me.Distance import getDistance\nimport csv\n\nWd=200.0\nWt=10*60\n\ndata = np.loadtxt(\"data/user001.csv\", delimiter=\",\")\n# flags\na = [0 for _ in range(len(data))]\nre = []\ni=0\nwhile(i < len(data)):\n n = i\n while(n < len(data)-1 and getDistance(data[i][0], data[i][1], data[n+1][0], data[n+1][1]) < 100.0):\n n = n + 1\n m = i\n while(m > -1 and getDistance(data[i][0], data[i][1], data[m][0], data[m][1]) < 100.0):\n m = m - 1\n dt=data[n][2]-data[m][2]\n if(dt>300):\n re.append([m,n,i,dt])\n i = n+1\nre.sort(key=(lambda x:x[3]),reverse= True)\n\nwith open('data/user001_candidate_points_200_300.csv', 'w', newline='') as csvfile:\n fieldnames = ['startLabel','endLabel','centerLabel', 'timeLength']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n for x in re:\n writer.writerow({'startLabel':x[0],'endLabel':x[1],'centerLabel':x[2], 'timeLength':x[3]})\nprint(re)\n# print(data[1][0],data[1][1],data[2][0],data[2][1])\n# listDat=[]\n# with open('data/user004.csv', 'w', newline='') as csvfile:\n# fieldnames = ['startLabel','endLabel','centerLabel', 'timeLength']\n# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n","repo_name":"lijian45678/map","sub_path":"me/ExtractionStayPoint.py","file_name":"ExtractionStayPoint.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"34842541059","text":"from wsgiref.simple_server import (\n make_server,\n)\n\nfrom not_django.application import (\n Application,\n)\nfrom urls import (\n urls,\n)\n\napplication = Application(urls)\n\nwith make_server('', 8000, application) as httpd:\n print('Server started at 127.0.0.1:8000')\n httpd.serve_forever()\n","repo_name":"Mrfuu04/Not-Django-Framework","sub_path":"runserver.py","file_name":"runserver.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"24838170196","text":"# a = input('variable 1 ')\n# b = input('variable 2 ')\n\n# print('variable 1 =', a)\n# print('variable 2 =', b)\n\n# seconds = int(input('number of seconds = ',))\n# q = seconds // 3600\n# w = (seconds // 60) // 24\n# e = seconds % 60\n# print(f'time = { q } : { w } : { e } ')\n\n# p = int(input('a number up to 10 '))\n# pp = int( p * 10 + p)\n# ppp = int(( p * 100 ) + ( p * 10 ) + p)\n# print (int( p + pp + ppp))\n\n# proceed = int(input('revenue '))\n# costs = int(input('costs '))\n# j = (proceed-costs)/proceed\n# if proceed >= costs:\n# print('surplus, revenue margin - ', \"%.5f\" % j)\n# if proceed >= costs:\n# sotr = int(input('number of employees '))\n# l = proceed / sotr\n# print('profit per employee', \"%.3f\" % l)\n# else:\n# print('loss')\n\nkm = int(input('kilometers '))\npoint = int(input('target '))\n\nwhile True:\n print(\"%.3f\" % km)\n km += ( km * 0.1 )\n if km >= point:\n 
break\n","repo_name":"badyagga/my_first_repo_origin","sub_path":"first_homework.py","file_name":"first_homework.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37968466318","text":"from scrapy import Spider\nfrom scrapy.linkextractors import LinkExtractor\nfrom witms.items import Article\nfrom witms.loaders import ArticleLoader\n\n\nclass WashingtonPostSpider(Spider):\n name = \"wp\"\n portal_name = \"Washington Post\"\n allowed_domains = [\"washingtonpost.com\"]\n start_urls = [\"https://www.washingtonpost.com/\"]\n link_extractor = LinkExtractor(\n deny=[\n \"/people/\",\n \"commerce.washingtonpost.com\",\n \"realestate.washingtonpost.com\",\n \"stats.washingtonpost.com\",\n ]\n )\n\n def parse(self, response):\n loader = ArticleLoader(item=Article(), response=response)\n loader.add_value(\"url\", response.url)\n loader.add_value(\"portal\", WashingtonPostSpider.portal_name)\n loader.add_xpath(\"section\", '//meta[@property=\"article:section\"]/@content')\n loader.add_xpath(\"section\", '//meta[@itemprop=\"articleSection\"]/@content')\n loader.add_css(\"authors\", \"a[class*=author-name] *::text\")\n loader.add_css(\"title\", \"h1 *::text\")\n loader.add_xpath(\"title\", '//meta[@name=\"title\"]/@content')\n loader.add_xpath(\"title\", '//meta[@property=\"og:title\"]/@content')\n loader.add_xpath(\"description\", '//meta[@name=\"description\"]/@content')\n loader.add_xpath(\"description\", '//meta[@property=\"og:description\"]/@content')\n loader.add_css(\"content\", \"div[class=article-body] *::text\")\n loader.add_xpath(\"content\", \"//article//p//text()\")\n loader.add_xpath(\"content\", \"//p//text()\")\n loader.add_xpath(\n \"publish_timestamp\", '//meta[@property=\"article:published_time\"]/@content'\n )\n loader.add_xpath(\n \"publish_timestamp\", '//time[@itemprop=\"datePublished\"]/@datetime'\n )\n loader.add_xpath(\n \"publish_timestamp\", \"//script//text()\", re=r'\"datePublished\":\\s*\"(.*?)\"'\n )\n loader.add_xpath(\n \"update_timestamp\", '//meta[@property=\"article:modified_time\"]/@content'\n )\n loader.add_xpath(\n \"update_timestamp\", '//time[@itemprop=\"dateModified\"]/@datetime'\n )\n loader.add_xpath(\n \"update_timestamp\", \"//script//text()\", re=r'\"dateModified\":\\s*\"(.*?)\"'\n )\n yield loader.load_item()\n\n for link in self.link_extractor.extract_links(response):\n yield response.follow(link.url, callback=self.parse)\n","repo_name":"mwesthelle/what_is_the_media_saying","sub_path":"witms/spiders/wp.py","file_name":"wp.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27429054391","text":"from django.template.loader import render_to_string\nfrom django.core.mail.message import EmailMultiAlternatives\nfrom django.conf import settings\n\n\ndef send_email(template,subject,receiver_email,**kwargs):\n email_template = render_to_string(template,kwargs) \n email_content = EmailMultiAlternatives(\n subject, \n None,\n settings.EMAIL_HOST_USER, \n [receiver_email],\n )\n email_content.attach_alternative(email_template, 'text/html')\n email_content.send()\n return True","repo_name":"Pradip369/django-email-otp-authentication","sub_path":"authentication/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} 
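# Hedged usage sketch for the send_email helper in the record above (added for
# illustration, not part of the dataset; the template path, subject, address,
# and context key below are hypothetical examples):
#
#   from authentication.email import send_email
#
#   send_email(
#       'emails/otp.html',          # template rendered via render_to_string
#       'Your one-time passcode',   # subject line
#       'user@example.com',         # receiver_email
#       otp='123456',               # extra kwargs become the template context
#   )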
+{"seq_id":"71040795683","text":"import requests\r\n\r\nfrom rest_framework import viewsets\r\nfrom rest_framework import status\r\nfrom rest_framework.views import APIView\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.decorators import action\r\nfrom .models import Companhia, Viagem, Passagem, Reserva, ClasseViagem, Assento, Municipio\r\nfrom .serializers import *\r\nfrom .validators import CompanhiaValidator, ViagemValidator\r\n\r\nclass CompanhiaViewSet(viewsets.ViewSet):\r\n def list(self, request):\r\n query_set = Companhia.objects.all() \r\n serializer = CompanhiaSerializer(query_set, many=True)\r\n return Response(serializer.data)\r\n\r\n def create(self, request):\r\n data = {\r\n 'nome': request.data.get('nome'),\r\n 'endereco': request.data.get('endereco'),\r\n 'contato': request.data.get('contato'),\r\n }\r\n \r\n validator = CompanhiaValidator(**data);\r\n\r\n if not validator.is_valid():\r\n return Response(validator.get_messages(), status=status.HTTP_400_BAD_REQUEST)\r\n\r\n serializer = CompanhiaSerializer(data=data)\r\n\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n\r\n def destroy(self, request):\r\n id_companhia = request.data.get(id)\r\n \r\n \r\nclass ViagemViewSet(viewsets.ViewSet):\r\n \r\n def list(self, request):\r\n query_set = Viagem.objects.all() \r\n serializer = ViagemSerializer(query_set, many=True)\r\n return Response(serializer.data)\r\n\r\n def create(self, request):\r\n data = {\r\n 'horario_saida':request.data.get('saida'), \r\n 'duracao': request.data.get('duracao'), \r\n 'classe': request.data.get('classe'), \r\n 'valor': request.data.get('valor'), \r\n 'origem': request.data.get('origem'), \r\n 'destino': request.data.get('destino'), \r\n 'companhia':request.data.get('companhia'),\r\n 'total_assentos': request.data.get('assentos')\r\n }\r\n\r\n\r\n validator = ViagemValidator(**data)\r\n\r\n if not validator.is_valid():\r\n return Response({'error': validator.get_messages()}, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n try:\r\n Companhia.objects.get(pk=data['companhia'])\r\n except:\r\n return Response({'error': [\"Companhia Inexistente\"]}, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n try:\r\n ClasseViagem.objects.get(pk=data['classe'])\r\n except:\r\n return Response({'error': [\"Classe Inexistente\"]}, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n serializer = ViagemCreateSerializer(data=data)\r\n\r\n\r\n\r\n if serializer.is_valid():\r\n serializer.save()\r\n Assento.objects.bulk_create([Assento(**{'numero_assento': k, 'viagem_id': serializer.data['id']}) for k in range(1, data['total_assentos'] + 1)])\r\n return Response({'message': \"Viagem cadastrada com sucesso.\", 'data': serializer.data}, status=status.HTTP_201_CREATED)\r\n\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n \r\n @action(detail=True, methods=[\"get\"])\r\n def assentos(self, request, pk=None):\r\n try:\r\n Viagem.objects.get(pk=pk)\r\n except:\r\n return Response({'error': [\"Essa viagem não existe\"]}, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n assentos = Assento.objects.filter(viagem_id=pk)\r\n\r\n if not assentos:\r\n return Response({'error': [\"Não existem assentos reservados para essa viagem\"]}, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n serializer = AssentoSerializer(assentos, many=True)\r\n\r\n return Response(serializer.data, status=status.HTTP_200_OK)\r\n\r\n\r\n @action(detail=False, methods=[\"get\"], url_path='(?P[^/.]+)')\r\n def 
detalhar(self, request, pk=None):\r\n viagem = None\r\n try:\r\n viagem = Viagem.objects.get(pk=pk)\r\n except:\r\n return Response({'error': [\"Essa viagem não existe\"]}, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n serializer = ViagemSerializer(viagem)\r\n return Response(serializer.data, status=status.HTTP_200_OK)\r\n\r\nclass PassagemViewSet(viewsets.ViewSet):\r\n def list(self, request):\r\n query_set = Passagem.objects.all() \r\n serializer = PassagemSerializer(query_set, many=True)\r\n return Response(serializer.data)\r\n\r\nclass ReservaViewSet(viewsets.ViewSet):\r\n def list(self, request):\r\n query_set = Reserva.objects.all() \r\n serializer = ReservaSerializer(query_set, many=True)\r\n return Response(serializer.data)\r\n\r\nclass ClasseViagemViewSet(viewsets.ViewSet):\r\n def list(self, request):\r\n query_set = ClasseViagem.objects.all() \r\n serializer = ClasseViagemSerializer(query_set, many=True)\r\n return Response(serializer.data)\r\n\r\n\r\nclass MunicipiosViewSet(viewsets.ViewSet):\r\n filtro = None\r\n def list(self, request):\r\n nome_filtro = request.query_params.get('nome')\r\n\r\n if not nome_filtro:\r\n query_set = Municipio.objects.all()\r\n serializer = MunicipioSerializer(query_set, many=True)\r\n return Response(serializer.data, status.HTTP_200_OK)\r\n \r\n query_set = Municipio.objects.filter(nome__contains=nome_filtro)\r\n serializer = MunicipioSerializer(query_set, many=True)\r\n return Response(serializer.data, status.HTTP_200_OK)","repo_name":"maaure/bushero","sub_path":"backend/api/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5464,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39034701679","text":"import cv2\nimport numpy as np\nimport ctypes\nimport os\nimport time\nimport math\nimport pyvirtualcam\nimport scipy.linalg\n\n# To use please downlaod and install OBS, explantion in the readme\nVirtualCameraOutput = False\n\n# By default this will be 480p\ncap = cv2.VideoCapture(0)\n\n# cap.set(3, 1280)\n# cap.set(4, 720)\nWIDTH = int(cap.get(3))\nHIGHT = int(cap.get(4))\nprint(\"Width :\", WIDTH)\nprint(\"Hight :\", HIGHT)\n\n\ndef run():\n intraFace = loaddll()\n\n _, frame = cap.read()\n\n X = np.zeros(dtype=ctypes.c_float, shape=(2, 49))\n X0 = np.zeros(dtype=ctypes.c_float, shape=(2, 49))\n\n angle = np.zeros(dtype=ctypes.c_float, shape=(1, 3))\n rot = np.zeros(dtype=ctypes.c_float, shape=(3, 3))\n intraFace.init(frame.shape[0], frame.shape[1], frame.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)),\n X.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),\n X0.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),\n angle.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),\n rot.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),\n )\n\n initiation(X0, angle, intraFace)\n\n # Loading eyes and camrea pos\n savedEyes = []\n savedCords = []\n for i in range(5):\n savedEyes.append([cv2.imread(os.path.abspath(\"ReplacementEyes\") + \"\\eye\" + \"0\" + str(i) + \".bmp\"),\n cv2.imread(os.path.abspath(\"ReplacementEyes\") + \"\\eye\" + \"1\" + str(i) + \".bmp\")])\n\n savedCords.append([np.load('ReplacementEyes\\eyeCords' + \"0\" + str(i) + '.npy'),\n np.load('ReplacementEyes\\eyeCords' + \"1\" + str(i) + '.npy')])\n\n currentEyeNumber = 0\n aboveScreen = np.load('ReplacementEyes\\camerapos.npy')\n done = True\n preFrame = frame.copy()\n pre = None\n\n blink = 0\n if VirtualCameraOutput:\n cam = pyvirtualcam.Camera(width=WIDTH, height=HIGHT, fps=30)\n else:\n cam = None\n\n while done:\n\n 
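        # Added note (hedged commentary, not original code): `blink` acts as a
        # cooldown counter -- when detectBlink() fires, the loop below sets it
        # to 8, and stabilizePoints() skips its optical-flow smoothing while it
        # is non-zero, so the landmarks re-seed cleanly once the eyes reopen.
        # The guard inside stabilizePoints() further down reads:
        #
        #   if pre is None or blink > 0:
        #       return points, [points[0:6], points[6:12]]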
val = intraFace.detect(frame.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)))\n success = (val == 1)\n\n if success:\n pre, eyeCords = stabilizePoints(frame, preFrame, pre, X0, blink)\n if (not (detectBlink(eyeCords))):\n if blink > 0:\n blink -= 1\n boudingbox(frame, eyeCords, 1)\n box = boudingbox(frame, eyeCords, 2)\n bigbox = boudingbox(frame, eyeCords, 4)\n\n translationRange = 7\n yawRange = 30\n rollRange = 40\n if not aboveScreen:\n pitchUpper = 40\n pitchLower = -20\n else:\n pitchUpper = 20\n pitchLower = -40\n if -yawRange < angle[0][1] < yawRange and -rollRange < angle[0][0] < rollRange and pitchLower < \\\n angle[0][2] < pitchUpper:\n # Checking the negative edge of the frame\n if (np.array(bigbox) > 0).all():\n savedEye = savedEyes[currentEyeNumber]\n savedCord = savedCords[currentEyeNumber]\n frame2 = colourCorrectAndFrameBlend(frame, box, bigbox, eyeCords, savedEye, savedCord, angle[0],\n aboveScreen)\n\n alpha = 1\n if -yawRange + translationRange > angle[0][1] or angle[0][1] > yawRange - translationRange:\n z = (np.absolute(angle[0][1]) - (yawRange - translationRange)) / (\n yawRange - (yawRange - translationRange))\n alpha = min(alpha, z)\n\n if -rollRange + translationRange > angle[0][0] or angle[0][0] > rollRange - translationRange:\n z = (np.absolute(angle[0][0]) - (rollRange - translationRange)) / (\n rollRange - (rollRange - translationRange))\n alpha = min(alpha, z)\n\n if pitchLower + translationRange > angle[0][2]:\n z = (angle[0][2] - pitchLower) / ((pitchLower + translationRange) - pitchLower)\n alpha = min(alpha, z)\n\n if pitchUpper - translationRange < angle[0][2]:\n z = (angle[0][2] - (pitchUpper - translationRange)) / (\n pitchUpper - (pitchUpper - translationRange))\n alpha = min(alpha, z)\n\n if alpha != 1:\n alpha = np.round(alpha, 3)\n frame2 = frame * (1 - alpha) + frame2 * alpha\n frame2 = np.uint8(frame2)\n else:\n success = False\n else:\n success = False\n else:\n blink = 8\n success = False\n if not success:\n currentEyeNumber = np.random.randint(0, 5)\n pre = None\n frame2 = frame.copy()\n\n if cam != None:\n if success:\n cam.send(frame2)\n else:\n cam.send(frame)\n else:\n cv2.imshow(\"Frame\", frame)\n cv2.imshow(\"Frame2\", frame2)\n\n\n k = cv2.waitKey(1)\n # excape\n if k == 27:\n break\n if k == 108: # l swaping live eye\n currentEyeNumber = (currentEyeNumber + 1) % 5\n if k == 107: # k swapping live eye\n currentEyeNumber = (currentEyeNumber - 1) % 5\n if k == 32: # saving current eye as a new eye\n\n np.save('ReplacementEyes\\eyeYAW' + str(currentEyeNumber) + '.npy', angle[0][1])\n\n for i in range(2):\n eyeCord = box[i]\n sbox = np.int0(eyeCord)\n biCord = bigbox[i]\n Bbox = np.int0(biCord)\n (Btopx, Btopy) = (np.min(Bbox[:, 0]), np.min(Bbox[:, 1]))\n (Bbotx, Bboty) = (np.max(Bbox[:, 0]), np.max(Bbox[:, 1]))\n Beye = (frame[Btopy:Bboty, Btopx:Bbotx]).copy()\n boundInB = sbox - [Btopx, Btopy]\n np.save('ReplacementEyes\\eyeCords' + str(i) + str(currentEyeNumber) + '.npy', boundInB)\n cv2.imwrite(os.path.abspath(\"ReplacementEyes\") + \"\\eye\" + str(i) + str(currentEyeNumber) + \".bmp\",\n img=Beye)\n\n # Re-loading eye\n savedEyes[currentEyeNumber] = [\n cv2.imread(os.path.abspath(\"ReplacementEyes\") + \"\\eye\" + \"0\" + str(currentEyeNumber) + \".bmp\"),\n cv2.imread(os.path.abspath(\"ReplacementEyes\") + \"\\eye\" + \"1\" + str(currentEyeNumber) + \".bmp\")]\n savedCords[currentEyeNumber] = [np.load('ReplacementEyes\\eyeCords' + \"0\" + str(currentEyeNumber) + '.npy'),\n np.load('ReplacementEyes\\eyeCords' + \"1\" + 
str(currentEyeNumber) + '.npy')]\n\n preFrame = frame\n\n done, frame = cap.read()\n\n\ndef initiation(X0, angle, intraFace, ):\n _, frame = cap.read()\n while True:\n _, frame = cap.read()\n cv2.putText(frame, \"Please press a or b to signal if \",\n (int(0.05 * WIDTH), int(0.125 * HIGHT)),\n cv2.FONT_HERSHEY_SIMPLEX, color=(0, 0, 0), fontScale=1)\n cv2.putText(frame, \"the camera is located above or below the screen\",\n (int(0.05 * WIDTH), int(0.175 * HIGHT)),\n cv2.FONT_HERSHEY_SIMPLEX, color=(0, 0, 0), fontScale=1)\n cv2.putText(frame, \"above or below the screen\",\n (int(0.05 * WIDTH), int(0.225 * HIGHT)),\n cv2.FONT_HERSHEY_SIMPLEX, color=(0, 0, 0), fontScale=1)\n cv2.putText(frame, \"Press excape to skip\", (int(0.05 * WIDTH), int(0.4 * HIGHT)),\n cv2.FONT_HERSHEY_SIMPLEX, color=(255, 0, 0), fontScale=1)\n cv2.putText(frame, \"and use last values\", (int(0.05 * WIDTH), int(0.455 * HIGHT)),\n cv2.FONT_HERSHEY_SIMPLEX, color=(255, 0, 0), fontScale=1)\n\n k = cv2.waitKey(5)\n cv2.imshow(\"Frame\", frame)\n\n if k == 27: # excape\n return\n\n if k == ord('a'):\n np.save('ReplacementEyes\\camerapos.npy', True)\n break\n if k == ord('b'):\n np.save('ReplacementEyes\\camerapos.npy', False)\n break\n\n pre = None\n currentEyeNumber = 0\n blink = 0\n\n while True:\n if currentEyeNumber == 5:\n return\n preFrame = frame.copy()\n _, frame = cap.read()\n val = intraFace.detect(frame.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)))\n success = (val == 1)\n frame2 = frame.copy()\n pitchangle = angle[0][2]\n cv2.putText(frame, \"Press space while looking at the camera \",\n (int(0.05 * WIDTH), int(0.125 * HIGHT)),\n cv2.FONT_HERSHEY_SIMPLEX, color=(0, 0, 0), fontScale=1)\n cv2.putText(frame, \"to save a image to replace with\",\n (int(0.05 * WIDTH), int(0.175 * HIGHT)),\n cv2.FONT_HERSHEY_SIMPLEX, color=(0, 0, 0), fontScale=1)\n cv2.putText(frame2, \"Head Pitch \" + str(round(pitchangle)) + \"\",\n (int(0.02 * WIDTH), int(0.2 * HIGHT)),\n cv2.FONT_HERSHEY_SIMPLEX, color=(255, 255, 255), fontScale=1)\n cv2.putText(frame2, \"Current Number \" + str(currentEyeNumber) + \"\", (int(0.02 * WIDTH), int(0.3 * HIGHT)),\n cv2.FONT_HERSHEY_SIMPLEX, color=(255, 0, 0), fontScale=1)\n\n k = cv2.waitKey(5)\n cv2.imshow(\"Frame\", frame2)\n if success:\n pre, eyeCords = stabilizePoints(frame, preFrame, pre, X0, blink)\n if (not (detectBlink(eyeCords))):\n if blink > 0:\n blink -= 1\n box = boudingbox(frame, eyeCords, 2)\n bigbox = boudingbox(frame, eyeCords, 4)\n if k == 27: # excape\n return\n if k == 32: # space\n cv2.imwrite(os.path.abspath(\"savedImages\") + \"\\LiveEye\" + \".jpg\", img=frame)\n\n for i in range(2):\n eyeCord = box[i]\n sbox = np.int0(eyeCord)\n biCord = bigbox[i]\n Bbox = np.int0(biCord)\n (Btopx, Btopy) = (np.min(Bbox[:, 0]), np.min(Bbox[:, 1]))\n (Bbotx, Bboty) = (np.max(Bbox[:, 0]), np.max(Bbox[:, 1]))\n Beye = (frame[Btopy:Bboty, Btopx:Bbotx])\n boundInB = sbox - [Btopx, Btopy]\n np.save('ReplacementEyes\\eyeCords' + str(i) + str(currentEyeNumber) + '.npy', boundInB)\n cv2.imwrite(\n os.path.abspath(\"ReplacementEyes\") + \"\\eye\" + str(i) + str(currentEyeNumber) + \".bmp\",\n img=Beye)\n currentEyeNumber += 1\n\n\ndef detectBlink(eyesCords):\n for i in range(2):\n eyenp = np.array(eyesCords[i])\n v = eyenp[2] - eyenp[4]\n u = eyenp[1] - eyenp[5]\n hight = np.linalg.norm((u + v) / 2)\n width = np.linalg.norm(eyenp[3] - eyenp[0])\n if width / hight > 3.8:\n print(\"blinked: \", str(width / hight))\n return True\n return False\n\n\ndef colourCorrectAndFrameBlend(frame, boxcords, 
bigboxcords, eyeCords, savedEye, savedCords, angle, aboveScreen):\n frame = frame.copy()\n for i in range(2):\n boxcord = boxcords[i]\n box = boxcord.astype(np.int0)\n biCord = bigboxcords[i]\n Bbox = biCord.astype(np.int0)\n eyeCordsint = eyeCords[i].astype(np.int0)\n BsavedEye = savedEye[i]\n BsavedCords = savedCords[i]\n\n (Btopx, Btopy) = (np.min(Bbox[:, 0]), np.min(Bbox[:, 1]))\n (Bbotx, Bboty) = (np.max(Bbox[:, 0]), np.max(Bbox[:, 1]))\n\n\n # laplcisain can somewhat work with no colour correction\n # Say what type of blending to be used:\n # ff- full frame laplcisain,\n # el- just the eye laplcisain,\n # b- normal blur edges\n # n- no blending just overlaying with mask\n blendingType = \"el\"\n\n if blendingType == \"pos\":\n if (Btopx + Bbotx) % 2 == 1:\n Btopx -= 1\n\n if (Btopy + Bboty) % 2 == 1:\n Btopy -= 1\n\n Beye = (frame[Btopy:Bboty, Btopx:Bbotx]).copy()\n boundInB = box - [Btopx, Btopy]\n\n Twarp = time.perf_counter()\n vertAngle = angle[2]\n\n v = eyeCordsint[2] - eyeCordsint[4]\n u = eyeCordsint[1] - eyeCordsint[5]\n vert = (u + v) / 4\n hor = (eyeCordsint[0] - eyeCordsint[3]) / 2\n\n if aboveScreen == False:\n # In this case we are looking up hense eyes are slighly to big.\n # Only working for angles -10 to 30\n # Getting z in the range -1 to 1\n z = ((vertAngle + 10) / 40 - 0.5) * 2\n if i == 0:\n boundInB[0] = boundInB[0] - vert / 5 + vert * z / 6\n boundInB[1] = boundInB[1] - vert / 4 + vert * z / 6\n warp_mat = cv2.getAffineTransform(BsavedCords[:3].astype(np.float32), boundInB[:3].astype(np.float32))\n else:\n boundInB[0] = boundInB[0] - vert / 4 + vert * z / 6\n boundInB[1] = boundInB[1] - vert / 5 + vert * z / 6\n BsavedCords[2] = BsavedCords[3]\n boundInB[2] = boundInB[3]\n warp_mat = cv2.getAffineTransform(BsavedCords[:3].astype(np.float32), boundInB[:3].astype(np.float32))\n else:\n # Only working for angles -30 to 10\n # Getting z in the range -1 to 1 again\n z = ((vertAngle + 30) / 40 - 0.5) * 2\n if i == 0:\n boundInB[0] = boundInB[0] + vert / 5 - vert * z / 6\n boundInB[1] = boundInB[1] + vert / 4 - vert * z / 6\n warp_mat = cv2.getAffineTransform(BsavedCords[:3].astype(np.float32), boundInB[:3].astype(np.float32))\n else:\n boundInB[0] = boundInB[0] + vert / 4 - vert * z / 6\n boundInB[1] = boundInB[1] + vert / 5 - vert * z / 6\n BsavedCords[2] = BsavedCords[3]\n boundInB[2] = boundInB[3]\n warp_mat = cv2.getAffineTransform(BsavedCords[:3].astype(np.float32), boundInB[:3].astype(np.float32))\n\n Bfittedeye = cv2.warpAffine(BsavedEye, warp_mat, (Beye.shape[1], Beye.shape[0]),\n borderMode=cv2.BORDER_REPLICATE)\n\n # Try Perspective transform\n # warp_mat = cv2.getPerspectiveTransform(BsavedCords.astype(np.float32), boundInB.astype(np.float32))\n # Bfittedeye = cv2.warpPerspective(BsavedEye, warp_mat, (Beye.shape[1], Beye.shape[0]),borderMode=cv2.BORDER_REPLICATE)\n\n # Making predicted eyes:\n\n\n if i == 0:\n eyeCordsint[4] -= (0.3 * vert).astype(dtype=np.int)\n eyeCordsint[5] -= (0.3 * vert).astype(dtype=np.int)\n eyeCordsint[0] -= (0.2 * vert).astype(dtype=np.int) + (0.2 * hor).astype(dtype=np.int)\n eyeCordsint[1] -= (0.3 * vert).astype(dtype=np.int)\n eyeCordsint[2] -= (0.3 * vert).astype(dtype=np.int)\n else:\n eyeCordsint[4] -= (0.3 * vert).astype(dtype=np.int)\n eyeCordsint[5] -= (0.3 * vert).astype(dtype=np.int)\n eyeCordsint[3] -= (0.2 * vert).astype(dtype=np.int) - (0.2 * hor).astype(dtype=np.int)\n eyeCordsint[1] -= (0.3 * vert).astype(dtype=np.int)\n eyeCordsint[2] -= (0.3 * vert).astype(dtype=np.int)\n\n BeyeMask = np.zeros_like(Beye[:, :, 
1])\n BeyeMask = cv2.ellipse(BeyeMask, cv2.fitEllipse(eyeCordsint - [Btopx, Btopy]), color=255, thickness=-1)\n\n\n bpredeyes1 = np.empty_like(Beye)\n bpredeyes2 = np.empty_like(Beye)\n\n for c in range(3):\n bpredeyes1[:, :, c] = fitEye(Beye[:, :, c])\n bpredeyes2[:, :, c] = fitEye(Bfittedeye[:, :, c])\n\n div = np.divide(bpredeyes1, bpredeyes2)\n\n Corrected = Bfittedeye * div\n\n bcolourcorrected = np.empty_like(Bfittedeye)\n\n k = int(int(np.linalg.norm(hor) / 3) * 2 + 1)\n blur = cv2.GaussianBlur(BeyeMask, (k, k), 0)\n\n for c in range(3):\n # bcolourcorrected[:, :, c] = Bfittedeye[:, :, c] * (blur / 255) + (1 - blur / 255) * Corrected[:, :, c]\n bcolourcorrected[:, :, c] = Bfittedeye[:, :, c] * (BeyeMask / 255) + (1 - BeyeMask / 255) * Corrected[:, :,\n c]\n # bcolourcorrected = Corrected\n\n bound(bcolourcorrected)\n\n bcolourcorrected = bcolourcorrected.astype(np.uint8)\n\n\n # 1) A tight oval only around eye centre\n # 2) A oval covering eye and eyelid\n # 3) A large ovel covering all to the eyelid\n # 4) A tight retangle coving eye section\n\n maskType = 2\n\n if maskType == 1:\n innermask = BeyeMask\n\n if maskType == 2:\n BeyeMask = cv2.ellipse(BeyeMask, cv2.fitEllipse(eyeCordsint - [Btopx, Btopy]), color=255,\n thickness=int(np.linalg.norm(vert * 2)))\n innermask = BeyeMask\n\n if maskType == 3:\n BeyeMask = cv2.ellipse(BeyeMask, cv2.fitEllipse(eyeCordsint - [Btopx, Btopy]), color=255,\n thickness=int(np.linalg.norm(vert * 5)))\n innermask = BeyeMask\n\n if maskType == 4:\n fmask = np.zeros_like(BeyeMask)\n cv2.drawContours(fmask, [box - [Btopx, Btopy]], 0, 255, -1) # Draw filled contour in mask\n cv2.drawContours(fmask, [box - [Btopx, Btopy]], 0, 0,\n int(np.linalg.norm(hor * 0.5))) # Draw filled contour in mask\n innermask = fmask\n\n Tblending = time.perf_counter()\n\n if blendingType == \"pos\":\n centre = (Btopx + (Bbotx - Btopx) / 2, Btopy + (Bboty - Btopy) / 2)\n centre = (int(centre[0]), int(centre[1]))\n\n output = cv2.seamlessClone(bcolourcorrected, frame, innermask, centre, cv2.NORMAL_CLONE)\n frame = output\n\n\n if blendingType == \"ff\":\n fmask = np.zeros_like(frame[:, :, 0])\n fmask[Btopy:Bboty, Btopx:Bbotx] = innermask\n\n ceye = frame.copy()\n # Only replacing the eyes\n ceye[Btopy:Bboty, Btopx:Bbotx] = bcolourcorrected\n depth = int(np.log2(np.linalg.norm(4 * hor)))\n\n frame = laplacianSameSize(frame, ceye, fmask, depth)\n\n\n if blendingType == \"el\":\n depth = int(np.log2(np.linalg.norm(2 * hor)))\n\n beyeblend = laplacianSameSize(Beye, bcolourcorrected, innermask, depth)\n frame[Btopy:Bboty, Btopx:Bbotx] = beyeblend\n\n\n if blendingType == \"combo\":\n depth = int(np.log2(np.linalg.norm(2 * hor)))\n beyeblend = laplacianSameSize(Beye, bcolourcorrected, innermask, depth)\n\n k = int(int(np.linalg.norm(hor) / 4) * 2 + 1)\n blur = cv2.GaussianBlur(innermask, (k, k), k)\n\n for c in range(3):\n frame[Btopy:Bboty, Btopx:Bbotx, c] = (\n frame[Btopy:Bboty, Btopx:Bbotx, c] * (1 - blur / 255) + beyeblend[:, :, c] * (\n blur / 255)).astype(np.uint8)\n\n\n if blendingType == \"blur\":\n k = int(int(np.linalg.norm(hor) / 3) * 2 + 1)\n blur = cv2.GaussianBlur(innermask, (k, k), 0)\n\n doublesizeshow(\"blur\", blur)\n\n for c in range(3):\n frame[Btopy:Bboty, Btopx:Bbotx, c] = (\n frame[Btopy:Bboty, Btopx:Bbotx, c] * (1 - blur / 255) + bcolourcorrected[:, :, c] * (\n blur / 255)).astype(np.uint8)\n\n if blendingType == \"n\":\n frame[Btopy:Bboty, Btopx:Bbotx][innermask == 255] = bcolourcorrected[innermask == 255]\n\n if blendingType == \"full\":\n frame[Btopy:Bboty, 
Btopx:Bbotx] = bcolourcorrected\n\n\n return frame\n\n\ndef gaussianPyramid(img, num_levels):\n lower = img.copy()\n gp = [np.float32(lower)]\n for i in range(num_levels):\n lower = cv2.pyrDown(lower)\n gp.append(np.float32(lower))\n return gp\n\n\ndef laplacianPyramid(gp):\n levels = len(gp) - 1\n lp = [gp[levels]]\n\n for i in range(levels, 0, -1):\n size = (gp[i - 1].shape[1], gp[i - 1].shape[0])\n GE = cv2.pyrUp(gp[i], dstsize=size)\n L = gp[i - 1] - GE\n lp.append(L)\n return lp\n\n\ndef laplacianSameSize(outerImage, innerImage, mask, levels):\n gpCEye = gaussianPyramid(innerImage, levels)\n lpCEye = laplacianPyramid(gpCEye)\n gpFrame = gaussianPyramid(outerImage, levels)\n lpFrame = laplacianPyramid(gpFrame)\n\n gpMask = gaussianPyramid(mask, levels)\n\n gpMask.reverse()\n LS = []\n # Appling the mask\n for lFrame, lCEye, gMask in zip(lpFrame, lpCEye, gpMask):\n lFrame[gMask == 255] = lCEye[gMask == 255]\n LS.append(lFrame)\n\n # now reconstruct\n ls_ = LS[0]\n for i in range(1, levels + 1):\n size = (LS[i].shape[1], LS[i].shape[0])\n ls_ = cv2.pyrUp(ls_, dstsize=size)\n ls_ = ls_ + LS[i]\n\n # Making it above 0 before becoming uint8\n bound(ls_)\n return ls_.astype(np.uint8)\n\n\n# Until functions\ndef doublesizeshow(name, im):\n big = cv2.resize(im, None, fx=4, fy=4)\n cv2.imshow(name, big)\n\n\ndef bound(im):\n im[im < 0] = 0\n im[im > 255] = 255\n\n\ndef wait():\n if cv2.waitKey(100000) == 27:\n quit()\n\n\ndef fitEye(eye, mask=None):\n X = np.arange(eye.shape[0])\n Y = np.arange(eye.shape[1])\n\n Z = eye.ravel()\n OnesX = np.ones(eye.shape[0])\n OnesY = np.ones(eye.shape[1])\n XY = np.outer(X, Y).ravel()\n Y2 = Y ** 2\n X2 = X ** 2\n\n A = np.array([np.ones_like(XY), np.outer(OnesX, Y).ravel(), np.outer(OnesX, Y2).ravel(),\n np.outer(OnesX, Y2 * Y).ravel(), np.outer(X, OnesY).ravel(), XY,\n np.outer(X, Y2).ravel(), np.outer(X2, OnesY).ravel(), np.outer(X2, Y).ravel(),\n np.outer(X2 * X, OnesY).ravel()]).T\n\n x, _, _, _ = scipy.linalg.lstsq(A, Z, lapack_driver='gelsy', overwrite_b=True, check_finite=False)\n\n minSolution = A * x\n neweye = minSolution.sum(axis=1)\n\n\n neweye[neweye < 8] = 8\n neweye[neweye > 255] = 255\n\n neweye = neweye.reshape((X.shape[0], Y.shape[0]))\n return neweye\n\n\ndef boudingbox(frame, eyeCords, scale):\n eyes = []\n for i in range(2):\n box = np.zeros(shape=(4, 2))\n eyenp = np.array(eyeCords[i])\n v = eyenp[2] - eyenp[4]\n u = eyenp[1] - eyenp[5]\n vert = (u + v) / 4\n vert = vert * scale\n hor = (eyenp[3] - eyenp[0]) / 2\n hord = (hor * scale - hor) / 2\n box[0] = eyenp[0] + vert - hord\n box[3] = eyenp[0] - vert - hord\n box[1] = eyenp[3] + vert + hord\n box[2] = eyenp[3] - vert + hord\n\n intBox = np.int0(box)\n draw = False\n if draw:\n if scale == 1:\n cv2.drawContours(frame, [intBox], 0, (0, 0, 0), 2)\n else:\n cv2.drawContours(frame, [intBox], 0, (0, 0, 255), 2)\n eyes.append(box)\n return eyes\n\n\ndef stabilizePoints(frame, preFrame, pre, points, blink, ):\n points = points.reshape((49 * 2), order='F')\n points = points.reshape((49, 2), order='A')\n\n draw = False\n if draw:\n i = 0\n for point in points:\n x = point[0]\n y = point[1]\n cv2.circle(frame, (int(float(x)), int(float(y))), 2, (255, 255, 0), -1)\n i += 1\n\n points = points[19:31]\n hor = (np.linalg.norm(points[0] - points[3] + points[6] - points[9])) / 4\n s = int(hor * 2.5)\n maxLevel = int(hor / 3)\n\n lk_params = dict(winSize=(s, s), maxLevel=maxLevel,\n criteria=(cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 12, 0.03))\n\n # If not detected properly last time do not 
stablise\n if pre is None or blink > 0:\n return points, [points[0:6], points[6:12]]\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n graypre = cv2.cvtColor(preFrame, cv2.COLOR_BGR2GRAY)\n\n OFpoints, status, err = cv2.calcOpticalFlowPyrLK(graypre, gray, pre, None, **lk_params)\n\n for i in range(0, len(points)):\n d = np.linalg.norm(points[i] - pre[i] + 5)\n alpha = math.exp(-d * d / 100)\n OFpoints[i] = (1 - alpha) * points[i] + alpha * OFpoints[i]\n\n dif = pre - OFpoints\n dif = dif.mean(axis=0)\n if abs(dif[0]) < 0.4 and abs(dif[1]) < 0.4:\n stablePoints = pre * 7 / 8 + OFpoints / 8\n elif abs(dif[0]) < 1.5 and abs(dif[1]) < 1.5:\n stablePoints = pre / 4 + OFpoints * 3 / 4\n else:\n stablePoints = OFpoints\n\n return stablePoints, [stablePoints[0:6], stablePoints[6:12]]\n\n\ndef drawpoints(frame, points, val):\n eyeCords = [[], []]\n noseCords = []\n\n if val != 1:\n cv2.putText(frame, \"Not Found\", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, color=(255, 0, 0), fontScale=1)\n else:\n i = 0\n points = points.reshape((49 * 2), order='F')\n points = points.reshape((49, 2), order='A')\n for point in points:\n i += 1\n x = point[0]\n y = point[1]\n if 20 <= i < 32:\n cv2.circle(frame, (int(float(x)), int(float(y))), 2, (255, 255, 0), -1)\n if i < 26:\n eyeCords[0].append((x, y))\n else:\n eyeCords[1].append((x, y))\n if 11 <= i < 15:\n noseCords.append((x, y))\n return eyeCords, noseCords\n\n\ndef loaddll():\n ctypes.cdll.LoadLibrary(os.path.abspath('IntraFaceResources\\\\opencv_core246.dll'))\n ctypes.cdll.LoadLibrary(os.path.abspath('IntraFaceResources\\\\opencv_ffmpeg246_64.dll'))\n ctypes.cdll.LoadLibrary(os.path.abspath('IntraFaceResources\\\\opencv_flann246.dll'))\n ctypes.cdll.LoadLibrary(os.path.abspath('IntraFaceResources\\\\opencv_highgui246.dll'))\n ctypes.cdll.LoadLibrary(os.path.abspath('IntraFaceResources\\\\opencv_imgproc246.dll'))\n ctypes.cdll.LoadLibrary(os.path.abspath('IntraFaceResources\\\\opencv_objdetect246.dll'))\n ctypes.cdll.LoadLibrary(os.path.abspath('IntraFaceResources\\\\opencv_features2d246.dll'))\n ctypes.cdll.LoadLibrary(os.path.abspath('IntraFaceResources\\\\opencv_calib3d246.dll'))\n\n ctypes.cdll.LoadLibrary(os.path.abspath('IntraFaceResources\\\\IntraFaceDLL.dll'))\n intraFace = ctypes.cdll.LoadLibrary(os.path.abspath('IntraFaceResources\\\\IntraFaceTracker.dll'))\n return intraFace\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"marcus800/Gaze-Correction","sub_path":"GazeCorrection.py","file_name":"GazeCorrection.py","file_ext":"py","file_size_in_byte":26707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"15695764035","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\n\n\n# In[ ]:\n\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC,LinearSVC\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\ndef make_input(data):\n input = pd.concat(\n [\n data[\"Pclass\"],\n pd.get_dummies(data[\"Sex\"]),\n data[\"Age\"],\n data[\"SibSp\"],\n data[\"Parch\"],\n data[\"Fare\"],\n pd.get_dummies(data[\"Embarked\"])\n ], axis=1\n )\n return input.fillna(input.mean())\n\ndef make_output(data):\n return data[\"Survived\"].values\n\n\n# In[ ]:\n\n\ntrain = pd.read_csv('../input/train.csv').sample(frac=1).reset_index(drop=True)\ntrainX = make_input(train)\ntrainY = make_output(train)\n\n\n# In[ ]:\n\n\nC = 5\nlin_clf = LinearSVC(loss=\"hinge\", C=C, random_state=42)\n\n\n# In[ ]:\n\n\nscaler = StandardScaler()\nX_scaled = scaler.fit_transform(trainX)\n\nlin_clf.fit(X_scaled, trainY)\n\n\n# In[ ]:\n\n\ntest = pd.read_csv('../input/test.csv').sample(frac=1).reset_index(drop=True)\ntestX = make_input(test)\ntestX_scaled = scaler.fit_transform(testX)\npredict = lin_clf.predict(testX)\n\n\n# In[ ]:\n\n\noutput = pd.concat(\n [\n test['PassengerId']\n ], axis=1\n )\noutput['Survived'] = predict\noutput.to_csv('submission.csv',index=False)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"nischalshrestha/automatic_wat_discovery","sub_path":"Notebooks/py/hiromu1202/titanic-svm/titanic-svm.py","file_name":"titanic-svm.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"70504338403","text":"import unittest\nfrom time import sleep\nfrom pathlib import Path\n\nfrom jutils.procesos import Proceso, Paso\n\n\nclass Paso1(Paso):\n def _run(self, a) -> dict:\n super()._run()\n print(\"Haciendo algo pesado\")\n sleep(10)\n print(\"Terminando de hacer algo pesado\")\n return {\"paso1\": a * 4}\n\n\nclass Paso2(Paso):\n def _run(self, paso1) -> dict:\n super()._run()\n print(\"Haciendo algo pesado\")\n sleep(10)\n print(\"Terminando de hacer algo pesado\")\n return {\"paso2\": paso1 + 6}\n\n\nclass Paso3(Paso):\n def _run(self, paso2) -> dict:\n super()._run()\n print(\"Haciendo algo pesado\")\n sleep(10)\n print(\"Terminando de hacer algo pesado\")\n return {\"paso3\": paso2 ** 2}\n\n\nclass Paso4(Paso):\n def _run(self, paso3) -> dict:\n super()._run()\n print(\"Haciendo algo pesado\")\n sleep(10)\n print(\"Terminando de hacer algo pesado\")\n return {\"paso4\": paso3 - 10}\n\n\nclass Procesamiento(Proceso):\n def __init__(self, cache, cache_path, a, force_execution: dict):\n super().__init__(cache, cache_path)\n self._a = a\n self._paso1 = Paso1(\"Paso1\")\n self._paso2 = Paso2(\"Paso2\", self._paso1)\n self._paso3 = Paso3(\"Paso3\", self._paso2)\n self._paso4 = Paso4(\"Paso4\", self._paso3)\n self._force_execution = force_execution\n\n def paso1(self, force_execution=None):\n if force_execution is None:\n force_execution = {}\n r = self._paso1.run(a=self._a, force_execution=force_execution.setdefault(\"paso1\", False))\n self.save_cache()\n return r\n\n def paso2(self, force_execution=None):\n if force_execution is None:\n force_execution = {}\n r = self._paso2.run(a=self._a, force_execution=force_execution.setdefault(\"paso2\", False))\n self.save_cache()\n 
return r\n\n def paso3(self, force_execution=None):\n if force_execution is None:\n force_execution = {}\n r = self._paso3.run(a=self._a, force_execution=force_execution.setdefault(\"paso3\", False))\n self.save_cache()\n return r\n\n def paso4(self, force_execution=None):\n if force_execution is None:\n force_execution = {}\n r = self._paso4.run(a=self._a, force_execution=force_execution.setdefault(\"paso4\", False))\n self.save_cache()\n return r\n\n\nclass MyTestCase(unittest.TestCase):\n def setUp(self) -> None:\n self._path = Path(r'cache/procesamiento.pkl').resolve().absolute()\n self._a = 5\n self._paso1 = self._a * 4\n self._paso2 = self._paso1 + 6\n self._paso3 = self._paso2 ** 2\n self._paso4 = self._paso3 - 10\n self._force_execution = {'paso4': False, 'paso1': False}\n\n def test1_paso1_cache(self):\n procesamiento_def = Procesamiento(cache=True, cache_path=self._path, a=self._a,\n force_execution=self._force_execution)\n procesamiento = Procesamiento.from_cache(procesamiento_def, True, self._path)\n self.assertEqual(procesamiento.paso1(self._force_execution)['paso1'], self._paso1)\n\n def test2_paso2_cache(self):\n procesamiento_def = Procesamiento(cache=True, cache_path=self._path, a=self._a,\n force_execution=self._force_execution)\n procesamiento = Procesamiento.from_cache(procesamiento_def, True, self._path)\n self.assertEqual(procesamiento.paso2(self._force_execution)['paso2'], self._paso2)\n\n def test3_paso4_cache(self):\n procesamiento_def = Procesamiento(cache=True, cache_path=self._path, a=self._a,\n force_execution=self._force_execution)\n procesamiento = Procesamiento.from_cache(procesamiento_def, True, self._path)\n self.assertEqual(procesamiento.paso4(self._force_execution)['paso4'], self._paso4)\n\n def test4_paso3_cache(self):\n procesamiento_def = Procesamiento(cache=True, cache_path=self._path, a=self._a,\n force_execution=self._force_execution)\n procesamiento = Procesamiento.from_cache(procesamiento_def, True, self._path)\n self.assertEqual(procesamiento.paso3(self._force_execution)['paso3'], self._paso3)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jevo160296/jutils","sub_path":"tests/test_procesos.py","file_name":"test_procesos.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73529580001","text":"##\n# Esempi fi statement\nx = int(input(\"inserisci un numero...\")) # Metto int per trasformare le stringa in interno\n\nif(x < 0):\n x = -x\n print(\"inverto il valore...\")\nprint(\"abs(x)=\", x) # Mettendo il print() dentro l'if lo eseguo solo se l'istruzione if(x<0) è verificata\n# Per cambiare indentazione si utilizza TAB => mentre per <= TAB+shif o backspace\n\n# Se vogliamo mettere al posto di x (abs(x)) il valore iniziale inserito\nx = int(input(\"inserisci un numero...\"))\n\nif(x < 0):\n print(\"abs(%d)=\"%(x), -x)\n x = -x\nelse: # Questo ramo viene verificato se e solo se la condizione dell'if non è verificata\n print(\"abs(%d)=\"%(x), x)\n\n\n# C'è del codice ripetuto, ci sono 2 print()\n# Utilizzando un solo print()\nx = int(input(\"inserisci un numero...\"))\n\nif(x < 0):\n y = -x # Ci interessa tenere x (valore inserito) per questo utlizzo y=-x\nelse: # Senza else avrei problemi con x>0\n y = x\nprint(\"abs(%d)= %d\"%(x,y))\n","repo_name":"forlanosimone/course-python","sub_path":"2 - if 
statement/esempio_if_.py","file_name":"esempio_if_.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43827598427","text":"\ndef countofdays(trips):\n cou=[]\n for i in trips:\n a,b=i\n count=0\n for k in range(a,b+1):\n count+=1\n\n cou.append(count)\n return sum(cou)\n\n\na=countofdays([[10,15],[35,45]])\nprint(a)","repo_name":"evasu9582/python","sub_path":"countofday.py","file_name":"countofday.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22782243608","text":"\"\"\" Convenience functions to find the database and other system locations\nwithout the user having to specify full paths.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport collections\nimport datetime\n\n# std\nimport os\nimport shutil\nfrom functools import lru_cache\nfrom pathlib import Path, PurePath\nfrom typing import DefaultDict\n\n# ours\nfrom ankipandas.util.log import log\n\n\n@lru_cache(32)\ndef _find_db(\n search_path,\n maxdepth=6,\n filename=\"collection.anki2\",\n break_on_first=False,\n user: str | None = None,\n) -> DefaultDict[str, list[Path]]:\n \"\"\"\n Like find_database but only for one search path at a time. Also doesn't\n raise any error, even if the search path doesn't exist.\n\n Args:\n search_path:\n maxdepth: Maximum depth relative to search_path\n filename:\n break_on_first: Break on first search result\n user: Only search for this user\n\n Returns:\n collection.defaultdict({user: [list of results]})\n \"\"\"\n search_path = Path(search_path)\n if not search_path.exists():\n log.debug(\"_find_db: Search path %r does not exist.\", str(search_path))\n return collections.defaultdict(list)\n if search_path.is_file():\n if search_path.name == filename:\n return collections.defaultdict(\n list, {search_path.parent.name: [search_path]}\n )\n else:\n log.warning(\n \"_find_db: Search path %r is a file, but filename does not \"\n \"match that of %r.\",\n str(search_path),\n filename,\n )\n return collections.defaultdict(list)\n found: DefaultDict[str, list[Path]] = collections.defaultdict(list)\n for root, dirs, files in os.walk(str(search_path)):\n if filename in files:\n _user = os.path.basename(root)\n if user and not _user == user:\n continue\n found[_user].append(Path(root) / filename)\n if break_on_first:\n log.debug(\"_find_db: Breaking after first hit.\")\n break\n depth = len(Path(root).relative_to(search_path).parts)\n if maxdepth and depth >= maxdepth:\n # log.debug(\n # \"_find_db: Abort search at %r. \"\n # \"Max depth exceeded.\",\n # str(root)\n # )\n del dirs[:]\n return found\n\n\n@lru_cache(32)\ndef find_db(\n search_paths=None,\n maxdepth=8,\n filename=\"collection.anki2\",\n user=None,\n break_on_first=True,\n) -> Path:\n \"\"\"\n Find path to anki2 database.\n\n Args:\n search_paths: Search path as string or pathlib object or list/iterable\n thereof. If None, some search paths are set by default.\n maxdepth: Maximal search depth.\n filename: Filename of the collection (default: ``collections.anki2``)\n user: Username to which the collection belongs. If None, search for\n databases of any user.\n break_on_first: Stop searching once a database is found. 
This is\n obviously faster, but you will not get any errors if there are\n multiple databases matching your criteria.\n\n Raises:\n If none or more than one result is found: :class:`ValueError`\n\n Returns:\n Path to the anki2 database\n \"\"\"\n if not search_paths:\n log.info(\n \"Searching for database. This might take some time. \"\n \"You can speed this up by specifying a search path or \"\n \"directly entering the path to your database.\"\n )\n search_paths = [\n \"~/.local/share/Anki2/\",\n \"~/Documents/Anki2\",\n Path(os.getenv(\"APPDATA\", \"~\") + \"/Anki2/\"),\n \"~/.local/share/Anki2\",\n Path.home(),\n ]\n search_paths = [Path(sp).expanduser().resolve() for sp in search_paths]\n if break_on_first:\n log.warning(\n \"The search will stop at the first hit, so please verify that \"\n \"the result is correct (for example in case there might be more \"\n \"than one Anki installation)\"\n )\n if isinstance(search_paths, (str, PurePath)):\n search_paths = [search_paths]\n found: dict[str, list[Path]] = {}\n for search_path in search_paths:\n found = {\n **found,\n **_find_db(\n search_path,\n maxdepth=maxdepth,\n filename=filename,\n user=user,\n break_on_first=break_on_first,\n ),\n }\n if break_on_first:\n if user is not None:\n if user in found:\n break\n else:\n if found:\n break\n\n if user:\n # We were searching for a specific user\n if user not in found:\n raise ValueError(\n f\"Could not find database belonging to user {user}\"\n )\n else:\n results_user = found[user]\n else:\n if len(found) >= 2:\n raise ValueError(\n \"Found databases for more than one user: {}. Please specify \"\n \"the user.\".format(\", \".join(found))\n )\n elif not found:\n raise ValueError(\n \"No database found. You might increase the search depth or \"\n \"specify search paths to find more.\"\n )\n else:\n # No user specified but we found only one\n results_user = found.popitem()[1]\n\n if len(results_user) >= 2:\n raise ValueError(\n \"Found more than one database belonging to user {} at {}\".format(\n user, \", \".join(map(str, results_user))\n )\n )\n\n assert len(results_user) == 1\n final_result = results_user[0]\n log.debug(\"Database found at %r.\", final_result)\n return final_result\n\n\n@lru_cache(32)\ndef db_path_input(\n path: str | PurePath | None = None, user: str | None = None\n) -> Path:\n \"\"\"Helper function to interpret user input of path to database.\n\n 1. If no path is given, we search through some default locations\n 2. If path points to a file: Take that file\n 3. 
If path points to a directory: Search in that directory\n\n Args:\n path: Path to database or search path or None\n user: User name of anki collection or None\n\n Returns:\n Path to anki database as :class:`Path` object\n\n Raises:\n If path does not exist: :class:`FileNotFoundError`\n In various other cases: :class:`ValueError`\n \"\"\"\n if path is None:\n result = find_db(user=user)\n else:\n path = Path(path)\n if not path.exists():\n raise FileNotFoundError(\n f\"db_path_input: File '{str(path)}' does not exist.\"\n )\n if path.is_file():\n log.debug(\n \"db_path_input: Database explicitly set to %r.\", str(path)\n )\n result = path\n else:\n result = find_db(\n search_paths=(path,), user=user, break_on_first=False\n )\n log.info(\"Database found at %r.\", str(result))\n if result:\n return result\n else:\n raise ValueError(\"Database could not be found.\")\n\n\ndef db_backup_file_name() -> str:\n \"\"\"Time based file name of the backup file.\"\"\"\n return \"backup-ankipandas-{}.anki2\".format(\n datetime.datetime.now().strftime(\"%Y-%m-%d-%H.%M.%S.%f\")\n )\n\n\ndef get_anki_backup_folder(path: str | PurePath, nexist=\"raise\") -> Path:\n \"\"\"Return path to Anki backup folder.\n\n Args:\n path: Path to Aki database as :class:`Path`\n nexist: What to do if backup folder doesn't seem to exist: ``raise`` or\n ``ignore``.\n\n Returns:\n Path to Anki backup folder as :class:`Path`.\n \"\"\"\n path = Path(path)\n if not path.is_file():\n raise FileNotFoundError(f\"Database path {path} seems to be invalid.\")\n backup_folder = path.parent / \"backups\"\n if nexist == \"raise\" and not backup_folder.is_dir():\n raise ValueError(\n f\"Anki backup folder corresponding to database at {path} doesn't seem\"\n \" to exist. Perhaps you can specify a custom backup \"\n \"folder?\"\n )\n return backup_folder\n\n\ndef backup_db(\n db_path: str | PurePath,\n backup_folder: str | PurePath | None = None,\n) -> Path:\n \"\"\"\n Back up database file.\n\n Args:\n db_path: Path to database\n backup_folder: Path to backup folder. 
If None is given, the backup is\n created in the Anki backup directory.\n\n Returns:\n Path to newly created backup file as :class:`Path`.\n \"\"\"\n db_path = Path(db_path)\n if backup_folder:\n backup_folder = Path(backup_folder)\n if not backup_folder.is_dir():\n log.debug(\"Creating backup directory %s.\", backup_folder)\n backup_folder.mkdir(parents=True)\n else:\n backup_folder = get_anki_backup_folder(db_path, nexist=\"raise\")\n if not db_path.is_file():\n raise FileNotFoundError(\"Database does not seem to exist.\")\n backup_path = backup_folder / db_backup_file_name()\n shutil.copy2(str(db_path), str(backup_path))\n return backup_path\n","repo_name":"klieret/AnkiPandas","sub_path":"ankipandas/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":9228,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"54"} +{"seq_id":"23504597382","text":"import os\nimport numpy as np\nimport torch\nfrom matplotlib import pyplot as plt\nfrom pyntcloud import PyntCloud\nfrom smutsia.point_cloud.projection import Projection\nfrom smutsia.point_cloud.normals import get_normals\n\n\ndef back_proj_front_pred(cloud, pred, proj, inference='regression'):\n \"\"\"\n Parameters\n ----------\n cloud: Pyntcloud\n\n pred: np.ndarray\n\n proj: Projection\n\n inference: optional {'regression', 'classification'}\n \"\"\"\n from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor\n\n # project gt\n acc_img = proj.project_points_values(cloud.xyz, np.ones(len(cloud.xyz)), aggregate_func='sum')\n\n acc_img_fl = acc_img.flatten()\n pred_fl = pred.flatten()\n\n lidx, i_img, j_img = proj.projector.project_point(cloud.xyz)\n # back project gt\n back_proj_acc_mask = acc_img_fl[lidx]\n back_proj_pred = pred_fl[lidx]\n\n num_neighbors = 3\n if inference == 'regression':\n knn = KNeighborsRegressor(n_neighbors=num_neighbors, weights='uniform')\n elif inference == 'classification':\n knn = KNeighborsClassifier(n_neighbors=num_neighbors)\n else:\n raise ValueError('Inference value can only be \"regression\" or \"classification\". 
'\n 'Passed value: {}'.format(inference))\n\n X_train, y_train = cloud.xyz[back_proj_acc_mask == 1, :], back_proj_pred[back_proj_acc_mask == 1]\n X_test = cloud.xyz[back_proj_acc_mask != 1, :]\n\n knn.fit(X_train, y_train)\n y_pred = knn.predict(X_test)\n labels = np.zeros_like(back_proj_pred)\n labels[back_proj_acc_mask == 1] = back_proj_pred[back_proj_acc_mask == 1]\n labels[back_proj_acc_mask != 1] = y_pred\n\n return labels\n\n\ndef project_cloud(cloud, proj, img_means, img_std, add_normals):\n \"\"\"\n Parameters\n ----------\n cloud: PyntCloud\n\n proj: Projection\n\n img_means: np.ndarray\n\n img_std: np.ndarray\n\n add_normals: bool\n \"\"\"\n # project point cloud on spherical image\n xyz = cloud.xyz\n i = cloud.points['i']\n z = xyz[:, 2]\n rho = np.linalg.norm(xyz, axis=1)\n aggr = ['max', 'min', 'mean']\n img_means = torch.from_numpy(img_means).clone()\n img_std = torch.from_numpy(img_std).clone()\n\n proj_img = proj.project_points_values(xyz, np.c_[rho, z, i], aggregate_func=aggr)\n\n # compute normals if necessary\n if add_normals:\n norm_img = get_normals(cloud=xyz, method='spherical', proj=proj, res_yaw=proj.res_yaw, res_pitch=proj.nb_layers)\n # stack information\n proj_img = np.dstack([proj_img, norm_img])\n # add to rescale values standard value for normals\n img_means = torch.cat([img_means, torch.zeros(3, dtype=img_means.dtype)])\n img_std = torch.cat([img_std, torch.ones(3, dtype=img_std.dtype)])\n\n # renormalize proj_img\n proj_img = torch.from_numpy(proj_img).clone()\n proj_img = proj_img.permute(2, 0, 1)\n proj_img = (proj_img - img_means[:, None, None]) / img_std[:, None, None]\n\n # return proj values\n return proj_img.unsqueeze(0)\n\n\ndef cnn_detect_ground(cloud, model, proj, img_means, img_std, add_normals, savedir='', gpu=0):\n \"\"\"\n Parameters\n ----------\n cloud: np.ndarray or PyntCloud\n\n model: torch.nn.Module\n\n proj: Projection\n\n img_means: ndarray\n\n img_std: ndarray\n\n add_normals: bool\n\n savedir: str\n pass\n \"\"\"\n # device = torch.device(\"cuda:\"+str(gpu) if torch.cuda.is_available() else \"cpu\")\n device = torch.device(\"cpu\")\n # project point cloud and extract normals\n proj_img = project_cloud(cloud=cloud, proj=proj, img_means=img_means, img_std=img_std, add_normals=add_normals)\n\n model.to(device)\n proj_img = proj_img.to(device=device, dtype=torch.float32)\n\n # evaluate point cloud\n with torch.no_grad():\n y_pred = model(proj_img)\n pred = torch.sigmoid(y_pred)\n thr_pred = (pred > 0.5).float()\n\n pred = np.asarray(pred[0, 0].to('cpu').detach())\n proj_img = np.asarray(proj_img[0].permute(1, 2, 0).to('cpu').detach())\n thr_pred = np.asarray(thr_pred[0, 0].to('cpu').detach())\n\n if len(savedir):\n fn = ''\n if hasattr(cloud, 'sequence'):\n fn += cloud.sequence + '_'\n if hasattr(cloud, 'filename'):\n fn += cloud.filename\n fn += '_2D'\n # todo save pred function\n fig, ax = plt.subplots(3, 1, figsize=(20, 4))\n if add_normals:\n ax[0].imshow(np.abs(proj_img[:, :, 3:]))\n ax[0].set_title('Estimated Normals')\n else:\n ax[0].imshow(np.abs(proj_img[:, :, 2]))\n ax[0].set_title('Reflectivity')\n ax[0].axis('off')\n ax[1].imshow(pred, cmap=plt.cm.coolwarm)\n ax[1].set_title('Heat Map')\n ax[1].axis('off')\n ax[2].imshow(thr_pred)\n ax[2].set_title('2D Prediction')\n ax[2].axis('off')\n fig.suptitle('Prediction on file {}'.format(fn))\n plt.tight_layout()\n plt.savefig(os.path.join(savedir, fn + '.eps'), dpi=90)\n\n # back project evaluated point cloud\n ground = back_proj_front_pred(cloud, pred, proj)\n\n return 
ground\n\n\nif __name__ == \"__main__\":\n from glob import glob\n from smutsia.utils.semantickitti import load_pyntcloud\n from smutsia.utils.viz import plot_cloud\n from smutsia.nn.models import UNet\n from definitions import SEMANTICKITTI_PATH\n\n weights = '/home/leonardo/Dev/github/smutsia/ckpt/ground_detection/unet_best.pth'\n net = UNet(n_channels=3, n_classes=1, n_filters=8, scale=(1, 2))\n net.load_state_dict(torch.load(weights))\n net.eval()\n\n layers_proj = Projection(proj_type='layers', nb_layers=64, res_yaw=2048)\n\n par_img_means = np.array([12.12, -1.04, 0.21])\n par_img_std = np.array([12.32, 0.86, 0.16])\n\n basedir = os.path.join(SEMANTICKITTI_PATH, '08', 'velodyne')\n\n files = sorted(glob(os.path.join(basedir, '*.bin')))\n\n pc = load_pyntcloud(files[0], add_label=True)\n\n out = cnn_detect_ground(pc, net, layers_proj, img_means=par_img_means, img_std=par_img_std, add_normals=False,\n savedir='.')\n plot_cloud(pc.xyz, scalars=out, notebook=False, interact=True)\n print(\"END\")\n","repo_name":"liubigli/smutsia","sub_path":"smutsia/point_cloud/ground_detection/_cnn.py","file_name":"_cnn.py","file_ext":"py","file_size_in_byte":6144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30266418098","text":"from django.db import models\nfrom django.utils.text import slugify\n\n\nfrom apps.proposal.models.fields import FieldsModel\n\n\nclass ProposalModel(models.Model):\n\n SITUATION_CHOICES = (\n ('Em análise','Em análise'),\n ('Aprovada','Aprovada'),\n ('Negada','Negada'),\n ) \n \n title = models.CharField(max_length=800)\n slug = models.SlugField(max_length=500, null=True, blank=True)\n description = models.TextField(null=True, blank=True)\n field = models.ManyToManyField(FieldsModel, related_name='pillars', through='ProposalFieldAssignment')\n proposal_situation = models.CharField(max_length=50, default='Em análise', choices=SITUATION_CHOICES)\n \n def save(self, *args, **kwargs):\n self.slug = slugify(self.title)\n super(ProposalModel, self).save(*args, **kwargs)\n\n class Meta:\n verbose_name = \"Proposta\"\n verbose_name_plural = \"Propostas\"\n db_table = 'proposal'\n\n def __str__(self):\n return self.title\n\nclass ProposalFieldAssignment(models.Model):\n proposal = models.ForeignKey(ProposalModel, on_delete=models.CASCADE, related_name='field_assignments')\n field = models.ForeignKey(FieldsModel, on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = \"Atribuição de questão\"\n verbose_name_plural = \"Atribuições de questões\"\n db_table = 'proposal_field_assignment'\n \n\n def __str__(self):\n return f\"{self.proposal.title} - {self.field.label}\"\n \n","repo_name":"LucasAraujoBR/-challenge-dev-django","sub_path":"apps/proposal/models/proposal.py","file_name":"proposal.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22447629126","text":"\"\"\"Exercício Python 017:\nFaça um programa que leia o comprimento do cateto oposto e do cateto adjacente de um\ntriângulo retângulo. Calcule e mostre o comprimento da hipotenusa. 
\"\"\"\n\nfrom math import hypot\ncatopost = float(input('\\33[1;34mDigite o cumprimento do cateto oposto: '))\ncatadj = float(input('\\33[1;35mDigite o comprimento do cateto adjacente: '))\nhip = hypot(catadj, catopost)\nprint(f'\\33[37mO comprimento da hipotenusa é: \\33[1;97m{hip:.2f}')\n\n'''from math import pow, sqrt\ncatopost = float(input('Digite o cumprimento do cateto oposto: '))\ncatadj = float(input('Digite o comprimento do cateto adjacente: '))\nhip = sqrt(pow(catopost,2)+pow(catadj,2))\nprint(f'O comprimento da hipotenusa é: {hip:.2f}')'''\n","repo_name":"juveniljunior/desafios-python3-cursoemvideo","sub_path":"Desafios/Mundo1_Conceitos_Básicos/desafio017.py","file_name":"desafio017.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"19247083142","text":"import pandas as pd\nimport numpy as np\n\n\ndataset_name = \"Caltech\"\n\nrelative = \"../../../\"\n\ndf = pd.read_csv(relative + \"datasets/\" + dataset_name + '/'+ dataset_name + '.csv', sep=\";\", header=None)\ndf = df.drop(0, 1)\n\n\nprint(df.describe())\nprint(df.nunique())\nprint(df.head())\nprint(df.shape)\n\n\ndf[11] = pd.Categorical(df[11])\ndf[11] = df[11].cat.codes\nnum_cols = df.shape[1]-1\n\nnp.savetxt(relative + \"datasets/\" + dataset_name + '/' + dataset_name + \"_prep_encoding2.csv\", df.values[:,:num_cols], delimiter=\",\")\nnp.savetxt(relative + \"datasets/\" + dataset_name + '/' + dataset_name + \"_labels.csv\", df.values[:,num_cols], delimiter=\",\")\n\n\n\nimport umap\n\n\nX_embedded = umap.UMAP().fit_transform(df.values[:,:num_cols])\n\n\nimport matplotlib.pyplot as plt\n\n\nplt.scatter(X_embedded[:,0], X_embedded[:,1], c = df.values[:,num_cols])\nplt.show()","repo_name":"RaulRomani/Interactive-Data-Projection","sub_path":"server/utils/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70494439203","text":"from ExcelDao import ExcelDao\nfrom ExcelWrited import ExcelWrited\n\nif __name__ == \"__main__\":\n dao = ExcelDao(\"chuqin.xlsx\")\n result = dao.readExcel()\n write = ExcelWrited(\"chuqin.xlsx\", result)\n write.toWriteData()\n print(write.CList)\n print(write.ZList)\n print(write.JList)\n write.toWriteExcel()\n","repo_name":"Jevis/work-statistics","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16748342206","text":"import numpy as __np\nimport skfuzzy as __fuzz\nfrom skfuzzy import control as __ctrl\n\n\n#########################################################################\n# Rasgos de Personalidad #\n#########################################################################\n\n# racional\nracional = __ctrl.Antecedent(__np.arange(0, 100, 0.1), 'racional')\nracional['bajo'] = __fuzz.gaussmf(racional.universe, 30, 20)\nracional['medio'] = __fuzz.gaussmf(racional.universe, 70, 20)\nracional['alto'] = __fuzz.gaussmf(racional.universe, 100, 20)\n\n# emocional\nemocional = __ctrl.Antecedent(__np.arange(0, 101, 0.1), 'emocional')\nemocional['bajo'] = __fuzz.gaussmf(emocional.universe, 30, 20)\nemocional['medio'] = __fuzz.gaussmf(emocional.universe, 70, 20)\nemocional['alto'] = __fuzz.gaussmf(emocional.universe, 100, 20)\n\n#########################################################################\n# Emociones 
#\n#########################################################################\n\n# tristeza\ntristeza = __ctrl.Antecedent(__np.arange(0, 101, 0.1), 'tristeza')\ntristeza['bajo'] = __fuzz.gaussmf(tristeza.universe, 30, 20)\ntristeza['medio'] = __fuzz.gaussmf(tristeza.universe, 70, 30)\ntristeza['alto'] = __fuzz.gaussmf(tristeza.universe, 100, 20)\n\n# sorpresa\nsorpresa = __ctrl.Antecedent(__np.arange(0, 101, 0.1), 'sorpresa')\nsorpresa['bajo'] = __fuzz.gaussmf(sorpresa.universe, 30, 20)\nsorpresa['medio'] = __fuzz.gaussmf(sorpresa.universe, 70, 15)\nsorpresa['alto'] = __fuzz.gaussmf(sorpresa.universe, 100, 20)\n\n# disfrute\ndisfrute = __ctrl.Antecedent(__np.arange(0, 101, 0.1), 'disfrute')\ndisfrute['bajo'] = __fuzz.gaussmf(disfrute.universe, 30, 20)\ndisfrute['medio'] = __fuzz.gaussmf(disfrute.universe, 70, 15)\ndisfrute['alto'] = __fuzz.gaussmf(disfrute.universe, 100, 20)\n\n# enfado\nenfado = __ctrl.Antecedent(__np.arange(0, 101, 0.1), 'enfado')\nenfado['bajo'] = __fuzz.gaussmf(enfado.universe, 30, 20)\nenfado['medio'] = __fuzz.gaussmf(enfado.universe, 70, 15)\nenfado['alto'] = __fuzz.gaussmf(enfado.universe, 90, 20)\n\n# asco\nasco = __ctrl.Antecedent(__np.arange(0, 101, 0.1), 'asco')\nasco['bajo'] = __fuzz.gaussmf(asco.universe, 30, 20)\nasco['medio'] = __fuzz.gaussmf(asco.universe, 75, 20)\nasco['alto'] = __fuzz.gaussmf(asco.universe, 100, 20)\n\n# temor\ntemor = __ctrl.Antecedent(__np.arange(0, 101, 0.1), 'temor')\ntemor['bajo'] = __fuzz.gaussmf(temor.universe, 30, 20)\ntemor['medio'] = __fuzz.gaussmf(temor.universe, 70, 15)\ntemor['alto'] = __fuzz.gaussmf(temor.universe, 90, 20)\n\n# sin emoción\nnoEmocion = __ctrl.Antecedent(__np.arange(0, 101, 0.1), 'noEmocion')\nnoEmocion['bajo'] = __fuzz.gaussmf(noEmocion.universe, 30, 20)\nnoEmocion['medio'] = __fuzz.gaussmf(noEmocion.universe, 70, 10)\nnoEmocion['alto'] = __fuzz.gaussmf(noEmocion.universe, 90, 20)\n","repo_name":"Scoowy/django-do-deploy","sub_path":"fuzzysystem/dashboard/services/diffuser/antecedents.py","file_name":"antecedents.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5508583397","text":"from django.contrib import admin\r\nfrom django.urls import path,include\r\nfrom rest_framework.routers import DefaultRouter\r\nfrom .views import BlacklistTokenView,LoggedInUserView,RegisterView,TrendingViewSet,PlaceView,ActivitiesView,FestivalViewSet,ItemViewSet,PurchaseViewSet,AttractionViewSet,BookingViewSet,GuideDetailViewset,ContactViewSet,ReviewViewSet,CuisineViewSet,EventViewSet,EventBookViewSet,WorkshopBookingViewSet,WorkshopViewSet\r\nfrom rest_framework_simplejwt.views import 
TokenObtainPairView,TokenRefreshView\r\n\r\nrouter=DefaultRouter()\r\nrouter.register('register',RegisterView,basename='register')\r\nrouter.register('trendings',TrendingViewSet,basename='trendings')\r\nrouter.register('places',PlaceView,basename='places')\r\nrouter.register('activities',ActivitiesView,basename='activities')\r\nrouter.register('festivals',FestivalViewSet,basename='festivals')\r\nrouter.register('items',ItemViewSet,basename='items')\r\nrouter.register('purchases',PurchaseViewSet,basename='purchases')\r\nrouter.register('attractions',AttractionViewSet,basename='attractions')\r\nrouter.register('booking',BookingViewSet,basename='booking')\r\nrouter.register('guides',GuideDetailViewset,basename='guides')\r\nrouter.register('contacts',ContactViewSet,basename='contacts')\r\nrouter.register('reviews',ReviewViewSet,basename='reviews')\r\nrouter.register('cuisine',CuisineViewSet,basename='cuisine')\r\nrouter.register('cultural-event',EventViewSet,basename='cultural-event')\r\nrouter.register('event-booking',EventBookViewSet,basename='event-booking')\r\nrouter.register('workshop',WorkshopViewSet,basename='workshop')\r\nrouter.register('workshop-booking',WorkshopBookingViewSet,basename='workshop-booking')\r\n\r\nurlpatterns = [\r\n path('',include(router.urls)),\r\n path('api/token/',TokenObtainPairView.as_view(),name=\"token_obtain\"),\r\n path('api/token/refresh/',TokenRefreshView.as_view(),name=\"refresh_token\"),\r\n path('api/token/blacklist/',BlacklistTokenView.as_view(),name=\"blacklist\"),\r\n path('current-user/', LoggedInUserView.as_view(), name='currentuser'),\r\n]","repo_name":"nandiinii/techno-backend","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6801644814","text":"# complexity O(nlog(n))\n\ndef shellSort(arr):\n arrLen = len(arr)\n gap = arrLen//2\n while gap > 0:\n i = gap\n while i < arrLen:\n temp = arr[i]\n j = i - gap\n while j >= 0 and arr[j] > temp:\n arr[j+gap] = arr[j]\n j = j - gap\n arr[j+gap] = temp\n i = i + 1\n gap = gap // 2\n return arr\n\n\na = [3, 5, 8, 7, 6, 9, 2]\nprint('shell sort', shellSort(a))\n","repo_name":"PKrupa94/DataStructure","sub_path":"DS/Sorting/shell_Sort.py","file_name":"shell_Sort.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70264309281","text":"#User function Template for python3\nclass Solution:\n\n def immediateSmaller(self,arr,n):\n\t\t# code here\n for i in range(n - 1):\n arr[i] = arr[i+1] if arr[i+1] < arr[i] else -1\n\t\t \n arr[n-1] = -1\n\t\t\nn = int(input())\narr = [int(x) for x in input().split()]\nSolution().immediateSmaller(arr, n)\nprint(arr)","repo_name":"gokulgk-9402/Daily-CP-August-2022","sub_path":"ImmediateSmallerElement.py","file_name":"ImmediateSmallerElement.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74595140321","text":"import os\nfrom wholeslidedata.dataset import WholeSlideDataSet\nfrom wholeslidedata.source.utils import whole_slide_files_from_folder_factory\nfrom wholeslidedata.source.associations import associate_files\nfrom utils import print_dataset_statistics\nimport argparse\nimport numpy as np\nfrom wholeslidedata.source.files import WholeSlideAnnotationFile, WholeSlideImageFile\n\nLABELS = ['NDBE-G', 'LGD-G', 'HGD-G']\nSEED = 
55\n\n\ndef write_data_yaml(train_associations, val_associations, test_associations, output_path):\n \"\"\"Writes both train and validation associations to a config split file.\n\n Args:\n train_associations: the selected samples for training\n val_associations: the selected samples for validation\n test_associations: the selected samples for testing\n output_path: path where to config split file is stored\n\n Returns:\n none: writes to config file\n\n Todo: refactor this = create dict and write to yaml instead of str.\n \"\"\"\n s = '---\\ntraining:'\n\n for files in train_associations.values():\n image_file = files[WholeSlideImageFile][0].path\n annotation_file = files[WholeSlideAnnotationFile][0].path\n s += '\\n\\t-\\n\\t\\twsi:\\n\\t\\t\\tpath: {}\\n\\t\\twsa:\\n\\t\\t\\tpath: {}'.format(image_file, annotation_file)\n\n s += '\\n\\nvalidation:'\n for files in val_associations.values():\n image_file = files[WholeSlideImageFile][0].path\n annotation_file = files[WholeSlideAnnotationFile][0].path\n s += '\\n\\t-\\n\\t\\twsi:\\n\\t\\t\\tpath: {}\\n\\t\\twsa:\\n\\t\\t\\tpath: {}'.format(image_file, annotation_file)\n\n s += '\\n\\ntesting:'\n for files in test_associations.values():\n image_file = files[WholeSlideImageFile][0].path\n annotation_file = files[WholeSlideAnnotationFile][0].path\n s += '\\n\\t-\\n\\t\\twsi:\\n\\t\\t\\tpath: {}\\n\\t\\twsa:\\n\\t\\t\\tpath: {}'.format(image_file, annotation_file)\n\n print('Writing split yaml file to: {}'.format(output_path))\n with open(output_path, 'w') as out_file:\n out_file.write(s.replace('\\t', ' '))\n\n\ndef get_associations_from_folders(folder, datasets):\n \"\"\"Gets all associations (link between image and annotation file) from the folder where the datasets are located.\n An exact match in image and annotation file names is required.\n\n Args:\n folder: the main folder that holds the directories for each individual dataset.\n datasets: the datasets to be included: 'ASL, Bolero, LANS, RBE'\n\n Returns:\n dict: all found associations\n \"\"\"\n total_images = []\n total_annotations = []\n\n for folder in [os.path.join(folder, dataset) for dataset in datasets]:\n # find all image and annotation files\n print('\\nFolder: {}'.format(folder))\n image_files = whole_slide_files_from_folder_factory(folder, 'wsi', excludes=['mask', 'P53'],\n image_backend='openslide')\n annotation_files = whole_slide_files_from_folder_factory(folder, 'wsa', excludes=['tif', 'old'],\n annotation_parser='asap')\n\n total_images += image_files\n total_annotations += annotation_files\n\n print('Found {} image files.'.format(len(image_files)))\n print('Found {} annotation files.'.format(len(annotation_files)))\n\n # associate image and annotation files\n associations = associate_files(image_files, annotation_files, exact_match=True)\n\n # print dataset statistics\n dataset = WholeSlideDataSet(mode='default', associations=associations, labels=LABELS)\n print_dataset_statistics(dataset)\n\n # print the total dataset statistics\n total_associations = associate_files(total_images, total_annotations, exact_match=True)\n print('\\nTotal of annotated images: {}'.format(len(total_associations)))\n total_dataset = WholeSlideDataSet(mode='default', associations=total_associations, labels=LABELS)\n print_dataset_statistics(total_dataset)\n\n return total_associations\n\n\ndef train_val_split(folder, datasets, output_path, train_percent=0.8, val_percent=0.1):\n \"\"\"Produces a train validation split. 
Output is a config (yml) file with the paths to the images\n and annotations for each sample in the train and validation set.\n\n Args:\n folder: the folder where all data is located.\n datasets: names of the datasets included\n output_path: path where the config file for the split is stored\n train_percent: percentage of images used for training\n val_percent: percentage of image used for validation, the rest is used for testing\n\n Returns:\n none: writes to the config split file.\n \"\"\"\n\n # get file keys\n associations = get_associations_from_folders(folder, datasets)\n file_keys = np.array([*associations])\n indexes = np.arange(len(file_keys))\n\n # shuffle the indexes\n np.random.seed(SEED)\n np.random.shuffle(indexes)\n\n # split indexes\n n = len(indexes)\n n_train_end = int(train_percent * n)\n n_val_end = int(val_percent * n) + n_train_end\n\n train_keys = file_keys[indexes[:n_train_end]]\n val_keys = file_keys[indexes[n_train_end:n_val_end]]\n test_keys = file_keys[indexes[n_val_end:]]\n\n train_associations = {file_key: files for file_key, files in associations.items() if file_key in train_keys}\n val_associations = {file_key: files for file_key, files in associations.items() if file_key in val_keys}\n test_associations = {file_key: files for file_key, files in associations.items() if file_key in test_keys}\n\n # print train and val set statistics\n train_dataset = WholeSlideDataSet(mode='default', associations=train_associations, labels=LABELS)\n val_dataset = WholeSlideDataSet(mode='default', associations=val_associations, labels=LABELS)\n test_dataset = WholeSlideDataSet(mode='default', associations=test_associations, labels=LABELS)\n\n print('\\nTraining dataset: {} images.'.format(len(train_associations)))\n print_dataset_statistics(train_dataset, show_all_files=False)\n print('\\nValidation dataset: {} images.'.format(len(val_associations)))\n print_dataset_statistics(val_dataset, show_all_files=False)\n print('\\nTest dataset: {} images.'.format(len(test_associations)))\n print_dataset_statistics(test_dataset, show_all_files=False)\n\n # write to yaml file\n write_data_yaml(train_associations, val_associations, test_associations, output_path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_path\", help=\"Path to the folder where the datasets are located.\")\n parser.add_argument(\"--datasets\", help=\"Names of the datasets to include to the train and validation split.\",\n default='ASL, LANS, RBE', type=str)\n parser.add_argument(\"--train_percent\", help=\"Percentage used for training.\",\n default=0.8, type=float)\n parser.add_argument(\"--val_percent\", help=\"Percentage used for training, the rest is used for testing.\",\n default=0.1, type=float)\n parser.add_argument(\"--output_path\", help=\"Path to the folder where the data.yml is stored.\",\n default='/home/mbotros/code/barrett_gland_grading/configs/split.yml')\n args = parser.parse_args()\n dataset_names = [dataset for dataset in args.datasets.split(', ')]\n\n train_val_split(folder=args.input_path, datasets=dataset_names, train_percent=args.train_percent,\n val_percent=args.val_percent, output_path=args.output_path)\n\n","repo_name":"qurAI-amsterdam/barrett_gland_grading","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":7557,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"7170000024","text":"from pathlib import Path\n\nimport torch\nfrom torch import nn, 
optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision.utils import make_grid\nfrom tqdm import tqdm\n\nfrom .. import functional as functional_trainers\nfrom .. import hooks\nfrom ..metrics import TrainingMetrics\n\n\nclass TensorBoardVisionHook(hooks.TensorBoardHook):\n def __init__(\n self,\n writer: SummaryWriter,\n generator: nn.Module,\n fixed_noise: torch.Tensor,\n image_sample_interval: int,\n ):\n self.generator = generator\n self.fixed_noise = fixed_noise\n self.image_sample_interval = image_sample_interval\n self._iteration = 0\n super().__init__(writer)\n\n def __call__(self, mini_batch_metrics: TrainingMetrics):\n if self._iteration % self.image_sample_interval == 0:\n with torch.no_grad():\n fake = self.generator(self.fixed_noise).cpu()\n fake = make_grid(\n fake,\n nrow=int(self.fixed_noise.shape[0] ** 0.5),\n padding=2,\n normalize=True,\n )\n self.writer.add_image(\"generated_images\", fake, self.iteration)\n self._iteration += 1\n return super().__call__(mini_batch_metrics)\n\n\nclass WGANTrainer:\n def __init__(\n self,\n dataloader: DataLoader,\n generator: nn.Module,\n critic: nn.Module,\n generator_optimizer: optim.Optimizer,\n critic_optimizer: optim.Optimizer,\n device: torch.device,\n critic_train_per_batch: int,\n generator_train_per_batch: int,\n gradient_penalty_weight: float,\n tensorboard_log_dir: Path,\n image_sample_interval: int,\n image_sample_size: int,\n ):\n self.dataloader = dataloader\n self.critic = critic\n self.generator = generator\n self.critic_optimizer = critic_optimizer\n self.generator_optimizer = generator_optimizer\n self.device = device\n self.critic_train_per_batch = critic_train_per_batch\n self.generator_train_per_batch = generator_train_per_batch\n self.gradient_penalty_weight = gradient_penalty_weight\n self.metrics_aggregator_hook = hooks.MetricsAggregatorHook(\n functional_trainers.wasserstein.METRICS\n )\n self.tensorboard_hook = TensorBoardVisionHook(\n SummaryWriter(tensorboard_log_dir),\n self.generator,\n self.generator.sample_noise(image_sample_size).to(device),\n image_sample_interval,\n )\n self.tqdm_metrics_to_print = [\"critic_loss\", \"generator_loss\"]\n\n @property\n def metrics(self):\n return self.metrics_aggregator_hook.metrics\n\n def train(self, epochs: int) -> None:\n progress_bar = tqdm(range(epochs), desc=\"Training\")\n hook = hooks.ChainedHooks(\n self.metrics_aggregator_hook,\n hooks.TQDMHook(progress_bar, metrics_to_print=self.tqdm_metrics_to_print),\n self.tensorboard_hook,\n )\n for epoch in progress_bar:\n metrics = functional_trainers.wasserstein.train_one_epoch(\n self.dataloader,\n self.generator,\n self.critic,\n self.generator_optimizer,\n self.critic_optimizer,\n self.critic_train_per_batch,\n self.generator_train_per_batch,\n self.gradient_penalty_weight,\n self.device,\n hook,\n )\n\n\nclass VanilaTrainer:\n def __init__(\n self,\n dataloader: DataLoader,\n generator: nn.Module,\n discriminator: nn.Module,\n generator_optimizer: optim.Optimizer,\n discriminator_optimizer: optim.Optimizer,\n device: torch.device,\n tensorboard_log_dir: Path,\n image_sample_interval: int,\n image_sample_size: int,\n ):\n self.dataloader = dataloader\n self.discriminator = discriminator\n self.generator = generator\n self.discriminator_optimizer = discriminator_optimizer\n self.generator_optimizer = generator_optimizer\n self.device = device\n self.metrics_aggregator_hook = hooks.MetricsAggregatorHook(\n functional_trainers.vanila.METRICS\n )\n 
self.tensorboard_hook = TensorBoardVisionHook(\n SummaryWriter(tensorboard_log_dir),\n self.generator,\n self.generator.sample_noise(image_sample_size).to(device),\n image_sample_interval,\n )\n self.tqdm_metrics_to_print = [\"discriminator_loss\", \"generator_loss\"]\n self.sigmoid_applier_hook = hooks.SigmoidApplierHook(\n [\n \"discriminator_output_on_real_data\",\n \"discriminator_output_on_fake_data\",\n ]\n )\n\n @property\n def metrics(self):\n return self.metrics_aggregator_hook.metrics\n\n def train(self, epochs: int) -> None:\n progress_bar = tqdm(range(epochs), desc=\"Training\")\n hook = hooks.ChainedHooks(\n self.sigmoid_applier_hook,\n self.metrics_aggregator_hook,\n hooks.TQDMHook(progress_bar, metrics_to_print=self.tqdm_metrics_to_print),\n self.tensorboard_hook,\n )\n for epoch in progress_bar:\n metrics = functional_trainers.vanila.train_one_epoch(\n self.dataloader,\n self.generator,\n self.discriminator,\n self.generator_optimizer,\n self.discriminator_optimizer,\n self.device,\n hook,\n )\n","repo_name":"dariush-bahrami/elderwand","sub_path":"elderwand/trainer/applications/vision.py","file_name":"vision.py","file_ext":"py","file_size_in_byte":5658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"32046885992","text":"import pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom utils import AssignProbSent, plot_minimal_pair\nfrom utils import log_uni_unk_prob, sentences_unigram_probability, calculating_perplexity\nimport utils\nfrom sentence_manipulation import find_sentence_that_have, comparative_sent_printing, x2y\ncount_sign = '__COUNT__'\n\n\nmodelname = 'TrieRoot'\n# Trained on Harry Potter 1-7\ninfile = open(modelname, 'rb')\nroot = pickle.load(infile)\ninfile.close()\n\nfilename = 'eval_dataset'\ninfile = open(filename, 'rb')\neval_set = pickle.load(infile)\ninfile.close()\n\nfilename='is_sentences'\ninfile = open(filename,'rb')\nis_sentences=pickle.load(infile)\ninfile.close()\n\n\nbad_is_sentences = x2y(is_sentences, 'is','are')\nunk_is_sentences = x2y(is_sentences, 'is','who')\na = is_sentences[6][1:9]\nb = bad_is_sentences[6][1:9]\nplot_minimal_pair(a, b, root, AssignProbSent, __plt__=plt)","repo_name":"CLQuantizer/ngram","sub_path":"Experiment Scripts/Experiments.py","file_name":"Experiments.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21335010097","text":"import unittest\n\nimport koji\nimport kojihub\n\n\nclass TestRepoInit(unittest.TestCase):\n\n def test_repo_init_wrong_type_typeID(self):\n task_id = 'test-task_id'\n with self.assertRaises(koji.ParameterError) as cm:\n kojihub.repo_init('test-tag', task_id)\n self.assertEqual(f\"Invalid type for value '{task_id}': {type(task_id)}, \"\n f\"expected type \", str(cm.exception))\n","repo_name":"yifengyou/koji","sub_path":"BUILD/koji-1.30.0-tests/test_hub/test_repo_init.py","file_name":"test_repo_init.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"17257169088","text":"import os\nfrom flask import Flask, jsonify\nfrom flask_restful import Api\nfrom flask_jwt_extended import JWTManager\n\nfrom resources.user import UserRegister, User, UserLogin, UserLogout, TokenRefresh, UserList\nfrom resources.mood import Mood, MoodList\nfrom resources.theme import Theme, ThemeList\nfrom resources.interaction import Interaction, 
InteractionList\nfrom resources.toy import Toy, ToyList\nfrom resources.child import Child, ChildrenList\nfrom resources.playground import Playground, PlaygroundList\nfrom blacklist import BLACKLIST\n\nfrom fileupload.upload import appUpload\n\nheroku_db_url = os.environ.get('DATABASE_URL')\nsqlite_url = 'sqlite:///data.db'\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # flask-sqlalchemy setting\napp.config['PROPAGATE_EXCEPTIONS'] = True\napp.config['JWT_BLACKLIST_ENABLED'] = True\napp.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']\n\napp.register_blueprint(appUpload)\n# app.config['JWT_SECRET_KEY'] = 'jose' # jwt extended config \napp.secret_key = 'jose' \napi = Api(app)\n\n@app.before_first_request\ndef create_tables():\n db.create_all()\n\njwt = JWTManager(app)\n\n\n@jwt.user_claims_loader\ndef add_claims(identity):\n if identity == 1: # read from config file rather than hard coding\n return({'is_admin': True,})\n return({'is_admin': False,})\n\n# Returns true if jti in the blacklist, false if it is not in the blacklist\n@jwt.token_in_blacklist_loader\ndef check_if_token_in_blacklist(decrypted_token):\n return decrypted_token['jti'] in BLACKLIST\n \n\n# Token expired every 5 minutes or so, when they try again they will get this message\n@jwt.expired_token_loader\ndef expired_token_callback():\n return jsonify({\n 'description': 'The token has expired.',\n 'error': 'token_expired'\n }), 401\n\n# When Authorization string is not a JWT, and is a different string or when token is not fresh....why?\n@jwt.invalid_token_loader\ndef invalid_token_callback(error):\n return jsonify({\n 'description': 'Signature verification failed.',\n 'error': 'invalid_token'\n }), 401\n\n# When no JWT is sent at all\n@jwt.unauthorized_loader\ndef missing_token_callback(error):\n return jsonify({\n 'description': 'Request does not contain an access token.',\n 'error': 'no_token'\n }), 401\n\n# When a non fresh token is sent and a fresh is required...does not work as expected...why?\n@jwt.needs_fresh_token_loader\ndef token_not_fresh_callback():\n return jsonify({\n 'description': 'The token is not fresh, please login again.',\n 'error': 'non_fresh_token'\n }), 401\n\n# When a token is revoked, ie user logs out token is added to revoked token list\n@jwt.revoked_token_loader\ndef revoked_token_callback():\n return jsonify({\n 'description': 'The token is revoked, please login again.',\n 'error': 'revoked_token'\n }), 401\n\n\napi.add_resource(Theme, '/theme//')\napi.add_resource(Mood, '/mood//')\napi.add_resource(Interaction, '/interaction//')\napi.add_resource(Toy, '/toy//')\napi.add_resource(Playground, '/playground//')\napi.add_resource(Child, '/child/')\napi.add_resource(ChildrenList, '/children')\napi.add_resource(ThemeList, '/theme_list/')\napi.add_resource(MoodList, '/moods')\napi.add_resource(InteractionList, '/interactions')\napi.add_resource(ToyList, '/toys')\napi.add_resource(PlaygroundList, '/playground_toys')\napi.add_resource(UserRegister, '/register')\napi.add_resource(User, '/user/')\napi.add_resource(UserLogin, '/login')\napi.add_resource(UserList, '/users')\napi.add_resource(TokenRefresh, '/refresh')\napi.add_resource(UserLogout, '/logout')\n\n\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(port=5005, debug=True) # important to mention 
debug=True\n","repo_name":"gokalper/my-flask-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19456752563","text":"#!/usr/bin/env python3\n# Coded by: AuxGrep\n# Sh3dyz\n# 2023\n\nimport time, sys, os\nfrom AP.alert import disclaimer\nimport platform\n\nITALIC = \"\\033[3m\"\npurple = '\\x1b[38;5;165m'\nblue = '\\x1b[38;5;33m'\nred = '\\x1b[38;5;196m'\ngreen = '\\x1b[38;5;118m'\ngrey = '\\x1b[38;5;0m'\npink = '\\x1b[38;5;199m'\nEND = \"\\033[0m\"\nUNDERLINE = \"\\033[4m\"\nBOLD = \"\\033[1m\"\nBLINK = \"\\033[5m\"\n\nos_check = platform.system()\n\nif os_check != 'Linux':\n sys.exit('Framework support On linux!!!')\n\n\ndef big_banner():\n print(f'''{grey}\n ,▄▄▄▄▄╓,\n\t\t\t\t ,,▄▄▄▄▓█████████████████████▓▓▄▄▄▄,,\n\t ▀▀████████████████████████████████████████▓▄▄▄▄▄▄▄, \n\t ▄▄██████████████████████████████████████████████▌▄╓▄∩\n\t ▄██▀▀████████████████████████████████████████████████▄, '\n\t ` ▄█████████{pink}▓{grey}████{pink}▓▓▓▓▓▓▓▓▓{grey}███████{pink}▓{grey}██████▄▄L,\n\t ▄██████████{pink}▓{grey}███{pink}▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓{grey}████{pink}▓{grey}███████▀\n\t └▀╙ ▓██████{pink}▓{grey}██{pink}▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓{grey}███{pink}▓▓▓{grey}████▀╙`\n\t ,███████{pink}▓{grey}██{pink}▓▓▓▓▓▓{grey}████{pink}▓▓▓▓▓▓▓▓▓{grey}██{pink}▓{grey}███\n\t ▓██████████{pink}▓▓▓▓▓▓{grey}█████{pink}▓▓▓▓▓▓▓▓{grey}██{pink}▓{grey}█████m\n\t ████████{pink}▓{grey}████{pink}▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓{grey}████████████████████,\n\t J█████████{pink}▓{grey}████{pink}▓▓▓▓▓▓▓▓▓▓▓▓▓▓{grey}███{pink}▓{grey}██████▀╙\n\t ╟█████████████{pink}▓▓▓▓▓▓▓▓▓▓▓▓▓{grey}████{pink}▓{grey}████████████████▀\n\t {pink} ▄▓▓{grey}████████████{pink}▓▓▓{grey}█████████████████████████████████▀{pink}╨\n\t Φ▌▓▓▓{grey}███████████████████████████████████████{pink}▓▓▓▓▌Å\"`\n\t ⌐'╫▓▓▓▓▓▓▓▓▓▓▓▓▓{grey}███████████████████████{pink}{END}▓▓▓▓▓▓▓Ñ`\n\t ` \"▌░╟╫╨╢║╣▀▓▓▓▀▀▀▓▓▓▓▓▓▓▓▓▓▓▓▓▓▌▀▀▀▀╣ÖÅ`╠░║▓:\n\t ` ▌ \"╫. j ▓▓▒\" ``╟╫M` ```└``` ╙⌂║. \"Ñ▓▌ Developed By{red} Auxgrep{END} and{red} Sh3dYz{END}\n\t ▐M `╨H ¿ \"▓▓╫ ���Ñ j ╣╬ ║▓ {green}pew! pew!!{END}\n\t ╬ :Ñ\" ╫▓╣Ü ╣░ j ╙▌ ╣▌ 2023\n\n\t ''')\n\n\ndef Menu():\n print('')\n HEAD = '{0}{3}{2}WELCOME TO DARK-EAGLE WIFI KIT LOADER v1{1}'.format(BOLD, END, red, ITALIC)\n size = HEAD.center(97)\n print(size)\n print('')\n print('Choose a module'.center(80))\n print('____'.center(70))\n print('')\n print(f'[1] AP-Sniffer {green}{BLINK}(✓){END}'.center(90))\n print(f'[2] Handshakes-sooper{green}{BLINK}(✓){END}'.center(95))\n print(f'[3] Deauthentication attack {green}{BLINK}(✓){END}'.center(101))\n print(f'[4] Monitor Mode{green}{BLINK}(✓){END}'.center(90))\n print(f'[5] MITM - ByPass Private Dns(IOS 16 and UP){green}{BLINK}(✓){END}'.center(117))\n print(f'[6] Manage Mode{green}{BLINK}(✓){END}'.center(90))\n print(f'[7] VIF-checking{green}{BLINK}(✓){END}'.center(90))\n print(f'[8] Strolling Attacks{green}{BLINK}(✓){END}'.center(95))\n print(f'{BOLD}[9]{blue}*** Refresh **** {END}{pink}{BLINK}ヽ(•‿•)ノ{END}'.center(115))\n print(f'[10] Evil-Twin Attacks-**SOON**{red}{BLINK}(x){END}'.center(105))\n print('')\n print(f'{red}[0] Exit {END}'.center(90))\n print('____'.center(70))\n print('')\n\n value = None # INT VALUE TO BE RETURNED\n\n while True:\n try:\n module_number = input(f'{green}Enter module{END} {purple}ID(1-10){END}: ')\n except:\n sys.exit(f'{purple}Thanks for Your Time!! 
Byeee!!{END}'.center(100))\n try:\n module_number = int(module_number)\n except ValueError:\n print('Please choose module {0}number 1 to 10{1}'.format(red, END))\n continue\n\n if module_number == int(1):\n value = 1\n break\n elif module_number == int(2):\n value = 2\n break\n elif module_number == int(3):\n value = 3\n break\n elif module_number == int(4):\n value = 4\n break\n elif module_number == int(5):\n value = 5\n break\n elif module_number == int(6):\n value = 6\n break\n elif module_number == int(7):\n value = 7\n break\n elif module_number == int(8):\n value = 8\n break\n elif module_number == int(9):\n value = 9\n break\n elif module_number == int(10):\n value = 10\n break\n\n elif module_number == int(0):\n os.system('clear')\n sys.exit(f'{purple}Thanks for Your Time!! Byeee!!{END}'.center(100))\n \n \n else:\n print('Please choose module {0}number 1 to 5{1}'.format(red, END))\n continue\n # RETURN INT VALUE\n return value\n\n\nbig_banner()\ndisclaimer()\nwhile True:\n try:\n module_number = Menu() # CALL MENU THEN MENU WILL RETURN INT VALUE\n\n if module_number == int(1):\n os.system('clear')\n time.sleep(2)\n print('{0}LOADING AP-SNIFFER{1}'.format(green, END))\n print('')\n from bar.bar import bar_module2\n\n bar_module2();\n print('')\n time.sleep(3)\n print('')\n from AP.module import module1\n\n module1()\n time.sleep(2)\n os.system(\"clear\")\n # LOOP BACK TO MENU\n\n elif module_number == int(2):\n os.system('clear')\n print('LOADING HANDSHAKE MODULE V1-BETA')\n from banner.banner import banner\n\n banner()\n from bar.bar import bar_module2\n\n bar_module2();\n from handshakes import module_handshakes\n\n time.sleep(2)\n os.system(\"clear\")\n # LOOP BACK TO MENU\n\n elif module_number == int(3):\n os.system('clear')\n print('{0} LOADING DEAUTHENTICATION MODULE {1}'.format(green, END))\n print('')\n from bar.bar import bar_module2\n\n bar_module2();\n time.sleep(2)\n from AP.dds import *\n\n module_number()\n time.sleep(2)\n os.system(\"clear\")\n # LOOP BACK TO MENU\n\n elif module_number == int(4):\n # LOOP BACK TO MENU\n os.system('clear')\n print('LOADING ......wait!!!')\n from bar.bar import bar_module2\n bar_module2()\n os.system('clear')\n from banner.banner import banner\n banner()\n from AP.monitor import monitor\n monitor()\n time.sleep(3)\n os.system(\"clear\")\n\n elif module_number == int(5):\n os.system('clear')\n print('{0}LOADING MITM MODULE BYPASS IOS 16 AND UP{1}'.format(green, END))\n print('')\n from bar.bar import bar_module2\n\n bar_module2();\n time.sleep(2)\n from evilMITM.loader import moduleMitmf\n\n moduleMitmf();\n time.sleep(2)\n os.system(\"clear\")\n elif module_number == int(6):\n os.system('clear')\n print('LOADING .........')\n from bar.bar import bar_module2\n bar_module2()\n os.system('clear')\n from banner.banner import banner\n banner()\n from AP.managed import interface_managed\n interface_managed()\n os.system('clear')\n\n elif module_number == int(7):\n os.system('clear')\n print('LOADING MODULE ......VIF-CHECKER')\n from bar.bar import bar_module2\n bar_module2()\n os.system('clear')\n from banner.banner import banner\n banner()\n from VIF.vif import VIF\n VIF()\n os.system('clear')\n \n elif module_number == int(8):\n os.system('clear')\n print('LOADING STROLLING MODULE!!')\n from bar.bar import bar_module2\n bar_module2()\n os.system('clear')\n from banner.banner import banner\n banner()\n from mk47.module import mk47\n mk47()\n os.system('clear')\n\n elif module_number == int(9):\n os.system('clear')\n 
print(f'{BOLD}{green}success!!{END} {BOLD}refreshed{END}'.center(120))\n dr = os.getcwd()\n from bar.refresh import refresh\n refresh()\n elif module_number == int(10):\n os.system('clear')\n print(f'{red}{BOLD}{ITALIC}{BLINK} Module 10, Still On development!! Try Again letter{END}')\n \n # LOOP BACK TO MENU\n except Exception as E:\n print(f\"\\nError Name: {E}\")\n sys.exit(\n '\\nUnkwown error !! OCCURED !! if you Think this is bug.\\nGood report it here {0}{2}mranonymoustz@tutanota.com{1}'.format(\n red, \\\n END, ITALIC))\n","repo_name":"DARK-EAGLE-FRAMEWORK/framework","sub_path":"framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":10537,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"8219468556","text":"def solution(genres, plays):\n answer = []\n dic,cnt = {}, {}\n \n # 우선 어떤 장르가 제일 재생 합계가 많은지 계산\n temp = 0\n for key, value in zip(genres, plays): \n if key in cnt: \n cnt[key] += value\n else :\n cnt[key] = value\n s_cnt = sorted(cnt.items(), key=lambda x: x[1], reverse=True)\n \n \n i = 0\n for key, value in zip(genres, plays): \n if key in dic: \n dic[key].append([value, -1 * i])\n else :\n dic[key] = []\n dic[key].append([value, -1 * i])\n i+=1\n \n #s_cnt에서 많이 재생된 장르 이름 순서대로 뽑아서 dic에 저장된 결과를 조회\n # -1 곱해준 것은 정렬 순서에서 재생횟수 같으면 인덱스 적은 순으로 정렬하기 위해서 \n for key, value in s_cnt: \n list = sorted(dic[key], reverse = True)\n if len(list) >= 2: \n answer.append(-1 * list[0][1])\n answer.append(-1 * list[1][1])\n elif len(list) == 1: \n answer.append(-1 * list[0][1])\n return answer\n","repo_name":"lts96/Algorithm","sub_path":"프로그래머스/코딩테스트 고득점 Kit/베스트앨범.py","file_name":"베스트앨범.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25432646685","text":"import torch\nimport torch.nn as nn\nfrom torchvision import models\n\n\ndef featureL2Norm(feature):\n epsilon = 1e-6\n norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) +\n epsilon, 0.5).unsqueeze(1).expand_as(feature)\n return torch.div(feature, norm)\n\n\nclass FeatureExtraction(torch.nn.Module):\n def __init__(self, train_fe=False, feature_extraction_cnn='vgg19', normalization=True, last_layer='', use_cuda=True):\n super(FeatureExtraction, self).__init__()\n self.normalization = normalization\n\n # multiple extracting layers\n last_layer = last_layer.split(',')\n\n if feature_extraction_cnn == 'vgg16':\n self.model = models.vgg16(pretrained=True)\n # keep feature extraction network up to indicated layer\n vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',\n 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',\n 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',\n 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',\n 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5']\n\n start_index = 0\n self.model_list = []\n for l in last_layer:\n if l == '':\n l = 'pool4'\n layer_idx = vgg_feature_layers.index(l)\n assert layer_idx >= start_index, 'layer order wrong!'\n model = nn.Sequential(\n *list(self.model.features.children())[start_index:layer_idx + 1])\n self.model_list.append(model)\n start_index = layer_idx + 1\n\n if feature_extraction_cnn == 'vgg19':\n self.model = models.vgg19(pretrained=True)\n # keep feature extraction network up to indicated layer\n vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',\n 'conv2_1', 'relu2_1', 'conv2_2', 
'relu2_2', 'pool2',\n 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',\n 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',\n 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5']\n\n # vgg_output_dim = [64, 64, 64, 64, 64,\n # 128, 128, 128, 128, 128,\n # 256, 256, 256, 256, 256, 256, 256, 256, 256,\n # 512, 512, 512, 512, 512, 512, 512, 512, 512,\n # 512, 512, 512, 512, 512, 512, 512, 512, 512]\n\n start_index = 0\n self.model_list = []\n # self.out_dim = 0\n for l in last_layer:\n if l == '':\n l = 'relu5_4'\n layer_idx = vgg_feature_layers.index(l)\n assert layer_idx >= start_index, 'layer order wrong!'\n # self.out_dim += vgg_output_dim[layer_idx]\n model = nn.Sequential(\n *list(self.model.features.children())[start_index:layer_idx + 1])\n self.model_list.append(model)\n start_index = layer_idx + 1\n\n if feature_extraction_cnn in ['resnet18', 'resnet101']:\n\n if feature_extraction_cnn == 'resnet18':\n self.model = models.resnet18(pretrained=True)\n else:\n self.model = models.resnet101(pretrained=True)\n\n resnet_feature_layers = ['conv1',\n 'bn1',\n 'relu',\n 'maxpool',\n 'layer1',\n 'layer2',\n 'layer3',\n 'layer4']\n\n resnet_module_list = [self.model.conv1,\n self.model.bn1,\n self.model.relu,\n self.model.maxpool,\n self.model.layer1,\n self.model.layer2,\n self.model.layer3,\n self.model.layer4]\n\n start_index = 0\n self.model_list = []\n for l in last_layer:\n if l == '':\n l = 'layer3'\n layer_idx = resnet_feature_layers.index(l)\n assert layer_idx >= start_index, 'layer order wrong!'\n model = nn.Sequential(\n *resnet_module_list[start_index:layer_idx + 1])\n self.model_list.append(model)\n start_index = layer_idx + 1\n\n if not train_fe:\n # freeze parameters\n for param in self.model.parameters():\n param.requires_grad = False\n # move to GPU\n if use_cuda:\n self.model_list = [model.cuda() for model in self.model_list]\n\n def forward(self, image_batch):\n features_list = []\n features = image_batch\n for model in self.model_list:\n features = model(features)\n if self.normalization:\n features = featureL2Norm(features)\n features_list.append(features)\n return features_list\n \n\n","repo_name":"Vasanth731/segmentation-masks","sub_path":"agsd_seg/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":5590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40219455628","text":"import deepl\nimport os\nfrom pathlib import Path\ntranslator = deepl.Translator(os.getenv(\"DEEPL_AUTH_KEY\"))\npath = os.getcwd()\n\nfor root, dirs, files in os.walk(\"./source/objectsfirst_german/\"):\n for name in files:\n file = os.path.join(root, name)\n print(file)\n target_file = os.path.join(root, name).replace(\"german\", \"english\")\n txt_de = Path(file).read_text()\n txt_en = translator.translate_text(txt_de, target_lang=\"EN-US\")\n txt_en_str = str(txt_en)\n txt_en_str = txt_en_str.replace(\"\\n`` python\", \"\\n``` python\")\n txt_en_str = txt_en_str.replace(\"\\n``python\", \"\\n``` python\")\n txt_en_str = txt_en_str.replace(\"\\n`` {\", \"\\n``` {\")\n txt_en_str = txt_en_str.replace(\"\\n``{\", \"\\n``` {\")\n output = open(target_file, 'w')\n output.write(txt_en_str)\n 
output.close()","repo_name":"asbl/miniworldmaker","sub_path":"docs/translate_docs.py","file_name":"translate_docs.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"19933587347","text":"class RECTANGLE:\n def __init__(self,height,width,corner_x,corner_y):\n self.height = height\n self.width = width\n self.corner_x = corner_x\n self.corner_y = corner_y\n\n def find_center(self):\n center_x = self.corner_x + (self.width / 2 )\n center_y = self.corner_y + (self.height/ 2 )\n print(\"the center of the rectange is (\",center_x,',',center_y,')')\n\n def area(self):\n Area = self.height * self.width\n print('The arae is ',Area,\"unit square\")\n\n def perimeter(self):\n Peri = 2 * (self.height + self.width)\n print(\"Perimeter is \",Peri,\"units\")\n\ninstance = RECTANGLE(10,20,0,0)\ninstance.find_center()\ninstance.area()\ninstance.perimeter()","repo_name":"8G6/CST283_Model_QA","sub_path":"17_a.py","file_name":"17_a.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33222370198","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom knowledge_distillation.kd.KD_framework import KDFramework\n\n\nclass SoftTargetKD(KDFramework):\n \"\"\"\n Original implementation of Knowledge distillation from the paper \"Distilling the\n Knowledge in a Neural Network\" https://arxiv.org/pdf/1503.02531.pdf\n :param teacher_model (torch.nn.Module): Teacher model\n :param student_model (torch.nn.Module): Student model\n :param train_loader (torch.utils.data.DataLoader): Dataloader for training\n :param val_loader (torch.utils.data.DataLoader): Dataloader for validation/testing\n :param optimizer_teacher (torch.optim.*): Optimizer used for training teacher\n :param optimizer_student (torch.optim.*): Optimizer used for training student\n :param loss_fn (torch.nn.Module): Calculates loss during distillation\n :param temp (float): Temperature parameter for distillation\n :param distil_weight (float): Weight paramter for distillation loss\n :param device (str): Device used for training; 'cpu' for cpu and 'cuda' for gpu\n :param log (bool): True if logging required\n :param logdir (str): Directory for storing logs\n \"\"\"\n\n def __init__(\n self,\n teacher_data,\n student_model,\n train_loader,\n val_loader,\n optimizer_student,\n scheduler,\n loss_fn=nn.MSELoss(),\n temp=20.0,\n distill_weight=0.5,\n perturb_distill_weight=0.5,\n eps=8,\n device=\"cpu\",\n att_object=None,\n attack_model=None,\n experiment_name='exp_name',\n log=False,\n logdir=\"./Experiments\",\n ):\n super(SoftTargetKD, self).__init__(\n teacher_data,\n student_model,\n train_loader,\n val_loader,\n optimizer_student,\n scheduler,\n loss_fn,\n temp,\n distill_weight,\n perturb_distill_weight,\n eps,\n device,\n att_object,\n attack_model,\n experiment_name,\n log,\n logdir,\n )\n\n def calculate_kd_loss(self, y_pred_student, y_pred_teacher, y_true, distil_weight):\n \"\"\"\n Function used for calculating the KD loss during distillation\n :param y_pred_student (torch.FloatTensor): Prediction made by the student model\n :param y_pred_teacher (torch.FloatTensor): Prediction made by the teacher model\n :param y_true (torch.FloatTensor): Original label\n \"\"\"\n\n soft_teacher_out = F.softmax(y_pred_teacher / self.temp, dim=1)\n soft_student_out = F.softmax(y_pred_student / self.temp, dim=1)\n\n loss = (1 - distil_weight) * 
F.cross_entropy(y_pred_student, y_true)\n loss += (distil_weight * self.temp * self.temp) * self.loss_fn(\n soft_teacher_out, soft_student_out\n )\n return loss\n","repo_name":"YaelRe/KD_PROJECT","sub_path":"knowledge_distillation/kd/soft_target_KD.py","file_name":"soft_target_KD.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8707628168","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 20 23:30:11 2020\n\n@author: suryakantkumar\n\"\"\"\n\n\n'''\nProblem : Jim's Burgers has a line of hungry customers. \nOrders vary in the time it takes to prepare them. Determine the order the customers receive their orders. \nStart by numbering each of the customers from 1 to n, front of the line to the back. \nYou will then be given an order number and a preparation time for each customer.\n\nThe time of delivery is calculated as the sum of the order number and the preparation time. \nIf two orders are delivered at the same time, assume they are delivered in ascending customer number order.\n\nFor example, there are n = 5 customers in line. They each receive an order number order[i] and a preparation time prep[i].:\n\nCustomer\t 1\t2\t3\t4\t5\nOrder #\t\t8\t5\t6\t2\t4\nPrep time\t3\t6\t2\t3\t3\nCalculate:\nServe time\t11\t11\t8\t5\t7\n\nWe see that the orders are delivered to customers in the following order:\n\nOrder by:\nServe time\t5\t7\t8\t11\t11\nCustomer\t 4\t5\t3\t1\t2\n'''\n\n\nimport os\n\ndef jimOrders(orders):\n time = []\n for e in orders:\n time.append(e[0] + e[1])\n\n freq = {v : i +1 for i, v in enumerate(sorted(time))}\n \n li = []\n for ele in freq:\n for i in range(len(time)):\n if time[i] == ele:\n li.append(i + 1)\n time[i] = -1\n \n return li\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n n = int(input())\n orders = []\n for _ in range(n):\n orders.append(list(map(int, input().rstrip().split())))\n result = jimOrders(orders)\n fptr.write(' '.join(map(str, result)))\n fptr.write('\\n')\n fptr.close()","repo_name":"SuryakantKumar/HackerRank-Problem-Solving","sub_path":"Easy Level/Jim-And-The-Orders.py","file_name":"Jim-And-The-Orders.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"5346674922","text":"import pandas as pd\nimport numpy as np\nimport time\n\nfrom utils import BatchGenerator, batch_balance_sampling\nfrom callbacks import SlackLogger #custom file for callbacks\nfrom models import unet\n\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom imblearn.over_sampling import RandomOverSampler\n\n# Loading data from pickle:\ndata = pd.read_pickle(\"train-data-filelist.pkl\")\ntest_data = pd.read_pickle(\"test-data-filelist.pkl\")\n\nsplitter = StratifiedShuffleSplit(1, test_size=0.1)\n\nfor train_index, test_index in splitter.split(data, data['label'].values):\n train_set = data.loc[train_index]\n validation_set = data.loc[test_index]\n\npatch_size = (132,132,116) # smallest possible patch size is (108,108,108)\nbatch_size = 4 # 16 is max due to gpu memory errors\n\nsampler = RandomOverSampler(random_state=42)\n\ntrain_generator = BatchGenerator(train_set, patch_size, batch_size=batch_size, sampling=sampler.fit_sample)\nvalidation_generator = BatchGenerator(validation_set, patch_size, batch_size=batch_size)\ntest_generator = 
BatchGenerator(test_data, patch_size, batch_size=batch_size, test=True)\n\n\nif __name__ == \"__main__\":\n model = unet(input_shape=[*patch_size,1])\n test = True\n\n if not test:\n timeNow = time.strftime(\"%e%m-%H%M%S\")\n\n slacklogger = SlackLogger()\n tensorboard = TensorBoard(log_dir='./logs/log-'+str(timeNow), batch_size=batch_size)\n modelcheck = ModelCheckpoint(\"weights-\"+str(timeNow)+\".hdf5\", monitor='val_loss', verbose=0, save_best_only=True,\n save_weights_only=False, mode='auto', period=1)\n\n model.fit_generator(generator=train_generator,\n validation_data=validation_generator,\n steps_per_epoch=10000,\n validation_steps=1000,\n epochs=3,\n callbacks=[modelcheck, slacklogger, tensorboard],\n verbose=1)\n else:\n model.load_weights('weights-2006-105451.hdf5')\n \n print(test_data.shape)\n \n output = model.predict_generator(test_generator, steps=len(test_generator), verbose=1)\n #output = shift_and_stitch(model, test_data, patch_size, patch_size, (44,44,28), 3)\n np.save('test_patch_segmentations',output)\n\n","repo_name":"SvenDH/ISMI-Fissure-Detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
+{"seq_id":"2413383314","text":"import os\nimport requests\n\n\ndef gapi_get_response(address):\n global gapi_response\n gkey = os.getenv(\"GOOGLE_API_KEY\")\n url = f\"https://maps.googleapis.com/maps/api/geocode/json?address={address}&key={gkey}\"\n gapi_response = requests.get(url)\n results = gapi_response.json()\n return results['status'] == 'OK'\n\n\ndef wapi_get_response(zipcode, country):\n global wapi_response\n # There is an open issue that doesn't allow searching by zipcode from some places in other countries\n # https://openweathermap.desk.com/customer/portal/questions/17194531-search-by-zip-postal-code-not-working\n wkey = os.getenv(\"OPENWEATHER_API_KEY\")\n url = f\"http://api.openweathermap.org/data/2.5/weather?units=imperial&zip={zipcode},{country}&APPID={wkey}\"\n wapi_response = requests.get(url)\n return \"main\" in wapi_response.json()\n\n\ndef gapi_get_postal_code():\n location = gapi_response.json()\n\n # default value (if no postal code is found)\n postal_code = False\n\n for result in location['results'][0]['address_components']:\n if result['types'][0] == 'postal_code':\n postal_code = result['long_name']\n\n return postal_code\n\n\ndef gapi_get_location_name():\n\n location = gapi_response.json()\n\n country = False\n region = False\n city = False\n components = location[\"results\"][0][\"address_components\"]\n for comp in components:\n if \"locality\" in comp[\"types\"]:\n city = comp[\"long_name\"]\n elif \"administrative_area_level_2\" in comp[\"types\"] and city is False:\n city = comp[\"long_name\"]\n elif \"administrative_area_level_1\" in comp[\"types\"]:\n region = comp[\"short_name\"]\n elif \"country\" in comp[\"types\"]:\n country = comp[\"short_name\"]\n\n return city, region, country\n\n\ndef wapi_get_temperature():\n temperature = wapi_response.json()\n return temperature[\"main\"][\"temp\"]\n","repo_name":"lnicorena/weather-app-python-vuejs","sub_path":"server/flask/app/utils/external.py","file_name":"external.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"20437669130","text":"import sys\nfrom datetime import time\ntest_cases = open(sys.argv[1], 'r')\ntest_lines = (line.rstrip() for line in 
test_cases)\nfor test in test_lines:\n\tallTimeTable = test.split(\" \")\n\ttimeSchedule = []\n\tsortedSchedule = []\n\tfor timeEntry in allTimeTable:\n\t\th, m, s = timeEntry.split(\":\")\n\t\ttimeSchedule.append(time(int(h), int(m), int(s)))\n\tfor timeEntry in reversed(sorted(timeSchedule)):\n\t\tformatedTime = timeEntry.strftime(\"%H:%M:%S\")\n\t\tsortedSchedule.append(formatedTime)\n\tfinalSchedule = \" \".join(sortedSchedule)\n\tprint(finalSchedule)\ntest_cases.close()","repo_name":"stascrash/codeeval","sub_path":"time_to_eat.py","file_name":"time_to_eat.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22737121417","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Number of all the patients\nN_patient = 5\n\n#public_patient_data = np.array([True, False, True, True, True])\npublic_patient_data = np.random.rand(N_patient) < 0.9\npublic_count_true = np.count_nonzero(public_patient_data == True)\n#df = pd.DataFrame(public_patient_data.reshape(-1,N_patient), columns=['patient-0', 'patient-1', 'patient-2', 'patient-3', 'patient-4'])\n#df['count(True)'] = public_count_true\n#df\n\nfrom itertools import combinations\n\ndef get_combinations(N_patient, N_true):\n combination_results_list = []\n count_true_list = []\n\n comb = combinations(np.arange(N_patient), N_true)\n for i in list(comb):\n combination_results = np.empty(N_patient, dtype=bool)\n combination_results[:] = False\n if len(i) > 0:\n combination_results[np.array(i)] = True\n combination_results_list.append(combination_results)\n count_true_list.append(np.count_nonzero(combination_results == True))\n \n return combination_results_list, count_true_list\n\ndef generate_patient_data(N_patient):\n patients_list = []\n count_true_list = []\n \n for i in range(N_patient + 1):\n N_true = N_patient - i\n temp_patients_list, temp_count_true_list = get_combinations(N_patient, N_true)\n patients_list += temp_patients_list\n count_true_list += temp_count_true_list\n \n return patients_list, count_true_list\n\noriginal_patient_data_list, original_count_true_list = generate_patient_data(N_patient)\n#df = pd.DataFrame(original_patient_data_list, columns=['patient-0', 'patient-1', 'patient-2', 'patient-3', 'patient-4'])\n#df['count(True)'] = original_count_true_list\n#df\n\ndef process_patient_data(patient_data, spinner_results):\n patient_data_output = np.copy(patient_data)\n patient_data_output[spinner_results == False] = patient_data[spinner_results == False] != True\n return patient_data_output\n\ndef get_processed_patient_data(patient_data, spinner_results_list):\n patient_data_output_list = []\n count_true_list = []\n\n for i in range(len(spinner_results_list)):\n patient_data_output = process_patient_data(patient_data, spinner_results_list[i])\n patient_data_output_list.append(patient_data_output)\n count_true_list.append(np.count_nonzero(patient_data_output == True))\n \n return patient_data_output_list, count_true_list\n\ndef generate_spinner_data(N_patient):\n spinners_list = []\n count_true_list = []\n \n for i in range(N_patient + 1):\n N_true = N_patient - i\n temp_spinners_list, temp_count_true_list = get_combinations(N_patient, N_true)\n spinners_list += temp_spinners_list\n count_true_list += temp_count_true_list\n \n return spinners_list, count_true_list\n\ndef get_spinner_probability_list(p_light, N_patient, spinner_count_true_list):\n p_dark = 1.0 - p_light\n\n spinner_probability_list = 
[]\n\n for i in spinner_count_true_list:\n prob = pow(p_light, i) * pow(p_dark, N_patient - i)\n spinner_probability_list.append(prob)\n\n return spinner_probability_list\n\nspinner_data_list, spinner_count_true_list = generate_spinner_data(N_patient)\n\np_light = 0.9\nspinner_probability_list = get_spinner_probability_list(p_light, N_patient, spinner_count_true_list)\n\n#df = pd.DataFrame(spinner_data_list, columns=['spinner-0', 'spinner-1', 'spinner-2', 'spinner-3', 'spinner-4'])\n#df['count(True)'] = spinner_count_true_list\n#df['probability'] = spinner_probability_list\n#df\n\noriginal_patient_data = original_patient_data_list[1]\noutput_patient_data_list, output_count_true_list = get_processed_patient_data(original_patient_data, spinner_data_list)\n#df = pd.DataFrame(output_patient_data_list, columns=['patient-0', 'patient-1', 'patient-2', 'patient-3', 'patient-4'])\n#df['count(True)'] = output_count_true_list\n#df['probability'] = spinner_probability_list\n#df\n\nfor i in range(len(output_patient_data_list)):\n if (public_patient_data == output_patient_data_list[i]).all():\n print(f'[ 1] Original{original_patient_data}')\n print(f'[{i:-2}] Spinner{spinner_data_list[i]}')\n print(f' -> Public{public_patient_data}')\n print(f' ** Probability: {spinner_probability_list[i]:.6f}')\n \ndef get_likelihood_list(original_patient_data_list, public_patient_data, spinner_data_list, spinner_probability_list):\n likelihood_list = []\n spinner_index_list = []\n for j in range(len(original_patient_data_list)):\n original_patient_data = original_patient_data_list[j]\n output_patient_data_list, output_count_true_list = get_processed_patient_data(original_patient_data, spinner_data_list)\n for i in range(len(output_patient_data_list)):\n if (public_patient_data == output_patient_data_list[i]).all():\n likelihood_list.append(spinner_probability_list[i])\n spinner_index_list.append(i)\n return likelihood_list, spinner_index_list\n\nlikelihood_list, spinner_index_list = get_likelihood_list(original_patient_data_list, public_patient_data, spinner_data_list, spinner_probability_list)\n\nfor i in range(len(original_patient_data_list)):\n spinner_index = spinner_index_list[i]\n print(f'[{i:-2}] Original{original_patient_data_list[i]} [{spinner_index:-2}] Spinner{spinner_data_list[spinner_index]} ** Probability: {likelihood_list[i]:.6f}')\n \ndef plot_likelihood_distribution(likelihood_list):\n plt.plot(np.arange(len(likelihood_list)), likelihood_list, marker='o')\n plt.title(f'For p_light={p_light}')\n plt.ylim(0, 1)\n plt.grid(True)\n plt.xlabel('original data index')\n plt.ylabel('likelihood')\n plt.show()\n \nplot_likelihood_distribution(likelihood_list)\n\ndef estimate_original_count_true(original_count_true_list, likelihood_list):\n estimate = 0\n for i in range(len(original_count_true_list)):\n estimate += original_count_true_list[i] * likelihood_list[i]\n return estimate\n\np_light_list = [0.9, 0.75, 0.6, 0.5]\n\nplt.figure(figsize=(16,4))\n\nfor i in range(len(p_light_list)):\n p_light = p_light_list[i]\n spinner_probability_list = get_spinner_probability_list(p_light, N_patient, spinner_count_true_list)\n likelihood_list, _ = get_likelihood_list(original_patient_data_list, public_patient_data, spinner_data_list, spinner_probability_list)\n \n estimate = estimate_original_count_true(original_count_true_list, likelihood_list)\n print(f'Estimate original count true: {estimate:.2f} for p_light={p_light}')\n \n plt.subplot(141+i)\n plt.plot(np.arange(len(likelihood_list)), likelihood_list, marker='o')\n 
plt.title(f'For p_light={p_light}')\n plt.ylim(0, 1)\n plt.grid(True)\n plt.xlabel('original data index')\n plt.ylabel('likelihood')\n\nplt.show()\n","repo_name":"trvoid/dp-study","sub_path":"differential-privacy-intro/differential-privacy-part3-likelihood.py","file_name":"differential-privacy-part3-likelihood.py","file_ext":"py","file_size_in_byte":6794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19513810151","text":"import nuke,os\nimport DML_Tools\nimport DML_Tools.DML_Nuke.Nuke_GUI.Generic_Widgets.Generic_Widgets_Nodes\nDML_Nuke = DML_Tools.DML_Nuke\n\n\ndef get_DML_Gimped_PSD_Group_Last_Layer_Build_Order(node):\n\treturn eval(node.knob(\"dml_last_layer_build_order\").getText())\n\n################################################################################\nclass DML_Gimped_PSD_Group(DML_Nuke.Nuke_Nodes.Standered_Nodes.Group):\n\t#NODE_TYPE_RELATION = None\n\tRETURN_OVERIDE_CHECK_TYPE = \"Group\"\n\t#----------------------------------------------------------------------\n\t@classmethod\n\tdef _overide_Return_Check(cls,node):\n\t\t\"\"\"\"\"\"\n\t\ttry:\n\t\t\treturn node.knob(\"DML_NODE_CLASS\").value() == \"DML_Gimped_PSD_Group\"\n\t\texcept:\n\t\t\treturn False\n\t#----------------------------------------------------------------------\n\tdef __init__(self,*args,**kwargs):\n\t\t\"\"\"\"\"\"\n\t\tif not self.hasKnob(\"DML_NODE_CLASS\"):\n\t\t\tknob = nuke.String_Knob(\"DML_NODE_CLASS\",\"DML Node Class\")\n\t\t\tknob.setVisible(False)\n\t\t\tknob.setText(\"DML_Gimped_PSD_Group\")\n\t\t\tself.addKnob(knob)\n\t\t\n\t\tif not self.hasKnob(\"dml_folder_destination\"):\n\t\t\tself._folder_destination_knob = nuke.Link_Knob(\"dml_folder_destination\")\n\t\t\tself.addKnob(self._folder_destination_knob)\n\t\telse:\n\t\t\tself._folder_destination_knob = self.nuke_object.knobs()[\"dml_folder_destination\"]\n\t\tself._folder_destination_knob.setVisible(False)\n\t\t\n\t\tif not self.hasKnob(\"dml_file_name\"):\n\t\t\tself._file_name_knob = nuke.Link_Knob(\"dml_file_name\")\n\t\t\tself.addKnob(self._file_name_knob)\n\t\telse:\n\t\t\tself._file_name_knob = self.nuke_object.knobs()[\"dml_file_name\"]\n\t\tself._file_name_knob.setVisible(False)\n\t\t\n\t\tif not self.hasKnob(\"dml_frame_padding\"):\n\t\t\tself._frame_padding_knob = nuke.Link_Knob(\"dml_frame_padding\")\n\t\t\tself.addKnob(self._frame_padding_knob)\n\t\telse:\n\t\t\tself._frame_padding_knob = self.nuke_object.knobs()[\"dml_frame_padding\"]\n\t\tself._frame_padding_knob.setVisible(False)\n\t\t\n\t\tif not self.hasKnob(\"dml_output_views\"):\n\t\t\tself._output_views_knob = nuke.MultiView_Knob(\"dml_output_views\")\n\t\t\tself.addKnob(self._output_views_knob)\n\t\telse:\n\t\t\tself._output_views_knob = self.nuke_object.knobs()[\"dml_output_views\"]\n\t\t#self._output_views_knob.setVisible(False)\n\t\t\n\t\tif not self.hasKnob(\"dml_last_layer_build_order\"):\n\t\t\tself._last_layer_build_order_knob = nuke.String_Knob(\"dml_last_layer_build_order\")\n\t\t\tself.addKnob(self._last_layer_build_order_knob)\n\t\t\tself._last_layer_build_order_knob.setText(repr([]))\n\t\telse:\n\t\t\tself._last_layer_build_order_knob = self.nuke_object.knobs()[\"dml_last_layer_build_order\"]\n\t\tself._last_layer_build_order_knob.setVisible(False)\n\t\t\n\t\tif not len(nuke.allNodes(\"Input\",self.nuke_object)):\n\t\t\twith self.nuke_object:\n\t\t\t\tnuke.nodes.Input(xpos=0,ypos=0)\n\t\t\n\t\tself.input_node = 
DML_Nuke.dml.to_DML_Node(nuke.allNodes(\"Input\",self.nuke_object)[0])\n\t\t\n\t\tif not len(nuke.allNodes(\"Output\",self.nuke_object)):\n\t\t\twith self.nuke_object:\n\t\t\t\toutput_node = nuke.nodes.Output(xpos=0,ypos=500)\n\t\t\t\toutput_node.setInput(0,self.input_node.nuke_object)\n\t\tself.output_node = DML_Nuke.dml.to_DML_Node(nuke.allNodes(\"Output\",self.nuke_object)[0])\n\t\t\n\t\tself._noop_node = self.nuke_object.node(\"PSD_LAYER_SHUFFLE_SECTION\")\n\t\tif self._noop_node == None:\n\t\t\twith self.nuke_object:\n\t\t\t\tif self._noop_node == None:\n\t\t\t\t\tself._noop_node = DML_Nuke.Nuke_Nodes.Nodes.NoOp(xpos=0,ypos=250,name=\"PSD_LAYER_SHUFFLE_SECTION\")\n\t\t\t\t\tself._noop_node.setInput(0, self.input_node)\n\t\t\t\t\tself.output_node.setInput(0, self._noop_node)\n\t\telse:\n\t\t\tself._noop_node = DML_Nuke.dml.to_DML_Node(self._noop_node)\n\t#----------------------------------------------------------------------\n\tdef assign_knob_links(self,folder_destination,file_name,frame_padding,output_views):\n\t\t\"\"\"\"\"\"\n\t\tself._file_name_knob.setLink(file_name.fullyQualifiedName())\n\t\tself._folder_destination_knob.setLink(folder_destination.fullyQualifiedName())\n\t\tself._frame_padding_knob.setLink(frame_padding.fullyQualifiedName())\n\t\tself._output_views_knob.setValue(\"{{\" + output_views.fullyQualifiedName() + \"}}\")\n\t#----------------------------------------------------------------------\n\tdef create_Layers_To_Render(self, layers, xOffset=200, xSpaceing=150, yOffset=100):\n\t\t# set the node to be the only thing selected\n\t\tself._noop_node.selectOnly()\n\t\tnew_layer_build_order = []\n\t\t[nuke.delete(node) for node in nuke.allNodes(group=self.nuke_object) if not node in [self.input_node.nuke_object,self.output_node.nuke_object,self._noop_node.nuke_object]]\n\t\t\n\t\t# collect only the layers that exist in the noop node's layers\n\t\tcreateion_layers = [layer for layer in layers if layer in self.layers]\n\t\n\t\t# calculate the start pos of the shuffles\n\t\tmaster_xpos = self._noop_node.x + xOffset\n\t\tmaster_ypos = self._noop_node.y\n\t\n\t\t# keeps track of the last shuffle node created and used to connect the current shuffle node \n\t\tlast_created_shuffle = None\n\t\t\n\t\tshuffle_nodes = DML_Nuke.Nuke_Nodes.Node_List()\n\t\twrite_nodes = DML_Nuke.Nuke_Nodes.Node_List()\n\t\t# scan over each layer and create the nodes\n\t\twith self.nuke_object:\n\t\t\t\n\t\t\tfor index,layer in enumerate(createion_layers):\n\t\t\t\t# create the shuffle node\n\t\t\t\tnew_layer_build_order.append(layer)\n\t\t\t\tshuffle_node = DML_Nuke.Nuke_Nodes.Standered_Nodes.Shuffle(**{\"in\":layer, \"out\":\"rgba\", \"xpos\":master_xpos + (xSpaceing * index), \"ypos\":master_ypos,\"label\":\"[value in ]\"})\n\t\t\t\tshuffle_nodes.append(shuffle_node)\n\t\t\t\tshuffle_node.in_layer_value\n\t\t\t\t# check for a last created shuffle node\n\t\t\t\tif last_created_shuffle == None:\n\t\t\t\t\t# if not, set the current shuffle node's input to the input start node\n\t\t\t\t\tshuffle_node.setInput(0,self._noop_node)\n\t\t\t\telse:\n\t\t\t\t\t# otherwise set the current shuffle node's input to the shuffle node that was created just before this one\n\t\t\t\t\tshuffle_node.setInput(0,last_created_shuffle)\n\t\t\t\t# assign the current shuffle node to the last created input for the next loop\n\t\t\t\tlast_created_shuffle = shuffle_node\n\t\t\t\t# check if write nodes should be created for the 
shuffles\n\t\t\t\twrite_node=DML_Nuke.Nuke_Nodes.Standered_Nodes.Write(xpos=shuffle_node.x,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ypos=shuffle_node.y + yOffset,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfile_type=\"png\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcreate_directories=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchannels=\"rgba\")\n\t\t\t\t# connect the write nodes input to the current shuffle node\n\t\t\t\twrite_node.setInput(0,shuffle_node)\n\t\t\t\t#write_node.knob(\"render_order\").setValue(index+1)\n\t\t\t\twrite_node.knob(\"file\").setValue(\"[value parent.dml_folder_destination]/%0[value parent.dml_frame_padding]d/[value input0.in].png\")\n\t\t\t\twrite_node.knob(\"views\").setValue('{{parent.dml_output_views}}')\n\t\t\t\twrite_node.knob(\"disable\").setExpression('parent.disable')\n\t\t\t\tif \"ICC_knob\" in write_node.knobs():\n\t\t\t\t\twrite_node.knob(\"ICC_knob\").setValue('sRGB.icc')\n\t\t\t\twrite_nodes.append(write_node)\n\t\tnew_layer_build_order.reverse()\n\t\tself._shuffle_nodes = shuffle_nodes\n\t\tself._write_nodes = write_nodes\n\t\tself._last_layer_build_order_knob.setText(repr(new_layer_build_order))\n\t\treturn shuffle_nodes,write_nodes\n\t@property\n\t#----------------------------------------------------------------------\n\tdef shuffle_nodes(self):\n\t\t\"\"\"\"\"\"\n\t\treturn DML_Nuke.Nuke_Nodes.Node_List(sorted(nuke.allNodes(\"Shuffle\",self.nuke_object))).reorder_By_X_Value(reverse=True)\n\t@property\n\t#----------------------------------------------------------------------\n\tdef layer_names(self):\n\t\t\"\"\"\"\"\"\n\t\treturn [node.knob(\"in\").value() for node in self.shuffle_nodes]\n\t#----------------------------------------------------------------------\n\t@property\n\tdef last_layer_build_order(self):\n\t\t\"\"\"\"\"\"\n\t\treturn eval(self._last_layer_build_order_knob.getText())\n\t#----------------------------------------------------------------------\n\tdef get_Shuffle_Nodes(self):\n\t\t\"\"\"\"\"\"\n\t\tshuffles = DML_Nuke.dml.to_DML_Nodes(nuke.allNodes(\"Shuffle\",self.nuke_object))\n\t\tshuffles.reorder_By_X_Value()\n\t\treturn shuffles\n\t#----------------------------------------------------------------------\n\tdef get_Write_Nodes(self):\n\t\t\"\"\"\"\"\"\n\t\twrites = DML_Nuke.dml.to_DML_Nodes(nuke.allNodes(\"Write\",self.nuke_object))\n\t\twrites.reorder_By_X_Value()\n\t\treturn writes\n\ndef _update_DML_Layers_To_Gimped_PSD_Folder_Path(node):\n\tif False:\n\t\tisinstance(node,DML_Layers_To_Gimped_PSD)\n\tfolder = node.knob(\"dml_raw_folder_destination\").getText() \n\tfile_name = node.knob(\"dml_file_name\").getText()\n\tfolder_path = os.path.join(folder,\"_PNGs\",file_name).replace(\"\\\\\",\"/\")\n\tnode.knob(\"dml_folder_destination\").setText(folder_path)\n#----------------------------------------------------------------------\ndef does_DML_Layers_To_Gimped_PSD_Need_Rebuild(psd_node):\n\t\"\"\"\"\"\"\n\tif not isinstance(psd_node,DML_Layers_To_Gimped_PSD):\n\t\tpsd_node = DML_Layers_To_Gimped_PSD(nuke_node=psd_node)\n\tlayer_order = psd_node.imbeded_data_layer_order\n\t#last_build_order = psd_node._psd_build_group.last_layer_build_order\n\tcurrent_build_order = psd_node._psd_build_group.layer_names\n\tif not layer_order == current_build_order and len(current_build_order):\n\t\treturn True\n\n\tif len(psd_node.layers) != len(current_build_order):\n\t\treturn True\n\n\treturn False\n#----------------------------------------------------------------------\ndef on_DML_Layers_To_Gimped_PSD_Knob_Changed():\n\t\"\"\"\"\"\"\n\ttry:\n\t\tknob = 
nuke.thisKnob()\n\t\tnode = nuke.thisNode()\n\t\tif knob.name() in [\"dml_raw_folder_destination\",\"dml_enable_views\",\"dml_file_name\",\"dml_frame_padding\"]:\n\t\t\t_update_DML_Layers_To_Gimped_PSD_Folder_Path(node)\n\t\t\n\t\tif knob.name() == \"DML_Layer_Order_layers\":\n\t\t\tpsd_node = DML_Layers_To_Gimped_PSD(nuke_node=node)\n\t\t\tpsd_node.do_Error_Check()\n\texcept:\n\t\tpass\n\n################################################################################\nclass DML_Layers_To_Gimped_PSD(DML_Tools.DML_Nuke.Nuke_GUI.Generic_Widgets.Generic_Widgets_Nodes.Layer_Order_Views_Selector_Output_Builder_Node):\n\tNODE_TYPE_RELATION = \"DML_Layers_To_Gimped_PSD\"\n\t#----------------------------------------------------------------------\n\tdef __init__(self,*args,**kwargs):\n\t\t\"\"\"\"\"\"\n\t\tDML_Tools.DML_Nuke.Nuke_GUI.Generic_Widgets.Generic_Widgets_Nodes.Layer_Order_Views_Selector_Output_Builder_Node.__init__(self,*args,**kwargs)\n\t\tself._raw_folder_destination_knob = self.knob(\"dml_raw_folder_destination\")\n\t\tself._needs_rebuild = self.knob(\"dml_needs_rebuild\")\n\t\tself._raw_folder_destination_knob.setVisible(False)\n\t\tself._needs_rebuild.setVisible(False)\n\t\tself._psd_build_group = None#self._find_Psd_Build_Group()\n\t\t#self._create_PSD_Group()\n\t\t\n\t\tif False:\n\t\t\tisinstance(self._raw_folder_destination_knob, nuke.String_Knob)\n\t\t\tisinstance(self._needs_rebuild, nuke.Boolean_Knob)\n\t#----------------------------------------------------------------------\n\tdef do_Error_Check(self):\n\t\t\"\"\"\"\"\"\n\t\thas_error = does_DML_Layers_To_Gimped_PSD_Need_Rebuild(self)\n\t\tif has_error:\n\t\t\tself._needs_rebuild.setValue(False)\n\t\t\t#self.psd_build_group.knob(\"tile_color\").setValue(4278190335L)\n\t\t\t#try:\n\t\t\t\t#wig_knob = self.knob(\"dml_gimped_psd_builder\")\n\t\t\t\t#wig_object = wig_knob.getObject()\n\t\t\t\t#wig_object.channel_layers_list.rebuild_Items()\n\t\t\t#except:\n\t\t\t\t#pass\n\t\telse:\n\t\t\tself._needs_rebuild.setValue(True)\n\t\t\t#self.psd_build_group.knob(\"tile_color\").setValue(16711935)\n\t#----------------------------------------------------------------------\n\tdef _update_Folder_Path(self):\n\t\t_update_DML_Layers_To_Gimped_PSD_Folder_Path(self.nuke_object)\n\t#----------------------------------------------------------------------\n\tdef _find_Psd_Build_Group(self):\n\t\t\"\"\"\"\"\"\n\t\t# get all the nodes connected to this nuke node that are of type Group\n\t\tdependent = DML_Nuke.dml.to_DML_Nodes([n for n in self.dependent(nuke.INPUTS, forceEvaluate=True) if n.nuke_object.Class() == \"Group\"])\n\t\tfor node in DML_Nuke.dml.to_DML_Nodes(dependent):\n\t\t\tif isinstance(node,DML_Gimped_PSD_Group):\n\t\t\t\treturn node\n\t\treturn None\n\t#----------------------------------------------------------------------\n\t@property\n\tdef psd_build_group(self):\n\t\t\"\"\"\"\"\"\n\t\tif self._psd_build_group == None:\n\t\t\tself._create_PSD_Group()\n\t\treturn self._psd_build_group\n\t#----------------------------------------------------------------------\n\tdef _create_PSD_Group(self):\n\t\t\"\"\"\"\"\"\n\t\t#----------------------------------------------------------------------\n\t\tdef create_group():\n\t\t\tthis_parent = nuke.thisParent()\n\t\t\tif this_parent == None:\n\t\t\t\tthis_parent = nuke.root()\n\t\t\t\t\n\t\t\twith this_parent:\n\t\t\t\tself.selectOnly()\n\t\t\t\tself.selected = False\n\t\t\t\tgrp = nuke.createNode(\"Group\",\"tile_color 0xff00ff name Layers_To_PSD\",False)\n\t\t\t\tself._psd_build_group = 
DML_Gimped_PSD_Group(nuke_node=grp)\n\t\t\t\tself._psd_build_group.x = self.x\n\t\t\t\tself._psd_build_group.y = self.y + 100\n\t\t\t\tself._psd_build_group.setInput(0,self)\n\t\t\tself._psd_build_group.assign_knob_links(self._folder_path_knob, self._file_name_knob, self._frame_padding_knob, self._imbeded_data_View_Selection_knob)\n\t\t\tself.Initialize_build_Layers()\n\t\t\tself.create_Layers_To_Render()\n\t\t\treturn self._psd_build_group\n\t\t\n\t\t# get all the nodes connected to this nuke node that are of type Group\n\t\tdependent = [n for n in self.dependent(nuke.INPUTS) if n.nuke_object.Class() == \"Group\"]\n\t\tif not len(dependent):\n\t\t\tdependent = [n for n in self.dependent(nuke.INPUTS) if n.nuke_object.Class() == \"Group\"]\n\t\t\n\t\tself._psd_build_group = None\n\t\t\n\t\tif not len(dependent):\n\t\t\treturn create_group()\n\t\telse:\n\t\t\tfor node in DML_Nuke.dml.to_DML_Nodes(dependent):\n\t\t\t\tif node.__class__.__name__ == 'DML_Gimped_PSD_Group':\n\t\t\t\t\tself._psd_build_group = node\n\t\t\t\t\tself._psd_build_group.assign_knob_links(self._folder_path_knob, self._file_name_knob, self._frame_padding_knob, self._imbeded_data_View_Selection_knob)\n\t\t\t\t\treturn self._psd_build_group\n\t\t\treturn create_group()\n\t\traise LookupError(\"Could Not Find PSD Build Node Connected to this node\")\n\t#----------------------------------------------------------------------\n\tdef create_Layers_To_Render(self):\n\t\t\"\"\"\"\"\"\n\t\tlayer_order = list(reversed(self.imbeded_data_layer_order))\n\t\tself.psd_build_group.create_Layers_To_Render(layer_order, xOffset=200, xSpaceing=150, yOffset=100)\n\t\tself._needs_rebuild.setValue(True)\n\t\t\t\t\n\t#----------------------------------------------------------------------\n\tdef generate_Json_Data(self,frame,multi_frame=True):\n\t\t\"\"\"\"\"\"\n\t\tpsd_node = self\n\t\twriteNode = self.psd_build_group.get_Write_Nodes()[0]\n\t\tres = []\n\t\t# get a list of all the nuke views\n\t\tall_view_names = nuke.views()\n\t\t# store the option that determines if views are to be used or not\n\t\tif \"%V\" in self._file_name_knob.getText() or \"%V\" in self._folder_path_knob.value():\n\t\t\tdml_enable_views = True\n\t\telse:\n\t\t\tdml_enable_views = False\n\t\t# get the views that are going to be rendered out\n\t\tview_selection = psd_node.active_views\n\t\t# get the layer order that the psd builder will use when assembling the psd file\n\t\tlayer_order_names = psd_node.imbeded_data_layer_order\n\t\t# get the frame padding\n\t\tframe_padding = int(psd_node._frame_padding_knob.value())\n\t\n\t\t# using the current frame and the padding, create a padded_frame for use in image file path creation\n\t\tpadded_frame = str(frame).zfill(frame_padding)\n\t\n\t\t# get the folder path that this write node will render the image to\n\t\tlayers_folder = os.path.dirname(nuke.filename(writeNode.nuke_object,nuke.REPLACE))\n\t\tfolder_parts = layers_folder.split(\"/\")\n\t\tlayers_folder = \"/\".join(folder_parts[0:-1]+[padded_frame])\n\t\t# check if the views are enabled\n\t\t# if so, then things get a little tricky\n\t\t# at no point in the render process is there view context, so nuke.thisView() will never work.\n\t\t# so to do any kind of action that is relative to a view you have to manually figure it out\n\t\t# by dissecting the folder path and the views that this write node is going to render\n\t\tif dml_enable_views:\n\t\t\t# create a list that will be used to store the folder paths for 
each view\n\t\t\tview_layer_folders = []\n\t\t\t# this will be a prebuilt folder path with built-in formatting that will be replaced with view names \n\t\t\tlayers_folder_exp = None\n\t\n\t\t\tif nuke.root().knob(\"DML_nuke_views_system_use_image_name\")==None:\n\t\t\t\tnuke.showSettings()\n\t\n\t\t\t# check the root node to determine if view names or image names are to be used\n\t\t\tif nuke.root().knob(\"DML_nuke_views_system_use_image_name\").value():\n\t\t\t\t# get a list of the image names for each view\n\t\t\t\tall_image_names = [nuke.DML_Nuke_View_System.get_Image_Name_From_View_Name(v) for v in all_view_names]\n\t\n\t\t\t\t# first we need to replace the view section of the folder path with {} for formatting later\n\t\t\t\t# because we don't know which view is the active view, we have to find it using the layers_folder\n\t\t\t\tfor image_name in all_image_names:\n\t\t\t\t\t# check if the image_name is in the layers_folder path\n\t\t\t\t\tif image_name in layers_folder.split(\"/\"):\n\t\t\t\t\t\t# if so replace it and we are done searching\n\t\t\t\t\t\tlayers_folder_exp = layers_folder.replace(image_name,\"{input_name}\")\n\t\t\t\t\t\tbreak\n\t\n\t\t\t\t# this is to make sure that we found the view to replace\n\t\t\t\tif layers_folder_exp is not None:\n\t\t\t\t\t# get the image names for the views this write node is using \n\t\t\t\t\tview_image_names = [nuke.DML_Nuke_View_System.get_Image_Name_From_View_Name(v) for v in view_selection]\n\t\t\t\t\t# iterate over each image name and build a folder path for the image name\n\t\t\t\t\tfor image_name in view_image_names:\n\t\t\t\t\t\tdata = {'input_name': image_name}\n\t\t\t\t\t\tview_layer_folders.append(layers_folder_exp.format(**data))\n\t\t\telse:\n\t\t\t\t# first we need to replace the view section of the folder path with {} for formatting later\n\t\t\t\t# because we don't know which view is the active view, we have to find it using the layers_folder\n\t\t\t\tfor view_name in all_view_names:\n\t\t\t\t\t# check if the view is in the layers_folder path\n\t\t\t\t\tif view_name in layers_folder.split(\"/\"):\n\t\t\t\t\t\t# if so replace it and we are done searching\n\t\t\t\t\t\tlayers_folder_exp = layers_folder.replace(view_name,\"{input_name}\")\n\t\t\t\t\t\tbreak\n\t\n\t\t\t\t# this is to make sure that we found the view to replace\n\t\t\t\tif layers_folder_exp is not None:\n\t\t\t\t\t# iterate over each view and build a folder path for the view name\n\t\t\t\t\tfor view_name in view_selection:\n\t\t\t\t\t\tdata = {'input_name': view_name}\n\t\t\t\t\t\tview_layer_folders.append(layers_folder_exp.format(**data))\n\t\n\t\t\t# iterate over each view folder\n\t\t\tfor view_layer_folder in view_layer_folders:\n\t\t\t\t# exam : C:/User_Input_Folder/versions/PNGS/view_folder/frame_folder\n\t\t\t\t# exam : C:/Psd_Local_output/v06/PNGS/Background/001\n\t\n\t\t\t\t# stores the path for each image to be used in the psd build, in the order that it should be added\n\t\t\t\tLayer_Order_Paths = []\n\t\n\t\t\t\t# iterate over each layer name\n\t\t\t\tfor layer_name in layer_order_names:\n\t\t\t\t\t# exam : Background\n\t\n\t\t\t\t\t# build the path to the image using the current view folder and layer name\n\t\t\t\t\t# exam : C:/User_Input_Folder/versions/PNGS/view_folder/frame_folder\n\t\t\t\t\t# exam : C:/Psd_Local_output/v06/PNGS/Background/001\n\t\t\t\t\tlayer_path = os.path.join(view_layer_folder,layer_name+\".png\")\n\t\t\t\t\t# exam : C:/User_Input_Folder/versions/PNGS/view_folder/frame_folder/layer_name.png\n\t\t\t\t\t# exam : 
C:/Psd_Local_output/v06/PNGS/Background/001/Background.png\n\t\n\t\t\t\t\t# normalize the path \n\t\t\t\t\tlayer_path = os.path.normpath(layer_path)\n\t\t\t\t\t# force consistent path separators\n\t\t\t\t\tlayer_path = layer_path.replace(\"\\\\\",\"/\")\n\t\t\t\t\t# add it to the collection\n\t\t\t\t\tLayer_Order_Paths.append(layer_path)\n\t\t\t\t# exam : C:/Psd_Local_output/v06/PNGS/Background/001/Background.png\n\t\t\t\t# exam : C:/Psd_Local_output/v06\n\t\t\t\t# exam : folder_end: Blurred_Oval_Bloo/001\n\t\t\t\tfolder_start,folder_end = view_layer_folder.split(\"/_PNGs/\",1)\n\t\t\t\t# exam : Blurred_Oval_Bloo/001\n\t\t\t\t# exam : Blurred_Oval_Bloo\n\t\t\t\timage_name = folder_end.split(\"/\")[0]\n\t\n\t\t\t\tif multi_frame:\n\t\t\t\t\t# exam : C:/Psd_Local_output/v06/Blurred_Oval_Bloo\n\t\t\t\t\tpsd_folder_path = os.path.join(folder_start,image_name).replace(\"\\\\\",\"/\")\n\t\t\t\t\t# exam : Blurred_Oval_Bloo_001.psd\n\t\t\t\t\tpsd_file_name = psd_folder_path.split(\"/\")[-1] + \"_\" + padded_frame + \".psd\"\n\t\t\t\telse:\n\t\t\t\t\t# exam : C:/Psd_Local_output/v06/Blurred_Oval_Bloo\n\t\t\t\t\tpsd_folder_path = os.path.join(folder_start).replace(\"\\\\\",\"/\")\n\t\t\t\t\t# exam : Blurred_Oval_Bloo_001.psd\n\t\t\t\t\tpsd_file_name = image_name+\".psd\"\n\t\t\t\t# lastly, combine the psd folder path with the psd file name for this view\n\t\t\t\t# exam : C:/Psd_Local_output/v06/Blurred_Oval_Bloo/Blurred_Oval_Bloo_001.psd\n\t\t\t\tPSD_File_Path = os.path.join(psd_folder_path,psd_file_name).replace(\"\\\\\",\"/\")\n\t\n\t\t\t\t# create the data to be written to json\n\t\t\t\tdata = dict(PSD_File_Path = PSD_File_Path,\n\t\t\t\t\t\t\tLayer_Order_Paths = Layer_Order_Paths)\n\t\t\t\tres.append(data)\n\t\n\t\telse:\n\t\t\t# stores the path for each image to be used in the psd build, in the order that it should be added\n\t\t\tLayer_Order_Paths = []\n\t\t\t# iterate over each layer name\n\t\t\tfor layer_name in layer_order_names:\n\t\t\t\t# build the layer file name\n\t\t\t\tlayer_file_name = layer_name + \".png\"\n\t\t\t\t# build file path to the image using the layers folder and the layer file name\n\t\t\t\tlayer_file_path = os.path.join(layers_folder,layer_file_name)\n\t\t\t\t# normalize the path \n\t\t\t\tlayer_file_path = os.path.normpath(layer_file_path)\n\t\t\t\t# force consistent path separators\n\t\t\t\tlayer_file_path = layer_file_path.replace(\"\\\\\",\"/\")\n\t\t\t\t# add it to the collection\n\t\t\t\tLayer_Order_Paths.append(layer_file_path)\n\t\n\t\t\tfolder_start,folder_end = layers_folder.split(\"/_PNGs/\",1)\n\t\t\t# exam : Blurred_Oval_Bloo/001\n\t\t\t# exam : Blurred_Oval_Bloo\n\t\t\timage_name = folder_end.split(\"/\")[0]\n\t\n\t\t\tpsd_folder_path = os.path.join(folder_start).replace(\"\\\\\",\"/\")\n\t\t\t\n\t\t\tif multi_frame:\n\t\t\t\tpsd_file_name = os.path.join(image_name,image_name + \"_\" + padded_frame + \".psd\").replace(\"\\\\\",\"/\")\n\t\t\t\t#psd_file_name = psd_folder_path.split(\"/\")[-1] + \"_\" + padded_frame + \".psd\"\n\t\t\telse:\n\t\t\t\t# exam : C:/Psd_Local_output/v06/Blurred_Oval_Bloo\n\t\t\t\t#psd_folder_path = os.path.join(folder_start).replace(\"\\\\\",\"/\")\n\t\t\t\t# exam : Blurred_Oval_Bloo_001.psd\n\t\t\t\tpsd_file_name = image_name+\".psd\"\n\t\n\t\t\tPSD_File_Path = os.path.join(psd_folder_path,psd_file_name).replace(\"\\\\\",\"/\")\n\t\n\t\t\t# create the data to be written to json\n\t\t\tdata = dict(PSD_File_Path = PSD_File_Path,\n\t\t\t\t\t\tLayer_Order_Paths = Layer_Order_Paths)\n\t\t\tres.append(data)\n\t\treturn 
res\n","repo_name":"SGSMarkNA/DML_Tools","sub_path":"DML_Nuke/Gizmos_And_Tools/Layers_To_Gimped_PSD/Layers_To_Gimped_PSD_Nodes.py","file_name":"Layers_To_Gimped_PSD_Nodes.py","file_ext":"py","file_size_in_byte":22263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74417931680","text":"import sys\nsys.stdin = open('C:\\\\Users\\\\multicampus\\\\Desktop\\\\kayunkim\\\\lectures\\\\알고리즘\\\\D10_Stack\\\\input.txt', 'r')\n\n\ndef find_match():\n s = list()\n for i in range(len(txt)):\n if txt[i] == '(': #문자가 여는 괄호면\n s.append(txt[i])\n elif txt[i] == ')': #문자가 닫는 괄호면\n if(len(s) == 0):\n return 0\n else:\n s.pop() #스택에서 하나 빼서 버리기\n #모든 반복 완료 후\n if len(s) != 0: #스택에 괄호가 남았다면\n return 0\n else:\n return 1\n\n\nT = int(input())\nfor tc in range(1,T+1):\n txt = input()\n print('#{} {}'.format(tc,find_match()))","repo_name":"KaYunKIM/ssafy","sub_path":"Lectures/ALGORITHM/D10_Stack/Stack_ex.py","file_name":"Stack_ex.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35356476451","text":"import re\nimport os\n\ncurrent_file = r'cacm_stem.txt'\ncontent = open(current_file, 'r').read()\n\ndocs = re.split(\"# [\\d]+\",content)\ndocs = [w for w in docs if w != \"\"]\n\nnewpath = r'Stemmed_Corpus/'\nif not os.path.exists(newpath):\n os.makedirs(newpath)\n\nfor i,doc in enumerate(docs,1):\n print(\"Creating Stemmed Corpus file for CACM-\" + str(i))\n f = open(newpath + 'CACM-' + str(i) + '.txt', 'w', encoding='utf-8')\n f.write(doc.strip())\n f.close()\n","repo_name":"parshva45/Information-Retrieval-System","sub_path":"Phase 1/Task 3/Part B/Step 1/cacm_stem_extracter.py","file_name":"cacm_stem_extracter.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"42760303330","text":"from src.models.tokenizer import Tokenizer\n\nclass Prediction:\n \"\"\"\n Predicition class used to predict inpu name\n \"\"\"\n def __init__(self, model) -> None:\n \"\"\"\n Construct prediction class\n Args:\n model: model used in prediction\n \"\"\"\n self.model = model\n self.max_len = 31\n self.tokenizer = Tokenizer()\n\n def predict(self, names):\n \"\"\"\n Take the names convert it to padded sequences and use model to get prediction\n Args:\n names: names to verify using the model\n \"\"\"\n sequences = self.tokenizer.texts_to_sequences(names)\n padded = self.tokenizer.pad_sequences(sequences, self.max_len)\n return self.model.predict(padded)\n \n def convert_prediction_0_1(self, y_pred):\n \"\"\"\n Convert result to 1 or 0 using threshold 0.5\n Args:\n y_pred: prediction to convert\n \"\"\"\n return [1 if x > 0.5 else 0 for x in y_pred]\n \n def convert_prediction_into_confidence(self, y_pred):\n \"\"\"\n Convert prediction to confidence\n Args:\n y_pred: prediction to convert\n \"\"\"\n results = []\n for x in y_pred:\n if x >= 0.9:\n results.append('Real name with high confidence')\n elif x > 0.5 and x < 0.9:\n results.append('Real name with low confidence')\n elif x < 0.5 and x > .0001:\n results.append('Wrong name with low confidence')\n else:\n results.append('Wrong name with high confidence')\n return results","repo_name":"hossamasaad/Name-Verification","sub_path":"src/models/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"39003955046","text":"# Author: Runar Fosse\n# Time complexity: O(n)\n# Space complexity: O(1)\n\nclass Solution:\n mod = int(1e9 + 7)\n def countHomogenous(self, s: str) -> int:\n count_homogenous = 0\n\n char, count = s[0], 1\n for i in range(1, len(s)):\n if s[i] != char:\n count_homogenous += count*(count+1)//2\n char = s[i]\n count = 0\n count += 1\n count_homogenous += count*(count+1)//2\n\n return count_homogenous % self.mod\n \n# This problem can be reduced to finding the length of every longest homogenous substring\n# in the string, as we easily can extrapolate the total number of homogenous substrings.\n\n# i.e. \"aaaa\" contains 1 \"aaaa\", 2 \"aaa\", 3 \"aa\", 4 \"a\".\n# This holds for all homogenous substrings.","repo_name":"RunarFosse/leetcode","sub_path":"Medium/count-number-of-homogenous-substrings.py","file_name":"count-number-of-homogenous-substrings.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31848670715","text":"#!/usr/bin/env python\nfrom sys import argv\n#print('in main.py')\n\nargc = len( argv )\n\ncommands_all = { 'add'\t\t: 'pit_add',\n 'init' : 'pit_init',\n 'log'\t\t: 'pit_log',\n 'status'\t: 'pit_status',\n\t\t\t\t 'stash' : 'pit_stash',\n\t\t\t\t 'rm' : 'pit_rm',\n\t\t\t\t 'pull' : 'pit_pull',\n\t\t\t\t 'push' : 'pit_push'\n }\n\ndef execute_cmd(cmdToExecute):\n\t\"\"\"\n\tif pit is invoked as:\t'pit push'\n\n\tthen vars are:\n\tcmdToExecute:\tcommands_all[argv[1]], therefore 'push'\n\tfileToImport:\t'src.commands.pit_push'\n\n\twhat is executed:\n\texec:\t\t\t'import src.commands.pit_push\n\teval:\t\t\t'src.commands.pit_push.pit_push()'\n\t\"\"\"\n\tfileToImport = 'src.commands.' + cmdToExecute\n\tprint( 'in execute_cmd(); fileToImport: ' + fileToImport)\n\texec( 'import ' + fileToImport )\n\teval( fileToImport + '.' + cmdToExecute + '()' )\n\n# if invoked with app name only, do nothing (maybe status then???)\nif argc == 1:\n\tpass\n# if executed with 1 arg... (e.g.: 'pit add')\nelif argc >= 2:\n\t#and this arg is known in commands list...\n\tif( argv[1] in commands_all ):\n\t\t#print related text\n\t\texecute_cmd( commands_all[argv[1]] )\n\telse:\n\t\tprint( 'no such command' )","repo_name":"okmanek/pit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36810259984","text":"\n# ler numero\nnum=int(input(\"Insira um número: \"))\n\nant=num-5\nsuc=num+5\n\n# imprimir anteriores\nwhile(ant 0:\n\n # get new sessions\n newSessions = [t[0] for t in get_sql_queries(connection_string)]\n\n # loop though the sessions\n for sessionId in sessions:\n\n # check whether the sessions is still running\n if sessionId not in newSessions:\n\n # if it's done pop up\n winsound.PlaySound(\"*\", winsound.SND_ALIAS)\n ctypes.windll.user32.MessageBoxA(\n 0,\n \"A query is done. 
\\n\"\n + str(len(newSessions))\n + \" are still running\",\n \"Query done\",\n \"MB_TOPMOST\",\n )\n sessions.remove(sessionId)\n\n # rest for 10 seconds\n time.sleep(10)\n\n\nif __name__ == \"__main__\":\n # get connection string from arguments\n connection_string = sys.argv[1]\n\n # track\n track(connection_string=connection_string)\n\n","repo_name":"kromme/Notify-on-sql-query-completion","sub_path":"sql_query_notify.py","file_name":"sql_query_notify.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7656378992","text":"\"\"\"\nFastText wrapper for MLflow.\n\"\"\"\nimport fasttext\nimport mlflow\nimport pandas as pd\n\n\nclass FastTextWrapper(mlflow.pyfunc.PythonModel):\n \"\"\"\n Class to wrap and use FastText Models.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor.\n \"\"\"\n\n def load_context(self, context: mlflow.pyfunc.PythonModelContext) -> None:\n \"\"\"\n To implement.\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n self._model = fasttext.load_model(\n context.artifacts[\"fasttext_model_path\"]\n )\n\n def predict(self, context, input_model) -> tuple:\n \"\"\"\n Predict.\n\n Args:\n context: MLFlow context.\n input_model: Input for the model.\n \"\"\"\n model_output = self._model.predict(input_model)\n\n predictions = [\n single_predictions[0].replace(\"__label__\", \"\")\n for single_predictions in model_output[0]\n ]\n probas = [float(single_probas[0]) for single_probas in model_output[1]]\n\n output = pd.DataFrame(\n {\n \"predictions\": predictions,\n \"probas\": probas,\n }\n )\n\n return output\n","repo_name":"InseeFrLab/extraction-comptes-sociaux","sub_path":"src/page_selection/fasttext_wrapper.py","file_name":"fasttext_wrapper.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14286451517","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, absolute_import\n\nimport collections\nfrom collections import Iterable, Sequence, Mapping\nimport itertools\n\nfrom ..common import * # pylint: disable=redefined-builtin\nfrom ..datastructures import OrderedDict\nfrom ..exceptions import *\nfrom ..transforms import (\n export_loop,\n get_import_context, get_export_context,\n to_native_converter, to_primitive_converter)\n\nfrom .base import BaseType, get_value_in\n\n\nclass CompoundType(BaseType):\n\n def __init__(self, **kwargs):\n super(CompoundType, self).__init__(**kwargs)\n self.is_compound = True\n try:\n self.field.parent_field = self\n except AttributeError:\n pass\n\n def _setup(self, field_name, owner_model):\n # Recursively set up inner fields.\n if hasattr(self, 'field'):\n self.field._setup(None, owner_model)\n super(CompoundType, self)._setup(field_name, owner_model)\n\n def convert(self, value, context=None):\n context = context or get_import_context()\n return self._convert(value, context)\n\n def _convert(self, value, context):\n raise NotImplementedError\n\n def export(self, value, format, context=None):\n context = context or get_export_context()\n return self._export(value, format, context)\n\n def _export(self, value, format, context):\n raise NotImplementedError\n\n def to_native(self, value, context=None):\n context = context or get_export_context(to_native_converter)\n return to_native_converter(self, value, context)\n\n def to_primitive(self, value, context=None):\n context = context or 
get_export_context(to_primitive_converter)\n return to_primitive_converter(self, value, context)\n\n def _init_field(self, field, options):\n \"\"\"\n Instantiate the inner field that represents each element within this compound type.\n In case the inner field is itself a compound type, its inner field can be provided\n as the ``nested_field`` keyword argument.\n \"\"\"\n if not isinstance(field, BaseType):\n nested_field = options.pop('nested_field', None) or options.pop('compound_field', None)\n if nested_field:\n field = field(field=nested_field, **options)\n else:\n field = field(**options)\n return field\n\nMultiType = CompoundType\n\n\nclass ModelType(CompoundType):\n \"\"\"A field that can hold an instance of the specified model.\"\"\"\n\n @property\n def fields(self):\n return self.model_class.fields\n\n def __init__(self, model_spec, **kwargs):\n\n if isinstance(model_spec, ModelMeta):\n self.model_class = model_spec\n self.model_name = self.model_class.__name__\n elif isinstance(model_spec, string_type):\n self.model_class = None\n self.model_name = model_spec\n else:\n raise TypeError(\"ModelType: Expected a model, got an argument \"\n \"of the type '{}'.\".format(model_spec.__class__.__name__))\n\n super(ModelType, self).__init__(**kwargs)\n\n def _repr_info(self):\n return self.model_class.__name__\n\n def _mock(self, context=None):\n return self.model_class.get_mock_object(context)\n\n def _setup(self, field_name, owner_model):\n # Resolve possible name-based model reference.\n if not self.model_class:\n if self.model_name == owner_model.__name__:\n self.model_class = owner_model\n else:\n raise Exception(\"ModelType: Unable to resolve model '{}'.\".format(self.model_name))\n super(ModelType, self)._setup(field_name, owner_model)\n\n def pre_setattr(self, value):\n if value is not None \\\n and not isinstance(value, Model):\n value = self.model_class(value)\n return value\n\n def _convert(self, value, context):\n\n if isinstance(value, self.model_class):\n model_class = type(value)\n elif isinstance(value, dict):\n model_class = self.model_class\n else:\n raise ConversionError(\n \"Input must be a mapping or '%s' instance\" % self.model_class.__name__)\n if context.convert and context.oo:\n return model_class(value, context=context)\n else:\n return model_class.convert(value, context=context)\n\n def _export(self, value, format, context):\n if isinstance(value, Model):\n model_class = type(value)\n else:\n model_class = self.model_class\n return export_loop(model_class, value, context=context)\n\n\nclass ListType(CompoundType):\n \"\"\"A field for storing a list of items, all of which must conform to the type\n specified by the ``field`` parameter.\n\n Use it like this::\n\n ...\n categories = ListType(StringType)\n \"\"\"\n\n def __init__(self, field, min_size=None, max_size=None, **kwargs):\n self.field = self._init_field(field, kwargs)\n self.min_size = min_size\n self.max_size = max_size\n\n validators = [self.check_length] + kwargs.pop(\"validators\", [])\n\n super(ListType, self).__init__(validators=validators, **kwargs)\n\n @property\n def model_class(self):\n return self.field.model_class\n\n def _repr_info(self):\n return self.field.__class__.__name__\n\n def _mock(self, context=None):\n min_size = self.min_size or 1\n max_size = self.max_size or 1\n if min_size > max_size:\n message = 'Minimum list size is greater than maximum list size.'\n raise MockCreationError(message)\n random_length = get_value_in(min_size, max_size)\n\n return [self.field._mock(context) for _ 
in range(random_length)]\n\n def _coerce(self, value):\n if isinstance(value, list):\n return value\n elif isinstance(value, (string_type, Mapping)): # unacceptable iterables\n pass\n elif isinstance(value, Sequence):\n return value\n elif isinstance(value, Iterable):\n return value\n raise ConversionError('Could not interpret the value as a list')\n\n def _convert(self, value, context):\n value = self._coerce(value)\n data = []\n errors = {}\n for index, item in enumerate(value):\n try:\n data.append(context.field_converter(self.field, item, context))\n except BaseError as exc:\n errors[index] = exc\n if errors:\n raise CompoundError(errors)\n return data\n\n def check_length(self, value, context):\n list_length = len(value) if value else 0\n\n if self.min_size is not None and list_length < self.min_size:\n message = ({\n True: 'Please provide at least %d item.',\n False: 'Please provide at least %d items.',\n }[self.min_size == 1]) % self.min_size\n raise ValidationError(message)\n\n if self.max_size is not None and list_length > self.max_size:\n message = ({\n True: 'Please provide no more than %d item.',\n False: 'Please provide no more than %d items.',\n }[self.max_size == 1]) % self.max_size\n raise ValidationError(message)\n\n def _export(self, list_instance, format, context):\n \"\"\"Loops over each item in the model and applies either the field\n transform or the multitype transform. Essentially functions the same\n as `transforms.export_loop`.\n \"\"\"\n data = []\n _export_level = self.field.get_export_level(context)\n if _export_level == DROP:\n return data\n for value in list_instance:\n shaped = self.field.export(value, format, context)\n if shaped is None:\n if _export_level <= NOT_NONE:\n continue\n elif self.field.is_compound and len(shaped) == 0:\n if _export_level <= NONEMPTY:\n continue\n data.append(shaped)\n return data\n\n\nclass DictType(CompoundType):\n \"\"\"A field for storing a mapping of items, the values of which must conform to the type\n specified by the ``field`` parameter.\n\n Use it like this::\n\n ...\n categories = DictType(StringType)\n\n \"\"\"\n\n def __init__(self, field, coerce_key=None, **kwargs):\n self.field = self._init_field(field, kwargs)\n self.coerce_key = coerce_key or str\n super(DictType, self).__init__(**kwargs)\n\n @property\n def model_class(self):\n return self.field.model_class\n\n def _repr_info(self):\n return self.field.__class__.__name__\n\n def _convert(self, value, context, safe=False):\n if not isinstance(value, Mapping):\n raise ConversionError('Only mappings may be used in a DictType')\n\n data = {}\n errors = {}\n for k, v in iteritems(value):\n try:\n data[self.coerce_key(k)] = context.field_converter(self.field, v, context)\n except BaseError as exc:\n errors[k] = exc\n if errors:\n raise CompoundError(errors)\n return data\n\n def _export(self, dict_instance, format, context):\n \"\"\"Loops over each item in the model and applies either the field\n transform or the multitype transform. 
Essentially functions the same\n as `transforms.export_loop`.\n \"\"\"\n data = {}\n _export_level = self.field.get_export_level(context)\n if _export_level == DROP:\n return data\n for key, value in iteritems(dict_instance):\n shaped = self.field.export(value, format, context)\n if shaped is None:\n if _export_level <= NOT_NONE:\n continue\n elif self.field.is_compound and len(shaped) == 0:\n if _export_level <= NONEMPTY:\n continue\n data[key] = shaped\n return data\n\n\nclass PolyModelType(CompoundType):\n \"\"\"A field that accepts an instance of any of the specified models.\"\"\"\n\n def __init__(self, model_spec, **kwargs):\n\n if isinstance(model_spec, (ModelMeta, string_type)):\n self.model_classes = (model_spec,)\n allow_subclasses = True\n elif isinstance(model_spec, Iterable):\n self.model_classes = tuple(model_spec)\n allow_subclasses = False\n else:\n raise Exception(\"The first argument to PolyModelType.__init__() \"\n \"must be a model or an iterable.\")\n\n self.claim_function = kwargs.pop(\"claim_function\", None)\n self.allow_subclasses = kwargs.pop(\"allow_subclasses\", allow_subclasses)\n\n CompoundType.__init__(self, **kwargs)\n\n def _setup(self, field_name, owner_model):\n # Resolve possible name-based model references.\n resolved_classes = []\n for m in self.model_classes:\n if isinstance(m, string_type):\n if m == owner_model.__name__:\n resolved_classes.append(owner_model)\n else:\n raise Exception(\"PolyModelType: Unable to resolve model '{}'.\".format(m))\n else:\n resolved_classes.append(m)\n self.model_classes = tuple(resolved_classes)\n super(PolyModelType, self)._setup(field_name, owner_model)\n\n def is_allowed_model(self, model_instance):\n if self.allow_subclasses:\n if isinstance(model_instance, self.model_classes):\n return True\n else:\n if model_instance.__class__ in self.model_classes:\n return True\n return False\n\n def _convert(self, value, context):\n\n if value is None:\n return None\n if self.is_allowed_model(value):\n return value\n if not isinstance(value, dict):\n if len(self.model_classes) > 1:\n instanceof_msg = 'one of: {}'.format(', '.join(\n cls.__name__ for cls in self.model_classes))\n else:\n instanceof_msg = self.model_classes[0].__name__\n raise ConversionError('Please use a mapping for this field or '\n 'an instance of {}'.format(instanceof_msg))\n\n model_class = self.find_model(value)\n return model_class(value, context=context)\n\n def find_model(self, data):\n \"\"\"Finds the intended type by consulting potential classes or `claim_function`.\"\"\"\n\n chosen_class = None\n if self.claim_function:\n chosen_class = self.claim_function(self, data)\n else:\n candidates = self.model_classes\n if self.allow_subclasses:\n candidates = itertools.chain.from_iterable(\n ([m] + m._subclasses for m in candidates))\n fallback = None\n matching_classes = []\n for cls in candidates:\n match = None\n if '_claim_polymorphic' in cls.__dict__:\n match = cls._claim_polymorphic(data)\n elif not fallback: # The first model that doesn't define the hook\n fallback = cls # can be used as a default if there's no match.\n if match:\n matching_classes.append(cls)\n if not matching_classes and fallback:\n chosen_class = fallback\n elif len(matching_classes) == 1:\n chosen_class = matching_classes[0]\n else:\n raise Exception(\"Got ambiguous input for polymorphic field\")\n if chosen_class:\n return chosen_class\n else:\n raise Exception(\"Input for polymorphic field did not match any model\")\n\n def _export(self, model_instance, format, context):\n\n 
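# added note (hedged, not in the original source): export dispatches to the instance's own class rather than the declared model, so an allowed subclass keeps its extra fields on a round-trip.\n 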
model_class = model_instance.__class__\n if not self.is_allowed_model(model_instance):\n raise Exception(\"Cannot export: {} is not an allowed type\".format(model_class))\n\n return model_instance.export(context=context)\n\n\n__all__ = module_exports(__name__)\n\n","repo_name":"splunk/SA-ctf_scoreboard","sub_path":"bin/sa_ctf_scoreboard/solnlib/packages/schematics/types/compound.py","file_name":"compound.py","file_ext":"py","file_size_in_byte":14171,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"54"} +{"seq_id":"12510486682","text":"class Solution(object):\n def numberOfArithmeticSlices(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n \"\"\"\n if len(A)<3:\n return 0\n now = 2\n d = A[1]-A[0]\n ret = 0\n # range and floor division so this also runs under Python 3\n for i in range(2, len(A)):\n if A[i]-A[i-1] == d:\n now+=1\n else:\n ret += (now-2)*(now-1)//2\n now = 2\n d = A[i]-A[i-1]\n ret += (now-2)*(now-1)//2\n return ret","repo_name":"nyroro/leetcode","sub_path":"LC413.py","file_name":"LC413.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5926187067","text":"import os, time, datetime\nfrom bottle import error, route,run, get,post,request, template,static_file,redirect\nimport sqlite3\n\n@route('/')\ndef top():\n return \"TOP PAGE!!\"\n \n@route('/index')\ndef hello():\n name = \"StartUpCafe\"\n age = 12345\n address = \"Heaven\"\n return template(\"index\",name_tpl =name,age_tpl = age, address_tpl = address)\n# run(host = 'localhost', port = 8080, reloader =True)\n\n\n@route('/user/<name>')\ndef user(name):\n return name\n\n@route('/object/<id:int>')\ndef callback(id):\n assert isinstance(id,int)\n\n@route('/hello/<guest>')\ndef textGuest(guest):\n return(\"Hi,\",guest)\n\n@route('/temptest')\ndef temptest():\n name = \"StartUpCafe\"\n age = 12345\n address = \"Heaven\"\n return template(\"index\",name_tpl =name,age_tpl = age, address_tpl = address)\n\n@route('/time')\ndef time_rr():\n today = datetime.date.today()\n now = str(datetime.datetime.now()).split(\" \")\n # print(now[1])\n today.day # today's date\n today.weekday() # the weekday as a number (0 = Monday)\n days = (\"月\",\"火\", \"水\", \"木\", \"金\", \"土\", \"日\")\n\n today_str = (\"今日は\"+str(today.year)+\"年 \"+str(today.month)+\"月\"+ str(today.day)+\"日、\" + days[today.weekday()]+\"曜日です\")\n \n\n return template(\"time_re\", today_str_tpl = today_str)\n\n@route('/dbtest')\ndef dbtest():\n #access to db\n conn = sqlite3.connect('test20190213.db')\n\n c=conn.cursor()\n c.execute('select name,age,address from users where id=1;')\n user_info =c.fetchone()\n# disconnect \n c.close()\n print(user_info)\n return template('dbtest',user_info_tpl=user_info)\n\n@route('/list')\ndef showlist():\n\n conn = sqlite3.connect('test20190213.db')\n c=conn.cursor()\n c.execute('select name, age, address from users;')\n user_list = []\n for row in c.fetchall():\n user_list.append({\n \"name\": row[0],\n \"age\": row[1],\n \"address\": row[2]\n })\n# disconnect \n c.close()\n print(user_list)\n print(type(user_list))\n headers = ['Name', 'Age', 'Address']\n return template('list',user_list_tpl=user_list, headers_tpl = headers)\n\n@route('/add', method = [\"GET\"])\ndef add_get():\n return template('add.tpl') \n\n@route('/add', method =[\"POST\"])\ndef add_post():\n # Obtain current time\n now = datetime.datetime.today()\n # Convert into the format\n now = ('{0:%Y-%m-%d %H:%M:%S}'.format(now))\n task=request.POST.getunicode('task')\n #Connect database\n conn = sqlite3.connect('test20190213.db')\n 
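# added note (hedged, not in the original source): the \"?\" placeholders below are bound by sqlite3 itself, so the user-supplied task string cannot inject SQL.\n 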
# cursor to execute SQL\n c=conn.cursor()\n c.execute(\"insert into task values (null,?,?);\",(task,now,))\n #save data\n conn.commit()\n #Disconnect\n conn.close()\n\n return redirect('/showtask')\n\n@route('/showtask')\ndef show_task():\n\n conn = sqlite3.connect('test20190213.db')\n c=conn.cursor()\n c.execute('select id, task,time from task;')\n # Define an empty list \n task_list = []\n # Adding list in the task_list\n # By using fetchall, getting all elements from the db \n for row in c.fetchall():\n task_list.append({\n \"id\": row[0],\n \"task\": row[1],\n \"time\": row[2],\n })\n c.close()\n headers = ['ID', 'Task','Created Time']\n return template('showtask',task_list_tpl=task_list, headers_tpl = headers)\n\n@route(\"/delete/<task_id:int>\")\ndef delete(task_id):\n delete_task(task_id)\n return redirect(\"/showtask\")\n\ndef delete_task(task_id):\n conn = sqlite3.connect('test20190213.db')\n c = conn.cursor()\n delete = \"delete from task where id=?\"\n c.execute(delete, (task_id,))\n conn.commit()\n conn.close()\n\n\n@error(404)\ndef notfunction(code):\n return \"You are dead...\"\nrun( port = 8080, reloader =True)","repo_name":"YuYuAoi/my-first-bottle","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8404927982","text":"import argparse\nimport logging\nfrom pathlib import Path\nimport shutil\nfrom tempfile import NamedTemporaryFile\n\nimport os\nfrom itertools import groupby\nfrom typing import BinaryIO, List, Optional, Tuple, Union\n\nimport pandas as pd\nimport torchaudio\n\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import Dataset\nfrom torchaudio.datasets.utils import download_url, extract_archive\nfrom tqdm import tqdm\nimport soundfile as sf\nfrom speech_tokenizer import SpeechTokenizer\nfrom fairseq.data.audio.audio_utils import get_waveform, convert_waveform\nimport csv\nimport numpy as np\n\nlog = logging.getLogger(__name__)\n\nMANIFEST_COLUMNS = [\"id\", \"units\", \"src_text\", \"tgt_text\", \"speaker\"]\n\n\n\nclass MUSTC(Dataset):\n \"\"\"\n Create a Dataset for MuST-C. 
Each item is a tuple of the form:\n waveform, sample_rate, source utterance, target utterance, speaker_id,\n utterance_id\n \"\"\"\n\n SPLITS = [\"train\", \"dev\", \"tst-COMMON\", \"tst-HE\"]\n LANGUAGES = [\"de\", \"es\", \"fr\", \"it\", \"nl\", \"pt\", \"ro\", \"ru\"]\n\n def __init__(self, root: str, lang: str, split: str) -> None:\n assert split in self.SPLITS and lang in self.LANGUAGES\n _root = Path(root) / f\"en-{lang}\" / \"data\" / split\n wav_root, txt_root = _root / \"wav\", _root / \"txt\"\n assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()\n # Load audio segments\n try:\n import yaml\n except ImportError:\n print(\"Please install PyYAML to load the MuST-C YAML files\")\n with open(txt_root / f\"{split}.yaml\") as f:\n segments = yaml.load(f, Loader=yaml.BaseLoader)\n # Load source and target utterances\n for _lang in [\"en\", lang]:\n with open(txt_root / f\"{split}.{_lang}\") as f:\n utterances = [r.strip() for r in f]\n assert len(segments) == len(utterances)\n for i, u in enumerate(utterances):\n segments[i][_lang] = u\n # Gather info\n self.data = []\n for wav_filename, _seg_group in groupby(segments, lambda x: x[\"wav\"]):\n wav_path = wav_root / wav_filename\n sample_rate = sf.info(wav_path.as_posix()).samplerate\n seg_group = sorted(_seg_group, key=lambda x: x[\"offset\"])\n for i, segment in enumerate(seg_group):\n offset = int(float(segment[\"offset\"]) * sample_rate)\n n_frames = int(float(segment[\"duration\"]) * sample_rate)\n _id = f\"{wav_path.stem}_{i}\"\n self.data.append(\n (\n wav_path.as_posix(),\n offset,\n n_frames,\n sample_rate,\n segment[\"en\"],\n segment[lang],\n segment[\"speaker_id\"],\n _id,\n )\n )\n\n def __getitem__(\n self, n: int\n ) -> Tuple[torch.Tensor, int, str, str, str, str]:\n wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, \\\n utt_id = self.data[n]\n waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)\n waveform = torch.from_numpy(waveform)\n return waveform, sr, src_utt, tgt_utt, spk_id, utt_id\n\n def __len__(self) -> int:\n return len(self.data)\n\n\n \n\n\ndef process(args):\n root = Path(args.data_root).absolute()\n\n lang = args.language\n\n if not root.is_dir():\n raise NotADirectoryError(f\"{root} does not exist\")\n \n if not Path(args.hubert_dir).is_dir():\n os.makedirs(args.hubert_dir, exist_ok=True)\n os.system(f'wget https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt -O {args.hubert_dir}/hubert_model.pt')\n os.system(f'wget https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960_L9_km500.bin -O {args.hubert_dir}/km.model')\n \n hubert_path = f'{args.hubert_dir}/hubert_model.pt'\n km_path = f'{args.hubert_dir}/km.model'\n\n speechtokenizer = SpeechTokenizer(\n ckpt_path=hubert_path,\n layer=9,\n max_chunk=1600000,\n fp16=False,\n pool_k=1,\n pool_s=1,\n km_path=km_path\n )\n\n def extract_discrete_units(\n waveform, #torch.Tensor\n types, # [\"units\", \"unmerged_units\", \"duration\", \"continuous\"]\n output_path=None,\n wav_name=None,\n ):\n \n _waveform = waveform * (2 ** 15) # Kaldi compliance: 16-bit signed integers\n _waveform = _waveform.squeeze()\n\n encoded_audio = speechtokenizer(_waveform)\n units = encoded_audio[types]\n _units = ''.join(['#'+str(x) for x in units])\n\n if output_path is not None:\n with open(output_path,'a') as f:\n f.writelines(f'{wav_name}|{_units}')\n return _units\n\n \n\n for split in MUSTC.SPLITS:\n print(f\"Fetching split {split}...\")\n dataset = MUSTC(root.as_posix(), lang, split)\n with 
open(os.path.join(args.output_dir,f'{split}.id'),'a') as fid, \\\n open(os.path.join(args.output_dir,f'{split}.en'),'a') as fs, \\\n open(os.path.join(args.output_dir,f'{split}.{args.language}'),'a') as ft, \\\n open(os.path.join(args.output_dir,f'{split}.en_units'),'a') as funits:\n\n for waveform, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):\n fid.writelines(utt_id + '\\n')\n fs.writelines(src_utt + '\\n')\n ft.writelines(tgt_utt + '\\n')\n units = extract_discrete_units(waveform, \"units\")\n funits.writelines(units + '\\n')\n\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data-root\", required=True, type=str,\n help=\"data root with sub-folders for each language /\"\n )\n parser.add_argument(\n \"--hubert-dir\", required=True, type=str,\n help=\"dirname of hubert model and kmeans model\"\n )\n parser.add_argument(\"--language\", required=True, type=str)\n parser.add_argument('--output-dir', type=str)\n args = parser.parse_args()\n\n\n process(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"0nutation/DUB","sub_path":"DUB/scripts/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":6152,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"20612502932","text":"from django.contrib import admin\nfrom import_export.admin import ImportExportModelAdmin\nfrom .models import *\n\nclass FaqAdmin(ImportExportModelAdmin):\n list_display = ('questionno','question', 'answer', 'hyperlinktext', 'hyperlink')\n search_fields = ('questionno','question', 'answer')\n ordering = ('questionno',) # default sort order for the change list ('list_sort' is not a ModelAdmin option)\n list_per_page = 20 # Change the number of items displayed per page\n\n fieldsets = (\n (None, {\n 'fields': ('questionno','question', 'answer')\n }),\n ('Hyperlink', {\n 'fields': ('hyperlinktext', 'hyperlink'),\n 'classes': ('collapse',),\n }),\n )\n\n # Optionally, you can add prepopulated fields for URLField using the following:\n # prepopulated_fields = {'hyperlinktext': ('hyperlink',)}\n\nadmin.site.register(faq, FaqAdmin)\n\n\n\n\nclass EventsAdmin(ImportExportModelAdmin):\n list_display = ('event_id', 'title', 'description', 'venue', 'mode', 'start_dateandtime', 'zoom_link', 'is_submission','submission_driveid')\n search_fields = ('title', 'venue', 'mode' , 'start_dateandtime')\n list_filter = ('start_dateandtime',) # Add more fields for filtering if needed\n list_per_page = 20 # Change the number of items displayed per page\n\n fieldsets = (\n ('Event Details', {\n 'fields': ('event_id', 'title', 'description', 'venue', 'mode', 'start_dateandtime', 'end_dateandtime', 'imageurl')\n }),\n ('Event links', {\n 'fields': ('zoom_link' , 'whatsapp_group_link')\n }),\n ('Event Submission (if the event needs a submission)', {\n 'fields': ('is_submission','submission_driveid')\n }),\n )\n\nadmin.site.register(events, EventsAdmin)\n\n\n\nclass UserDetailsAdmin(ImportExportModelAdmin):\n list_display = ('username', 'first_name', 'last_name', 'email', 'gender', 'study_year' ,'mobile')\n search_fields = ('username', 'first_name', 'last_name', 'email', 'registration_no', 'institute')\n list_filter = ( 'campus','gender', 'study_year')\n list_per_page = 20\n\n fieldsets = (\n ('Personal Information', {\n 'fields': ('username', 'first_name', 'last_name', 'email', 'mobile', 'date_of_birth', 'gender', 'profile_image', 'bio')\n }),\n ('Academic Information', {\n 'fields': ('registration_no', 'institute', 'branch', 'campus', 
'study_year')\n }),\n ('Social Media Links', {\n 'fields': ('instagram_link', 'linkedin_link', 'github_link', 'tryhackme_link', 'hackthebox_link' , 'discord_link')\n }),\n )\n\nadmin.site.register(UserDetails, UserDetailsAdmin)\n\n\n\n\n@admin.register(EventRegistration)\nclass EventRegistrationAdmin(ImportExportModelAdmin):\n list_display = ('event_id', 'email', 'registered_datetime', 'fullname', 'registration_no', 'study_year', 'campus', 'user_event_id')\n list_filter = ('event_id', 'registered_datetime','study_year', 'campus')\n search_fields = ('event_id','email', 'fullname', 'registration_no')\n date_hierarchy = 'registered_datetime'\n list_per_page = 50\n\n\n@admin.register(event_submission)\nclass event_submissionAdmin(ImportExportModelAdmin):\n list_display = ('event_id', 'email', 'registered_datetime', 'fullname', 'registration_no', 'study_year', 'campus', 'user_submission_id')\n list_filter = ('event_id', 'registered_datetime','study_year', 'campus')\n search_fields = ('event_id','email', 'fullname', 'registration_no')\n date_hierarchy = 'registered_datetime'\n list_per_page = 50\n","repo_name":"CYSEC-Gitam/CYSEC-WEBSITE","sub_path":"home/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13419553282","text":"\"\"\"\nClasses related to networks for generating the config files.\n\nA network is basically a representation of nodes and keypairs.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import List, Optional\n\nfrom xrpl import CryptoAlgorithm\nfrom xrpl.core.addresscodec import encode_account_public_key, encode_node_public_key\nfrom xrpl.core.keypairs import derive_keypair, generate_seed\nfrom xrpl.wallet import Wallet\n\nfrom slk.config.helper_classes import Keypair, Ports\n\n\nclass Network:\n \"\"\"Represents a network of validator nodes and their keypairs.\"\"\"\n\n def __init__(self: Network, num_nodes: int, ports: List[Ports]) -> None:\n \"\"\"\n Initialize a Network for config files.\n\n Args:\n num_nodes: The number of nodes in the network.\n ports: The Ports for the network.\n \"\"\"\n self.url = \"127.0.0.1\"\n self.num_nodes = num_nodes\n self.ports = ports\n\n\nclass StandaloneNetwork(Network):\n \"\"\"Represents a network that is standalone and running locally.\"\"\"\n\n def __init__(\n self: StandaloneNetwork, start_cfg_index: int, num_nodes: int = 1\n ) -> None:\n \"\"\"\n Initializes a StandaloneNetwork.\n\n Args:\n num_nodes: The number of nodes in the network.\n start_cfg_index: The port number the set of ports should start at.\n \"\"\"\n ports = [Ports.generate(start_cfg_index + i) for i in range(num_nodes)]\n super().__init__(num_nodes, ports)\n self.validator_keypairs = self._generate_node_keypairs()\n\n def _generate_node_keypairs(self: StandaloneNetwork) -> List[Keypair]:\n # Generate keypairs suitable for validator keys\n result = []\n for i in range(self.num_nodes):\n seed = generate_seed(None, CryptoAlgorithm.SECP256K1)\n pub_key, priv_key = derive_keypair(seed, True)\n result.append(\n Keypair(\n public_key=encode_node_public_key(bytes.fromhex(pub_key)),\n secret_key=seed,\n account_id=None,\n )\n )\n return result\n\n\nclass SidechainNetwork(StandaloneNetwork):\n \"\"\"Represents a sidechain network of federator nodes and their keypairs.\"\"\"\n\n def __init__(\n self: SidechainNetwork,\n num_federators: int,\n start_cfg_index: int,\n main_door_seed: Optional[str] = None,\n ) -> None:\n \"\"\"\n 
Initialize a SidechainNetwork for config files.\n\n Args:\n num_federators: The number of federators in the network.\n start_cfg_index: The port number the ports should start at.\n main_door_seed: The secret seed of the door account on the mainchain. Only\n needed if the mainchain is an external chain (e.g.\n mainnet/devnet/testnet).\n \"\"\"\n super().__init__(start_cfg_index, num_federators)\n self.num_federators = num_federators\n self.federator_keypairs = self._generate_federator_keypairs()\n\n if main_door_seed is None:\n self.main_account = Wallet.create(CryptoAlgorithm.SECP256K1)\n print(f\"Door account seed: {self.main_account.seed}\")\n print(\"Store this in the environment variable `DOOR_ACCOUNT_SEED`\")\n else:\n self.main_account = Wallet(main_door_seed, 0)\n\n def _generate_federator_keypairs(self: SidechainNetwork) -> List[Keypair]:\n # Generate keypairs suitable for federator keys\n result = []\n for i in range(self.num_federators):\n wallet = Wallet.create(crypto_algorithm=CryptoAlgorithm.ED25519)\n result.append(\n Keypair(\n public_key=encode_account_public_key(\n bytes.fromhex(wallet.public_key)\n ),\n secret_key=wallet.seed,\n account_id=wallet.classic_address,\n )\n )\n return result\n","repo_name":"xpring-eng/sidechain-launch-kit","sub_path":"slk/config/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"72245140961","text":"from pod_cr_to_jira import jira_builder\n\nfrom flask import escape, abort\nfrom google.cloud import error_reporting\n\ngoogle_error_client = error_reporting.Client()\n\n\n\ndef inbound_http(request):\n request_json = request.get_json(silent=True)\n request_args = request.args\n\n if request_json:\n try:\n success, result = jira_builder.create_jira_tree(request_json)\n\n if not success:\n abort(400, result['message'])\n\n except Exception as ex:\n # google_error_client.report_exception()\n abort(500, str(ex))\n\n else:\n abort(400, 'Payload missing or invalid.')","repo_name":"KevinJMcGrath/CloudFunction-SFDCtoJIRA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20567781746","text":"'''\nPython script to create fake database of users and purchase orders for use in ETL pipeline\n'''\nfrom faker import Faker\nimport numpy as np\nimport random\nimport json\nfrom faker.providers import BaseProvider\nfrom datetime import datetime\nimport pandas as pd\n\n# ---- function definitions -------\n\n\ndef create_json_file(filename: str, data):\n '''dump a dictionary to a JSON file'''\n with open(str(filename) + '.json', 'w') as file:\n json.dump(data,\n file)\n\n\ndef create_csv_file(filename: str, df):\n '''convert pandas df to csv file'''\n df.to_csv(str(filename) + '.csv',\n sep=',',\n index=False)\n\n\nclass ProductProvider(BaseProvider):\n '''create new faker provider through class inheritance to generate fake product types'''\n\n def product(self):\n products = (\n 'tshirt pants jeans button-down shorts underwear socks jacket sunglasses hat cap beanie').split(' ')\n\n # return random selection from products\n return random.choice(products)\n\n\n# initialize Faker\nfake = Faker('en_US')\n\n# create customer database values\nnum_customer = 10000\ncustomer_dict = {}\n\nfor customer in range(num_customer):\n # create customer name, address, DOB, email for DB\n 
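# added note (hedged, not in the original source): for reproducible test data the generator could be seeded first with Faker.seed(0); the loop below works either way.\n 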
# faker customer name\n fname, lname = fake.first_name(), fake.last_name()\n name = fname + ' ' + lname\n\n # update dictionary with each entry\n customer_dict.update({customer: {'CustID': fake.ean(length=13),\n 'name': name,\n 'street address': fake.street_address(),\n 'city': fake.city(),\n 'state': fake.state(),\n 'post code': fake.postcode(),\n 'DOB': str(fake.date_of_birth(minimum_age=14, maximum_age=110)),\n 'email': fname[0] + lname + '@' + fake.free_email_domain()\n }\n }\n )\n\n\n# create product order database\n\n# add products to our faker object\nfake.add_provider(ProductProvider)\norder_dict = {}\n\nfor customer in range(num_customer):\n for i in range(random.randint(1, 4)):\n # define random number of items a customer purchases\n # note: prefixes must be a tuple -- ('000') is just a string\n order_dict.update({fake.ean(length=8, prefixes=('000',)): {'CustID': customer_dict[customer]['CustID'],\n 'product': fake.product(),\n 'item_cost': round(random.uniform(0, 100), 2),\n 'order_time': str(fake.date_time_between(start_date='-1y', end_date='now'))\n }\n }\n )\n\n\n# create iPhone user event database\ncolumns = ('EventID CustID ToApp AppOpenTime').split(' ')\nevent_df = pd.DataFrame(columns=columns, index=None)\nnum_events = 50000\n\nfor event in range(num_events):\n customer_val = random.randint(0, num_customer - 1)\n temp = [[event, customer_dict[customer_val]['CustID'], round(random.uniform(0, 3600), 2), str(\n fake.date_time_between(start_date='-1y', end_date='now'))]]\n # ignore_index=True keeps a continuous 0..n index across appends\n event_df = event_df.append(pd.DataFrame(temp, columns=columns), ignore_index=True)\n\n# save dictionaries to file for later import to database\ncreate_json_file('data/customer_db', customer_dict)\ncreate_json_file('data/order_db', order_dict)\ncreate_csv_file('data/event_db',event_df)\n","repo_name":"nasriv/Customer_Purchase_ETL","sub_path":"initialize_data.py","file_name":"initialize_data.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28371087077","text":"import numpy as np\r\nfrom utils import gradientDescent\r\nfrom utils import extract_features\r\nfrom utils import sigmoid\r\nfrom Config import Config\r\n\r\ndef predict_tweet(tweet, freq, theta):\r\n \"\"\"\r\n Input:\r\n tweet: a string\r\n freq: a dictionary corresponding to the frequencies of each tuple (word, label)\r\n theta: (3,1) vector of weights\r\n Output:\r\n y_pred: the probability of a tweet being positive or negative\r\n \"\"\"\r\n\r\n # extract the features of the tweet and store it into x\r\n x = extract_features(tweet, freq)\r\n\r\n # make the prediction using x and theta\r\n y_pred = sigmoid(np.dot(x, theta))\r\n\r\n return y_pred\r\n\r\n\r\ndef test_logistic_regression(test_x, test_y, freq, theta, predict_tweet=predict_tweet):\r\n \"\"\"\r\n Input:\r\n test_x: a list of tweets\r\n test_y: (m, 1) vector with the corresponding labels for the list of tweets\r\n freq: a dictionary with the frequency of each pair (or tuple)\r\n theta: weight vector of dimension (3, 1)\r\n Output:\r\n accuracy: (# of tweets classified correctly) / (total # of tweets)\r\n \"\"\"\r\n\r\n # the list for storing predictions\r\n y_hat = []\r\n\r\n for tweet in test_x:\r\n # get the label prediction for the tweet\r\n y_pred = predict_tweet(tweet, freq, theta)\r\n\r\n if y_pred > 0.5:\r\n # append 1.0 to the list\r\n y_hat.append(1.0)\r\n else:\r\n # append 0 to the list\r\n y_hat.append(0.0)\r\n\r\n # With the above implementation, y_hat is a list, but test_y is (m,1) array\r\n # convert both to one-dimensional 
arrays in order to compare them using the '==' operator\r\n accuracy = (y_hat == np.squeeze(test_y)).sum() / len(test_x)\r\n accuracy = accuracy * 100\r\n\r\n return accuracy\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"imkushwaha/Sentiment-Analysis-using-logistic-regression","sub_path":"LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34210428847","text":"try:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\ntry:\n longdesc = open('README.md').read()\nexcept Exception:\n longdesc = ''\n\nsetup(\n name='telebot',\n version='1.0.0',\n description='Python Telegram Bot.',\n long_description=longdesc,\n author='Kien Nguyen',\n author_email='kiennt2609@gmail.com',\n license='Apache-2.0',\n scripts=['bin/telebot'],\n url='https://github.com/ntk148v/telebot/',\n packages=['telebot', 'telebot.plugins'],\n include_package_data=True,\n install_requires=[],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ]\n)\n","repo_name":"ntk148v/telebot","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"31070631256","text":"\"\"\"\nhttps://www.codewars.com/kata/faro-shuffle-count/python\n\"\"\"\ndef faro_cycles(deck_size):\n old_num = num = [i for i in range(deck_size)]\n cur = 1\n while True:\n c = []\n for i in range(len(num) // 2):\n c.extend([num[i], num[i + len(num) // 2]])\n if c == old_num:\n return cur\n cur += 1\n num = c\n\nprint(faro_cycles(2),1)\nprint(faro_cycles(52),8)\n\n\n# alternative solution: each faro shuffle doubles a card's position mod (n-1),\n# so the answer is the multiplicative order of 2 mod (n-1)\ndef faro_cycles(n):\n x, cnt = 2, 1\n while x != 1 and n > 3:\n cnt += 1\n x = x*2 % (n-1)\n return cnt","repo_name":"lichkingwulaa/Codewars","sub_path":"6 kyu/6_kyu_Faro_Shuffle_Count.py","file_name":"6_kyu_Faro_Shuffle_Count.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74272177762","text":"#!/usr/bin/python3\n\"\"\"\nReturn the list of all hot posts\n\"\"\"\n\nimport requests\nfrom sys import argv\n\n\ndef recurse(subreddit, hot_list=[], after=''):\n \"\"\"Function that returns the list of all hot posts\"\"\"\n if after is None:\n return []\n headers = {\"User-Agent\":\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\\\n AppleWebKit/537.36 (KHTML, like Gecko)\\\n Chrome/70.0.3538.77 Safari/537.36\"}\n url = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)\n if after:\n url += '?limit=100&after={}'.format(after)\n req = requests.get(url, headers=headers, allow_redirects=False)\n if req.status_code != 200:\n return None\n else:\n rj = req.json()\n posts = rj.get('data', {}).get('children', None)\n for post in posts:\n hot_list.append(post.get('data', {}).get('title', None))\n return hot_list + recurse(subreddit, [], rj.get('data').get('after'))\n","repo_name":"alejoortizd/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"19725661108","text":"# -*- coding: utf-8 -*-\n\n# 波士顿房价回归分析\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVR\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef example():\n boston = load_boston()\n X, y = boston.data, boston.target\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=8)\n # 对训练集和测试集进行数据预处理\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train_scaled = scaler.transform(X_train)\n X_test_scaled = scaler.transform(X_test)\n print('--------------')\n print(X_train.shape)\n print(X_test.shape)\n print('-----------------')\n for kernel in ['linear', 'rbf']:\n svr = SVR(kernel=kernel, C=100, gamma=0.1)\n svr.fit(X_train_scaled, y_train)\n print(kernel, '核函数模型训练集得分:{:.3f}'.format(svr.score(X_train_scaled, y_train)))\n print(kernel, '核函数模型测试集得分:{:.3f}'.format(svr.score(X_test_scaled, y_test)))\n\nexample()","repo_name":"yanan-wu/ml_algorithm","sub_path":"svm/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31303792323","text":"import torch\n\n\n# (Bernoulli) Restricted Boltzmann Machine\n# k-step Contrastive Divergence algorithm\nclass RBMobj:\n def __init__(self, h_dim: int, v_dim: int, k: int, lr: float = 0.001,\n tol: float = 1E-3, epoch: int = 10, trace: bool = False):\n assert h_dim > 0, f'Found zero/negative dimension in latent dimension which is impossible {h_dim}.'\n assert v_dim > 0, f'Found zero/negative dimension in visible dimension which is impossible {v_dim}.'\n assert lr > 0, f'Found zero/negative learning rate which is impossible {lr}.'\n self.h_dim = h_dim\n self.v_dim = v_dim\n self.k = k\n\n self.lr = torch.tensor(lr, dtype=torch.float64)\n self.tol = tol\n self.epoch = epoch\n self.trace = trace\n\n # model parameters\n self.W = None\n self.a = None\n self.b = None\n self.pseudo_lik = torch.zeros(epoch, dtype=torch.float64)\n\n def fit(self, X: torch.Tensor):\n # random initialization\n self.W = torch.randn(self.v_dim, self.h_dim, dtype=torch.float64)\n self.a = torch.randn(self.v_dim, dtype=torch.float64)\n self.b = torch.randn(self.h_dim, dtype=torch.float64)\n\n # training\n n_sample, x_dim = X.shape\n\n # epoch loop\n for epoch in range(self.epoch):\n if self.trace:\n print(f'Running epoch: {epoch}')\n\n # training sample loop\n for i in range(n_sample):\n v = X[i, :].clone()\n h = torch.zeros(self.h_dim, dtype=torch.float64)\n v_next = torch.zeros(self.v_dim, dtype=torch.float64)\n h_next = torch.zeros(self.h_dim, dtype=torch.float64)\n\n # k-step CD learning\n v_temp = v.clone()\n for t in range(self.k):\n # sampling\n h_temp = self._sampling_hidden(v_temp)\n v_temp = self._sampling_visible(h_temp)\n\n # save\n if t == 0:\n h = h_temp.clone()\n if t == self.k - 1:\n h_next = h_temp.clone()\n v_next = v_temp.clone()\n\n # update\n self.W += self.lr * (torch.outer(v, h) - torch.outer(v_next, h_next))\n self.a += self.lr * (v - v_next)\n self.b += self.lr * (h - h_next)\n\n # corrupt each sample by a random bit flip\n idx_bit = torch.randint(0, self.v_dim, size=(n_sample,))\n pseudo_lik = 0.0\n for i in range(n_sample):\n v_corrpt = X[i, :].clone()\n d_free_energy = self._free_energy(v_corrpt)\n v_corrpt[idx_bit[i]] = 1 - v_corrpt[idx_bit[i]]\n d_free_energy -= self._free_energy(v_corrpt)\n pseudo_lik += self.v_dim * torch.log(torch.sigmoid(-d_free_energy))\n\n self.pseudo_lik[epoch] = pseudo_lik / 
n_sample\n\n if self.trace:\n # print(f'Difference in free energy: {torch.round(d_free_energy, decimals=4)}')\n print(f'Pseudo-likelihood: {torch.round(self.pseudo_lik[epoch], decimals=4)}')\n\n def gibbs(self, v: torch.Tensor):\n # gibbs sampling\n v_temp = v.clone()\n for t in range(self.k):\n h_temp = self._sampling_hidden(v_temp)\n v_temp = self._sampling_visible(h_temp)\n return v_temp\n\n def _sampling_hidden(self, v: torch.Tensor):\n # sample from p(h|v)\n p = torch.matmul(self.W.t(), v)\n p += self.b\n p = torch.sigmoid(p)\n return torch.bernoulli(p)\n\n def _sampling_visible(self, h: torch.Tensor):\n # sample from p(v|h)\n p = torch.matmul(self.W, h)\n p += self.a\n p = torch.sigmoid(p)\n return torch.bernoulli(p)\n\n def _free_energy(self, v: torch.Tensor):\n E = torch.inner(self.a, v)\n E += torch.sum(torch.log(1 + torch.exp(torch.matmul(self.W.t(), v) + self.b)))\n return -E\n\n def _energy(self, v: torch.Tensor, h: torch.Tensor):\n E = torch.inner(v, torch.matmul(self.W, h))\n E += torch.inner(self.a, v)\n E += torch.inner(self.b, h)\n return -E\n","repo_name":"andyc1997/RBM","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38167978367","text":"#hexadecimal to base64 encoder\r\n#just kidding this already exists. boo\r\nfile = open(\"EOS results template3.csv\", 'rb')\r\nimport codecs\r\n#str = \"616d6974\"\r\n#encoded = codecs.encode(codecs.decode(str, \"hex\"),\"base64\").decode()\r\n#print(encoded)\r\n\r\nimport hashlib\r\n#sha1_hash = sha1(\"616d6974\")\r\n#print(sha1_hash)\r\n#sha1 = hashlib.sha1(b\"test\").hexdigest()\r\n\r\nsha1 = hashlib.sha1(file.read()).hexdigest()\r\nprint(sha1)\r\nencoded = codecs.encode(codecs.decode(sha1, \"hex\"),\"base64\").decode()\r\nprint(encoded)\r\n","repo_name":"slfisco/learning2","sub_path":"hex decoder.py","file_name":"hex decoder.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37324875877","text":"import sys\nfrom pathlib import Path\nimport string\n\n\ndef find_priority_total(input):\n total = 0\n index_value = 0\n for content in input:\n if index_value <= (len(input) - 3):\n first_elf = set(input[index_value])\n second_elf = set(input[index_value + 1])\n third_elf = set(input[index_value + 2])\n common_item = first_elf & second_elf & third_elf\n index_value += 3\n priority = string.ascii_letters.index(common_item.pop()) + 1\n total += priority\n print(total)\n\n\nif __name__ == \"__main__\":\n file = Path(sys.argv[1])\n if Path.is_file(file):\n input = Path.read_text(file).splitlines()\n find_priority_total(input)\n else:\n raise TypeError(\"This is not a file\")\n","repo_name":"amygori/advent-of-code-2022","sub_path":"day3/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70492308962","text":"import random\nfrom random import randint\nfrom pymongo import MongoClient\nimport datetime\n\nMONGO_URI = 'mongodb://localhost'\n\nclient = MongoClient(MONGO_URI)\n\ndb = client['FlyBot']\n\ndef upload_data():\n # initialize the departure and arrival date attributes\n numero_de_vuelo = 1\n inicio = datetime.date(2020, 10, 10)\n final = datetime.date(2020, 12, 31)\n # create the 'vuelos' collection and assign it to a variable\n vuelos_collection = db['vuelos']\n 
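# added note (hedged, not in the original source): a unique index such as vuelos_collection.create_index(\"numero de vuelo\", unique=True) would stop duplicate flight numbers from being inserted.\n 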
# read the vuelos.txt file\n file = open(\"vuelos.txt\", \"r\", encoding='utf8')\n # assign the .txt file's contents to the lines variable\n lines = file.readlines()\n it = 0\n for i in range(50):\n random_date_ida = inicio + (final - inicio) * random.random()\n random_date_llegada = inicio + (final - inicio) * random.random()\n ida = lines[randint(0, len(lines) - 1)].split(\",\")\n llegada = lines[randint(0, len(lines) - 1)].split(\",\")\n if ida[1] != llegada[1]:\n if it % 2 == 0:\n it += 1\n if random_date_llegada > random_date_ida:\n vuelos_collection.insert_one(\n {\n \"numero de vuelo\": numero_de_vuelo,\n \"origen\": {\n \"ciudad\": ida[0],\n \"IATA\": ida[1],\n \"provincia\": ida[2],\n \"pais\": ida[3][:-1]\n },\n \"destino\": {\n \"ciudad\": llegada[0],\n \"IATA\": llegada[1],\n \"provincia\": llegada[2],\n \"pais\": llegada[3][:-1]\n },\n \"fecha de ida\": str(random_date_ida),\n \"fecha de llegada\": str(random_date_llegada)\n }\n )\n numero_de_vuelo += 1\n else:\n it += 1\n vuelos_collection.insert_one(\n {\n \"numero de vuelo\": numero_de_vuelo,\n \"origen\": {\n \"ciudad\": ida[0],\n \"IATA\": ida[1],\n \"provincia\": ida[2],\n \"pais\": ida[3][:-1]\n },\n \"destino\": {\n \"ciudad\": llegada[0],\n \"IATA\": llegada[1],\n \"provincia\": llegada[2],\n \"pais\": llegada[3][:-1]\n },\n \"fecha de ida\": str(random_date_ida),\n \"fecha de llegada\": \"\"\n }\n )\n numero_de_vuelo += 1\n\nupload_data()","repo_name":"Frael98/refactor-pylint-Bot","sub_path":"data/subir_datos.py","file_name":"subir_datos.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37697997642","text":"import os\nimport shutil\nimport struct\n\nfrom mutagen._compat import cBytesIO, PY2\nfrom tempfile import mkstemp\nfrom tests import TestCase, add\nfrom mutagen.mp4 import MP4, Atom, Atoms, MP4Tags, MP4Info, \\\n delete, MP4Cover, MP4MetadataError, MP4FreeForm, error\nfrom mutagen._util import cdata\nfrom os import devnull\n\n\nclass TAtom(TestCase):\n\n def test_no_children(self):\n fileobj = cBytesIO(b\"\\x00\\x00\\x00\\x08atom\")\n atom = Atom(fileobj)\n self.failUnlessRaises(KeyError, atom.__getitem__, \"test\")\n\n def test_length_1(self):\n fileobj = cBytesIO(b\"\\x00\\x00\\x00\\x01atom\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10\" + b\"\\x00\" * 16)\n self.failUnlessEqual(Atom(fileobj).length, 16)\n\n def test_length_64bit_less_than_16(self):\n fileobj = cBytesIO(b\"\\x00\\x00\\x00\\x01atom\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x08\" + b\"\\x00\" * 8)\n self.assertRaises(error, Atom, fileobj)\n\n def test_length_less_than_8(self):\n fileobj = cBytesIO(b\"\\x00\\x00\\x00\\x02atom\")\n self.assertRaises(MP4MetadataError, Atom, fileobj)\n\n def test_render_too_big(self):\n class TooBig(bytes):\n def __len__(self):\n return 1 << 32\n data = TooBig(b\"test\")\n try: len(data)\n except OverflowError:\n # Py_ssize_t is still only 32 bits on this system.\n self.failUnlessRaises(OverflowError, Atom.render, b\"data\", data)\n else:\n data = Atom.render(b\"data\", data)\n self.failUnlessEqual(len(data), 4 + 4 + 8 + 4)\n\n def test_non_top_level_length_0_is_invalid(self):\n data = cBytesIO(struct.pack(\">I4s\", 0, b\"whee\"))\n self.assertRaises(MP4MetadataError, Atom, data, level=1)\n\n def test_length_0(self):\n fileobj = cBytesIO(b\"\\x00\\x00\\x00\\x00atom\" + 40 * b\"\\x00\")\n atom = Atom(fileobj)\n self.failUnlessEqual(fileobj.tell(), 48)\n self.failUnlessEqual(atom.length, 48)\n\n def 
test_length_0_container(self):\n data = cBytesIO(struct.pack(\">I4s\", 0, b\"moov\") +\n Atom.render(b\"data\", b\"whee\"))\n atom = Atom(data)\n self.failUnlessEqual(len(atom.children), 1)\n self.failUnlessEqual(atom.length, 20)\n self.failUnlessEqual(atom.children[-1].length, 12)\n\nadd(TAtom)\n\nclass TAtoms(TestCase):\n filename = os.path.join(\"tests\", \"data\", \"has-tags.m4a\")\n\n def setUp(self):\n self.atoms = Atoms(open(self.filename, \"rb\"))\n\n def test_getitem(self):\n self.failUnless(self.atoms[b\"moov\"])\n self.failUnless(self.atoms[b\"moov.udta\"])\n self.failUnlessRaises(KeyError, self.atoms.__getitem__, b\"whee\")\n\n def test_contains(self):\n self.failUnless(b\"moov\" in self.atoms)\n self.failUnless(b\"moov.udta\" in self.atoms)\n self.failUnless(b\"whee\" not in self.atoms)\n\n def test_name(self):\n self.failUnlessEqual(self.atoms.atoms[0].name, b\"ftyp\")\n\n def test_children(self):\n self.failUnless(self.atoms.atoms[2].children)\n\n def test_no_children(self):\n self.failUnless(self.atoms.atoms[0].children is None)\n\n def test_extra_trailing_data(self):\n data = cBytesIO(Atom.render(b\"data\", b\"whee\") + b\"\\x00\\x00\")\n self.failUnless(Atoms(data))\n\n def test_repr(self):\n repr(self.atoms)\nadd(TAtoms)\n\nclass TMP4Info(TestCase):\n\n def test_no_soun(self):\n self.failUnlessRaises(\n IOError, self.test_mdhd_version_1, b\"vide\")\n\n def test_mdhd_version_1(self, soun=b\"soun\"):\n mdhd = Atom.render(b\"mdhd\", (b\"\\x01\\x00\\x00\\x00\" + b\"\\x00\" * 16 +\n b\"\\x00\\x00\\x00\\x02\" + # 2 Hz\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10\"))\n hdlr = Atom.render(b\"hdlr\", b\"\\x00\" * 8 + soun)\n mdia = Atom.render(b\"mdia\", mdhd + hdlr)\n trak = Atom.render(b\"trak\", mdia)\n moov = Atom.render(b\"moov\", trak)\n fileobj = cBytesIO(moov)\n atoms = Atoms(fileobj)\n info = MP4Info(atoms, fileobj)\n self.failUnlessEqual(info.length, 8)\n\n def test_multiple_tracks(self):\n hdlr = Atom.render(b\"hdlr\", b\"\\x00\" * 8 + b\"whee\")\n mdia = Atom.render(b\"mdia\", hdlr)\n trak1 = Atom.render(b\"trak\", mdia)\n mdhd = Atom.render(b\"mdhd\", (b\"\\x01\\x00\\x00\\x00\" + b\"\\x00\" * 16 +\n b\"\\x00\\x00\\x00\\x02\" + # 2 Hz\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10\"))\n hdlr = Atom.render(b\"hdlr\", b\"\\x00\" * 8 + b\"soun\")\n mdia = Atom.render(b\"mdia\", mdhd + hdlr)\n trak2 = Atom.render(b\"trak\", mdia)\n moov = Atom.render(b\"moov\", trak1 + trak2)\n fileobj = cBytesIO(moov)\n atoms = Atoms(fileobj)\n info = MP4Info(atoms, fileobj)\n self.failUnlessEqual(info.length, 8)\nadd(TMP4Info)\n\nclass TMP4Tags(TestCase):\n\n def wrap_ilst(self, data):\n ilst = Atom.render(b\"ilst\", data)\n meta = Atom.render(b\"meta\", b\"\\x00\" * 4 + ilst)\n data = Atom.render(b\"moov\", Atom.render(b\"udta\", meta))\n fileobj = cBytesIO(data)\n return MP4Tags(Atoms(fileobj), fileobj)\n\n def test_genre(self):\n data = Atom.render(b\"data\", b\"\\x00\" * 8 + b\"\\x00\\x01\")\n genre = Atom.render(b\"gnre\", data)\n tags = self.wrap_ilst(genre)\n self.failIf(b\"gnre\" in tags)\n self.failUnlessEqual(tags[b\"\\xa9gen\"], [\"Blues\"])\n\n def test_empty_cpil(self):\n cpil = Atom.render(b\"cpil\", Atom.render(b\"data\", b\"\\x00\" * 8))\n tags = self.wrap_ilst(cpil)\n self.failUnless(b\"cpil\" in tags)\n self.failIf(tags[b\"cpil\"])\n\n def test_genre_too_big(self):\n data = Atom.render(b\"data\", b\"\\x00\" * 8 + b\"\\x01\\x00\")\n genre = Atom.render(b\"gnre\", data)\n tags = self.wrap_ilst(genre)\n self.failIf(b\"gnre\" in tags)\n self.failIf(b\"\\xa9gen\" in 
tags)\n\n def test_strips_unknown_types(self):\n data = Atom.render(b\"data\", b\"\\x00\" * 8 + b\"whee\")\n foob = Atom.render(b\"foob\", data)\n tags = self.wrap_ilst(foob)\n self.failIf(tags)\n\n def test_strips_bad_unknown_types(self):\n data = Atom.render(b\"datA\", b\"\\x00\" * 8 + b\"whee\")\n foob = Atom.render(b\"foob\", data)\n tags = self.wrap_ilst(foob)\n self.failIf(tags)\n\n def test_bad_covr(self):\n data = Atom.render(b\"foob\", b\"\\x00\\x00\\x00\\x0E\" + b\"\\x00\" * 4 + b\"whee\")\n covr = Atom.render(b\"covr\", data)\n self.failUnlessRaises(MP4MetadataError, self.wrap_ilst, covr)\n\n def test_covr_blank_format(self):\n data = Atom.render(b\"data\", b\"\\x00\\x00\\x00\\x00\" + b\"\\x00\" * 4 + b\"whee\")\n covr = Atom.render(b\"covr\", data)\n tags = self.wrap_ilst(covr)\n self.failUnlessEqual(MP4Cover.FORMAT_JPEG, tags[b\"covr\"][0].imageformat)\n\n def test_render_bool(self):\n self.failUnlessEqual(MP4Tags()._MP4Tags__render_bool(b'pgap', True),\n b\"\\x00\\x00\\x00\\x19pgap\\x00\\x00\\x00\\x11data\"\n b\"\\x00\\x00\\x00\\x15\\x00\\x00\\x00\\x00\\x01\")\n self.failUnlessEqual(MP4Tags()._MP4Tags__render_bool(b'pgap', False),\n b\"\\x00\\x00\\x00\\x19pgap\\x00\\x00\\x00\\x11data\"\n b\"\\x00\\x00\\x00\\x15\\x00\\x00\\x00\\x00\\x00\")\n\n def test_render_text(self):\n self.failUnlessEqual(\n MP4Tags()._MP4Tags__render_text(b'purl', ['http://foo/bar.xml'], 0),\n b\"\\x00\\x00\\x00*purl\\x00\\x00\\x00\\\"data\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00http://foo/bar.xml\")\n self.failUnlessEqual(\n MP4Tags()._MP4Tags__render_text(b'aART', [u'\\u0041lbum Artist']),\n b\"\\x00\\x00\\x00$aART\\x00\\x00\\x00\\x1cdata\\x00\\x00\\x00\\x01\\x00\\x00\"\n b\"\\x00\\x00\\x41lbum Artist\")\n self.failUnlessEqual(\n MP4Tags()._MP4Tags__render_text(b'aART', [u'Album Artist', u'Whee']),\n b\"\\x00\\x00\\x008aART\\x00\\x00\\x00\\x1cdata\\x00\\x00\\x00\\x01\\x00\\x00\"\n b\"\\x00\\x00Album Artist\\x00\\x00\\x00\\x14data\\x00\\x00\\x00\\x01\\x00\"\n b\"\\x00\\x00\\x00Whee\")\n\n def test_render_data(self):\n self.failUnlessEqual(\n MP4Tags()._MP4Tags__render_data(b'aART', 1, [b'whee']),\n b\"\\x00\\x00\\x00\\x1caART\"\n b\"\\x00\\x00\\x00\\x14data\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00whee\")\n self.failUnlessEqual(\n MP4Tags()._MP4Tags__render_data(b'aART', 2, [b'whee', b'wee']),\n b\"\\x00\\x00\\x00/aART\"\n b\"\\x00\\x00\\x00\\x14data\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00whee\"\n b\"\\x00\\x00\\x00\\x13data\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00wee\")\n\n def test_bad_text_data(self):\n data = Atom.render(b\"datA\", b\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00whee\")\n data = Atom.render(b\"aART\", data)\n self.failUnlessRaises(MP4MetadataError, self.wrap_ilst, data)\n\n def test_render_freeform(self):\n self.failUnlessEqual(\n MP4Tags()._MP4Tags__render_freeform(\n b'----:net.sacredchao.Mutagen:test', [b'whee', b'wee']),\n b\"\\x00\\x00\\x00a----\"\n b\"\\x00\\x00\\x00\\\"mean\\x00\\x00\\x00\\x00net.sacredchao.Mutagen\"\n b\"\\x00\\x00\\x00\\x10name\\x00\\x00\\x00\\x00test\"\n b\"\\x00\\x00\\x00\\x14data\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00whee\"\n b\"\\x00\\x00\\x00\\x13data\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00wee\")\n\n def test_bad_freeform(self):\n mean = Atom.render(b\"mean\", b\"net.sacredchao.Mutagen\")\n name = Atom.render(b\"name\", b\"empty test key\")\n bad_freeform = Atom.render(b\"----\", b\"\\x00\" * 4 + mean + name)\n self.failUnlessRaises(MP4MetadataError, self.wrap_ilst, bad_freeform)\n\n def test_pprint_non_text_list(self):\n tags = MP4Tags()\n 
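# added note (not in the original source): pprint must cope with non-string values such as ints and tuples without raising.\n 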
tags[b\"tmpo\"] = [120, 121]\n tags[b\"trck\"] = [(1, 2), (3, 4)]\n tags.pprint()\n\n def test_freeform_data(self):\n # http://code.google.com/p/mutagen/issues/detail?id=103\n key = b\"----:com.apple.iTunes:Encoding Params\"\n value = (b\"vers\\x00\\x00\\x00\\x01acbf\\x00\\x00\\x00\\x01brat\\x00\\x01\\xf4\"\n b\"\\x00cdcv\\x00\\x01\\x05\\x04\")\n\n data = (b\"\\x00\\x00\\x00\\x1cmean\\x00\\x00\\x00\\x00com.apple.iTunes\\x00\\x00\"\n b\"\\x00\\x1bname\\x00\\x00\\x00\\x00Encoding Params\\x00\\x00\\x000data\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00vers\\x00\\x00\\x00\\x01acbf\\x00\"\n b\"\\x00\\x00\\x01brat\\x00\\x01\\xf4\\x00cdcv\\x00\\x01\\x05\\x04\")\n\n tags = self.wrap_ilst(Atom.render(b\"----\", data))\n v = tags[key][0]\n self.failUnlessEqual(v, value)\n self.failUnlessEqual(v.dataformat, MP4FreeForm.FORMAT_DATA)\n\n data = MP4Tags()._MP4Tags__render_freeform(key, v)\n v = self.wrap_ilst(data)[key][0]\n self.failUnlessEqual(v.dataformat, MP4FreeForm.FORMAT_DATA)\n\n data = MP4Tags()._MP4Tags__render_freeform(key, value)\n v = self.wrap_ilst(data)[key][0]\n self.failUnlessEqual(v.dataformat, MP4FreeForm.FORMAT_TEXT)\n\nadd(TMP4Tags)\n\nclass TMP4(TestCase):\n def setUp(self):\n fd, self.filename = mkstemp(suffix='.m4a')\n os.close(fd)\n shutil.copy(self.original, self.filename)\n self.audio = MP4(self.filename)\n\n def faad(self):\n if not have_faad: return\n value = os.system(\"faad %s -o %s > %s 2> %s\" % (\n self.filename, devnull, devnull, devnull))\n self.failIf(value and value != NOTFOUND)\n\n def test_score(self):\n fileobj = open(self.filename, \"rb\")\n header = fileobj.read(128)\n self.failUnless(MP4.score(self.filename, fileobj, header))\n fileobj.close()\n\n def test_channels(self):\n self.failUnlessEqual(self.audio.info.channels, 2)\n\n def test_sample_rate(self):\n self.failUnlessEqual(self.audio.info.sample_rate, 44100)\n\n def test_bits_per_sample(self):\n self.failUnlessEqual(self.audio.info.bits_per_sample, 16)\n\n def test_bitrate(self):\n self.failUnlessEqual(self.audio.info.bitrate, 2914)\n\n def test_length(self):\n self.failUnlessAlmostEqual(3.7, self.audio.info.length, 1)\n\n def test_padding(self):\n self.audio[b\"\\xa9nam\"] = u\"wheeee\" * 10\n self.audio.save()\n size1 = os.path.getsize(self.audio.filename)\n self.audio[b\"\\xa9nam\"] = u\"wheeee\" * 11\n self.audio.save()\n size2 = os.path.getsize(self.audio.filename)\n self.failUnless(size1, size2)\n\n def test_padding_2(self):\n self.audio[b\"\\xa9nam\"] = u\"wheeee\" * 10\n self.audio.save()\n # Reorder \"free\" and \"ilst\" atoms\n fileobj = open(self.audio.filename, \"rb+\")\n atoms = Atoms(fileobj)\n meta = atoms[b\"moov\", b\"udta\", b\"meta\"]\n meta_length1 = meta.length\n ilst = meta[b\"ilst\",]\n free = meta[b\"free\",]\n self.failUnlessEqual(ilst.offset + ilst.length, free.offset)\n fileobj.seek(ilst.offset)\n ilst_data = fileobj.read(ilst.length)\n fileobj.seek(free.offset)\n free_data = fileobj.read(free.length)\n fileobj.seek(ilst.offset)\n fileobj.write(free_data + ilst_data)\n fileobj.close()\n fileobj = open(self.audio.filename, \"rb+\")\n atoms = Atoms(fileobj)\n meta = atoms[b\"moov\", b\"udta\", b\"meta\"]\n ilst = meta[b\"ilst\",]\n free = meta[b\"free\",]\n self.failUnlessEqual(free.offset + free.length, ilst.offset)\n fileobj.close()\n # Save the file\n self.audio[b\"\\xa9nam\"] = u\"wheeee\" * 11\n self.audio.save()\n # Check the order of \"free\" and \"ilst\" atoms\n fileobj = open(self.audio.filename, \"rb+\")\n atoms = Atoms(fileobj)\n fileobj.close()\n meta = 
atoms[b\"moov\", b\"udta\", b\"meta\"]\n ilst = meta[b\"ilst\",]\n free = meta[b\"free\",]\n self.failUnlessEqual(meta.length, meta_length1)\n self.failUnlessEqual(ilst.offset + ilst.length, free.offset)\n\n def set_key(self, key, value, result=None, faad=True):\n self.audio[key] = value\n self.audio.save()\n audio = MP4(self.audio.filename)\n self.failUnless(key in audio)\n self.failUnlessEqual(audio[key], result or value)\n if faad:\n self.faad()\n\n def test_unicode(self):\n self.set_key(b'\\xa9nam', [b'\\xe3\\x82\\x8a\\xe3\\x81\\x8b'],\n result=[u'\\u308a\\u304b'])\n\n def test_save_text(self):\n self.set_key(b'\\xa9nam', [u\"Some test name\"])\n\n def test_save_texts(self):\n self.set_key(b'\\xa9nam', [u\"Some test name\", u\"One more name\"])\n\n def test_freeform(self):\n self.set_key(b'----:net.sacredchao.Mutagen:test key', [b\"whee\"])\n\n def test_freeform_2(self):\n self.set_key(b'----:net.sacredchao.Mutagen:test key', b\"whee\", [b\"whee\"])\n\n def test_freeforms(self):\n self.set_key(b'----:net.sacredchao.Mutagen:test key', [b\"whee\", b\"uhh\"])\n\n def test_freeform_bin(self):\n self.set_key(b'----:net.sacredchao.Mutagen:test key', [\n MP4FreeForm(b'woooo', MP4FreeForm.FORMAT_TEXT),\n MP4FreeForm(b'hoooo', MP4FreeForm.FORMAT_DATA),\n MP4FreeForm(b'boooo'),\n ])\n\n def test_tracknumber(self):\n self.set_key(b'trkn', [(1, 10)])\n self.set_key(b'trkn', [(1, 10), (5, 20)], faad=False)\n self.set_key(b'trkn', [])\n\n def test_disk(self):\n self.set_key(b'disk', [(18, 0)])\n self.set_key(b'disk', [(1, 10), (5, 20)], faad=False)\n self.set_key(b'disk', [])\n\n def test_tracknumber_too_small(self):\n self.failUnlessRaises(ValueError, self.set_key, b'trkn', [(-1, 0)])\n self.failUnlessRaises(ValueError, self.set_key, b'trkn', [(2**18, 1)])\n\n def test_disk_too_small(self):\n self.failUnlessRaises(ValueError, self.set_key, b'disk', [(-1, 0)])\n self.failUnlessRaises(ValueError, self.set_key, b'disk', [(2**18, 1)])\n\n def test_tracknumber_wrong_size(self):\n self.failUnlessRaises(ValueError, self.set_key, b'trkn', (1,))\n self.failUnlessRaises(ValueError, self.set_key, b'trkn', (1, 2, 3,))\n self.failUnlessRaises(ValueError, self.set_key, b'trkn', [(1,)])\n self.failUnlessRaises(ValueError, self.set_key, b'trkn', [(1, 2, 3,)])\n\n def test_disk_wrong_size(self):\n self.failUnlessRaises(ValueError, self.set_key, b'disk', [(1,)])\n self.failUnlessRaises(ValueError, self.set_key, b'disk', [(1, 2, 3,)])\n\n def test_tempo(self):\n self.set_key(b'tmpo', [150])\n self.set_key(b'tmpo', [])\n\n def test_tempos(self):\n self.set_key(b'tmpo', [160, 200], faad=False)\n\n def test_tempo_invalid(self):\n for badvalue in [[10000000], [-1], 10, \"foo\"]:\n self.failUnlessRaises(ValueError, self.set_key, b'tmpo', badvalue)\n\n def test_compilation(self):\n self.set_key(b'cpil', True)\n\n def test_compilation_false(self):\n self.set_key(b'cpil', False)\n\n def test_gapless(self):\n self.set_key(b'pgap', True)\n\n def test_gapless_false(self):\n self.set_key(b'pgap', False)\n\n def test_podcast(self):\n self.set_key(b'pcst', True)\n\n def test_podcast_false(self):\n self.set_key(b'pcst', False)\n\n def test_cover(self):\n self.set_key(b'covr', [b'woooo'])\n\n def test_cover_png(self):\n self.set_key(b'covr', [\n MP4Cover(b'woooo', MP4Cover.FORMAT_PNG),\n MP4Cover(b'hoooo', MP4Cover.FORMAT_JPEG),\n ])\n\n def test_podcast_url(self):\n self.set_key(b'purl', ['http://pdl.warnerbros.com/wbie/justiceleagueheroes/audio/JLH_EA.xml'])\n\n def test_episode_guid(self):\n self.set_key(b'catg', 
['falling-star-episode-1'])\n\n def test_pprint(self):\n self.failUnless(self.audio.pprint())\n\n def test_pprint_binary(self):\n self.audio[b\"covr\"] = [b\"\\x00\\xa9\\garbage\"]\n self.failUnless(self.audio.pprint())\n\n def test_pprint_pair(self):\n self.audio[b\"cpil\"] = (1, 10)\n if PY2:\n self.failUnless(\"'cpil'=(1, 10)\" in self.audio.pprint())\n else:\n self.failUnless(\"b'cpil'=(1, 10)\" in self.audio.pprint())\n\n def test_delete(self):\n self.audio.delete()\n audio = MP4(self.audio.filename)\n self.failIf(audio.tags)\n self.faad()\n\n def test_module_delete(self):\n delete(self.filename)\n audio = MP4(self.audio.filename)\n self.failIf(audio.tags)\n self.faad()\n\n def test_reads_unknown_text(self):\n self.set_key(b\"foob\", [u\"A test\"])\n\n def __read_offsets(self, filename):\n fileobj = open(filename, 'rb')\n atoms = Atoms(fileobj)\n moov = atoms[b'moov']\n samples = []\n for atom in moov.findall(b'stco', True):\n fileobj.seek(atom.offset + 12)\n data = fileobj.read(atom.length - 12)\n fmt = \">%dI\" % cdata.uint_be(data[:4])\n offsets = struct.unpack(fmt, data[4:])\n for offset in offsets:\n fileobj.seek(offset)\n samples.append(fileobj.read(8))\n for atom in moov.findall(b'co64', True):\n fileobj.seek(atom.offset + 12)\n data = fileobj.read(atom.length - 12)\n fmt = \">%dQ\" % cdata.uint_be(data[:4])\n offsets = struct.unpack(fmt, data[4:])\n for offset in offsets:\n fileobj.seek(offset)\n samples.append(fileobj.read(8))\n try:\n for atom in atoms[b\"moof\"].findall(b'tfhd', True):\n data = fileobj.read(atom.length - 9)\n flags = cdata.uint_be(b\"\\x00\" + data[:3])\n if flags & 1:\n offset = cdata.ulonglong_be(data[7:15])\n fileobj.seek(offset)\n samples.append(fileobj.read(8))\n except KeyError:\n pass\n fileobj.close()\n return samples\n\n def test_update_offsets(self):\n aa = self.__read_offsets(self.original)\n self.audio[b\"\\xa9nam\"] = b\"wheeeeeeee\"\n self.audio.save()\n bb = self.__read_offsets(self.filename)\n for a, b in zip(aa, bb):\n self.failUnlessEqual(a, b)\n\n def test_mime(self):\n self.failUnless(\"audio/mp4\" in self.audio.mime)\n\n def tearDown(self):\n os.unlink(self.filename)\n\n\nclass TMP4HasTags(TMP4):\n def test_save_simple(self):\n self.audio.save()\n self.faad()\n\n def test_shrink(self):\n self.audio.clear()\n self.audio.save()\n audio = MP4(self.audio.filename)\n self.failIf(audio.tags)\n\n def test_too_short(self):\n fileobj = open(self.audio.filename, \"rb\")\n try:\n atoms = Atoms(fileobj)\n ilst = atoms[b\"moov.udta.meta.ilst\"]\n # fake a too long atom length\n ilst.children[0].length += 10000000\n self.failUnlessRaises(MP4MetadataError, MP4Tags, atoms, fileobj)\n finally:\n fileobj.close()\n\n def test_has_tags(self):\n self.failUnless(self.audio.tags)\n\n def test_not_my_file(self):\n # should raise something like \"Not a MP4 file\"\n self.failUnlessRaisesRegexp(\n error, \"MP4\", MP4, os.path.join(\"tests\", \"data\", \"empty.ogg\"))\n\n\nclass TMP4Datatypes(TMP4HasTags):\n original = os.path.join(\"tests\", \"data\", \"has-tags.m4a\")\n\n def test_has_freeform(self):\n key = b\"----:com.apple.iTunes:iTunNORM\"\n self.failUnless(key in self.audio.tags)\n ff = self.audio.tags[key]\n self.failUnlessEqual(ff[0].dataformat, MP4FreeForm.FORMAT_TEXT)\n\n def test_has_covr(self):\n self.failUnless(b'covr' in self.audio.tags)\n covr = self.audio.tags[b'covr']\n self.failUnlessEqual(len(covr), 2)\n self.failUnlessEqual(covr[0].imageformat, MP4Cover.FORMAT_PNG)\n self.failUnlessEqual(covr[1].imageformat, 
MP4Cover.FORMAT_JPEG)\n\nadd(TMP4Datatypes)\n\n\nclass TMP4CovrWithName(TMP4):\n # http://bugs.musicbrainz.org/ticket/5894\n original = os.path.join(\"tests\", \"data\", \"covr-with-name.m4a\")\n\n def test_has_covr(self):\n self.failUnless(b'covr' in self.audio.tags)\n covr = self.audio.tags[b'covr']\n self.failUnlessEqual(len(covr), 2)\n self.failUnlessEqual(covr[0].imageformat, MP4Cover.FORMAT_PNG)\n self.failUnlessEqual(covr[1].imageformat, MP4Cover.FORMAT_JPEG)\n\nadd(TMP4CovrWithName)\n\nclass TMP4HasTags64Bit(TMP4HasTags):\n original = os.path.join(\"tests\", \"data\", \"truncated-64bit.mp4\")\n\n def test_has_covr(self):\n pass\n\n def test_bitrate(self):\n self.failUnlessEqual(self.audio.info.bitrate, 128000)\n\n def test_length(self):\n self.failUnlessAlmostEqual(0.325, self.audio.info.length, 3)\n\n def faad(self):\n # This is only half a file, so FAAD segfaults. Can't test. :(\n pass\n\nadd(TMP4HasTags64Bit)\n\nclass TMP4NoTagsM4A(TMP4):\n original = os.path.join(\"tests\", \"data\", \"no-tags.m4a\")\n\n def test_no_tags(self):\n self.failUnless(self.audio.tags is None)\n\n def test_add_tags(self):\n self.audio.add_tags()\n self.failUnlessRaises(error, self.audio.add_tags)\n\nadd(TMP4NoTagsM4A)\n\nclass TMP4NoTags3G2(TMP4):\n original = os.path.join(\"tests\", \"data\", \"no-tags.3g2\")\n\n def test_no_tags(self):\n self.failUnless(self.audio.tags is None)\n\n def test_sample_rate(self):\n self.failUnlessEqual(self.audio.info.sample_rate, 22050)\n\n def test_bitrate(self):\n self.failUnlessEqual(self.audio.info.bitrate, 32000)\n\n def test_length(self):\n self.failUnlessAlmostEqual(15, self.audio.info.length, 1)\n\nadd(TMP4NoTags3G2)\n\nclass TMP4UpdateParents64Bit(TestCase):\n original = os.path.join(\"tests\", \"data\", \"64bit.mp4\")\n\n def setUp(self):\n fd, self.filename = mkstemp(suffix='.mp4')\n os.close(fd)\n shutil.copy(self.original, self.filename)\n\n def test_update_parents(self):\n with open(self.filename, \"rb\") as fileobj:\n atoms = Atoms(fileobj)\n self.assertEqual(77, atoms.atoms[0].length)\n self.assertEqual(61, atoms.atoms[0].children[0].length)\n tags = MP4Tags(atoms, fileobj)\n tags[b'pgap'] = True\n tags.save(self.filename)\n\n with open(self.filename, \"rb\") as fileobj:\n atoms = Atoms(fileobj)\n # original size + 'pgap' size + padding\n self.assertEqual(77 + 25 + 974, atoms.atoms[0].length)\n self.assertEqual(61 + 25 + 974, atoms.atoms[0].children[0].length)\n\n def tearDown(self):\n os.unlink(self.filename)\n\nadd(TMP4UpdateParents64Bit)\n\nNOTFOUND = os.system(\"tools/notarealprogram 2> %s\" % devnull)\n\nhave_faad = True\nif os.system(\"faad 2> %s > %s\" % (devnull, devnull)) == NOTFOUND:\n have_faad = False\n print(\"WARNING: Skipping FAAD reference tests.\")\n","repo_name":"LordSputnik/mutagen","sub_path":"tests/test_mp4.py","file_name":"test_mp4.py","file_ext":"py","file_size_in_byte":24633,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"54"} +{"seq_id":"39739497761","text":"import pygame, sys, random\r\n\r\n#Physics Animations\r\ndef ball_animation():\r\n global ball_speed_x, ball_speed_y, player_score, opponent_score, restart\r\n ball.x += ball_speed_x\r\n ball.y += ball_speed_y\r\n\r\n #make ball stay in the window\r\n if ball.top <= 0 or ball.bottom >= screen_height:\r\n collide_sound.play()\r\n ball_speed_y *= -1\r\n\r\n #ball goes out left side\r\n if ball.right <= -10:\r\n score_sound.play()\r\n player_score += 1\r\n restart = True\r\n\r\n #ball goes out right side\r\n if ball.left >= 
screen_width + 10:\r\n score_sound.play()\r\n opponent_score += 1\r\n restart = True\r\n\r\n #check for collisions with player\r\n if ball.colliderect(player) and ball_speed_x > 0:\r\n #if the ball hits on the left\r\n collide_sound.play()\r\n if abs(ball.right - player.left) < ball_speed*2:\r\n ball_speed_x *= -1\r\n #if the ball hits on the top\r\n elif abs(ball.bottom - player.top) < ball_speed*2 and ball_speed_y > 0:\r\n ball_speed_y *= -1\r\n #if the ball hits on the bottom\r\n elif abs(ball.top - player.bottom) < ball_speed*2 and ball_speed_y < 0:\r\n ball_speed_y *= -1\r\n\r\n #check for collisions with opponent\r\n if ball.colliderect(opponent) and ball_speed_x < 0:\r\n collide_sound.play()\r\n #if the ball hits on the right\r\n if abs(ball.left - opponent.right) < ball_speed*2:\r\n ball_speed_x *= -1\r\n #if the ball hits on the top\r\n elif abs(ball.bottom - opponent.top) < ball_speed*2 and ball_speed_y > 0:\r\n ball_speed_y *= -1\r\n #if the ball hits on the bottom\r\n elif abs(ball.top - opponent.bottom) < ball_speed*2 and ball_speed_y < 0:\r\n ball_speed_y *= -1\r\n\r\ndef ball_restart():\r\n global ball_speed_x, ball_speed_y, ball_speed, restart, begin\r\n\r\n ball.center = (screen_width/2, screen_height/2)\r\n space_text = game_font.render(\"Press space to begin\", False, LIGHT_GREEN)\r\n screen.blit(space_text, (screen_width/2 - 160, screen_height - 40))\r\n\r\n #must click space to start the ball \r\n if begin:\r\n ball_speed_x = ball_speed * random.choice((1, -1))\r\n ball_speed_y = ball_speed * random.choice((1, -1))\r\n restart = False\r\n begin = False\r\n else:\r\n ball_speed_x = 0\r\n ball_speed_y = 0\r\n \r\ndef player_animation():\r\n player.y += player_speed\r\n #keep the player in bounds\r\n if player.top <= 0:\r\n player.top = 0\r\n if player.bottom >= screen_height:\r\n player.bottom = screen_height\r\n\r\ndef opponent_ai():\r\n #raising the opponent_speed increases the difficulty\r\n if opponent.centery < ball.centery:\r\n opponent.centery += opponent_speed\r\n if opponent.centery > ball.centery:\r\n opponent.centery -= opponent_speed\r\n #keep the opponent in bounds\r\n if opponent.top <= 0:\r\n opponent.top = 0\r\n if opponent.bottom >= screen_height:\r\n opponent.bottom = screen_height\r\n\r\n\r\n\r\n#General setup\r\npygame.mixer.pre_init(44100, -16, 2, 256)\r\npygame.init()\r\nclock = pygame.time.Clock()\r\n\r\n#Setting up the main window\r\nscreen_width = 1200 #960\r\nscreen_height = 720\r\nscreen = pygame.display.set_mode((screen_width, screen_height))\r\npygame.display.set_caption(\"Pong\")\r\n\r\n#Game Rectangles\r\n#pygame.Rect(xcord, ycord, xsize, ysize)\r\nball = pygame.Rect(screen_width/2 - 15, screen_height/2 - 15, 30, 30)\r\nplayer = pygame.Rect(screen_width - 20, screen_height/2 - 70, 10, 140)\r\nopponent = pygame.Rect(10, screen_height/2 - 70, 10, 140)\r\n\r\n#Colors\r\nDARK_GREEN = (61, 107, 69)\r\nLIGHT_GREEN = (220, 255, 150)\r\nORANGE = (255, 218, 150)\r\n\r\n#Physics\r\nball_speed = 10\r\nball_speed_x = ball_speed * random.choice((1, -1))\r\nball_speed_y = ball_speed * random.choice((1, -1))\r\nplayer_speed = 0\r\nopponent_speed = 7\r\n\r\n#Text Variables\r\nplayer_score = 0\r\nopponent_score = 0\r\ngame_font = pygame.font.Font(\"freesansbold.ttf\", 32)\r\n\r\n#Restart Point Variables\r\nrestart = True\r\nbegin = False\r\n\r\n#Sound\r\ncollide_sound = pygame.mixer.Sound(\"gamefiles/collide.wav\")\r\nscore_sound = pygame.mixer.Sound(\"gamefiles/score.wav\")\r\n\r\nwhile True:\r\n #Handling input\r\n for event in pygame.event.get():\r\n if 
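ball_animation above tells a paddle-face hit from a top/bottom graze by checking whether the overlap on a given edge is smaller than ball_speed*2, i.e. within roughly one frame of travel. A standalone sketch of that test with fabricated rectangle positions:

```python
import pygame

ball = pygame.Rect(460, 300, 30, 30)     # fabricated positions
player = pygame.Rect(480, 290, 10, 140)
ball_speed = 10

if ball.colliderect(player):
    if abs(ball.right - player.left) < ball_speed * 2:
        print("front-face hit: flip ball_speed_x")
    elif abs(ball.bottom - player.top) < ball_speed * 2:
        print("top-edge hit: flip ball_speed_y")
```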
event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_DOWN:\r\n player_speed = 7\r\n if event.key == pygame.K_UP:\r\n player_speed = -7\r\n if event.key == pygame.K_SPACE:\r\n if restart == True:\r\n begin = True\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_DOWN:\r\n player_speed = 0\r\n if event.key == pygame.K_UP:\r\n player_speed = 0 \r\n\r\n\r\n #Physics Animations\r\n ball_animation()\r\n player_animation()\r\n opponent_ai()\r\n \r\n #Visuals\r\n #background color\r\n screen.fill(DARK_GREEN)\r\n\r\n #drawing a line: (surface, color, start, end)\r\n pygame.draw.aaline(screen, ORANGE, (screen_width/2, 0), (screen_width/2, screen_height))\r\n \r\n #display the text - f\"{variable}\" prints the variable as a string\r\n player_text = game_font.render(f\"{player_score}\", False, ORANGE)\r\n screen.blit(player_text, (screen_width/2 + 20, 20))\r\n opponent_text = game_font.render(f\"{opponent_score}\", False, ORANGE)\r\n screen.blit(opponent_text, (screen_width/2 - 35, 20))\r\n\r\n #ball and players\r\n pygame.draw.ellipse(screen, LIGHT_GREEN, ball)\r\n pygame.draw.rect(screen, ORANGE, player)\r\n pygame.draw.rect(screen, ORANGE, opponent)\r\n \r\n\r\n #if someone scored, reset ball at the middle \r\n if restart:\r\n ball_restart()\r\n \r\n #Updating the window\r\n pygame.display.flip()\r\n #60 frames per second\r\n clock.tick(60)\r\n","repo_name":"ethanhebert/Pong","sub_path":"pongCode.py","file_name":"pongCode.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37063945812","text":"import tkinter as tk\nimport time\nimport random\n\n\napp = tk.Tk()\napp.title(\"Simon - The Game\")\n\nscore = 0\nsequence_index = 0\nnumber_of_colors= 2\ncolors = [\"red\", \"green\", \"blue\", \"yellow\"]\nsequence = []\nuser_sequence = []\n\n\n \ndef change_the_color():\n global sequence_index # Use the global index\n sequence_index = 0 \n random_colors = random.choices(colors, k=number_of_colors) # Generate 5 random colors\n for color in random_colors:\n label.config(bg=color, text=color)\n label.update()\n time.sleep(1) # Pause for 1 second to display the color\n # Clear the label\n label.update()\n time.sleep(1) # Pause for 1 second between colors\n label.config(text=\"now it's your turn\",bg=\"white\")\n \n sequence.append(color)\n \n\ndef after_first_try():\n label.config(text=\"Nice let's try again\", bg=\"white\")\n time.sleep(1)\n global sequence_index # Use the global index\n sequence_index = 0\n \n for color in sequence:\n label.config(bg=color,text =color)\n label.update()\n time.sleep(1)\n \n label.update()\n time.sleep(1)\n label.config(text=\"now it's your turn\",bg=\"white\")\n \n user_sequence.clear()\n \n \n \n\ndef user_input_save(pressed_color):\n user_sequence.append(pressed_color)\n if len(sequence) == len (user_sequence):\n checked_user_input()\n\n\ndef checked_user_input():\n global number_of_colors\n global score\n if user_sequence == sequence:\n number_of_colors +=1\n random_colors = random.choices(colors, k=1)\n sequence.extend(random_colors)\n score += 1\n my_score.config(text=f'Your Score: {score}')\n my_score.update()\n after_first_try()\n\n else: \n label.config(text=\"Sorry you failed\")\n label.config(bg=\"white\")\n label.update()\n time.sleep(2)\n label.config(text=\"press start and try again\")\n sequence.clear()\n user_sequence.clear()\n number_of_colors = 
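change_the_color above sequences the flashes with label.update() plus time.sleep(), which blocks Tk's event loop for the whole animation. A minimal non-blocking restatement of just the flashing using root.after(); the buttons and score handling are omitted, so this is a sketch, not a drop-in replacement:

```python
import tkinter as tk
import random

root = tk.Tk()
label = tk.Label(root, text="Simon the Game", bg="white", width=20, height=5)
label.pack(pady=10)

colors = ["red", "green", "blue", "yellow"]
sequence = random.choices(colors, k=2)

def flash(index=0):
    if index < len(sequence):
        label.config(bg=sequence[index], text=sequence[index])
        root.after(1000, flash, index + 1)   # schedule the next step in 1 s
    else:
        label.config(text="now it's your turn", bg="white")

flash()
root.mainloop()
```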
1\n\n\nmy_score=tk.Label(app, text = f'Your Score: {score}', bg=\"white\", width =20, height =5)\nmy_score.pack(pady=10) \n\nlabel = tk.Label(app, text = \"Simon the Game\", bg= \"white\", width = 20, height = 5)\nlabel.pack(pady=10)\n\nred_button = tk.Button(app, text = \"red\",highlightbackground=\"red\",command=lambda:user_input_save(\"red\"))\ngreen_button = tk.Button(app, text = \"green\",highlightbackground=\"green\",bg=\"green\", command=lambda:user_input_save(\"green\"))\nblue_button = tk.Button(app, text = \"blue\",highlightbackground=\"blue\", command=lambda:user_input_save(\"blue\"))\nyellow_button = tk.Button(app, text = \"yellow\",highlightbackground=\"yellow\",command=lambda:user_input_save(\"yellow\"))\nstart_button = tk.Button(app, text=\"Start\", command=change_the_color)\nquit_button = tk.Button(app, text=\"Quit\", command=app.destroy, width=50, height=10, highlightbackground=\"black\",fg=\"black\")\n\n\nstart_button.pack()\n\n\nred_button.pack()\ngreen_button.pack()\nblue_button.pack()\nyellow_button.pack()\n\n\n\n\nquit_button.pack()\n\nif __name__ == \"__main__\":\n app.mainloop()\n print(user_sequence)\n print(sequence)\n print(number_of_colors)","repo_name":"gilyasur/SimontheGame","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72925969762","text":"import MapReduce\nimport sys\n\n\"\"\"\nSQL JOIN\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\ndef mapper(record):\n # key: document identifier\n # value: document contents\n key = record[1]\n value = record\n mr.emit_intermediate(key, value)\n\ndef reducer(key, list_of_values):\n # key: word\n # value: list of occurrence counts\n for i in range(1,len(list_of_values)):\n mr.emit((list_of_values[0]+list_of_values[i]))\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)","repo_name":"leparrav/Playground","sub_path":"Courses/Coursera/IDC/assignment3/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"39657512065","text":"class cluster_3:\r\n\r\n def __init__(self,listt):\r\n self.listt = listt\r\n\r\n\r\n def k_mean_3(self):\r\n m_1,m_2,m_3 = self.mean_init()\r\n\r\n p_m1 = 0\r\n p_m2 = 0\r\n p_m3 = 0\r\n while(p_m1 != m_1 or p_m2 != m_2 or p_m3 != m_3):\r\n p_m1 = m_1\r\n p_m2 = m_2\r\n p_m3 = m_3\r\n\r\n list_1,list_2,list_3 = self.clustering(m_1,m_2,m_3) # convert a list into 3 cluster\r\n m_1 = self.avr(list_1)\r\n m_2 = self.avr(list_2)\r\n m_3 = self.avr(list_3)\r\n\r\n #print(list_1,\"\\n\",list_2,\"\\n\",list_3,\"\\n------------\")\r\n \r\n return list_1,list_2,list_3 # final output for cluster\r\n \r\n\r\n def mean_init(self):\r\n num_list = list(zip(*(self.listt)))\r\n num_list = list(num_list[1])\r\n num_list.sort()\r\n return num_list[0],num_list[round(len(num_list)/2)], num_list[len(num_list)-1]\r\n \r\n\r\n def clustering(self,m1,m2,m3):\r\n list_1 = []\r\n list_2 = []\r\n list_3 = []\r\n for v in self.listt:\r\n d1 = abs(v[1] - m1)\r\n d2 = abs(v[1] - m2)\r\n d3 = abs(v[1] - m3)\r\n\r\n if d1<=d2 and d1<=d3:\r\n list_1.append(v)\r\n elif d2<=d2 and d2<=d3:\r\n list_2.append(v)\r\n else:\r\n list_3.append(v)\r\n \r\n return list_1,list_2,list_3\r\n\r\n\r\n\r\n\r\n 
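In clustering above, the middle branch tests `d2<=d2`, which is always true and reads like a typo for `d2<=d1`; the assignment still lands on the nearest centroid only because the first branch has already ruled out d1 being the minimum, leaving d2<=d3 to decide. A sketch of the same nearest-centroid assignment written without the vacuous comparison, using the (label, value) pair shape this class expects:

```python
def assign(points, centroids):
    """Group points by the index of the nearest 1-D centroid."""
    clusters = [[] for _ in centroids]
    for p in points:
        idx = min(range(len(centroids)), key=lambda i: abs(p[1] - centroids[i]))
        clusters[idx].append(p)
    return clusters

# Fabricated example input:
print(assign([("a", 1.0), ("b", 5.0), ("c", 9.2)], [1.0, 5.0, 9.0]))
# [[('a', 1.0)], [('b', 5.0)], [('c', 9.2)]]
```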
def avr(self,lst):\r\n if len(lst) == 0:\r\n return 0\r\n s = 0\r\n for x in lst:\r\n s += x[1]\r\n return(s/len(lst))\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"NurAhmadullah/text-summarizer","sub_path":"cluster_3.py","file_name":"cluster_3.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"31776952567","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.2.1\n# ---\n\n# %%\n# %matplotlib inline\nimport numpy, matplotlib.pyplot as plt\nimport seaborn\n\n# %%\nseaborn.set(font_scale=1.3)\nseaborn.set_style('whitegrid')\n\nfig, axes = plt.subplots(1, 3, figsize=(14,4))\nfor ax in axes:\n ax.axhline(0, color='black', linewidth=0.5)\n ax.axvline(0, color='black', linewidth=0.5)\n ax.set_ylim(-1,1)\n ax.set_xlim(-3,3)\n ax.set_yticks(numpy.linspace(-2,2,5))\n\nx = numpy.linspace(-3,3,1000)\n\nsigmoid = 1 / (1+numpy.e**(-x))\naxes[0].plot(x, sigmoid, linewidth=3)\naxes[0].set_title('logistic sigmoid', pad=15)\n\ntanh = numpy.tanh(x)\naxes[1].plot(x, tanh, linewidth=3)\naxes[1].set_title('hyperbolic tangent', pad=15)\n\nrelu = numpy.maximum(x, 0)\naxes[2].plot(x, relu, linewidth=3)\naxes[2].set_title('ReLU', pad=15)\n\nplt.savefig('activation_functions.pdf', bbox_inches='tight')\n","repo_name":"matangover/thesis-results-analysis","sub_path":"deep_learning.py","file_name":"deep_learning.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"40622105649","text":"''' \nfunctions to identify which store(s) or combination stores results in the lowest \nper unit subtotal cost \n\nWorks for any n combinations so long as n <= num_stores\n'''\n\nimport pandas as pd \nfrom itertools import combinations\n\n\ndef item_selection(dfs):\n \n # append all dfs\n df = dfs[0]\n dfs.remove(dfs[0])\n for df_n in dfs:\n df = df.append(df_n, ignore_index=True)\n\n # group by list_items \n df_grp = df.groupby('list_item')['comparable_PUP']\n\n # add col indicating min cost out of the n store dfs \n df = df.assign(min_cost=df_grp.transform(min))\n\n # filter rows where the row corresponds to the min cost \n df = df[df['comparable_PUP'] == df['min_cost']]\n\n # take subtotal \n per_unit_subtotal = sum(df['comparable_PUP'])\n subtotal = sum(df['comparable_price'])\n\n return df, per_unit_subtotal, subtotal \n\n\ndef n_store_selection(n, results_dict, subtotal_results = {}, PUP_subtotal_results = {}, output_results = {}):\n\n if n >= len(results_dict):\n print(f'{n} combinations not possible for our {len(results_dict)} store selection')\n return {}\n\n # go through n combination stores \n possibilities = list(combinations(results_dict.keys(), n))\n for combin in possibilities:\n \n # place all dfs in list \n dfs = []\n for store in combin:\n # df = pd.read_csv(f'search_output/{store}_results.csv')\n df = results_dict[store]\n dfs.append(df)\n\n # item selection \n optimal_selection, per_unit_subtotal, subtotal = item_selection(dfs)\n\n # add to results dict \n\n PUP_subtotal_results[combin] = per_unit_subtotal\n subtotal_results[combin] = subtotal\n output_results[combin] = optimal_selection\n\n return final_json(n, PUP_subtotal_results, subtotal_results, output_results)\n\n\n\ndef final_json(n, PUP_subtotal_results, subtotal_results, output_results):\n output = {}\n\n sorted_dict = 
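item_selection above leans on the pandas groupby/transform(min) idiom: broadcast each list_item's minimum per-unit price back onto every row, then keep only the rows that attain it. The same pattern on a fabricated frame:

```python
import pandas as pd

df = pd.DataFrame({
    "list_item": ["milk", "milk", "eggs"],    # fabricated rows
    "comparable_PUP": [1.10, 0.95, 2.40],
})
df = df.assign(min_cost=df.groupby("list_item")["comparable_PUP"].transform(min))
cheapest = df[df["comparable_PUP"] == df["min_cost"]]
print(cheapest)   # keeps the 0.95 milk row and the single eggs row
```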
dict(sorted(PUP_subtotal_results.items(), key=lambda item: item[1]))\n\n keys_in_order = list(sorted_dict.keys())\n\n for i in range(n):\n store_s = keys_in_order[i]\n\n # clean up results to be returned \n df_results = output_results[store_s]\n df_results = df_results.drop(columns=['store', 'similarity', 'comparable_PUP', 'sale_price', 'price',\n 'sale_per_unit_price', 'per_unit_price', 'min_cost', 'price_per_1']).rename(columns = {'comparable_price':'price'})\n df_results = df_results[['list_item', 'category', 'brand', 'product', 'price', 'price_unit', 'is_sale']]\n\n output[i+1] = {'store': store_s\n , 'subtotal': subtotal_results[store_s]\n , 'results': df_results.to_dict('list')\n }\n return output\n \n","repo_name":"caitlan-krasinski/grocery-store-backend","sub_path":"cost_minimization.py","file_name":"cost_minimization.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6874648723","text":"\nimport sys\n\n# 풀이 1\nwhile True :\n try :\n str = input() # input -> 하나씩 실행\n print(str)\n except EOFError: # 런타임에러\n break\n\n# 풀이 2\nstr = sys.stdin.readlines() # 모든 입력값이 한 줄에 들어감. 런타임 에러 X\n\nfor line in str :\n print(line.rstrip()) # 개행문자 없앰","repo_name":"saevyeokvyeol/python_algorithm_study","sub_path":"yuhyun/Baekjoon/String/0706_11718.py","file_name":"0706_11718.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3642382384","text":"import time\nimport glob\nimport shutil\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nfrom scraping import config\n\n\"\"\"\nScript that uses selenium to navigate to various web pages holding team tables and download them.\n\"\"\"\n\n\ndef get_nst_table(driver):\n base_url = 'http://naturalstattrick.com/teamtable.php?fromseason=20212022&thruseason=20212022&stype=2&sit=sva&score=all&'\\\n 'rate=y&team=all&loc=B&gpf=410&fd=&td='\n driver.get(base_url)\n dl_button = driver.find_elements_by_class_name('dt-buttons')[1].find_elements_by_xpath('./a')[-1]\n dl_button.click()\n\n\ndef get_eh_table(driver):\n driver.get('https://evolving-hockey.com/login/')\n driver.find_element_by_id('user_login').send_keys(config.eh_username)\n driver.find_element_by_id('user_pass').send_keys(config.eh_password)\n driver.find_element_by_id('user_pass').send_keys(Keys.ENTER)\n base_url = 'https://evolving-hockey.com/stats/team_standard/?_inputs_&std_tm_str=%225v5%22&std_tm_table=%22On-Ice%22&'\\\n 'std_tm_team=%22All%22&std_tm_range=%22Seasons%22&std_tm_adj=%22Score%20%26%20Venue%22&std_tm_span=%22Regular'\\\n '%22&dir_ttbl=%22Stats%22&std_tm_type=%22Rates%22&std_tm_group=%22Season%22'\n driver.get(base_url)\n time.sleep(2)\n dl_button = driver.find_element_by_id('std_tm_download_ui').find_element_by_xpath('./*')\n dl_button.click()\n\n\ndef get_mp_table(driver):\n driver.get('https://moneypuck.com/data.htm')\n dl_button = driver.find_element_by_xpath('//a[@href=\"moneypuck/playerData/seasonSummary/2021/regular/teams.csv\"]')\n dl_button.click()\n\n\ndef organize_tables():\n map_pairs = [('*EH*.csv', 'eh_team_table.csv'),\n ('*Natural*.csv', 'nst_team_table.csv'),\n ('teams.csv', 'mp_team_table.csv')]\n for exp, dest in map_pairs:\n source = glob.glob(f'./{exp}')[0]\n dest = f'{config.download_directory_team_tables}/{dest}'\n shutil.move(source, dest)\n print(f'{source} -> {dest}')\n\n\ndef main():\n chrome_options = 
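The Baekjoon 11718 snippet above (its Korean comments note that input() consumes one line at a time and raises a runtime error at EOF, while sys.stdin.readlines() grabs all input at once and needs rstrip to drop the trailing newline) contrasts two echo-until-EOF idioms. Iterating sys.stdin directly is a third variant that is lazy and needs no exception handling:

```python
import sys

# Echo every input line without buffering the whole stream.
for line in sys.stdin:
    print(line.rstrip("\n"))
```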
webdriver.ChromeOptions()\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--headless')\n driver = webdriver.Chrome(chrome_options=chrome_options)\n print('Getting NST table...')\n get_nst_table(driver)\n print('Getting EH table...')\n get_eh_table(driver)\n print('Getting MP table...')\n get_mp_table(driver)\n time.sleep(2)\n driver.quit()\n print('Organizing tables....')\n organize_tables()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sohraub/hockey_statistics","sub_path":"scraping/scrape_team_tables.py","file_name":"scrape_team_tables.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25966996477","text":"from typing import List\nclass Solution:\n def advantageCount(self, nums1: List[int], nums2: List[int]) -> List[int]:\n l1,l2 = len(nums1),len(nums2)\n l,r = 0,l2-1\n res = [0] * l1\n nums1.sort()\n nums2.sort()\n for i in range(0,l1):\n if nums1[i] > nums2[l]:\n res[l] = nums1[i]\n l += 1\n else:\n res[r] = nums1[i]\n r -= 1\n return res\n\n\nclass Solution:\n def advantageCount(self, nums1: List[int], nums2: List[int]) -> List[int]:\n n = len(nums1)\n idx1, idx2 = list(range(n)), list(range(n))\n idx1.sort(key=lambda x: nums1[x])\n idx2.sort(key=lambda x: nums2[x])\n\n ans = [0] * n\n left, right = 0, n - 1\n for i in range(n):\n if nums1[idx1[i]] > nums2[idx2[left]]:\n ans[idx2[left]] = nums1[idx1[i]]\n left += 1\n else:\n ans[idx2[right]] = nums1[idx1[i]]\n right -= 1\n\n return ans\n\nif __name__ == \"__main__\":\n nums1 = [12,24,8,32]\n nums2 = [13,25,32,11]\n S = Solution()\n print(S.advantageCount(nums1, nums2))","repo_name":"zhu0Li/code_learning","sub_path":"leetcode/everyday/1008.py","file_name":"1008.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41783045094","text":"import requests\nimport datetime\nimport json\nimport re\nimport sys\nimport codecs\nfrom serpwow.google_search_results import GoogleSearchResults\n\n\n\ndef find(string):\n # findall() has been used\n # with valid conditions for urls in string\n regex = r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\"\n url = re.findall(regex, string)\n return [x[0] for x in url]\n\n\n\n\n# create the serpwow object, passing in our API key -> https://app.serpwow.com/login\nserpwow = GoogleSearchResults(\"YOUR_API_KEY\")\n\n# set up a dict for the search parameters\nparams = {\n \"q\" : \"site:youtube.com intitle:free hack download\", # You can write whatever you want, like: bitcoin miner free download or credit card money adder free download\n \"num\" : \"100\",\n \"time_period\" : \"last_week\" # Or last_day or last_year, see the website for more params\n}\n\n# retrieve the search results as JSON\nresult = serpwow.get_json(params)\n\n\n\n\nfile_data = open(\"report.txt\",\"w\", encoding=\"utf-8\")\ndescription_urls_file = open(\"description_urls.txt\", \"w\", encoding=\"utf-8\")\n\nfor i in result[\"organic_results\"]:\n urls = find(i[\"snippet\"])\n data = \\\n f\"\"\"\n -------------------------------------\n Title: {i[\"title\"]}\n Url: {i[\"link\"]}\n Description [Only Found Urls]: {urls}\n Date: {i[\"rich_snippet\"][\"top\"][\"extensions\"][0]}\n Author: {i[\"rich_snippet\"][\"top\"][\"extensions\"][1]}\n \\n\n 
\"\"\"\n file_data.write(str(data))\n if len(urls) > 0:\n description_urls_file.write(f\"{urls}\\n\")\n try:\n for x in urls:\n params = {\n \"q\": f\"{x}\"\n }\n # retrieve the search results as JSON\n result = serpwow.get_json(params)\n if len(result[\"organic_results\"]) > 0:\n for z in result[\"organic_results\"]:\n urls = find(z[\"snippet\"])\n data = \\\n f\"\"\"\n -------------------------------------\n Query: {result[\"search_parameters\"][\"q\"]}\n Title: {z[\"title\"]}\n Url: {z[\"link\"]}\n Domain: {z[\"domain\"]}\n Description: {z[\"snippet\"]}\n More Urls: {urls}\n \\n\n \"\"\"\n file_data.write(str(data))\n description_urls_file.write(f\"{urls}\\n\")\n except:\n continue\n\ndescription_urls_file.close()\nfile_data.close()\n","repo_name":"Finch4/MalSeeker","sub_path":"MalSeeker.py","file_name":"MalSeeker.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35462325130","text":"from collections import defaultdict\n\n\ndef gcd(a, b):\n return a if b == 0 else gcd(b, a % b)\n\n\nN = int(input())\nA = list(map(int, input().split()))\n\n\n# dp[i][j]: i番目まででgcdがjになる場合の数\ndp = [defaultdict(int) for _ in range(N + 1)]\n\n\nfor i in range(N):\n dp[i][A[i]] += 1\n for j in range(i + 1, N):\n for k in dp[i].keys():\n dp[j][gcd(k, A[j])] += dp[i][k]\n\nans = 0\nfor i in range(N):\n ans += dp[i][1]\nprint(ans)\n","repo_name":"kiccho1101/atcoder","sub_path":"yukicoder/917.py","file_name":"917.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"368095613","text":"import os\r\n\r\n\r\ndef renameFile():\r\n fileList = os.listdir(r\"H:\\01.Company\\01.CCB\\验收测试\\s160_innputpin\")\r\n print(fileList)\r\n # get current work path\r\n currentpath = os.getcwd()\r\n print(\"Current is \" + currentpath)\r\n # change current work path\r\n os.chdir(r\"H:\\01.Company\\01.CCB\\验收测试\\s160_innputpin\")\r\n for fileName in fileList:\r\n print(\"Original is \" + fileName)\r\n # delete 0123456789 in file name\r\n os.rename(fileName, fileName.replace('_00', '_0'))\r\n print(\"Changed is \" + fileName.replace('_00', '_0'))\r\n os.chdir(currentpath)\r\n\r\n\r\nrenameFile()\r\n","repo_name":"DamonXiong/pyreggmail","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"33807117019","text":"# -*- coding: utf-8 -*-\n\n\nclass GildedRose(object):\n def __init__(self, items):\n self.items = items\n self.increasing_items = {\"Aged Brie\", \"Backstage passes to a TAFKAL80ETC concert\"}\n self.legendary_items = {\"Sulfuras, Hand of Ragnaros\"}\n\n def decrease_quality(self, item):\n quality = 1 if item.sell_in >= 0 else 2\n if item.name == \"Conjured Mana Cake\":\n quality *= 2\n \n item.quality -= quality\n if item.quality < 0:\n item.quality = 0\n\n def increase_quality(self, item):\n quality = 1 if item.sell_in >= 0 else 2\n if item.name == \"Backstage passes to a TAFKAL80ETC concert\":\n if item.sell_in <= 10:\n quality += 1\n if item.sell_in <= 5:\n quality += 1\n if item.sell_in < 0:\n quality = 0\n item.quality = 0\n\n item.quality += quality\n if item.quality > 50:\n item.quality = 50\n\n def change_quality(self, item):\n if item.name in self.increasing_items:\n self.increase_quality(item)\n else:\n self.decrease_quality(item)\n\n def update_quality(self):\n for item in 
self.items:\n if item.name not in self.legendary_items:\n item.sell_in -= 1\n self.change_quality(item)\n\n\nclass Item:\n def __init__(self, name, sell_in, quality):\n self.name = name\n self.sell_in = sell_in\n self.quality = quality\n\n def __repr__(self):\n return \"%s, %s, %s\" % (self.name, self.sell_in, self.quality)\n","repo_name":"ronek22/ZJP","sub_path":"GildedRose/gilded_rose.py","file_name":"gilded_rose.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26407728639","text":"from __future__ import annotations\nfrom plobject import PLObject, PLObjType\nfrom errors import PyLoxRuntimeError\nfrom tokens import Token\n\nclass Environment():\n def __init__(self, enclosing: Environment = None) -> None:\n self.__enclosing: Environment = enclosing\n self.__values: dict[str, PLObject] = {}\n\n def define(self, name: str, value: PLObject) -> None:\n self.__values[name] = value\n\n def get(self, name: Token) -> PLObject:\n if name.lexeme in self.__values:\n return self.__values[name.lexeme]\n elif self.__enclosing != None:\n return self.__enclosing.get(name)\n else:\n raise PyLoxRuntimeError(name.pos, f\"Undefined variable '{name.lexeme}'\")\n\n def assign(self, name: Token, value: PLObject) -> None:\n if name.lexeme in self.__values:\n self.__values[name.lexeme] = value\n elif self.__enclosing != None:\n self.__enclosing.assign(name, value)\n else:\n raise PyLoxRuntimeError(name.pos, f\"Undefined variable '{name.lexeme}'\")\n\nclass TypeEnvironment():\n def __init__(self, enclosing: TypeEnvironment = None) -> None:\n self.__enclosing: TypeEnvironment = enclosing\n self.__types: dict[str, PLObjType] = {}\n\n def define(self, name: str, objType: PLObjType) -> None:\n self.__types[name] = objType\n\n def get(self, name: Token) -> PLObjType:\n if name.lexeme in self.__types:\n return self.__types[name.lexeme]\n elif self.__enclosing != None:\n return self.__enclosing.get(name)\n else:\n return PLObjType.ERROR\n\n def assign(self, name: Token, objType: PLObjType) -> None:\n if name.lexeme in self.__types:\n self.__types[name.lexeme] = objType\n elif self.__enclosing != None:\n self.__enclosing.assign(name, objType)\n else:\n return PLObjType.ERROR","repo_name":"NoahWedlich/PyLox","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10545272085","text":"import sys, webbrowser\nfrom PyQt6.QtWidgets import *\nfrom PyQt6.QtGui import *\nfrom PyQt6 import uic, QtWidgets\nfrom utils.audio import Audio\nfrom utils.qatarWcApi import QatarWcApi\nimport resource_rc as resource_rc\n\nui = [\"ui/form.ui\", \"ui/index.ui\", \"ui/highlight.ui\", \"ui/schedule.ui\", \"ui/news.ui\",]\nnews_url = [\n 'https://www.fifa.com/fifaplus/en/articles/a-tribute-to-lionel-messi-legend-argentina-fifa-world-cup-qatar-2022-quotes-stats-stories-goals',\n 'https://www.fifa.com/fifaplus/en/articles/world-cup-2022-qatar-france-mbappe-argentina-return',\n 'https://www.fifa.com/fifaplus/en/watch/LClFVl7N4EGxNeVV4Rb1eg',\n 'https://www.fifa.com/fifaplus/en/articles/top-assisters-at-world-cup-qatar-2022',\n 'https://www.fifa.com/fifaplus/en/articles/world-cup-qatar-2022-final-argentina-messi-mbappe-france-videos-reaction',\n 'https://www.fifa.com/fifaplus/en/watch/bg6S7A_nRE2qSJGKZhg-Bw'\n]\nhighlight_url = [\n 
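The Environment chain above resolves reads through enclosing scopes and walks assignments outward until the name is found. A usage sketch; the real Token type lives in tokens.py (not shown here), so SimpleNamespace stands in for it, since get() and assign() only touch .lexeme and .pos:

```python
from types import SimpleNamespace

globals_env = Environment()
locals_env = Environment(enclosing=globals_env)

globals_env.define("x", 1)
name = SimpleNamespace(lexeme="x", pos=0)
print(locals_env.get(name))    # 1, resolved via the enclosing scope
locals_env.assign(name, 2)     # no local "x": walks out to globals_env
print(globals_env.get(name))   # 2
```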
'https://www.fifa.com/fifaplus/en/watch/63XwuAOoqYgNW0Q3E9PxJG',\n 'https://www.fifa.com/fifaplus/en/watch/5BAGunqVa9YUoPdZxI8MTm',\n 'https://www.fifa.com/fifaplus/en/watch/783zSqR6RRJrx6UiakMYc4',\n 'https://www.fifa.com/fifaplus/en/watch/ib6UqdiMuU-_jkJh_fIZrg',\n 'https://www.fifa.com/fifaplus/en/watch/Orm8DY9ZfUKa60iQfkTU4g',\n 'https://www.fifa.com/fifaplus/en/watch/fdSBZWNGRU24DaEyFo35tA',\n 'https://www.fifa.com/fifaplus/en/watch/wSlRFrP5fUCOmvXM65cHlA',\n 'https://www.fifa.com/fifaplus/en/watch/f0PDBzheQEme4I9mDxnHSg'\n ]\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n uic.loadUi(ui[0], self)\n self.setFixedSize(1270, 720)\n self.audio = Audio()\n self.wc = QatarWcApi()\n self.page = 0\n self.visited = []\n\n def preprocessData(self):\n raw_data = self.wc.getLatestMatch()\n p_data = []\n for elem in raw_data:\n p_data.append([\n elem['type'], elem['local_date'], \n elem['home_team_en'], elem['away_team_en'], \n elem['home_score'], elem['away_score'],elem['time_elapsed']\n ])\n return p_data\n\n def changeShedule(self):\n match_lables = []\n for elem in self.__dict__:\n if 'match' in elem:\n match_lables.append(elem)\n\n if len(match_lables) != 0:\n s_lables = sorted(match_lables, key=lambda x : int(x[6:]))\n matchInfo = self.preprocessData()\n self.changeScheduleText(s_lables, matchInfo)\n\n def changeScheduleText(self, lables:list[dict], matchInfo:list):\n per_match_lables = 4 # %51 1 -> 1 -> 0 -> 1,那么它表示二进制数 01101,也就是 13 。\n\n对树上的每一片叶子,我们都要找出从根到该叶子的路径所表示的数字。\n\n以 10^9 + 7 为模,返回这些数字之和。\n\n \n\n示例:\n 1\n 0 1\n 0 1 0 1\n\n\n输入:[1,0,1,0,1,0,1]\n输出:22\n解释:(100) + (101) + (110) + (111) = 4 + 5 + 6 + 7 = 22\n \nMOD = 109\nbranch: 126\nmod_branch: 126 % 109 = 17\nleaf: 126 * 2 + 1 = 253 % 109 = 35\nleaf: 17 * 2 + 1 = 35 % 109 = 35\n\n\n提示:\n\n树中的结点数介于 1 和 1000 之间。\nnode.val 为 0 或 1 。\n\"\"\"\n\nfrom tree_node import TreeNode,make_simple_tree\n\nclass Solution:\n def sumRootToLeaf(self, root: TreeNode) -> int:\n MOD = pow(10,9) + 7\n root.num = root.val\n ans = 0\n nodes = [root]\n while nodes:\n branch = nodes.pop()\n left = branch.left\n right = branch.right\n\n base_num = branch.num << 1 #\n\n if left is None and right is None:\n # branch is leaf\n ans += branch.num\n ans %= MOD\n else:\n if left:\n left.num = (base_num + left.val) % MOD\n nodes.append(left)\n if right:\n right.num = (base_num + right.val) % MOD\n nodes.append(right)\n return ans\n\n\n\n\n\ndef test():\n s = Solution()\n t1 = make_simple_tree(1,make_simple_tree(0,0,1),make_simple_tree(1,0,1))\n assert s.sumRootToLeaf(t1) == 22\n\n assert s.sumRootToLeaf(make_simple_tree(1,None,None)) == 1\n assert s.sumRootToLeaf(make_simple_tree(0,None,None)) == 0\n","repo_name":"codetalks-new/leetcode-qa","sub_path":"answers/python3/tree/test_p1022_sum_root_to_leaf.py","file_name":"test_p1022_sum_root_to_leaf.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1218708651","text":"import abc\nfrom typing import Optional\n\nfrom aiohttp.test_utils import TestClient\nimport pytest\n\nfrom dl_api_lib_testing.dashsql_base import DashSQLTestBase\nfrom dl_testing.regulated_test import RegulatedTestCase\n\n\nclass DefaultDashSQLTestSuite(DashSQLTestBase, RegulatedTestCase, metaclass=abc.ABCMeta):\n @pytest.fixture(scope=\"class\")\n def dashsql_headers(self) -> Optional[dict[str, str]]:\n return None\n\n @pytest.fixture(scope=\"class\")\n def dashsql_basic_query(self) -> str:\n 
return \"select 1, 2, 3\"\n\n @pytest.mark.asyncio\n async def test_basic_select(\n self,\n data_api_lowlevel_aiohttp_client: TestClient,\n saved_connection_id: str,\n dashsql_basic_query: str,\n dashsql_headers: Optional[dict[str, str]],\n ) -> None:\n resp = await self.get_dashsql_response(\n data_api_aio=data_api_lowlevel_aiohttp_client,\n conn_id=saved_connection_id,\n query=dashsql_basic_query,\n headers=dashsql_headers,\n )\n data = await resp.json()\n assert data[1][\"data\"] == [1, 2, 3]\n","repo_name":"datalens-tech/datalens-backend","sub_path":"lib/dl_api_lib_testing/dl_api_lib_testing/connector/dashsql_suite.py","file_name":"dashsql_suite.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"54"} +{"seq_id":"20113677466","text":"#!/usr/bin/env python3\n\n\"\"\"\nBest practice graph-mode development with TF2?\n\nhttps://www.tensorflow.org/guide/migrate\n\n`import tensorflow.compat.v1 as tf`? (useful to not change too much...)\n\nhttps://www.tensorflow.org/guide/function\nhttps://github.com/rwth-i6/returnn/issues/292\nhttps://stackoverflow.com/questions/61964379/tf-data-dataset-runs-on-cpu-except-of-prefetchdataset\nhttps://stackoverflow.com/questions/61964754/is-there-a-queue-like-dataset\nhttps://stackoverflow.com/questions/61964090/running-defun-in-graph-mode\nhttps://stackoverflow.com/questions/61973237/parallel-execution-of-tf-ops-in-eager-code\n\n\"\"\"\n\nimport better_exchook\nbetter_exchook.install()\n\nimport tensorflow as tf\n\nif hasattr(tf, \"compat\"):\n tf_compat_v1 = tf.compat.v1\n tf_compat_v2 = tf.compat.v2\nelse:\n tf_compat_v1 = None\n tf_compat_v2 = None\n # noinspection PyUnresolvedReferences\n assert tf.VERSION.startswith(\"1.\")\n\n\nprint(\"TF:\", tf.version.VERSION)\n\nif tf_compat_v1:\n tf_compat_v1.disable_eager_execution()\n tf_compat_v1.disable_v2_tensorshape()\n\n\ndef main():\n @tf.function\n def f(x):\n with tf.control_dependencies([tf.print([\"f\", x])]):\n return tf.identity(x)\n\n print(tf.autograph.to_code(f.python_function))\n\n x = tf.constant(13.)\n y = f(x)\n\n @tf.function\n def eager_func(x):\n while tf.reduce_sum(x) > 1:\n tf.print(x)\n tf.print(\"hello\")\n x = tf.tanh(x)\n return x\n\n print(tf.autograph.to_code(eager_func.python_function))\n\n y2 = eager_func(tf.random.uniform([5]))\n\n v = tf.Variable(17)\n assert isinstance(v, tf.Variable)\n\n # noinspection PyProtectedMember\n from tensorflow.python.data.ops.dataset_ops import _GeneratorDataset as GeneratorDataset\n from tensorflow.python.data.ops.dataset_ops import DatasetV1Adapter\n\n def raise_out_of_range_error():\n empty_dataset = tf.data.Dataset.from_tensor_slices(tf.fill([0], 0))\n return DatasetV1Adapter(empty_dataset).make_one_shot_iterator().get_next()\n\n @tf.function(autograph=False)\n def init_func(x):\n with tf.control_dependencies([tf.print([\"init_func\", x]), v.assign(0)]):\n return tf.identity(x)\n\n @tf.function(autograph=False)\n def next_func(x):\n res = tf.identity(v)\n with tf.control_dependencies([res]):\n with tf.control_dependencies([tf.print([\"next_func\", x, res])]):\n end_check = tf.cond(\n pred=tf.greater_equal(res, 13),\n true_fn=raise_out_of_range_error,\n false_fn=lambda: tf.constant(0))\n with tf.control_dependencies([end_check]):\n with tf.control_dependencies([v.assign_add(1)]):\n return tf.identity(res)\n\n @tf.function(autograph=False)\n def finalize_func(x):\n with tf.control_dependencies([tf.print([\"finalize_func\", x, v])]):\n return tf.identity(x)\n\n 
generator_dataset = GeneratorDataset(\n init_args=tf.constant(\"dummy_init_args\"),\n init_func=init_func,\n next_func=next_func,\n finalize_func=finalize_func)\n generator_dataset_v1 = DatasetV1Adapter(generator_dataset)\n ds_iter = tf_compat_v1.data.make_initializable_iterator(generator_dataset_v1)\n ds_iter_init = ds_iter.make_initializer(generator_dataset_v1)\n\n with tf_compat_v1.Session() as session:\n session.run(y)\n # session.run(y2)\n\n session.run(ds_iter_init)\n while True:\n try:\n print(session.run(ds_iter.get_next()))\n except tf.errors.OutOfRangeError:\n print(\"OutOfRangeError\")\n break\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"albertz/playground","sub_path":"tf2-graph.py","file_name":"tf2-graph.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"54"} +{"seq_id":"74230531682","text":"from unittest.mock import Mock\n\nfrom arcade import Window, View\n\n\ndef test_on_show_view_called(window):\n view = View(window)\n show_mock = Mock()\n view.on_show_view = show_mock\n\n window.show_view(view)\n\n show_mock.assert_called_once()\n\n\ndef test_on_hide_view_called(window):\n view1 = View(window)\n view2 = View(window)\n window.show_view(view1)\n\n hide_mock = Mock()\n view1.on_hide_view = hide_mock\n\n window.show_view(view2)\n\n hide_mock.assert_called_once()\n","repo_name":"pythonarcade/arcade","sub_path":"tests/unit/window/test_view.py","file_name":"test_view.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":1537,"dataset":"github-code","pt":"54"} +{"seq_id":"1909428021","text":"# INI: This script contains routines for performing a priori flux calibration\n# adapted from previous years' scripts by Maciek, CK, Lindy, and others.\n\nimport numpy as np\nimport datetime\nfrom argparse import Namespace\nimport glob\nimport os\nimport scipy.interpolate\nimport itertools as it\nfrom astropy.time import Time\nfrom astropy.io import fits\n#from hops2uvfits import *\n#import mk4 # part of recent HOPS install, need HOPS ENV variables\n#import ctypes\n#import astropy.io.fits as fits\n#import astropy.time as at\n#import sys\n#import numpy.matlib\n#import pandas as pd\n\n#conversion factors and data types\n'''stationdict = {'ALMA':'AA', 'A':'AA','AA':'AA',\n 'APEX':'AX', 'X':'AX','AP': 'AX',\n 'LMT':'LM','L':'LM','LM':'LM',\n 'PICOVEL':'PV','P':'PV','IRAM30': 'PV','PV':'PV',\n 'SMTO':'MG','Z': 'MG','SMT':'MG','AZ':'MG',\n 'SPT':'SZ','Y':'SZ','SP':'SZ',\n 'JCMT':'MM','J':'MM','JC':'MM',\n 'SMAP':'SW','S':'SW','SMAR':'SW','SMA':'SW','SM':'SW',\n 'GLT':'GL','G':'GL','THULE':'GL','GL':'GL',\n 'NOEMA':'NN','N':'NN','NN':'NN',\n 'KITTPEAK':'KT','K':'KT','KT':'KT', \n 'SMAR':'SR','R':'SR','SMR':'SR','SR':'SR',\n 'GBT': 'GB','FD': 'FD','PT':'PT','LA':'LA', \n 'KP':'KP', 'MK':'MK', 'BR':'BR', 'NL':'NL',\n 'OV':'OV','YS':'YS','EB':'EB'\n }\n\nstation_frot_old = {'PV':(1,-1,0),'AZ':(1,1,0),'SM':(1,-1,np.pi/4.),'LM': (1,-1,0),\n 'AA':(1,0,0),'SP':(1,0,0),'AP':(1,1,0),'JC':(1,0,0),'SR':(1,-1,np.pi/4.),\n 'GB':(1,0,0),'FD':(1,0,0),'PT':(1,0,0),'LA':(1,0,0),'KP':(1,0,0),\n 'MK':(1,0,0),'BR':(1,0,0),'NL':(1,0,0),'OV':(1,0,0),'YS':(1,0,0),'EB':(1,0,0)}'''\n\nstation_frot = {'PV':(1,-1,0),'MG':(1,1,0),'SW':(1,-1,np.pi/4.),'LM': (1,-1,0),\n 'AA':(1,0,0),'SZ':(1,0,0),'AX':(1,1,0),'MM':(1,0,0),'GL': (1,0,0),\n 'NN':(1,0,0),'KT':(1,0,0),'SR':(1,-1,np.pi/4.),\n 'GB':(1,0,0),'FD':(1,0,0),'PT':(1,0,0),'LA':(1,0,0),'KP':(1,0,0),\n 
'MK':(1,0,0),'BR':(1,0,0),'NL':(1,0,0),'OV':(1,0,0),'YS':(1,0,0),'EB':(1,0,0)}\n\nBLTYPE = [('time','f8'),('t1','a32'),('t2','a32')]\nDTARR = [('site', 'a32'), ('x','f8'), ('y','f8'), ('z','f8')]\nDTCAL = [('time','f8'), ('rscale','c16'), ('lscale','c16')]\nDTPOL = [('time','f8'),('freq','f8'),('tint','f8'),\n ('t1','a32'),('t2','a32'),\n ('u','f8'),('v','f8'),\n ('rr','c16'),('ll','c16'),('rl','c16'),('lr','c16'),\n ('rrweight','f8'),('llweight','f8'),('rlweight','f8'),('lrweight','f8')]\nEP = 1.e-5\nCORRCOEFF = 10000.0\nDEGREE = np.pi/180.0\nHOUR = 15.0*DEGREE\nC = 299792458.0\nMHZ2HZ = 1e6\nMJD_0 = 2400000.5\nRADPERARCSEC = (np.pi / 180.) / 3600.\n\n# INI: copied from hops2uvfits.py\n# TODO these constants should go into ___init__.py or util.py\n#reference date\nRDATE = '2017-04-04'\nrdate_tt = Time(RDATE, format='isot', scale='utc')\nRDATE_JD = rdate_tt.jd\nRDATE_GSTIA0 = rdate_tt.sidereal_time('apparent','greenwich').degree\nRDATE_DEGPERDY = 360.98564497330 # TODO from AIPS, get the actual value?\nRDATE_OFFSET = rdate_tt.ut1.datetime.second - rdate_tt.utc.datetime.second\nRDATE_OFFSET += 1.e-6*(rdate_tt.ut1.datetime.microsecond - rdate_tt.utc.datetime.microsecond)\n\n# decimal precision for the scan start & stop times (fractional day)\nROUND_SCAN_INT = 20\n\n##################################################################################################\n# Caltable object\n##################################################################################################\n# ANDREW TODO copied from caltable.py in ehtim\n# load directly instead?\nclass Caltable(object):\n \"\"\"\n Attributes:\n \"\"\"\n\n def __init__(self, ra, dec, rf, bw, datatables, tarr, source='NONE', mjd=0, timetype='UTC'):\n \"\"\"A polarimetric VLBI observation of visibility amplitudes and phases (in Jy).\n\n Args:\n\n Returns:\n caltable (Caltable): an Caltable object\n \"\"\"\n\n if len(datatables) == 0:\n raise Exception(\"No data in input table!\")\n\n # Set the various parameters\n self.source = str(source)\n self.ra = float(ra)\n self.dec = float(dec)\n self.rf = float(rf)\n self.bw = float(bw)\n self.mjd = int(mjd)\n\n if timetype not in ['GMST', 'UTC']:\n raise Exception(\"timetype must by 'GMST' or 'UTC'\")\n self.timetype = timetype\n self.tarr = tarr\n\n # Dictionary of array indices for site names\n self.tkey = {self.tarr[i]['site']: i for i in range(len(self.tarr))}\n\n # Save the data\n self.data = datatables\n\n def copy(self):\n \"\"\"Copy the caltable object.\n\n Args:\n\n Returns:\n (Caltable): a copy of the Caltable object.\n \"\"\"\n new_caltable = Caltable(self.ra, self.dec, self.rf, self.bw, self.data, self.tarr, source=self.source, mjd=self.mjd, timetype=self.timetype)\n return new_caltable\n\n# INI: classes copied from hops2uvfits.py to make this module self-sufficient;\n# TODO These class definitions should probably be moved to a common \"util.py\" / \"__init__.py\" in this dir\nclass Uvfits_data(object):\n \"\"\"data table and random group parameter arrays to save to uvfits\"\"\"\n def __init__(self, u, v, bls, jds, tints, datatable):\n self.u = u\n self.v = v\n self.bls = bls\n self.jds = jds\n self.tints = tints\n self.datatable = datatable\n\nclass Antenna_info(object):\n \"\"\"antenna metadata \"\"\"\n def __init__(self, antnames, antnums, xyz):\n self.antnames = antnames\n self.antnums = antnums\n self.xyz = xyz\n\nclass Datastruct(object):\n \"\"\"Data and metadata to save to uvfits, in uvfits format\n dtype tells you if the data table is in uvfits or ehtim format\n in ehtim 
format antenna_info and data are tables,\n in uvfits format they are Antenna_info and Uvfits_data objects\n \"\"\"\n\n def __init__(self, obs_info, antenna_info, data, dtype='UVFITS'):\n self.dtype = dtype\n self.obs_info = obs_info\n self.antenna_info = antenna_info\n self.data = data\n\n# INI: function copied from hops2uvfits.py\n# TODO these should really be in the io submodule!\ndef save_uvfits(datastruct, fname):\n \"\"\"save information already in uvfits format to uvfits file\n Args:\n datastruct (Datastruct) : a datastruct object with type 'UVFITS' for saving\n fname (str) : filename to save to\n \"\"\"\n\n # unpack data\n if datastruct.dtype != 'UVFITS':\n raise Exception(\"datastruct.dtype != 'UVFITS' in save_uvfits()!\")\n\n src = datastruct.obs_info.src\n ra = datastruct.obs_info.ra\n dec = datastruct.obs_info.dec\n ref_freq = datastruct.obs_info.ref_freq\n ch_bw = datastruct.obs_info.ch_bw\n ch_spacing = datastruct.obs_info.ch_spacing\n ch1_freq = datastruct.obs_info.ch_1\n nchan = datastruct.obs_info.nchan\n scan_arr = datastruct.obs_info.scans\n bw = nchan*ch_bw\n\n antnames = datastruct.antenna_info.antnames\n antnums = datastruct.antenna_info.antnums\n xyz = datastruct.antenna_info.xyz\n nsta = len(antnames)\n\n u = datastruct.data.u\n v = datastruct.data.v\n bls = datastruct.data.bls\n jds = datastruct.data.jds\n tints = datastruct.data.tints\n outdat = datastruct.data.datatable\n\n if (len(u) != len(v) != len(bls) != len(jds) != len(tints) != len(outdat)):\n raise Exception(\"rg parameter shapes and data shape not consistent!\")\n\n ndat = len(u)\n mjd = int(np.min(jds) - MJD_0)\n jd_start = (MJD_0 + mjd)\n fractimes = (jds - jd_start)\n jds_only = np.ones(ndat) * jd_start\n\n #print \"timedur uvfits \" , (np.max(jds) - np.min(jds)) * 3600 * 24, (np.max(fractimes) - np.min(fractimes)) * 3600 * 24\n nsubchan = 1\n nstokes = 4\n\n # Create new HDU\n hdulist = fits.HDUList()\n hdulist.append(fits.GroupsHDU())\n\n ##################### DATA TABLE ##################################################################################################\n # Data header\n header = hdulist['PRIMARY'].header\n\n #mandatory\n header['OBJECT'] = src\n header['TELESCOP'] = 'VLBA' # TODO Can we change this field?\n header['INSTRUME'] = 'VLBA'\n header['OBSERVER'] = 'EHT'\n header['BSCALE'] = 1.0\n header['BZERO'] = 0.0\n header['BUNIT'] = 'JY'\n header['EQUINOX'] = 'J2000'\n header['ALTRPIX'] = 1.e0\n header['ALTRVAL'] = 0.e0\n\n #optional\n header['OBSRA'] = ra * 180./12.\n header['OBSDEC'] = dec\n header['MJD'] = float(mjd)\n # new astropy broke this subfmt for jd for some reason\n # header['DATE-OBS'] = Time(mjd + MJD_0, format='jd', scale='utc', out_subfmt='date').iso\n header['DATE-OBS'] = Time(mjd + MJD_0, format='jd', scale='utc').iso[:10]\n #header['DATE-MAP'] = ??\n #header['VELREF'] = 3\n\n # DATA AXES #\n header['NAXIS'] = 7\n header['NAXIS1'] = 0\n\n # real, imag, weight\n header['CTYPE2'] = 'COMPLEX'\n header['NAXIS2'] = 3\n header['CRVAL2'] = 1.e0\n header['CDELT2'] = 1.e0\n header['CRPIX2'] = 1.e0\n header['CROTA2'] = 0.e0\n # RR, LL, RL, LR\n header['CTYPE3'] = 'STOKES'\n header['NAXIS3'] = nstokes\n header['CRVAL3'] = -1.e0 #corresponds to RR LL RL LR\n header['CDELT3'] = -1.e0\n header['CRPIX3'] = 1.e0\n header['CROTA3'] = 0.e0\n # frequencies\n header['CTYPE4'] = 'FREQ'\n header['NAXIS4'] = nsubchan\n header['CRPIX4'] = 1.e0\n # header['CRVAL4'] = ch1_freq # is this the right ref freq? in Hz\n header['CRVAL4'] = ref_freq # is this the right ref freq? 
in Hz\n header['CDELT4'] = ch_bw\n header['CROTA4'] = 0.e0\n # frequencies\n header['CTYPE5'] = 'IF'\n header['NAXIS5'] = nchan\n header['CRPIX5'] = 1.e0\n header['CRVAL5'] = 1.e0\n header['CDELT5'] = 1.e0\n header['CROTA5'] = 0.e0\n # RA\n header['CTYPE6'] = 'RA'\n header['NAXIS6'] = 1.e0\n header['CRPIX6'] = 1.e0\n header['CRVAL6'] = header['OBSRA']\n header['CDELT6'] = 1.e0\n header['CROTA6'] = 0.e0\n # DEC\n header['CTYPE7'] = 'DEC'\n header['NAXIS7'] = 1.e0\n header['CRPIX7'] = 1.e0\n header['CRVAL7'] = header['OBSDEC']\n header['CDELT7'] = 1.e0\n header['CROTA7'] = 0.e0\n\n ##RANDOM PARAMS##\n header['PTYPE1'] = 'UU---SIN'\n header['PSCAL1'] = 1/ref_freq\n header['PZERO1'] = 0.e0\n header['PTYPE2'] = 'VV---SIN'\n header['PSCAL2'] = 1.e0/ref_freq\n header['PZERO2'] = 0.e0\n header['PTYPE3'] = 'WW---SIN'\n header['PSCAL3'] = 1.e0/ref_freq\n header['PZERO3'] = 0.e0\n header['PTYPE4'] = 'BASELINE'\n header['PSCAL4'] = 1.e0\n header['PZERO4'] = 0.e0\n header['PTYPE5'] = 'DATE'\n header['PSCAL5'] = 1.e0\n header['PZERO5'] = 0.e0\n header['PTYPE6'] = 'DATE'\n header['PSCAL6'] = 1.e0\n header['PZERO6'] = 0.0\n header['PTYPE7'] = 'INTTIM'\n header['PSCAL7'] = 1.e0\n header['PZERO7'] = 0.e0\n header['history'] = \"AIPS SORT ORDER='TB'\"\n\n # Save data\n pars = ['UU---SIN', 'VV---SIN', 'WW---SIN', 'BASELINE', 'DATE', 'DATE', 'INTTIM']\n x = fits.GroupData(outdat, parnames=pars, pardata=[u, v, np.zeros(ndat), np.array(bls).reshape(-1), jds_only, fractimes, tints], bitpix=-32)\n\n hdulist['PRIMARY'].data = x\n hdulist['PRIMARY'].header = header\n\n ####################### AIPS AN TABLE ###############################################################################################\n #Antenna Table entries\n col1 = fits.Column(name='ANNAME', format='8A', array=antnames)\n col2 = fits.Column(name='STABXYZ', format='3D', unit='METERS', array=xyz)\n col3= fits.Column(name='ORBPARM', format='0D', array=np.zeros(0))\n col4 = fits.Column(name='NOSTA', format='1J', array=antnums)\n\n #TODO get the actual information for these parameters for each station\n col5 = fits.Column(name='MNTSTA', format='1J', array=np.zeros(nsta)) #zero = alt-az\n col6 = fits.Column(name='STAXOF', format='1E', unit='METERS', array=np.zeros(nsta)) #zero = no axis offset\n col7 = fits.Column(name='POLTYA', format='1A', array=np.array(['R' for i in range(nsta)], dtype='|S1')) #RCP\n col8 = fits.Column(name='POLAA', format='1E', unit='DEGREES', array=np.zeros(nsta)) #feed orientation A\n col9 = fits.Column(name='POLCALA', format='2E', array=np.zeros((nsta,2))) #zero = no pol cal info TODO should have extra dim for nif\n col10 = fits.Column(name='POLTYB', format='1A', array=np.array(['L' for i in range(nsta)], dtype='|S1')) #LCP\n col11 = fits.Column(name='POLAB', format='1E', unit='DEGREES', array=90*np.ones(nsta)) #feed orientation A\n col12 = fits.Column(name='POLCALB', format='2E', array=np.zeros((nsta,2))) #zero = no pol cal info\n\n # create table\n tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11,col12]), name='AIPS AN')\n hdulist.append(tbhdu)\n\n # header information\n head = hdulist['AIPS AN'].header\n head['EXTVER'] = 1\n head['ARRAYX'] = 0.e0\n head['ARRAYY'] = 0.e0\n head['ARRAYZ'] = 0.e0\n\n # TODO change the reference date\n #rdate_out = RDATE\n #rdate_gstiao_out = RDATE_GSTIA0\n #rdate_offset_out = RDATE_OFFSET\n\n # new astropy broke this subfmt, it should be a day boundary hopefully\n # rdate_tt_new = Time(mjd + MJD_0, format='jd', scale='utc', 
out_subfmt='date')\n rdate_tt_new = Time(mjd + MJD_0, format='jd', scale='utc')\n rdate_out = rdate_tt_new.iso[:10]\n rdate_jd_out = rdate_tt_new.jd\n rdate_gstiao_out = rdate_tt_new.sidereal_time('apparent','greenwich').degree\n rdate_offset_out = (rdate_tt_new.ut1.datetime.second - rdate_tt_new.utc.datetime.second)\n rdate_offset_out += 1.e-6*(rdate_tt_new.ut1.datetime.microsecond - rdate_tt_new.utc.datetime.microsecond)\n\n head['RDATE'] = rdate_out\n head['GSTIA0'] = rdate_gstiao_out\n head['DEGPDY'] = RDATE_DEGPERDY\n head['UT1UTC'] = rdate_offset_out #difference between UT1 and UTC ?\n head['DATUTC'] = 0.e0\n head['TIMESYS'] = 'UTC'\n\n head['FREQ']= ref_freq\n head['POLARX'] = 0.e0\n head['POLARY'] = 0.e0\n\n head['ARRNAM'] = 'VLBA' # TODO must be recognized by aips/casa\n head['XYZHAND'] = 'RIGHT'\n head['FRAME'] = '????'\n head['NUMORB'] = 0\n head['NO_IF'] = nchan\n head['NOPCAL'] = 0 #TODO add pol cal information\n head['POLTYPE'] = 'VLBI'\n head['FREQID'] = 1\n\n hdulist['AIPS AN'].header = head\n\n ##################### AIPS FQ TABLE #####################################################################################################\n # Convert types & columns\n freqid = np.array([1])\n bandfreq = np.array([ch1_freq + ch_spacing*i - ref_freq for i in range(nchan)]).reshape([1,nchan])\n chwidth = np.array([ch_bw for i in range(nchan)]).reshape([1,nchan])\n totbw = np.array([ch_bw for i in range(nchan)]).reshape([1,nchan])\n sideband = np.array([1 for i in range(nchan)]).reshape([1,nchan])\n\n freqid = fits.Column(name=\"FRQSEL\", format=\"1J\", array=freqid)\n bandfreq = fits.Column(name=\"IF FREQ\", format=\"%dD\"%(nchan), array=bandfreq, unit='HZ')\n chwidth = fits.Column(name=\"CH WIDTH\",format=\"%dE\"%(nchan), array=chwidth, unit='HZ')\n totbw = fits.Column(name=\"TOTAL BANDWIDTH\",format=\"%dE\"%(nchan),array=totbw, unit='HZ')\n sideband = fits.Column(name=\"SIDEBAND\",format=\"%dJ\"%(nchan),array=sideband)\n cols = fits.ColDefs([freqid, bandfreq, chwidth, totbw, sideband])\n\n # create table\n tbhdu = fits.BinTableHDU.from_columns(cols)\n\n # header information\n tbhdu.header.append((\"NO_IF\", nchan, \"Number IFs\"))\n tbhdu.header.append((\"EXTNAME\",\"AIPS FQ\"))\n tbhdu.header.append((\"EXTVER\",1))\n hdulist.append(tbhdu)\n\n ##################### AIPS NX TABLE #####################################################################################################\n\n scan_times = []\n scan_time_ints = []\n start_vis = []\n stop_vis = []\n\n #TODO make sure jds AND scan_info MUST be time sorted!!\n jj = 0\n #print scan_info\n\n comp_fac = 3600*24*100 # compare to 100th of a second\n\n for scan in scan_arr:\n scan_start = round(scan[0], ROUND_SCAN_INT)\n scan_stop = round(scan[1], ROUND_SCAN_INT)\n scan_dur = (scan_stop - scan_start)\n\n if jj>=len(jds):\n #print start_vis, stop_vis\n break\n\n # print \"%.12f %.12f %.12f\" %( jds[jj], scan_start, scan_stop)\n jd = round(jds[jj], ROUND_SCAN_INT)*comp_fac # ANDREW TODO precision??\n\n if (np.floor(jd) >= np.floor(scan_start*comp_fac)) and (np.ceil(jd) <= np.ceil(comp_fac*scan_stop)):\n start_vis.append(jj)\n # TODO AIPS MEMO 117 says scan_times should be midpoint!, but AIPS data looks likes it's at the start?\n #scan_times.append(scan_start - rdate_jd_out)\n scan_times.append(scan_start + 0.5*scan_dur - rdate_jd_out)\n scan_time_ints.append(scan_dur)\n while (jj < len(jds) and np.floor(round(jds[jj],ROUND_SCAN_INT)*comp_fac) <= np.ceil(comp_fac*scan_stop)):\n jj += 1\n stop_vis.append(jj-1)\n else:\n continue\n\n if jj 
< len(jds):\n if len(scan_arr) == 0:\n print(\"len(scan_arr) == 0\")\n else:\n print(scan_arr[-1])\n print(round(scan_arr[-1][0],ROUND_SCAN_INT),round(scan_arr[-1][1],ROUND_SCAN_INT))\n print(jj, len(jds), round(jds[jj], ROUND_SCAN_INT))\n print(\"WARNING!!!: in save_uvfits NX table, didn't get to all entries when computing scan start/stop!\")\n #raise Exception(\"in save_uvfits NX table, didn't get to all entries when computing scan start/stop!\")\n\n time_nx = fits.Column(name=\"TIME\", format=\"1D\", unit='DAYS', array=np.array(scan_times))\n timeint_nx = fits.Column(name=\"TIME INTERVAL\", format=\"1E\", unit='DAYS', array=np.array(scan_time_ints))\n sourceid_nx = fits.Column(name=\"SOURCE ID\",format=\"1J\", unit='', array=np.ones(len(scan_times)))\n subarr_nx = fits.Column(name=\"SUBARRAY\",format=\"1J\", unit='', array=np.ones(len(scan_times)))\n freqid_nx = fits.Column(name=\"FREQ ID\",format=\"1J\", unit='', array=np.ones(len(scan_times)))\n startvis_nx = fits.Column(name=\"START VIS\",format=\"1J\", unit='', array=np.array(start_vis)+1)\n endvis_nx = fits.Column(name=\"END VIS\",format=\"1J\", unit='', array=np.array(stop_vis)+1)\n cols = fits.ColDefs([time_nx, timeint_nx, sourceid_nx, subarr_nx, freqid_nx, startvis_nx, endvis_nx])\n\n tbhdu = fits.BinTableHDU.from_columns(cols)\n\n # header information\n tbhdu.header.append((\"EXTNAME\",\"AIPS NX\"))\n tbhdu.header.append((\"EXTVER\",1))\n\n hdulist.append(tbhdu)\n\n # Write final HDUList to file\n #hdulist.writeto(fname, clobber=True)#this is deprecated and changed to overwrite\n hdulist.writeto(fname, overwrite=True)\n\n return 0\n\ndef load_caltable_ds(datastruct, tabledir, sqrt_gains=False, skip_fluxcal=False):\n \"\"\"Load apriori cal tables\n \"\"\"\n\n if datastruct.dtype != \"EHTIM\":\n raise Exception(\"datastruct must be in EHTIM format in load_caltable!\")\n tarr = datastruct.antenna_info\n source = datastruct.obs_info.src\n mjd = int(np.min(datastruct.data['time'] - MJD_0))\n ra = datastruct.obs_info.ra\n dec = datastruct.obs_info.dec\n rf = datastruct.obs_info.ref_freq\n bw = datastruct.obs_info.ch_bw\n\n datatables = {}\n for s in range(0, len(tarr)):\n\n site = tarr[s]['site'].decode() # INI: bytes to str to avoid type errors downstream\n\n if skip_fluxcal: #mocking SEFDs equal to 1.0 spread across the day\n\n datatable = []\n for time in np.linspace(0.,24.,100):\n datatable.append(np.array((time, 1.0, 1.0), dtype=DTCAL))\n\n else: # getting SEFDS from files\n # AIPS can only handle 8-character source name so \"source\" may\n # be truncated. Therefore, we match a pattern (note the \"*\")\n # and proceed only if there is one match\n pattern = os.path.join(tabledir, f'{source}*_{site}.txt')\n filenames = glob.glob(pattern)\n if len(filenames) == 1:\n try:\n data = np.loadtxt(filenames[0], dtype=bytes).astype(str)\n except IOError:\n print(f'Skipping corrupted file: {filenames[0]}')\n continue\n\n filename_source = filenames[0].replace(tabledir+'/', '').replace(f'_{site}.txt', '')\n if source != filename_source:\n print('WARNING: name of source in filename is different from the one in the EHTIM datastruct')\n if filename_source.startswith(source):\n print(f'which is probably due to AIPS source name truncation; using the full name {filename_source} from the filename...')\n source = filename_source\n elif len(filenames) == 0:\n print(f'No file matching {pattern} exists! Skipping...')\n continue\n else:\n print(f'More than one file matching pattern {pattern}. 
Skipping...')\n continue\n\n datatable = []\n\n # ANDREW HACKY WAY TO MAKE IT WORK WITH ONLY ONE ENTRY\n onerowonly=False\n if data.ndim == 1:\n data = np.expand_dims(data, axis=0)\n onerowonly = True\n\n for row in data:\n time = (float(row[0]) - mjd) * 24.0 # time is given in mjd\n\n # # Maciek's old convention had a square root\n # rscale = np.sqrt(float(row[1])) # r\n # lscale = np.sqrt(float(row[2])) # l\n\n if len(row) == 3:\n rscale = float(row[1])\n lscale = float(row[2])\n elif len(row) == 5:\n rscale = float(row[1]) + 1j*float(row[2])\n lscale = float(row[3]) + 1j*float(row[4])\n else:\n raise Exception(\"cannot load caltable -- format unknown!\")\n if sqrt_gains:\n rscale = rscale**.5\n lscale = lscale**.5\n #ANDREW THERE ARE ZERO VALS IN THE CALTABLE\n if rscale==0. and lscale==0.:\n continue\n else:\n datatable.append(np.array((time, rscale, lscale), dtype=DTCAL))\n #ANDREW HACKY WAY TO MAKE IT WORK WITH ONLY ONE ENTRY\n if onerowonly:\n datatable.append(np.array((1.1*time, rscale, lscale), dtype=DTCAL))\n\n datatables[site] = np.array(datatable)\n\n if (len(datatables)<=0)&(skip_fluxcal==False):#only if no SEFD files available and we don't want just field rotation\n caltable=False\n else: #other cases, either we want flux and we do have SEFDs, or we want to skip fluxcal\n caltable = Caltable(ra, dec, rf, bw, datatables, tarr, source=source, mjd=mjd, timetype='UTC')\n return caltable\n\ndef xyz_2_latlong(obsvecs):\n \"\"\"Compute the (geocentric) latitude and longitude of a site at geocentric position x,y,z\n The output is in radians\n \"\"\"\n if len(obsvecs.shape)==1:\n obsvecs=np.array([obsvecs])\n out = []\n for obsvec in obsvecs:\n x = obsvec[0]\n y = obsvec[1]\n z = obsvec[2]\n lon = np.array(np.arctan2(y,x))\n lat = np.array(np.arctan2(z, np.sqrt(x**2+y**2)))\n out.append([lat,lon])\n out = np.array(out)\n #if out.shape[0]==1: out = out[0]\n return out\n\ndef apply_caltable_uvfits(caltable, datastruct, filename_out, interp='linear', extrapolate=True, frotcal=True, elev_function='astropy', interp_dt=1., \\\n elev_interp_kind='cubic', err_scale=1., skip_fluxcal=False, keep_absolute_phase=True):\n \"\"\"apply a calibration table to a uvfits file\n Args:\n caltable (Caltable) : a caltable object\n datastruct (Datastruct) : input data structure in EHTIM format\n filename_out (str) : uvfits output file name\n interp (str) : kind of interpolation to perform for gain tables\n extrapolate (bool) : toggle extrapolation for gain tables and elevation computation\n frotcal (bool): whether apply field rotation angle correction\n elev_function (str): 'astropy' or 'ehtim' for calculating elevation\n interp_dt (float) : time resolution for interpolation\n elev_interp_kind (str) : kind of interpolation to perform for elevation computation\n err_scale (float) : scaling factor for error\n skip_fluxcal (bool): toggle whether SEFDs should be applied (i.e. 
flux calibration)\n keep_absolute_phase (bool): toggle whether absolute phase of LL* visibilities should be kept\n \"\"\"\n\n if datastruct.dtype != \"EHTIM\":\n raise Exception(\"datastruct must be in EHTIM format in apply_caltable_uvfits!\")\n\n if not (caltable.tarr == datastruct.antenna_info).all():\n raise Exception(\"The telescope array in the Caltable is not the same as in the Datastruct\")\n\n # interpolate the calibration table\n rinterp = {}\n linterp = {}\n skipsites = []\n\n #PREPARE INTERPOLATION DATA\n xyz={}\n latitude={}\n longitude={}\n ra = caltable.ra*np.pi*2./24.#rad\n dec = caltable.dec*np.pi*2./360.#rad\n sourcevec = np.array([np.cos(dec), 0, np.sin(dec)])\n PAR={}\n ELE={}\n OFF={}\n elevfit={}\n gmst_function= lambda time_mjd: Time(time_mjd, format='mjd').sidereal_time('mean','greenwich').hour*2.*np.pi/24.\n\n\n #FIND MAX RANGE OF MJD TIMES FOR INTERPOLATION\n if (frotcal==True)&(interp_dt>0):\n dt_mjd = interp_dt*1./24./60./60. #interp_dt in sec\n mjd_max=-1\n mjd_min=1e10\n for s in range(0, len(caltable.tarr)):\n site = caltable.tarr[s]['site'].decode() # INI: bytes to str\n try:\n #sometimes station reported but no calibration\n time_mjd = caltable.data[site]['time']/24.0 + caltable.mjd\n mjd_max_foo = np.max(time_mjd)\n mjd_min_foo = np.min(time_mjd)\n if (mjd_max_foo > mjd_max):\n mjd_max = mjd_max_foo\n if (mjd_min_foo < mjd_min):\n mjd_min = mjd_min_foo\n except KeyError: continue\n #MAKE TIME GRIDS FOR INTERPOLATION\n time_mjd_fake = np.arange(mjd_min,mjd_max,dt_mjd)\n gmst_fake = gmst_function(time_mjd_fake)\n datetimes_fake = Time(time_mjd_fake, format='mjd').to_datetime()\n strtime_fake = [str(round_time(x)) for x in datetimes_fake]\n thetas_fake = np.mod((gmst_fake - ra), 2.*np.pi)\n\n for s in range(0, len(caltable.tarr)):\n site = caltable.tarr[s]['site'].decode() # INI: bytes to str\n xyz_foo = np.asarray((caltable.tarr[s]['x'],caltable.tarr[s]['y'],caltable.tarr[s]['z']))\n xyz[site] = xyz_foo\n latlong = xyz_2_latlong(xyz_foo)\n latitude[site] = latlong[0][0]#rad\n longitude[site] = latlong[0][1]#rad\n PAR[site] = station_frot[site][0]\n ELE[site] = station_frot[site][1]\n OFF[site] = station_frot[site][2]\n\n # This is only if we interpolate elevation\n if (frotcal==True)&(interp_dt>0):\n if elev_function=='ehtim':\n elev_fake_foo = get_elev_2(earthrot(xyz[site], thetas_fake), sourcevec)#ehtim\n else:\n elev_fake_foo = get_elev(ra, dec, xyz[site], strtime_fake)##astropy\n\n # INI: extrapolate elevation to values outside the range\n if extrapolate:\n elevfit[site] = scipy.interpolate.interp1d(time_mjd_fake, elev_fake_foo, kind=elev_interp_kind, fill_value='extrapolate')\n else:\n elevfit[site] = scipy.interpolate.interp1d(time_mjd_fake, elev_fake_foo, kind=elev_interp_kind)\n\n try:\n caltable.data[site]\n except KeyError:\n skipsites.append(site)\n print (\"No Calibration Data for %s !\" % site)\n continue\n\n if skip_fluxcal: #if we don't do flux calibration don't waste time on serious interpolating\n rinterp[site] = scipy.interpolate.interp1d([0],[1],kind='zero',fill_value='extrapolate')\n linterp[site] = scipy.interpolate.interp1d([0],[1],kind='zero',fill_value='extrapolate')\n\n else: #default option, create interpolating station based SEFD gains\n time_mjd = caltable.data[site]['time']/24.0 + caltable.mjd\n \n if extrapolate:\n rinterp[site] = scipy.interpolate.interp1d(time_mjd, caltable.data[site]['rscale'], kind=interp, fill_value='extrapolate')\n linterp[site] = scipy.interpolate.interp1d(time_mjd, caltable.data[site]['lscale'], 
kind=interp, fill_value='extrapolate')\n else:\n rinterp[site] = scipy.interpolate.interp1d(time_mjd, caltable.data[site]['rscale'], kind=interp)\n linterp[site] = scipy.interpolate.interp1d(time_mjd, caltable.data[site]['lscale'], kind=interp)\n\n\n #-------------------------------------------\n # sort by baseline\n data = datastruct.data\n idx = np.lexsort((data['t2'], data['t1']))\n bllist = []\n for key, group in it.groupby(data[idx], lambda x: set((x['t1'], x['t2'])) ):\n bllist.append(np.array([obs for obs in group]))\n bllist = np.array(bllist) #, dtype=object) # INI: avoid VisibleDeprecationWarning\n\n # apply the calibration\n\n datatable = []\n coub=0\n for bl_obs in bllist:\n t1 = bl_obs['t1'][0].decode() # INI: bytes to str\n t2 = bl_obs['t2'][0].decode()\n coub=coub+1\n print('Calibrating {}-{} baseline, {}/{}'.format(t1,t2,coub,len(bllist)))\n time_mjd = bl_obs['time'] - MJD_0 #dates are in mjd in Datastruct\n if frotcal==True:\n gmst = gmst_function(time_mjd)\n thetas = np.mod((gmst - ra), 2*np.pi)\n hangle1 = gmst + longitude[t1] - ra #HOUR ANGLE FIRST TELESCOPE\n hangle2 = gmst + longitude[t2] - ra #HOUR ANGLE SECOND TELESCOPE\n par1I_t1 = np.sin(hangle1)\n par1I_t2 = np.sin(hangle2)\n par1R_t1 = np.cos(dec)*np.tan(latitude[t1]) - np.sin(dec)*np.cos(hangle1)\n par1R_t2 = np.cos(dec)*np.tan(latitude[t2]) - np.sin(dec)*np.cos(hangle2)\n parangle1 = np.angle(par1R_t1 + 1j*par1I_t1 ) #PARALACTIC ANGLE T1\n parangle2 = np.angle(par1R_t2 + 1j*par1I_t2 ) #PARALACTIC ANGLE T2\n if interp_dt<=0:\n if elev_function=='ehtim':\n elev1 = get_elev_2(earthrot(xyz[t1], thetas), sourcevec)\n elev2 = get_elev_2(earthrot(xyz[t2], thetas), sourcevec)\n else:\n datetimes = Time(time_mjd, format='mjd').to_datetime()\n strtime = [str(round_time(x)) for x in datetimes]\n elev1 = get_elev(ra, dec, xyz[t1], strtime) #ELEVATION T1\n elev2 = get_elev(ra, dec, xyz[t2], strtime) #ELEVATION T2\n else:\n elev1 = elevfit[t1](time_mjd)\n elev2 = elevfit[t2](time_mjd)\n\n fran1 = PAR[t1]*parangle1 + ELE[t1]*elev1 + OFF[t1]\n fran2 = PAR[t2]*parangle2 + ELE[t2]*elev2 + OFF[t2]\n \n #Keeping absolute phase of the LL* visibilities\n if keep_absolute_phase:\n shift1 = 1j*fran1\n shift2 = 1j*fran2\n fran_R1 = np.exp(1j*fran1 + shift1)\n fran_L1 = np.exp(-1j*fran1 + shift1)\n fran_R2 = np.exp(1j*fran2 + shift2)\n fran_L2 = np.exp(-1j*fran2 + shift2)\n else:\n fran_R1 = np.exp(1j*fran1)\n fran_L1 = np.exp(-1j*fran1)\n fran_R2 = np.exp(1j*fran2)\n fran_L2 = np.exp(-1j*fran2)\n \n\n if t1 in skipsites:\n rscale1 = lscale1 = np.array(1.)\n else:\n if frotcal==False:\n rscale1 = rinterp[t1](time_mjd)\n lscale1 = linterp[t1](time_mjd)\n else:\n rscale1 = rinterp[t1](time_mjd)*fran_R1\n lscale1 = linterp[t1](time_mjd)*fran_L1\n if t2 in skipsites:\n rscale2 = lscale2 = np.array(1.)\n else:\n if frotcal==False:\n rscale2 = rinterp[t2](time_mjd)\n lscale2 = linterp[t2](time_mjd)\n else:\n rscale2 = rinterp[t2](time_mjd)*fran_R2\n lscale2 = linterp[t2](time_mjd)*fran_L2\n\n\n# if force_singlepol == 'R':\n# lscale1 = rscale1\n# lscale2 = rscale2\n# if force_singlepol == 'L':\n# rscale1 = lscale1\n# rscale2 = lscale2\n\n rrscale = rscale1 * rscale2.conj()\n llscale = lscale1 * lscale2.conj()\n rlscale = rscale1 * lscale2.conj()\n lrscale = lscale1 * rscale2.conj()\n\n bl_obs['rr'] = (bl_obs['rr']) * rrscale\n bl_obs['ll'] = (bl_obs['ll']) * llscale\n bl_obs['rl'] = (bl_obs['rl']) * rlscale\n bl_obs['lr'] = (bl_obs['lr']) * lrscale\n\n bl_obs['rrweight'] = (bl_obs['rrweight']) / (np.abs(rrscale)**2)\n bl_obs['llweight'] = 
(bl_obs['llweight']) / (np.abs(llscale)**2)\n bl_obs['rlweight'] = (bl_obs['rlweight']) / (np.abs(rlscale)**2)\n bl_obs['lrweight'] = (bl_obs['lrweight']) / (np.abs(lrscale)**2)\n\n if len(datatable):\n datatable = np.hstack((datatable, bl_obs))\n else:\n datatable = bl_obs\n\n # put in uvfits format datastruct\n # telescope arrays\n tarr = datastruct.antenna_info\n tkeys = {tarr[i]['site']: i for i in range(len(tarr))}\n tnames = tarr['site']\n tnums = np.arange(1, len(tarr) + 1)\n xyz = np.array([[tarr[i]['x'],tarr[i]['y'],tarr[i]['z']] for i in np.arange(len(tarr))])\n\n # uvfits format output data table\n bl_arr = np.empty((len(datatable)), dtype=BLTYPE)\n for i in range(len(datatable)):\n entry = datatable[i]\n t1num = entry['t1']\n t2num = entry['t2']\n rl = entry['rl']\n lr = entry['lr']\n if tkeys[entry['t2']] < tkeys[entry['t1']]: # reorder telescopes if necessary\n #print entry['t1'], tkeys[entry['t1']], entry['t2'], tkeys[entry['t2']]\n entry['t1'] = t2num\n entry['t2'] = t1num\n entry['u'] = -entry['u']\n entry['v'] = -entry['v']\n entry['rr'] = np.conj(entry['rr'])\n entry['ll'] = np.conj(entry['ll'])\n entry['rl'] = np.conj(lr)\n entry['lr'] = np.conj(rl)\n datatable[i] = entry\n bl_arr[i] = np.array((entry['time'],entry['t1'],entry['t2']),dtype=BLTYPE)\n _, unique_idx_anttime, idx_anttime = np.unique(bl_arr, return_index=True, return_inverse=True)\n _, unique_idx_freq, idx_freq = np.unique(datatable['freq'], return_index=True, return_inverse=True)\n\n # random group params\n u = datatable['u'][unique_idx_anttime]\n v = datatable['v'][unique_idx_anttime]\n t1num = [tkeys[scope] + 1 for scope in datatable['t1'][unique_idx_anttime]]\n t2num = [tkeys[scope] + 1 for scope in datatable['t2'][unique_idx_anttime]]\n bls = 256*np.array(t1num) + np.array(t2num)\n jds = datatable['time'][unique_idx_anttime]\n tints = datatable['tint'][unique_idx_anttime]\n\n # data table\n nap = len(unique_idx_anttime)\n nsubchan = 1\n nstokes = 4\n nchan = datastruct.obs_info.nchan\n\n outdat = np.zeros((nap, 1, 1, nchan, nsubchan, nstokes, 3))\n outdat[:,:,:,:,:,:,2] = -1.0\n\n vistypes = ['rr','ll','rl','lr']\n for i in range(len(datatable)):\n row_freq_idx = idx_freq[i]\n row_dat_idx = idx_anttime[i]\n\n for j in range(len(vistypes)):\n outdat[row_dat_idx,0,0,row_freq_idx,0,j,0] = np.real(datatable[i][vistypes[j]])\n outdat[row_dat_idx,0,0,row_freq_idx,0,j,1] = np.imag(datatable[i][vistypes[j]])\n outdat[row_dat_idx,0,0,row_freq_idx,0,j,2] = datatable[i][vistypes[j]+'weight']\n\n # package data for saving\n obsinfo_out = datastruct.obs_info\n antennainfo_out = Antenna_info(tnames, tnums, xyz)\n uvfitsdata_out = Uvfits_data(u,v,bls,jds, tints, outdat)\n datastruct_out = Datastruct(obsinfo_out, antennainfo_out, uvfitsdata_out)\n\n # save final file\n save_uvfits(datastruct_out, filename_out)\n return\n\ndef get_elev(ra_source, dec_source, xyz_antenna, time):\n #this one is by Michael Janssen\n \"\"\"\n given right ascension and declination of a sky source [ICRS: ra->(deg,arcmin,arcsec) and dec->(hour,min,sec)]\n and given the position of the telescope from the vex file [Geocentric coordinates (m)]\n and the time of the observation (e.g. '2012-7-13 23:00:00') [UTC:yr-m-d],\n returns the elevation of the telescope.\n Note that every parameter can be an array (e.g. 
the time)\n \"\"\"\n from astropy import units as u\n from astropy.coordinates import EarthLocation, AltAz, ICRS, Angle\n #angle conversions:\n ra_src = Angle(ra_source, unit=u.rad)\n dec_src = Angle(dec_source, unit=u.rad)\n\n source_position = ICRS(ra=ra_src, dec=dec_src)\n antenna_position = EarthLocation(x=xyz_antenna[0]*u.m, y=xyz_antenna[1]*u.m, z=xyz_antenna[2]*u.m)\n altaz_system = AltAz(location=antenna_position, obstime=time)\n trans_to_altaz = source_position.transform_to(altaz_system)\n elevation = trans_to_altaz.alt\n return elevation.rad\n\ndef round_time(t,round_s=1.):\n\n \"\"\"rounding time to given accuracy\n\n Args:\n t: time\n round_s: delta time to round to in seconds\n\n Returns:\n round_t: rounded time\n \"\"\"\n t0 = datetime.datetime(t.year,1,1)\n foo = t - t0\n foo_s = foo.days*24*3600 + foo.seconds + foo.microseconds*(1e-6)\n foo_s = np.round(foo_s/round_s)*round_s\n days = np.floor(foo_s/24/3600)\n seconds = np.floor(foo_s - 24*3600*days)\n microseconds = int(1e6*(foo_s - days*3600*24 - seconds))\n round_t = t0+datetime.timedelta(days,seconds,microseconds)\n return round_t\n\n\ndef earthrot(vecs, thetas):\n \"\"\"Rotate a vector / array of vectors about the z-direction by theta / array of thetas (radian)\n \"\"\"\n if len(vecs.shape)==1:\n vecs = np.array([vecs])\n if np.isscalar(thetas):\n thetas = np.array([thetas for i in range(len(vecs))])\n\n # equal numbers of sites and angles\n if len(thetas) == len(vecs):\n rotvec = np.array([np.dot(np.array(((np.cos(thetas[i]),-np.sin(thetas[i]),0),(np.sin(thetas[i]),np.cos(thetas[i]),0),(0,0,1))), vecs[i])\n for i in range(len(vecs))])\n\n # only one rotation angle, many sites\n elif len(thetas) == 1:\n rotvec = np.array([np.dot(np.array(((np.cos(thetas[0]),-np.sin(thetas[0]),0),(np.sin(thetas[0]),np.cos(thetas[0]),0),(0,0,1))), vecs[i])\n for i in range(len(vecs))])\n # only one site, many angles\n elif len(vecs) == 1:\n rotvec = np.array([np.dot(np.array(((np.cos(thetas[i]),-np.sin(thetas[i]),0),(np.sin(thetas[i]),np.cos(thetas[i]),0),(0,0,1))), vecs[0])\n for i in range(len(thetas))])\n else:\n raise Exception(\"Unequal numbers of vectors and angles in earthrot(vecs, thetas)!\")\n\n return rotvec\n\ndef get_elev_2(obsvecs, sourcevec):\n \"\"\"Return the elevation of a source with respect to an observer/observers in radians\n obsvec can be an array of vectors but sourcevec can ONLY be a single vector\n \"\"\"\n\n if len(obsvecs.shape)==1:\n obsvecs=np.array([obsvecs])\n\n anglebtw = np.array([np.dot(obsvec,sourcevec)/np.linalg.norm(obsvec)/np.linalg.norm(sourcevec) for obsvec in obsvecs])\n el = 0.5*np.pi - np.arccos(anglebtw)\n\n return el","repo_name":"sao-eht/eat","sub_path":"eat/postproc/cal_amplitude_farotate.py","file_name":"cal_amplitude_farotate.py","file_ext":"py","file_size_in_byte":39509,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"54"} +{"seq_id":"13428685838","text":"import os\nimport json\nfrom aws_cdk import (\n core,\n aws_ssm as ssm,\n aws_s3_deployment as s3_deployment,\n aws_s3 as s3,\n aws_cloudformation as cfn\n)\nfrom aws_cdk.core import Aws\n\ncurrent_dir = os.path.dirname(__file__)\n\n\nclass S3Stack(cfn.NestedStack):\n\n def __init__(self, scope: core.Construct, id: str, aws_region='', **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n\n # Build S3 bucket\n self.data_sandbox_bucket = s3.Bucket(self, 'DataSandboxBucket',\n encryption=s3.BucketEncryption.S3_MANAGED\n )\n\n # Upload appstream scripts\n\n deploy_appstream_scripts = 
s3_deployment.BucketDeployment(\n self, 'AppstreamScriptsDeployment',\n sources=[s3_deployment.Source.asset(os.path.join(current_dir, '../appstream_scripts/'))],\n destination_bucket=self.data_sandbox_bucket,\n destination_key_prefix='appstream-scripts'\n )\n \n # build ssm parameters\n ssm.StringParameter(self, 'BucketParam',\n parameter_name='/s3/datasandboxbucket',\n string_value=json.dumps({\n \"bucket-name\": [f'{self.data_sandbox_bucket.bucket_name}']\n }))","repo_name":"aws-samples/appstream-data-tools-isolation-blog","sub_path":"stacks/data_sandbox_s3.py","file_name":"data_sandbox_s3.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"6853785144","text":"from RouterConfig.common import schema as schemautils\nfrom RouterConfig.common.shell.api import API as execute_cmd_api\nfrom RouterConfig.driver import Driver\nfrom RouterConfig.route_config import logger\nimport schemas\n\n\nclass RipRouteConfigDriver(Driver):\n\n rip_config_file = '/usr/local/etc/ripd.conf'\n execute_cmd_api = execute_cmd_api(logger=logger)\n\n def __init__(self, json_data):\n self.config_dict = json_data\n self.is_configured = False\n\n @classmethod\n @schemautils.validate_schema(schemas.rip_route_config_schema, logger=logger)\n def create_driver(cls, body):\n return cls(body)\n\n def parse(self):\n if len(self.config_dict) > 0:\n rip_conf = self.config_dict\n res = \"hostname zebra\\npassword zebra\\nrouter rip\\n\"\n # get rip version (default: version 2)\n rip_version = rip_conf.get(\"version\", 2)\n res += 'version ' + str(rip_version) + '\\n'\n # configure the network of rip\n if \"networks\" in rip_conf:\n for network in rip_conf.get(\"networks\"):\n res += \"network \" + network + \"\\n\"\n # configure other rip configuration\n if 'others' in rip_conf:\n for other_config in rip_conf.get('others'):\n res += other_config + '\\n'\n # save the configuration\n with open(self.rip_config_file, 'w') as f:\n f.write(res)\n self.is_configured = True\n logger.info('RIP configuration has been parsed.')\n\n def apply(self):\n if self.execute_cmd_api.execute('ripd -d'):\n logger.info('RIP thread has been turned on.')\n else:\n logger.error('Failed to start RIP thread.')\n","repo_name":"yangsijie666/RouterConfig","sub_path":"RouterConfig/route_config/rip/rip_route.py","file_name":"rip_route.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"8538413849","text":"import torch\n\n\ndef intersection_over_union(boxes1, boxes2, box_format=\"midpoint\"):\n \"\"\"Compute the IoU of paired bounding boxes.\n\n Args:\n boxes1 (tensor): boxes: [..., 4]\n boxes2 (tensor): should have the same shape as boxes1\n \"\"\"\n\n if box_format == \"midpoint\":\n x11 = (boxes1[..., 0] - boxes1[..., 2] / 2).unsqueeze(-1)\n y11 = (boxes1[..., 1] - boxes1[..., 3] / 2).unsqueeze(-1)\n x12 = (boxes1[..., 0] + boxes1[..., 2] / 2).unsqueeze(-1)\n y12 = (boxes1[..., 1] + boxes1[..., 3] / 2).unsqueeze(-1)\n\n x21 = (boxes2[..., 0] - boxes2[..., 2] / 2).unsqueeze(-1)\n y21 = (boxes2[..., 1] - boxes2[..., 3] / 2).unsqueeze(-1)\n x22 = (boxes2[..., 0] + boxes2[..., 2] / 2).unsqueeze(-1)\n y22 = (boxes2[..., 1] + boxes2[..., 3] / 2).unsqueeze(-1)\n else:\n x11 = boxes1[..., 0:1]\n y11 = boxes1[..., 1:2]\n x12 = boxes1[..., 2:3]\n y12 = boxes1[..., 3:4]\n\n # slice with 0:1 etc. to keep the trailing dim, matching the midpoint\n # branch above (fixed: was boxes2[..., 0], which broke broadcasting)\n x21 = boxes2[..., 0:1]\n y21 = boxes2[..., 1:2]\n x22 = boxes2[..., 2:3]\n y22 = boxes2[..., 3:4]\n\n x1 = torch.max(x11, x21)\n y1 = 
torch.max(y11, y21)\n x2 = torch.min(x12, x22)\n y2 = torch.min(y12, y22)\n\n inter = (x2 - x1).clamp(min=0) * (y2 - y1).clamp(min=0)\n union = (x12 - x11) * (y12 - y11) + (x22 - x21) * (y22 - y21) - inter\n return inter / (union + 1e-6)\n\n\nif __name__ == \"__main__\":\n boxes1 = torch.tensor([[0, 0, 100, 100]])\n boxes2 = torch.tensor([[0, 0, 200, 200]])\n assert intersection_over_union(boxes1, boxes2) == torch.tensor([0.25])\n print(\"iou success!\")\n","repo_name":"Konstantin5389/YOLOv3","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70478229281","text":"# -*- mode: python; python-indent: 4 -*-\nimport ncs\nfrom ncs.application import Service\n\n\n# ------------------------\n# SERVICE CALLBACK EXAMPLE\n# ------------------------\nclass ServiceCallbacks(Service):\n\n @Service.create\n def cb_create(self, tctx, root, service, proplist):\n self.log.info('Service create(service=', service._path, ')')\n\n snmpVars = ncs.template.Variables()\n snmpVars.add('COMMUNITY', \"python-demo\" + service.comm_str)\n snmpVars.add('ACCESS', service.access)\n \n template = ncs.template.Template(service)\n template.apply('snmpPyTemp5-template', snmpVars)\n self.log.info('snmpPyTemp5: comm: ', service.comm_str, ' access: ', service.access)\n\n\n# ---------------------------------------------\n# COMPONENT THREAD THAT WILL BE STARTED BY NCS.\n# ---------------------------------------------\nclass Main(ncs.application.Application):\n def setup(self):\n self.log.info('Main RUNNING')\n self.register_service('snmpPyTemp5-servicepoint', ServiceCallbacks)\n\n def teardown(self):\n self.log.info('Main FINISHED')\n","repo_name":"i9t6/NSO_Services_getting_started","sub_path":"snmpPyTemp5_main.py","file_name":"snmpPyTemp5_main.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30130871692","text":"\nx = input().split()\n\nlst = [0,0]\n\nfor i in range(len(x)):\n exp = 0\n for j in x[i]:\n lst[i] += int(j)*(10**exp)\n exp += 1\n\n\nprint(max(lst))\n","repo_name":"felixApellSkjutar/Kattis","sub_path":"Filip/filip.py","file_name":"filip.py","file_ext":"py","file_size_in_byte":161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27344518513","text":"from collections import defaultdict\nfrom time import time\nfrom typing import List\n\n\nclass Solution:\n def groupStrings(self, strings: List[str]) -> List[List[str]]:\n dic = defaultdict(list)\n for word in strings:\n key = []\n base_shift = ord(word[0])\n for i in range(1, len(word)):\n char_shift = (ord(word[i]) - base_shift) % 26\n # char_shift = ord(word[i]) - base_shift\n # if char_shift < 0:\n # char_shift += 26\n key.append(str(char_shift))\n dic['.'.join(key)].append(word)\n return dic.values()\n\n\nstart_time = time()\n\n_strings = [\"abc\",\"bcd\",\"acef\",\"xyz\",\"az\",\"ba\",\"a\",\"z\"]\n# Input: strings = [\"abc\",\"bcd\",\"acef\",\"xyz\",\"az\",\"ba\",\"a\",\"z\"]\n# Output: [[\"acef\"],[\"a\",\"z\"],[\"abc\",\"bcd\",\"xyz\"],[\"az\",\"ba\"]]\n_strings = [\"abc\",\"am\"]\n\nprint(Solution().groupStrings(_strings))\n\nprint(\"--- %s seconds ---\" % (time() - start_time))\n","repo_name":"Sadomtsevvs/Leetcode","sub_path":"249. Group Shifted Strings.py","file_name":"249. 
Group Shifted Strings.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24119281081","text":"import sys\n\ndef cnt_divisor(n, k):\n # exponent of prime k in n! (Legendre's formula)\n div = 0\n while n:\n n //= k\n div += n\n return div\n\nl, s = map(int,(sys.stdin.readline().split()))\n# trailing zeros of C(l, s): take the min of the exponents of 2 and 5\ndiv_2 = cnt_divisor(l, 2) - cnt_divisor(s, 2) - cnt_divisor(l-s,2)\ndiv_5 = cnt_divisor(l, 5) - cnt_divisor(s, 5) - cnt_divisor(l-s,5)\n\nif div_2 >= div_5:\n print(div_5)\n\nelse:\n print(div_2)\n\n# l, s = map(int,(sys.stdin.readline().split()))\n# if l//2 < s:\n# s = l - s\n\n# nume = 0\n# deno = 0\n\n# n_2 = 0\n# n_5 = 0\n\n# d_2 = 0\n# d_5 = 0\n\n# li = range(1, l+1)\n\n# for i in range(1,s+1):\n# tmp1 = li[-1*i]\n# tmp2 = li[i-1]\n\n# while True:\n# if tmp1 % 10 == 0:\n# n_2 += 1\n# n_5 += 1\n# tmp1 = tmp1/10\n\n# elif tmp1 % 5 == 0:\n# n_5 += 1\n# tmp1 = tmp1/5\n\n# elif tmp1 % 2 == 0:\n# n_2 += 1\n# tmp1 = tmp1/2\n \n# else:\n# break\n\n# while True:\n# if tmp2 % 10 == 0:\n# d_2 += 1\n# d_5 += 1\n# tmp2 = tmp2/10\n\n# elif tmp2 % 5 == 0:\n# d_5 += 1\n# tmp2 = tmp2/5\n\n# elif tmp2 % 2 == 0:\n# d_2 += 1\n# tmp2 = tmp2/2\n\n# else:\n# break\n\n# if (n_2 - d_2) > (n_5 - d_5):\n# res = n_5 - d_5\n# else:\n# res = n_2 - d_2\n# print(res)","repo_name":"AnWoosang/algo_study","sub_path":"baekjoon/300/2004.py","file_name":"2004.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2733200100","text":"from selenium import webdriver\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nimport time\r\nimport pandas as pd\r\nimport os\r\nfrom time import sleep\r\n\r\n# set up the browser session\r\nchrome_options = webdriver.ChromeOptions()\r\nbrowser = webdriver.Chrome(chrome_options=chrome_options) # launch the browser\r\nbrowser.minimize_window() # minimize the window\r\nwait = WebDriverWait(browser, 10) # explicit wait, to avoid flaky failures caused by network latency or a slow browser\r\n\r\n# create the result dataframe\r\ndf=pd.DataFrame()\r\n\r\nstart_year=int(input('请输入查询起始年份:'))\r\nend_year =int(input('请输入查询结束年份:'))\r\n\r\nreport_type=input('请输入报表类型:业绩报表,资产负债表,利润表,现金流量表:')\r\nif report_type=='业绩报表':\r\n report_type='yjbb'\r\nelif report_type=='资产负债表':\r\n report_type='zcfz'\r\nelif report_type=='利润表':\r\n report_type='lrb'\r\nelif report_type=='现金流量表':\r\n report_type='xjll'\r\n\r\n\r\n# fetch the report table for each queried year\r\nfor p in range(start_year,end_year+1):\r\n url = 'http://data.eastmoney.com/bbsj/'+str(p)+'12/'+report_type+'.html' # URL of that year's report page\r\n print(url) \r\n browser.get(url) # load the page\r\n i=1\r\n page=0\r\n while True:\r\n element = browser.find_elements_by_class_name(\"dataview-body\") # grab the table body\r\n tb = pd.read_html(element[0].get_attribute(\"outerHTML\"))[0] # parse the table contents with read_html\r\n tb[\"年份\"]=p # add a year column\r\n df=df.append(tb)\r\n \r\n # check whether this is the last page\r\n nextpage = browser.find_elements_by_link_text(\"下一页\")\r\n if len(nextpage)==0:\r\n break\r\n\r\n i=i+1\r\n \r\n # click through to the next page\r\n while True:\r\n try:\r\n next_page =wait.until(EC.presence_of_element_located((By.LINK_TEXT,\"下一页\")))\r\n wait.until(EC.element_to_be_clickable((By.LINK_TEXT,\"下一页\")) )\r\n next_page.click()\r\n if wait.until(EC.text_to_be_present_in_element((By.CLASS_NAME,\"active\"), str(i))):\r\n break\r\n except:\r\n pass\r\n \r\n # throttle paging to avoid getting banned\r\n sleep(1) 
\r\n\r\nfile_name=str(start_year)+'_'+str(end_year)+str(report_type)+'.xlsx'\r\n\r\ndf.to_excel(file_name)\r\n\r\n# the first row was deleted directly in the Excel sheet to fix the schema\r\ndf_zcfz=pd.read_excel('2021_2022zcfz.xlsx')\r\ndf_lrb=pd.read_excel('2021_2022lrb.xlsx')\r\nresult=pd.merge(df_lrb,df_zcfz,on='股票代码')\r\n\r\n\r\n# convert values with '万' (1e4) and '亿' (1e8) suffixes to plain numbers\r\ndef str_to_num(x):\r\n if x[-1]=='亿':\r\n x=float(x[:-1])*100000000 # fixed: 1 亿 = 100,000,000 (was 10000000)\r\n elif x[-1]=='万':\r\n x=float(x[:-1])*10000\r\n return x\r\n\r\nresult['净利润(元)']=result['净利润(元)'].apply(lambda x:str_to_num(x))\r\nresult['总资产(元)']=result['总资产(元)'].apply(lambda x:str_to_num(x))\r\nresult['ROA']=result['净利润(元)']/result['总资产(元)']\r\nresult=result.set_index(['股票代码'])\r\n","repo_name":"ZhengyiWang/Others","sub_path":"获取东方财富网财务数据.py","file_name":"获取东方财富网财务数据.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"31258559907","text":"import json\nfrom unittest.mock import patch, MagicMock, Mock\nfrom dataclasses import asdict\n\nimport settings\nfrom collector import MetricsCollector, MetricsRecord\n\n\nclass TestMetricsCollector:\n\n @patch('collector.lm')\n @patch('time.time', MagicMock(return_value=12345))\n def test_get_metrics(self, lm_mock):\n lm_mock.cpu_stat.cpu_percents.return_value = {'idle': 90}\n lm_mock.cpu_stat.load_avg.return_value = (10, 1, 1)\n lm_mock.mem_stat.mem_stats.return_value = (100, 1211)\n lm_mock.disk_stat.disk_usage.return_value = (\n None, None, None, None, b'5%')\n\n c = MetricsCollector(Mock())\n\n assert c.get_metrics() == MetricsRecord(\n 10, 10, 8.26, 5, settings.MACHINE_ID, 12345)\n\n\n def test_send_metrics(self):\n kafka_mock = Mock()\n\n metrics = MetricsRecord(10, 10, 10, 5, 'abcd', 12345)\n c = MetricsCollector(kafka_mock)\n c.get_metrics = Mock(return_value=metrics)\n c.send_metrics()\n\n metrics_bytes = json.dumps(asdict(metrics)).encode('utf8')\n\n kafka_mock.send.assert_called_with(\n settings.KAFKA_TOPIC_NAME, metrics_bytes)","repo_name":"artynusov/os-metrics","sub_path":"metrics_collector/metrics_collector/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33067038837","text":"#!/usr/bin/python3\n\"\"\"Send a POST request with an email parameter and print the response.\"\"\"\n\n\nimport sys\nimport urllib.parse\nimport urllib.request\n\n\nif __name__ == \"__main__\":\n url_look = sys.argv[1]\n value_check = {\"email\": sys.argv[2]}\n data = urllib.parse.urlencode(value_check).encode(\"ascii\")\n\n request = urllib.request.Request(url_look, data)\n with urllib.request.urlopen(request) as response:\n print(response.read().decode(\"utf-8\"))\n
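# Added usage note (not part of the original script):\n# ./2-post_email.py <url> <email>\n# posts the given email as form data to the URL and prints the response body\n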
","repo_name":"Philippe-era/alx-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34656393187","text":"# cit1113\n# Alex Brown\n# Lecture L\n\n\"\"\"\nThe book says to avoid using !=,\nnested if statements, or the AND keyword.\nThe OR keyword will skip the second check if the first is true. Put the condition more likely to be true first for optimization.\n\nA boolean is set to either True or False. Sometimes called a flag. Can be tested with if or while without == True.\n\nnot (a and b)\nsame as\nnot a or not b\n\nnot (a or b)\nsame as\nnot a and not b\n\"\"\"\n\nfrozen = True\nif frozen:\n print(\"I'm frozen\")\n\nsteamy = False\nif steamy:\n print(\"I'm steamy\")\n\n
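# Added check (not part of the original lecture notes): verify the De Morgan\n# identities quoted in the docstring above over all boolean pairs.\nimport itertools\nfor a, b in itertools.product((True, False), repeat=2):\n assert (not (a and b)) == ((not a) or (not b))\n assert (not (a or b)) == ((not a) and (not b))\nprint(\"De Morgan's laws hold for all boolean pairs\")\n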
\n# MAGIC \"Databricks\n# MAGIC
\n# MAGIC\n# MAGIC # *Introduction to Clustering*. Presented by Advancing Analytics\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Loading in the data\n# MAGIC Below is the command for reading in a csv file into a Spark Dataframe that can be used passed to a model. The dataframe can be displayed using 'display(df)'. Or it can be converted to a Pandas Dataframe and displayed by typeing 'df' into a cell.\n\n# COMMAND ----------\n\nreadPath = \"dbfs:/mnt/azureml/credit_card_segmentation.csv\"\n\ndf = (\n spark.read.option(\"header\", True)\n .option(\"inferSchema\", True)\n .format(\"csv\")\n .load(readPath)\n)\n\n# COMMAND ----------\n\ndisplay(df.drop(\"CUST_ID\", \"CASH_ADVANCE_FREQUENCY\"))\n\n# COMMAND ----------\n\ndf.cache()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Import routines for building up the model\n\n# COMMAND ----------\n\nfrom pyspark.ml.clustering import KMeans\nfrom pyspark.ml.evaluation import ClusteringEvaluator\nfrom pyspark.ml.feature import VectorAssembler\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Generate the vector assembler and the model\n# MAGIC Below shows the vector assember and the linear regression model being generated. The vector assembler creates a vectorized column that contains all the features. This needs to done to before passing the dataframe to the regression model. Otherwise and error will be raised. The LinearRegression model takes the featuresCol as an argument (the vectorized column containing all the features), as well as which column is the label column. This differs from the Scikit-learn library convention where the label data is passed in a separate argument. \n\n# COMMAND ----------\n\nfeats = df.drop(\"CUST_ID\").columns\nvectorAssembler = VectorAssembler(\n inputCols=feats, outputCol=\"rawFeatures\", handleInvalid=\"skip\"\n)\n\n\nmodel = KMeans(featuresCol=\"rawFeatures\", k=7)\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC ## Create a pipeline\n# MAGIC The cell below is used to setup the pipeline for a machine learning model. A pipeline allows the user to orchastrate each step for a model including preparation, transformations and training. They are also a good tool to prevent data leakage that can happen in some transformation steps if not done correctly. \n\n# COMMAND ----------\n\nfrom pyspark.ml import Pipeline\n\npipeline = Pipeline().setStages([vectorAssembler, model])\n\n# COMMAND ----------\n\noutmod = pipeline.fit(df)\npredictions = outmod.transform(df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Evaluating the model\n# MAGIC The cell below shows how the use can evaluate the model. Unlike regression and clustering, unsupervised methods don't have an explicit way of scoring models. Instead they can be evaluated looking at things like euclidean distance between clusters and visually examining the clustering. 
\n\n# COMMAND ----------\n\nevaluator = ClusteringEvaluator(featuresCol=\"rawFeatures\")\n\nsilhouette = evaluator.evaluate(predictions.select(\"rawFeatures\", \"prediction\"))\nprint(\"Silhouette with squared euclidean distance = \" + str(silhouette))\n\n# Shows the result.\ncenters = outmod.stages[1].clusterCenters()\nprint(\"Cluster Centers: \")\nfor center in centers:\n print(center)\n","repo_name":"lukemenziesAA/introtosml","sub_path":"Introduction_to_Building_Scalable_Machine_Learning_Models/Notebooks/2 Clustering.py","file_name":"2 Clustering.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11129983993","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\n\ndac = [26, 19, 13, 6, 5, 11, 9, 10]\n\nfor i in dac: GPIO.setup(i, GPIO.OUT)\n\ndef dec2bin(value):\n return [int(elem) for elem in bin(value)[2:].zfill(8)]\n\ntry:\n T = float(input())\n for k in range(1000):\n for j in range(256):\n binlist = dec2bin(j)\n for i in range(len(dac)): GPIO.output(dac[i], binlist[i])\n time.sleep(T/1024)\n for j in range(254, 0, -1):\n binlist = dec2bin(j)\n for i in range(len(dac)): GPIO.output(dac[i], binlist[i])\n time.sleep(T/1024)\n\n\nexcept ValueError: print(\"you entered not a number\") \n\n\nfinally:\n for i in dac: GPIO.output(i, 0)\n GPIO.cleanup()","repo_name":"Data-Flex/DAC","sub_path":"4-2-triangle.py","file_name":"4-2-triangle.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23394557482","text":"#This version is made using list data type\r\n\r\nfrom posixpath import join\r\nimport sys, os\r\nimport time\r\nque = [] #this will store the questions\r\nans = [] #this will store the answers\r\nqu = \"\"\r\nru = \"\"\r\n\r\ndef j(v):\r\n return (\" \".join(v.split())).lower() #returns string v in lowercase after removing any extra spaces from it\r\n\r\ndef evaluation(x, y, z):\r\n c = 0 #stores number of correct answers\r\n os.system('cls')\r\n if z:\r\n print(\"The question will be displayed to you, enter the answer. Don't add any extra symbols, Good Luck!\")\r\n for i in range(len(x)):\r\n qu = j(input(join(x[i], \" \").replace(\"/\", \"\")))\r\n if y[i] == qu:\r\n print(\"Correct!\")\r\n c += 1\r\n else:\r\n print(join(\"Wrong answer, the correct answer is \", y[i], \".\").replace(\"/\", \"\"))\r\n if z:\r\n print(\"Tutorial over.\")\r\n w = join(\"You got 1 correct answer out of \", str(len(x)), \"!\").replace(\"/\", \"\") if c == 1 else join(\"You got \", str(c), \" correct answers out of \", str(len(x)), \"!\").replace(\"/\", \"\")\r\n print(w)\r\n print(\"Thanks for using :)\")\r\n reply = j(input(\"Would you like to try again? (reply with a 'yes' or 'no') \"))\r\n if reply == \"yes\":\r\n while True:\r\n if z:\r\n reply = input(\"Enter 'tuto' if you'd like to revisit the tutorial, and 'start' if you'd like to try it without the tutorial: \")\r\n if reply == \"tuto\":\r\n print(\"Directing you to the tutorial\")\r\n time.sleep(2)\r\n os.system('cls')\r\n x = []\r\n y = []\r\n code([True, True], x, y)\r\n break\r\n elif reply == \"start\":\r\n print(\"Directing you to a new terminal\")\r\n time.sleep(2)\r\n os.system('cls')\r\n x = []\r\n y = []\r\n code([False, False], x, y)\r\n break\r\n else:\r\n print(\"You entered an invalid response. 
Try again :)\")\r\n reply = input(\"Enter 'same' if you'd like to answer the same set of questions, 'diff' if you'd like to try again with a new set of questions, or 'edit' if you'd like to add some questions to the existing set of questions. \")\r\n if reply == 'same':\r\n evaluation(x, y, \"\")\r\n elif reply == 'diff':\r\n x = []\r\n y = []\r\n os.system('cls')\r\n code([False, False], x, y)\r\n elif reply == 'edit':\r\n os.system('cls')\r\n code([False, False], x, y)\r\n elif reply == \"no\":\r\n print(\"Bye bye, and good luck!\")\r\n else:\r\n print(\"I'll take that as a no, bye bye.\")\r\n \r\ndef code(a, q, r): #variable a stores boolean for tutorial, and q and r store the list for questions and answers respectively\r\n if a[1]:\r\n print(\"First, enter a question\")\r\n qu = j(input(\"Enter question here: \"))\r\n if a[1]:\r\n print(\"Then, enter the answer to the question.\")\r\n while True:\r\n ru = j(input(\"Enter answer here: \"))\r\n if qu[-1] ==\":\":\r\n q.append(qu) #adding qu to list\r\n else:\r\n q.append(join(qu, \":\").replace('/', ''))\r\n r.append(ru) #adding ru to list\r\n if(a[1]):\r\n print(\"You can do this as many times as you want to, when you're done inputting your set of questions and answer, instead of typing your question when asked for it, type 'stop' in lowercase. \")\r\n a = [True, False] #turning one of the Trues to false to prevent printing the above statement again and again\r\n qu = input(\"Enter question here: \")\r\n if qu == \"stop\":\r\n if (len(q) == 0):\r\n print(\"You didn't write any question! Try again :)\")\r\n reply = [True, True] if q[0] else \"\"\r\n code(reply)\r\n if a[0]:\r\n x = \"Now, you will be directed to a fresh terminal where you will be able to quiz/test yourself using the questions and answers which you entered.\"\r\n print(x)\r\n time.sleep(5.2) #belates execution of next line of code by 5.2 seconds\r\n evaluation(q, r, a[0])\r\n\r\nprint(\"This is a self-study tool for students, where they can create flashcards and evaluate themselves.\")\r\nreply = j(input(\"Would you like a tutorial? 
(reply with 'yes' for a tutorial) \"))\r\nreply = [True, True] if reply == \"yes\" else [False, False]\r\ncode(reply, que, ans)","repo_name":"Gurlakshpreet/flashcard","sub_path":"flashcard list ver.py","file_name":"flashcard list ver.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27306960669","text":"\"\"\"\nThis file defines the data types that are loaded from the data files.\n\"\"\"\nimport numba as nb\nimport numpy as np\n\nfrom oasislmf.pytools.common import areaperil_int, oasis_float\n\n# Footprint file formats in order of priority\nfp_format_priorities = ['parquet', 'binZ', 'bin', 'csv']\n\n# filenames\nfootprint_filename = 'footprint.bin'\nfootprint_index_filename = 'footprint.idx'\nzfootprint_filename = 'footprint.bin.z'\nzfootprint_index_filename = 'footprint.idx.z'\ncsvfootprint_filename = 'footprint.csv'\nparquetfootprint_filename = \"footprint.parquet\"\nparquetfootprint_meta_filename = \"footprint_parquet_meta.json\"\n\n\nFootprintHeader = nb.from_dtype(np.dtype([('num_intensity_bins', np.int32),\n ('has_intensity_uncertainty', np.int32)\n ]))\n\nEvent = nb.from_dtype(np.dtype([('areaperil_id', areaperil_int),\n ('intensity_bin_id', np.int32),\n ('probability', oasis_float)\n ]))\n\nEventCSV = nb.from_dtype(np.dtype([('event_id', np.int32),\n ('areaperil_id', areaperil_int),\n ('intensity_bin_id', np.int32),\n ('probability', oasis_float)\n ]))\n\nEventIndexBin = nb.from_dtype(np.dtype([('event_id', np.int32),\n ('offset', np.int64),\n ('size', np.int64)\n ]))\n\nEventIndexBinZ = nb.from_dtype(np.dtype([('event_id', np.int32),\n ('offset', np.int64),\n ('size', np.int64),\n ('d_size', np.int64)\n ]))\n\nIndex_type = nb.from_dtype(np.dtype([('start', np.int64),\n ('end', np.int64)\n ]))\n\nVulnerability = nb.from_dtype(np.dtype([('vulnerability_id', np.int32),\n ('intensity_bin_id', np.int32),\n ('damage_bin_id', np.int32),\n ('probability', oasis_float)\n ]))\n\nItem = nb.from_dtype(np.dtype([('id', np.int32),\n ('coverage_id', np.int32),\n ('areaperil_id', areaperil_int),\n ('vulnerability_id', np.int32),\n ('group_id', np.int32)\n ]))\n\nKeys = {'LocID': np.int32,\n 'PerilID': 'category',\n 'CoverageTypeID': np.int32,\n 'AreaPerilID': areaperil_int,\n 'VulnerabilityID': np.int32}\n","repo_name":"OasisLMF/OasisLMF","sub_path":"oasislmf/pytools/getmodel/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"54"} +{"seq_id":"8496465924","text":"'''\nCreated on 24 oct 2022\n\n@author: Dell Latitude E6540\n'''\n\nclass pythonObjAttr(object):\n\t\n\tdef __init__(self, attrName, attrModule, attrType):\n\t\t# Attribute name\n\t\t\n\t\tif attrName == 'package': # reserved keyword in Java, not to be used\n\t\t\tattrName = 'myPackage'; \n\t\t\n\t\tself.Name = attrName;\n\t\t# Attribute type\n\t\tself.ClassModule = attrModule;\n\t\tself.Class = attrType;\t\t\n\t\t\n\tdef print_java_attribute_declaration(self):\n\t\t\n\t\ts = \"\";\n\t\t# s = s + \"\\t// Class attribute \" + self.Name + \";\\n\"\n\t\ts = s + \"\\t\" + self.Class + \" \" +self.Name + \";\\n\";\n\t\t#s = s + \"\\t\\n\"\n\t\n\t\treturn s\n\t\n\tdef print_java_get_set_methods(self):\n\t\t\n\t\tattrName = self.Name[0].upper() + self.Name[1:];\n\t\t\n\t\ts = \"\";\n\t\t\n\t\t# print get\n\t\t\n\t\tif self.Class == 'boolean':\n\t\t\tgetFunc = 'is'\n\t\telse:\n\t\t\tgetFunc = 'get'\n\t\t\t\n\t\ts = s + 
\"\\tpublic \" + self.Class + \" \" + getFunc + attrName + \"() {\\n\";\n\t\ts = s + \"\\t\\treturn \" + self.Name + \";\\n\";\n\t\ts = s + \"\\t}\\n\\n\"\n\t\t\n\t\t# print set\n\t\ts = s + \"\\tpublic void set\" + attrName + \"(\" + \\\n\t\t\t\t\t\tself.Class + \" \" + self.Name + \") {\\n\";\n\t\ts = s + \"\\t\\tthis.\" + self.Name + \" = \" + self.Name + \";\\n\";\n\t\ts = s + \"\\t}\\n\\n\"\n\t\t\n\t\treturn s","repo_name":"AZblo/python2javaClassDefinitions","sub_path":"src/pythonObjReader/pythonObjAttr.py","file_name":"pythonObjAttr.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21353778224","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef gradAscent(dataSet, labelSet):\n dataMatrix = np.mat(dataSet)\n labelMatrix = np.mat(labelSet).T\n m, n = np.shape(dataMatrix)\n alpha = 0.001\n maxCycles = 500\n weight = np.ones((n, 1))\n for k in range(maxCycles):\n h = sigmoid(dataMatrix * weight)\n error = labelMatrix - h\n weight = weight + alpha * dataMatrix.T * error\n return weight.getA()\n\n\ndef sigmoid(inX):\n return 1.0 / (1 + np.exp(-inX))\n\n\ndef loadDataSet():\n dataMat = []\n labelMat = []\n fr = open('testSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split() # 以空格切分数据\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])]) # 添加数据集\n labelMat.append(int(lineArr[2])) # 添加标签集\n fr.close() # 关闭文件\n return dataMat, labelMat\n\n\ndef plotBestFit(weights):\n dataMat, labelMat = loadDataSet() #加载数据集\n dataArr = np.array(dataMat) #转换成numpy的array数组\n n = np.shape(dataMat)[0] #数据个数\n xcord1 = []; ycord1 = [] #正样本\n xcord2 = []; ycord2 = [] #负样本\n for i in range(n): #根据数据集标签进行分类\n if int(labelMat[i]) == 1:\n xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2]) #1为正样本\n else:\n xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2]) #0为负样本\n fig = plt.figure()\n ax = fig.add_subplot(111) #添加subplot\n ax.scatter(xcord1, ycord1, s = 20, c = 'red', marker = 's',alpha=.5)#绘制正样本\n ax.scatter(xcord2, ycord2, s = 20, c = 'green',alpha=.5) #绘制负样本\n x = np.arange(-3.0, 3.0, 0.1)\n y = (-weights[0] - weights[1] * x) / weights[2]\n ax.plot(x, y)\n plt.title('BestFit') #绘制title\n plt.xlabel('X1'); plt.ylabel('X2') #绘制label\n plt.show()\n\n\n\nif __name__ == '__main__':\n dataMat, labelMat = loadDataSet()\n weights = gradAscent(dataMat, labelMat)\n plotBestFit(weights)\n\n\n\n\n\n\n\ndef plotData():\n dataMat, labelMat = loadDataSet() #数据集和标签集\n dataArr = np.array(dataMat) #将数据集转化为数组\n n = len(dataArr) #数据的个数\n right_x = []; right_y=[] #正确数据的x值和y值\n wrong_x = []; wrong_y=[] #错误数据的x值和y值\n for i in range(n): #循环遍历\n if labelMat[i] == 1: #如果是正确饿值\n right_x.append(dataArr[i][1]) #保存数据\n right_y.append(dataArr[i][2])\n else:\n wrong_x.append(dataArr[i][1])\n wrong_y.append(dataArr[i][2])\n plt.scatter(right_x, right_y, s=20, c='red', marker='s', alpha=.5, label='right') # 绘制正样本 #画正确的图\n plt.scatter(wrong_x, wrong_y, s=20, c='green', alpha=.5, label='wrong') # 绘制负样本 #画错误的图\n plt.title('DataSet') # 绘制title\n plt.xlabel('x')\n plt.ylabel('y') # 绘制label\n plt.legend(loc='lower left')\n plt.show()","repo_name":"Auraros/MachineLearn","sub_path":"LogisticReturn/Logistic.py","file_name":"Logistic.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"16782736543","text":"import heapq\n\n\ndef dijkstra():\n q = []\n d = [[float('inf') for _ in range(k+1)] for _ in 
range(n+1)]\n d[1] = [0 for _ in range(k+1)]\n heapq.heappush(q, [0, 1, 0])\n while q:\n dist, now, cover = heapq.heappop(q)\n if dist > d[now][cover]:\n continue\n for i, j in lst[now]:\n dummy = j\n if d[i][cover] > dummy+dist:\n d[i][cover] = dummy+dist\n heapq.heappush(q, [d[i][cover], i, cover])\n if cover < k:\n if d[i][cover+1] > dist:\n d[i][cover+1] = dist\n heapq.heappush(q, [d[i][cover+1], i, cover+1])\n print(min(d[n]))\n\n\nn, m, k = map(int, input().split())\nlst = [[] for _ in range(n+1)]\nfor i in range(m):\n a, b, c = map(int, input().split())\n lst[b].append([a, c])\n lst[a].append([b, c])\ndijkstra()\n","repo_name":"yyytae0/algorithm-training","sub_path":"baekjoon/1162.py","file_name":"1162.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15587179561","text":"import gc\nimport os\nimport shutil\nimport time\nfrom types import SimpleNamespace\nimport csv\nimport numpy as np\nimport gc\n\nimport torch\nimport math\n\nfrom tensorboardX import SummaryWriter\nfrom torch.optim.lr_scheduler import LambdaLR, StepLR, MultiStepLR\n\nfrom basis_ae import TrainerBasisAE, Loss\nfrom basis_equivariant_layer import BasisEquivConvLyer\nfrom basis_equivariant_network import BasisEquivariantNet\nfrom constants import BATCH_SIZE, TEST, LOG_INTERVAL, DEVICE, OVERFIT_SUBSET\nfrom group_action import Group, RotationGroupTransformer, ScaleGroupTransformer\nfrom interpolator.kernels import BilinearKernel, GaussianKernel\nfrom serializer import Serializer\nfrom transform_tensor_batch import TransformTensorBatch\nfrom util import get_data\n\n\n#\n# def get_modelname(config_dict, config_dict):\n# modelname = ''\n# modelname += 'group_name:' + str(config_dict.group_name) + ' '\n# modelname += 'sigma:' + str(config_dict.sigma) + ' '\n# modelname += 'basis_sz:' + str(config_dict.basis_equiv_layers) + ' '\n# modelname += 'stride_sz_conv:' + str(config_dict.stride_sz_conv) + ' '\n# modelname += 'equivariance_rate:' + str(config_dict.equivariance_rate) + ' '\n# modelname += 'orthogonality_rate:' + str(config_dict.orthogonality_rate) + ' '\n# modelname += 'normalize:' + str(config_dict.normalize) + ' '\n# modelname += 'normalized_l2:' + str(config_dict.normalized_l2) + ' '\n# modelname += 'l2_coef:' + str(config_dict.weight_decay)\n# return modelname\n\n\ndef get_modelname(config_dict, target='prediction'):\n modelname = ''\n # I know, it puts _ after reconstruction and prediction; it is ok\n modelname += target + '_'\n modelname += 'dataset:' + str(config_dict.dataset) + '_'\n modelname += 'conv_type:' + str(config_dict.basis_equiv_layers_type) + '_'\n modelname += 'pool_type:' + str(config_dict.pool_type) + ' '\n modelname += 'last_layer_type:' + str(config_dict.last_layer_type) + ' '\n modelname += 'augmentation_angle:' + str(config_dict.rotation_augmentation_angle) + ' '\n modelname += 'aug_type :' + str(config_dict.rotation_augmentation_type) + ' '\n # modelname += 'sigma:' + str(config_dict.sigma) + ' '\n # modelname += 'basis_sz:' + str(config_dict.basis_equiv_layers) + ' '\n # modelname += 'stride_sz_conv:' + str(config_dict.stride_sz_conv) + ' '\n # modelname += 'equivariance_rate:' + str(config_dict.equivariance_rate) + ' '\n # modelname += 'orthogonality_rate:' + str(config_dict.orthogonality_rate) + ' '\n # modelname += 'normalize:' + str(config_dict.normalize) + ' '\n modelname += 'load:' + str(config_dict.load)\n\n modelname = modelname.replace(', ', ',')\n modelname = 
modelname.replace('Experiment ', 'Experiment', 1)\n\n return modelname\n\n\ndef init_trainer_model(equiv_rate, orthg_rate, lr, epochs, nr_group_elems, kernel_type,\n width, sigma, basis_equiv_layers, fc_sizes,\n bias, stride_sz_conv, normalize, weight_decay,\n use_scipy_order2, group_name, save, save_aux, verbose,\n train_basis_last_epoch, train_basis_every_n_batches, normalized_l2,\n dataset, target, load, load_aux, onebyoneconv,\n basis_equiv_layers_type, pool_type, last_layer_type, finetune_batches,\n pool_sz_conv=None, sz_output=10, rotation_augmentation_angle=0, rotation_augmentation_type='torch', optimizer='adam'):\n \"\"\"\n equiv_rate=1, orthg_rate=1, lr=0.003, epochs=10, nr_group_elems=4, kernel_type='Gaussian',\n width=3, sigma=1., basis_equiv_layers=[(5, 20, 3)], fc_sizes=[2048], pool_sz_conv=None,\n sz_output=10, bias=False, stride_sz_conv=1, normalize=False,\n weight_decay=0, use_scipy_order2=False, group_name='rotation', save=False, save_aux=None,\n verbose=True, train_basis_last_epoch=10000, train_basis_every_n_batches=1, normalized_l2=False,\n dataset='CIFAR10', target='reconstruction', load=None, load_aux=None, onebyoneconv=[],\n basis_equiv_layers_type='conv', pool_type='stride', last_layer_type='conv1x1', finetune_batches=1\n \"\"\"\n assert rotation_augmentation_type in ['gaussian', 'torch']\n assert optimizer in ['adam', 'adam_noams', 'sgd']\n assert last_layer_type in ['conv1x1', 'group1x1', 'linear']\n assert pool_type in ['stride', 'avg', 'max']\n assert basis_equiv_layers_type in ['conv', 'random', 'weiler', 'learned', 'average', 'gaussian', 'bilinear']\n if pool_sz_conv is None:\n pool_sz_conv = [1 for _ in range(len(stride_sz_conv))]\n if kernel_type == 'Bilinear':\n kernel = BilinearKernel()\n elif kernel_type == 'Gaussian':\n kernel = GaussianKernel(width, sigma)\n else:\n raise ValueError('invalid parameter value')\n\n if target not in ['reconstruction', 'prediction']:\n raise ValueError('invalid parameter value target')\n\n if save:\n for el in save_aux:\n assert el in ['model', 'basis']\n\n if target == 'reconstruction':\n trainer = TrainerReconstruction(DEVICE, dataset, verbose, save, save_aux)\n else:\n trainer = TrainerPrediction(DEVICE, dataset, verbose, save, save_aux)\n\n if trainer.dataset_name == 'MNIST':\n input_shape = (1, 28, 28)\n elif trainer.dataset_name == 'CIFAR10':\n input_shape = (3, 32, 32)\n else:\n raise ValueError()\n\n if group_name == 'rotation':\n rotation_group = Group(name=group_name, nr_group_elems=nr_group_elems,\n base_element=2 * math.pi / nr_group_elems)\n batch_transformer = TransformTensorBatch(kernel=kernel, image_size=torch.Size(\n (input_shape[-2] + 2, input_shape[-1] + 2)),\n device=trainer.device,\n group_sz=rotation_group.nr_group_elems,\n use_scipy_order2=use_scipy_order2)\n transformer = RotationGroupTransformer(group=rotation_group, device=trainer.device,\n rotation_batch_tansformer=batch_transformer)\n elif group_name == 'scale':\n scale_group = Group(name=group_name, nr_group_elems=nr_group_elems, base_element=1)\n scale_batch_transformer = TransformTensorBatch(kernel=kernel, image_size=torch.Size(\n (input_shape[-2] + 2, input_shape[-1] + 2)),\n device=trainer.device,\n group_sz=scale_group.nr_group_elems,\n use_scipy_order2=use_scipy_order2)\n transformer = ScaleGroupTransformer(group=scale_group, device=trainer.device,\n scale_batch_tansformer=scale_batch_transformer)\n else:\n raise ValueError(\"No such group\")\n\n config_dict = SimpleNamespace()\n\n config_dict.use_scipy_order2 = use_scipy_order2\n 
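# Added note: every hyperparameter is recorded on this namespace so that it can be serialized next to the model via serializer.save_config later on.\n 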
config_dict.kernel_type = kernel_type\n config_dict.sigma = sigma\n config_dict.width = width\n config_dict.group_name = group_name\n config_dict.nr_group_elems = nr_group_elems\n config_dict.basis_equiv_layers = basis_equiv_layers\n config_dict.fc_sizes = fc_sizes\n config_dict.shape_input = input_shape\n config_dict.sz_output = sz_output\n config_dict.bias = bias\n config_dict.stride_sz_conv = stride_sz_conv\n config_dict.pool_sz_conv = pool_sz_conv\n config_dict.orthogonality_rate = orthg_rate\n config_dict.equivariance_rate = equiv_rate\n config_dict.weight_decay = weight_decay\n config_dict.normalize = normalize\n config_dict.normalized_l2 = normalized_l2\n config_dict.epochs = epochs\n config_dict.lr = lr\n config_dict.dataset = dataset\n config_dict.pool_type = pool_type\n config_dict.last_layer_type = last_layer_type\n config_dict.finetune_batches = finetune_batches\n config_dict.rotation_augmentation_angle = rotation_augmentation_angle\n config_dict.rotation_augmentation_type = rotation_augmentation_type\n config_dict.optimizer = optimizer\n\n if train_basis_last_epoch is not None:\n assert train_basis_last_epoch <= epochs\n config_dict.train_basis_last_epoch = train_basis_last_epoch\n config_dict.train_basis_every_n_batches = train_basis_every_n_batches\n config_dict.onebyoneconv = onebyoneconv\n config_dict.basis_equiv_layers_type = basis_equiv_layers_type\n model = BasisEquivariantNet(transformer=transformer, basis_equiv_layers=basis_equiv_layers,\n fc_sizes=fc_sizes, shape_input=input_shape, sz_output=sz_output,\n bias=bias, stride_conv=stride_sz_conv,\n pool_sz_conv=pool_sz_conv, normalize_basis=normalize,\n lr=lr, normalized_l2=normalized_l2, onebyoneconv=onebyoneconv,\n basis_equiv_layers_type=basis_equiv_layers_type,\n pool_type=pool_type, last_layer_type=last_layer_type)\n\n config_dict.load = load\n config_dict.load_aux = load_aux\n assert load in [None, 'basis', 'model']\n if load is not None:\n if load == 'basis':\n trainer.serializer.load_model_basis(model, load_aux, config_dict)\n model.to(DEVICE)\n else:\n trainer.serializer.load_model(model, load_aux, config_dict)\n model = model.to(DEVICE)\n # if torch.cuda.device_count() > 1: # 0\n # print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n # model = torch.nn.DataParallel(model)\n trainer.set_model(model, config_dict, transformer)\n return trainer, model, transformer, config_dict\n\n\ndef init_train(*args, **kwargs):\n trainer, model, transformer, config_dict = init_trainer_model(*args, **kwargs)\n trainer.train(model=model, transformer=transformer, config_dict=config_dict)\n del model\n torch.cuda.empty_cache()\n\n\nclass TrainerPrediction:\n def __init__(self, device, dataset, verbose, save, save_aux):\n self.dataset_name = dataset\n self.device = device\n self.serializer = Serializer()\n self.target = 'prediction'\n self.trainer_basis = None\n self.writer = None\n self.verbose = verbose\n self.save = save\n self.save_aux = save_aux\n self.model_name = None\n self.config_dict = None\n self.max_accuracy = 0.\n self.transformer = None\n self.epoch_start = time.time()\n self.criterion = torch.nn.CrossEntropyLoss().to(self.device)\n self.epochs = None\n self.finetune_batches = None\n self.rotation_augmentation_angle = None\n self.rotation_augmentation_type = None\n\n def set_model(self, model, config_dict, transformer):\n self.epochs = config_dict.epochs\n self.finetune_batches = config_dict.finetune_batches\n self.rotation_augmentation_angle = config_dict.rotation_augmentation_angle\n 
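# Added note: 'torch' passes the rotation angle into the data loader below, while 'gaussian' loads the data unaugmented here.\n 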
self.rotation_augmentation_type = config_dict.rotation_augmentation_type\n if self.rotation_augmentation_type == 'torch':\n self.train_loader, self.valid_loader = get_data(dataset=self.dataset_name,\n train_augment_angle=self.rotation_augmentation_angle)\n elif self.rotation_augmentation_type == 'gaussian':\n self.train_loader, self.valid_loader = get_data(dataset=self.dataset_name)\n\n model_name = get_modelname(config_dict, self.target)\n self.config_dict = config_dict\n self.transformer = transformer\n if os.path.exists('images/' + model_name):\n print(\"model already trained\")\n # return\n # shutil.rmtree('./images/' + model_name)\n # print(\"overwriting old output file\")\n i = 0\n while os.path.exists('images/' + model_name + '_' + str(i)):\n i += 1\n model_name += '_' + str(i)\n path = os.path.join('images', model_name)\n os.mkdir(path)\n if config_dict.last_layer_type != 'group1x1':\n nr_layers = len(config_dict.basis_equiv_layers)\n else:\n nr_layers = len(config_dict.basis_equiv_layers) + 1\n for idx in range(nr_layers):\n os.mkdir(os.path.join(path, 'basis_layer:' + str(idx)))\n os.mkdir(os.path.join(path, 'basis_layer:' + str(idx), 'images'))\n os.mkdir(os.path.join(path, 'layer:' + str(idx)))\n os.mkdir(os.path.join(path, 'layer:' + str(idx), 'images'))\n\n self.serializer.save_config(model_name, config_dict)\n\n path_to_net_folder = os.path.join('images', str(model_name))\n self.writer = SummaryWriter(log_dir=path_to_net_folder)\n\n self.trainer_basis = TrainerBasisAE(config_dict.equivariance_rate, config_dict.orthogonality_rate,\n verbose=self.verbose, model_name=model_name,\n dataset_len=len(self.train_loader.dataset),\n train_loader_len=len(self.train_loader),\n log_writer=self.writer,\n train_basis_last_epoch=config_dict.train_basis_last_epoch,\n train_basis_every_n_batches=config_dict.train_basis_every_n_batches)\n self.model_name = model_name\n print('Device:' + str(DEVICE))\n print('\\n\\n --model_name created:' + model_name)\n with open(os.path.join(path_to_net_folder, 'model_str.txt'), 'w+') as fd:\n fd.write('Device:' + str(DEVICE))\n fd.write('\\n')\n fd.write(str(model))\n print(model)\n\n def train(self, model, transformer, config_dict=None):\n\n # self.set_model(model, config_dict, transformer)\n\n params_except_basis_layers = filter(lambda pair: '.basis.' 
not in pair[0], model.named_parameters())\n params = [param[1] for param in params_except_basis_layers]\n if self.config_dict.optimizer == 'adam':\n optimizer = torch.optim.Adam(params, lr=config_dict.lr, weight_decay=config_dict.weight_decay, amsgrad=True)\n scheduler = False\n elif self.config_dict.optimizer == 'adam_noams':\n optimizer = torch.optim.Adam(params, lr=config_dict.lr, weight_decay=config_dict.weight_decay, amsgrad=False)\n scheduler = False\n elif self.config_dict.optimizer == 'sgd':\n optimizer = torch.optim.SGD(params, lr=config_dict.lr, momentum=0.9, weight_decay=config_dict.weight_decay)\n milestones = [100, 200]\n scheduler = MultiStepLR(optimizer, milestones, gamma=0.1)\n else:\n raise NotImplementedError\n\n model = model.to(self.device)\n\n if self.finetune_batches > 0:\n # TODO change before cluster\n self.validate(model=model, epoch=-2, eq_loss=True)\n self.finetune_basis(model)\n model.freeze_basis()\n self.validate(model=model, epoch=-1, eq_loss=True)\n\n for epoch in range(0, self.epochs):\n if scheduler:\n scheduler.step(epoch)\n if not OVERFIT_SUBSET:\n self._train_epoch(model=model, optimizer=optimizer, epoch=epoch)\n else:\n self._overfit_small_subset_train(model=model,\n transformer=transformer,\n optimizer=optimizer,\n epoch=epoch,\n verbose=self.verbose)\n # TODO change before cluster\n if epoch != self.epochs - 1:\n self.validate(model=model, epoch=epoch)\n else:\n self.validate(model=model, epoch=epoch+1, eq_loss=True)\n # self.finetune_basis(model)\n\n model.cpu()\n\n if self.save:\n if 'model' in self.save_aux:\n self.serializer.save_model(self.model_name, model, config_dict)\n if self.verbose:\n print(\"saved model \" + self.model_name)\n if 'basis' in self.save_aux and config_dict.epochs <= self.trainer_basis.train_basis_last_epoch:\n self.serializer.save_model_basis(self.model_name, model, config_dict)\n\n self.model_name = None\n self.config_dict = None\n self.max_accuracy = 0.\n return model\n\n def validate(self, model, epoch, angle=None, eq_loss=False):\n\n model.eval()\n loss_sum = 0.\n correct = 0\n\n if epoch < self.trainer_basis.train_basis_last_epoch:\n self.trainer_basis.reset_log_sums(model.len_non1_basis_equiv_layers)\n\n # 1 Compute validation\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(self.valid_loader):\n data = data.to(self.device)\n # data = data.flip([-2, -1]) # 180*\n # data = data.transpose(-2, -1).flip(-1) # 90*\n # data = data.transpose(-2, -1).flip(-2) # 270*\n target = target.to(self.device)\n if angle is not None:\n data = self.transformer.apply_rotation_to_input(data, [angle])\n\n output, _ = model.forward_prediction(data, epoch=epoch, batch_idx=batch_idx,\n trainer_basis=self.trainer_basis, writer=self.writer,\n dataset_len=len(self.train_loader.dataset))\n loss = self.criterion(output, target)\n loss_sum += loss.item()\n pred = output.argmax(1)\n correct += pred.eq(target).sum().cpu().item()\n\n if epoch < self.trainer_basis.train_basis_last_epoch:\n rec_loss_sum, rec_loss_norm_sum, equiv_loss_sum, equiv_loss_norm_sum = self.trainer_basis.get_log_sums()\n rec_loss_sum = [x / len(self.valid_loader) for x in rec_loss_sum]\n rec_loss_norm_sum = [x / len(self.valid_loader) for x in rec_loss_norm_sum]\n equiv_loss_sum = [x / len(self.valid_loader) for x in equiv_loss_sum]\n equiv_loss_norm_sum = [x / len(self.valid_loader) for x in equiv_loss_norm_sum]\n for layer_idx in range(len(rec_loss_sum)):\n str_output = '[Validation: Layer:{} Epoch:{}]:\\tRec Loss per pixel:{:.6f}\\t Norm Rec Loss per ' \\\n 
'image:{'':.6f} \\tEquiv Loss per pixel:{:.6f}\\tNorm Equiv Loss per image:{' \\\n ':.6f}'.format(layer_idx, epoch,\n rec_loss_sum[layer_idx].item(),\n rec_loss_norm_sum[layer_idx].item(),\n equiv_loss_sum[layer_idx].item(),\n equiv_loss_norm_sum[layer_idx].item(), )\n if self.verbose:\n print(str_output)\n path_to_layer_folder = os.path.join('images', self.model_name, 'basis_layer:' + str(layer_idx))\n\n output_file = os.path.join(path_to_layer_folder, 'output.txt')\n\n with open(output_file, \"a+\") as f:\n f.write(str_output)\n f.write(\"\\n\")\n\n # TODO log every epoch statistics here\n self.writer.add_scalar('basis_layer:' + str(layer_idx) + '/stats/val/reconstrution_loss',\n rec_loss_sum[layer_idx],\n global_step=len(self.train_loader.dataset) * (epoch + 1))\n self.writer.add_scalar('basis_layer:' + str(layer_idx) + '/stats/val/reconstruction_loss_norm',\n rec_loss_norm_sum[layer_idx],\n global_step=len(self.train_loader.dataset) * (epoch + 1))\n self.writer.add_scalar('basis_layer:' + str(layer_idx) + '/stats/val/equivariance_loss',\n equiv_loss_sum[layer_idx],\n global_step=len(self.train_loader.dataset) * (epoch + 1))\n self.writer.add_scalar('basis_layer:' + str(layer_idx) + '/stats/val/equivariance_loss_norm',\n equiv_loss_norm_sum[layer_idx],\n global_step=len(self.train_loader.dataset) * (epoch + 1))\n\n loss_sum /= len(self.valid_loader)\n\n accuracy = float(correct) / len(self.valid_loader.dataset)\n accuracy *= 100\n str_output = '[Validation: Epoch:{}]:\\tAccuracy:{:.6f}\\t NLL:{'':.6f}\\t duration:'.format(epoch, accuracy, loss_sum)\n str_output += str(time.time() - self.epoch_start)\n self.epoch_start = time.time()\n\n if self.verbose:\n print(str_output)\n path_to_model_output = os.path.join('images', self.model_name, 'output.txt')\n with open(path_to_model_output, \"a+\") as f:\n f.write(str_output)\n f.write(\"\\n\")\n\n # TODO log every epoch statistics here\n self.writer.add_scalar('stats/val/accuracy',\n accuracy,\n global_step=len(self.train_loader.dataset) * (epoch + 1))\n self.writer.add_scalar('stats/val/NLL',\n loss_sum,\n global_step=len(self.train_loader.dataset) * (epoch + 1))\n\n if accuracy > self.max_accuracy:\n if self.save and 'model' in self.save_aux:\n if self.trainer_basis.train_basis_last_epoch is None or \\\n epoch >= self.trainer_basis.train_basis_last_epoch:\n self.serializer.save_model(model_name=self.model_name, model=model,\n config_dict=self.config_dict, aux='best', acc=accuracy)\n self.max_accuracy = accuracy\n\n # 2 compute equivariance images, basis_plot, orthogonolaity_plot\n if (epoch+1) % 10 == 0 or epoch < 1:\n with torch.no_grad():\n data_shape_len = len(data.shape)\n rand_idx = np.random.randint(0, data.shape[0])\n one_image_subset = data[[rand_idx]].repeat(BATCH_SIZE, *[1] * (data_shape_len - 1))\n tranformation_indices = torch.tensor(\n [i % self.transformer.group.nr_group_elems for i in range(BATCH_SIZE)], dtype=torch.float,\n device=data.device)\n self.transformer.set_elements_sample(tranformation_indices)\n one_image_rotated = self.transformer.apply_sample_action_to_input(one_image_subset)\n\n model.forward_prediction(input=one_image_rotated, epoch=epoch, batch_idx=-1, trainer_basis=self.trainer_basis)\n\n if eq_loss:\n # 3 compute actual equivariance loss\n self.trainer_basis.reset_l2_normalized_total_equivariance_total_sums(len(model.basis_equiv_layers))\n with torch.no_grad():\n data_shape_len = len(data.shape)\n for batch_idx, (data, target) in enumerate(self.valid_loader):\n data = data.to(self.device)\n # for idx in 
range(data.shape[0]):\n one_image_subset = data[[0]].repeat(BATCH_SIZE, *[1] * (data_shape_len - 1))\n tranformation_indices = torch.tensor(\n [i % self.transformer.group.nr_group_elems for i in range(BATCH_SIZE)], dtype=torch.float,\n device=data.device)\n self.transformer.set_elements_sample(tranformation_indices)\n one_image_rotated = self.transformer.apply_sample_action_to_input(one_image_subset)\n\n # bnorm_adapted_input\n # because only the first 8 images are important when computing the eq loss\n one_image_rotated[self.transformer.group.nr_group_elems:] = data[self.transformer.group.nr_group_elems:]\n model.forward_prediction(input=one_image_rotated, epoch=epoch, batch_idx=batch_idx,\n trainer_basis=self.trainer_basis, eq_loss=True)\n if batch_idx == 100:\n break\n\n for layer in model.layers:\n if type(layer) == BasisEquivConvLyer and layer.filter_sz != 1:\n path_to_layer_folder = os.path.join('images', self.model_name, 'basis_layer:' + str(layer.index))\n path_to_layer_images = os.path.join(path_to_layer_folder, 'images')\n fig_number = 'epoch:' + str(epoch) + '_batch:' + str(-2) + '_' + str(layer.index) + '_'\n layer.basis_ae.basis.plot(fig_name=fig_number, path_to_layer_images=path_to_layer_images)\n\n per_layer_eq = self.trainer_basis.get_l2_normalized_total_equivariance_total_sums()\n for idx, layer_eq in enumerate(per_layer_eq):\n path_to_layer_folder = os.path.join('images', self.model_name, 'basis_layer:' + str(idx))\n with open(os.path.join(path_to_layer_folder, 'output.txt'), 'a+') as fd:\n fd.write('Layer FULL equivariance: '+str(layer_eq.item()))\n fd.write('\\n')\n if self.verbose:\n print('Layer '+str(idx)+' FULL equivariance: '+str(layer_eq.item()))\n self.writer.add_scalar('basis_layer:' + str(idx) + '/stats/val/full_equivariance',\n layer_eq.item(), global_step=len(self.train_loader.dataset) * (epoch+1))\n\n path_to_model_output = os.path.join('images', self.model_name, 'output.txt')\n with open(path_to_model_output, 'a+') as fd:\n fd.write('Model FULL equivariance: ' + ', '.join([str(i.item()) for i in per_layer_eq]))\n fd.write('\\n')\n\n if epoch + 1 == self.trainer_basis.train_basis_last_epoch:\n if self.save and 'basis' in self.save_aux:\n model = model.to(torch.device('cpu'))\n self.serializer.save_model_basis(self.model_name, model, self.config_dict)\n model = model.to(DEVICE)\n model.freeze_basis()\n\n # TODO remove this\n # with open(path_to_model_output, \"a+\") as f:\n # f.write('extra time in validation: '+str(time.time() - self.epoch_start))\n # f.write(\"\\n\")\n return loss_sum, accuracy\n\n def _train_epoch(self, model, optimizer, epoch):\n\n optimizer.zero_grad()\n start = time.time()\n for batch_idx, (data, target) in enumerate(self.train_loader):\n model.train()\n\n data = data.to(self.device)\n target = target.to(self.device)\n\n if self.rotation_augmentation_type == 'gaussian': # and self.rotation_augmentation_angle != '0'\n with torch.no_grad():\n if self.rotation_augmentation_angle == 'all':\n angles = np.random.rand(BATCH_SIZE)*math.pi\n elif self.rotation_augmentation_angle == '0':\n angles = np.zeros(BATCH_SIZE)\n else:\n t_indices = np.random.randint(0, int(360 / int(self.rotation_augmentation_angle)), BATCH_SIZE)\n base_element = float(2 * math.pi) / 360. 
* int(self.rotation_augmentation_angle)\n angles = base_element * t_indices\n\n data = self.transformer.apply_rotation_to_input(data, angles)\n\n output, ae_loss = model.forward_prediction(input=data, epoch=epoch, batch_idx=batch_idx,\n trainer_basis=self.trainer_basis)\n\n acc_loss = self.criterion(output, target)\n loss = acc_loss\n\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n if (batch_idx == 0 and epoch == 0) or (batch_idx + 1) % LOG_INTERVAL == 0:\n str_output = 'Train Epoch: {} [{}/{} ({:.0f}%)]\\tNLL: {:.6f}\\t duration:'.format(\n epoch, batch_idx * len(data), len(self.train_loader.dataset),\n 100. * batch_idx / len(self.train_loader), acc_loss.item())\n str_output += str(time.time() - start)\n start = time.time()\n if self.verbose:\n print(str_output)\n path_to_layer_output = os.path.join('images', self.model_name, 'output.txt')\n with open(path_to_layer_output, \"a+\") as f:\n f.write(str_output)\n f.write(\"\\n\")\n # TODO log train pred loss every step\n self.writer.add_scalar('stats/train/NLL',\n acc_loss.item(),\n global_step=len(self.train_loader.dataset) * (epoch) + (batch_idx + 1) * BATCH_SIZE)\n\n # TODO remove this\n # if (batch_idx == 0 and epoch == 0) or (batch_idx + 1) % LOG_INTERVAL == 0:\n # with open(path_to_layer_output, \"a+\") as f:\n # f.write('extra time in train: ' + str(time.time() - start))\n # f.write(\"\\n\")\n\n def finetune_basis(self, model):\n model.train()\n for layer in model.layers:\n # ...\n if type(layer) == BasisEquivConvLyer and layer.filter_sz != 1:\n layer.basis_ae.basis.FROZEN = False\n layer.basis_ae.basis._unnormalized_basis.requires_grad = True\n\n params_basis_layers = filter(lambda pair: '.basis.' in pair[0], model.named_parameters())\n params = [param[1] for param in params_basis_layers]\n optimizer = torch.optim.Adam(params, lr=1e-3, amsgrad=True)\n\n\n count_0_angle_images = int(BATCH_SIZE / self.transformer.group.nr_group_elems)\n angle_0_indices = torch.tensor([i * self.transformer.group.nr_group_elems for i in range(count_0_angle_images)], dtype=torch.long)\n range_after_0 = torch.tensor([i for i in range(1, self.transformer.group.nr_group_elems)], dtype=torch.long)\n indices_angles_after_0 = angle_0_indices[:, None] + range_after_0[None, :]\n\n for batch_idx, (data, target) in enumerate(self.train_loader):\n data = data.to(self.device)\n target = target.to(self.device)\n\n data_shape_len = len(data.shape)\n # select BATCH_SIZE/group_elems random indices\n rand_indices = np.random.randint(0, data.shape[0], count_0_angle_images+1)\n # select subset of images and create new axis to repeat images for each group element\n images_subset = data[rand_indices][:, None]\n images_subset = images_subset.repeat(1, self.transformer.group.nr_group_elems, *[1] * (data_shape_len - 1))\n images_subset = images_subset.view(-1, *images_subset.shape[2:])[0:BATCH_SIZE]\n tranformation_indices = torch.tensor(\n [i % self.transformer.group.nr_group_elems for i in range(BATCH_SIZE)], dtype=torch.float,\n device=data.device)\n self.transformer.set_elements_sample(tranformation_indices)\n rotated_subset = self.transformer.apply_sample_action_to_input(images_subset)\n\n activations = model.forward_prediction(input=rotated_subset, epoch=-1, batch_idx=batch_idx,\n trainer_basis=None, finetune_basis=True)\n\n backward_indices = -tranformation_indices % self.transformer.group.nr_group_elems\n rolled_back_y = self.transformer.apply_roll(activations, 2, backward_indices, 0)\n rolled_rotated_back_y = 
self.transformer.apply_sample_action_to_input(rolled_back_y,\n backward_indices)\n\n images_at_angle_0 = rolled_rotated_back_y[angle_0_indices][:, None].repeat(1, 7, *[1 for _ in range(len(rolled_rotated_back_y.shape)-1)])\n images_after_angle_0 = rolled_rotated_back_y[indices_angles_after_0.view(-1)].view(count_0_angle_images, 7, *rolled_rotated_back_y.shape[1:])\n\n images_after_angle_0 = images_after_angle_0.view(images_after_angle_0.shape[0]*images_after_angle_0.shape[1], *images_after_angle_0.shape[2:])\n images_at_angle_0 = images_at_angle_0.view(images_at_angle_0.shape[0] * images_at_angle_0.shape[1], *images_at_angle_0.shape[2:])\n l2_per_pixel, normalized_equiv_error, _ = Loss.get_normalized_l2_loss_at_non_zero_indices(images_after_angle_0, images_at_angle_0, normalized_l2=True)\n\n # images_at_angle_0 = rolled_rotated_back_y[angle_0_indices][:, None]\n # images_after_angle_0 = rolled_rotated_back_y[indices_angles_after_0.view(-1)].view(count_0_angle_images,\n # self.transformer.group.nr_group_elems - 1,\n # *rolled_rotated_back_y.shape[\n # 1:])\n # error = (images_at_angle_0.detach() - images_after_angle_0)\n # error = error.pow(2).view(error.shape[0], 7, -1).sum(-1)\n # error_norm = images_at_angle_0.pow(2).view(images_at_angle_0.shape[0], 1, -1).sum(-1).sqrt() * \\\n # images_after_angle_0.pow(2).view(images_after_angle_0.shape[0], 7, -1).sum(-1).sqrt()\n # normalized_equiv_error = error / error_norm\n # normalized_equiv_error = normalized_equiv_error.mean()\n\n optimizer.zero_grad()\n normalized_equiv_error.backward()\n optimizer.step()\n\n self.writer.add_scalar('stats/finetune/loss',\n normalized_equiv_error.item(),\n global_step=len(self.train_loader.dataset) * (0) + (batch_idx + 1) * BATCH_SIZE)\n\n if batch_idx == self.finetune_batches-1:\n print('Last finetune loss: '+str(normalized_equiv_error.item()))\n break\n\n def _overfit_small_subset_train(self, model, transformer, optimizer, epoch, criterion,\n verbose):\n raise NotImplementedError\n model.train()\n\n if epoch == 0:\n for batch_idx, (data, target) in enumerate(self.train_loader):\n self.batch_idx = 0\n self.data = data\n self.target = target\n break\n\n acc_loss_vector, equiv_loss_vector, orthg_loss_vector = [], [], []\n\n optimizer.zero_grad()\n data = self.data.to(self.device)\n target = self.target.to(self.device)\n\n transformer.set_elements_sample()\n rot_data = transformer.apply_sample_action_to_input(data)\n output = model(data, rot_data)\n\n # acc_loss = F.nll_loss(output, target)\n acc_loss = criterion(output, target)\n equiv_loss, orthg_loss = model.get_and_reset_loss()\n\n acc_loss_vector.append(acc_loss.item() if acc_loss is not None else None)\n equiv_loss_vector.append(equiv_loss.item() if equiv_loss is not None else None)\n orthg_loss_vector.append(orthg_loss.item() if orthg_loss is not None else None)\n\n # loss = equiv_loss\n loss = acc_loss\n loss = torch.add(loss, equiv_loss)\n loss = torch.add(loss, orthg_loss)\n\n loss.backward()\n optimizer.step()\n if verbose and ((self.batch_idx == 0 and epoch == 0) or (self.batch_idx + 1) % LOG_INTERVAL == 0):\n print(\n 'Train Epoch: {} [{}/{} ({:.0f}%)]\\tNLL: {:.6f}\\tEquiv Loss: {:.6f}\\tOrthg Loss: {:.6f}'.format(\n epoch, self.batch_idx * len(data), len(self.train_loader.dataset),\n 100. 
* self.batch_idx / len(self.train_loader), acc_loss.item(), equiv_loss.item(),\n orthg_loss.item()))\n\n return acc_loss_vector, equiv_loss_vector, orthg_loss_vector\n\n\nclass TrainerReconstruction:\n def __init__(self, device, dataset, verbose, save, save_aux):\n self.dataset_name = dataset\n self.train_loader, self.valid_loader = get_data(dataset=dataset)\n self.device = device\n self.serializer = Serializer()\n self.target = 'reconstruction'\n self.trainer_basis = None\n self.writer = None\n self.verbose = verbose\n self.save = save\n self.save_aux = save_aux\n self.model_name = None\n self.config_dict = None\n self.transformer = None\n\n def set_model(self, model, config_dict, transformer):\n model_name = get_modelname(config_dict, self.target)\n self.config_dict = config_dict\n self.transformer = transformer\n if os.path.exists('images/' + model_name):\n print(\"model already trained\")\n # return\n # shutil.rmtree('./images/' + model_name)\n # print(\"overwriting old output file\")\n i = 0\n while os.path.exists('images/' + model_name + '_'+str(i)):\n i += 1\n model_name += '_' + str(i)\n path = os.path.join('images', model_name)\n os.mkdir(path)\n for idx in range(len(config_dict.basis_equiv_layers)):\n os.mkdir(os.path.join(path, 'basis_layer:' + str(idx)))\n os.mkdir(os.path.join(path, 'basis_layer:' + str(idx), 'images'))\n os.mkdir(os.path.join(path, 'layer:' + str(idx)))\n os.mkdir(os.path.join(path, 'layer:' + str(idx), 'images'))\n\n self.serializer.save_config(model_name, config_dict)\n\n path_to_net_folder = os.path.join('images', str(model_name))\n self.writer = SummaryWriter(log_dir=path_to_net_folder)\n\n self.trainer_basis = TrainerBasisAE(config_dict.equivariance_rate, config_dict.orthogonality_rate,\n verbose=self.verbose, model_name=model_name,\n dataset_len=len(self.train_loader.dataset),\n train_loader_len=len(self.train_loader),\n log_writer=self.writer)\n self.model_name = model_name\n print('Device:' + str(DEVICE))\n print('\\n\\n --model_name created:' + model_name)\n with open(os.path.join(path_to_net_folder, 'model_str.txt'), 'w+') as fd:\n fd.write('Device:' + str(DEVICE))\n fd.write('\\n')\n fd.write(model_name)\n print(model)\n\n def train(self, model, transformer, config_dict):\n # self.set_model(model, config_dict, transformer)\n\n model = model.to(self.device)\n\n self.validate(model, epoch=-1)\n\n # Train\n for epoch in range(0, config_dict.epochs):\n start = time.time()\n self._train_epoch(model=model, epoch=epoch)\n self.validate(model, epoch)\n end = time.time()\n if self.verbose:\n print(\"epoch duration in seconds: \" + str(end - start))\n model.to(torch.device('cpu'))\n\n if self.save and 'basis' in self.save_aux:\n self.serializer.save_model_basis(self.model_name, model, config_dict)\n if self.verbose:\n print(\"saved model \" + self.model_name)\n\n self.model_name = None\n self.config_dict = None\n return model\n\n def _train_epoch(self, model, epoch):\n\n for batch_idx, (data, target) in enumerate(self.train_loader):\n model.train()\n\n data = data.to(self.device)\n model.forward_reconstruction(input=data, epoch=epoch, batch_idx=batch_idx, trainer_basis=self.trainer_basis)\n\n def validate(self, model, epoch):\n model.eval()\n self.trainer_basis.reset_log_sums(model.len_non1_basis_equiv_layers)\n # 1 compute validation\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(self.valid_loader):\n data = data.to(self.device)\n # target = target.to(self.device)\n\n with torch.no_grad():\n _ = model.forward_reconstruction(data, 
epoch=epoch, batch_idx=batch_idx,\n trainer_basis=self.trainer_basis)\n\n rec_loss_sum, rec_loss_norm_sum, equiv_loss_sum, equiv_loss_norm_sum = self.trainer_basis.get_log_sums()\n rec_loss_sum = [x / len(self.valid_loader) for x in rec_loss_sum]\n rec_loss_norm_sum = [x / len(self.valid_loader) for x in rec_loss_norm_sum]\n equiv_loss_sum = [x / len(self.valid_loader) for x in equiv_loss_sum]\n equiv_loss_norm_sum = [x / len(self.valid_loader) for x in equiv_loss_norm_sum]\n\n for layer_idx in range(len(rec_loss_sum)):\n str_output = '[Validation: Layer:{} Epoch:{}]:\\tRec Loss per pixel:{:.6f}\\t Norm Rec Loss per ' \\\n 'image:{'':.6f} \\tEquiv Loss per pixel:{:.6f}\\tNorm Equiv Loss per image:{' \\\n ':.6f}'.format(layer_idx, epoch,\n rec_loss_sum[layer_idx].item(),\n rec_loss_norm_sum[layer_idx].item(),\n equiv_loss_sum[layer_idx].item(),\n equiv_loss_norm_sum[layer_idx].item(), )\n if self.verbose:\n print(str_output)\n path_to_layer_folder = os.path.join('images', self.model_name, 'basis_layer:' + str(layer_idx))\n\n output_file = os.path.join(path_to_layer_folder, 'output.txt')\n\n with open(output_file, \"a+\") as f:\n f.write(str_output)\n f.write(\"\\n\")\n\n # TODO log every epoch statistics here\n self.writer.add_scalar('basis_layer:' + str(layer_idx) + '/stats/val/reconstrution_loss',\n rec_loss_sum[layer_idx],\n global_step=len(self.train_loader.dataset) * (epoch + 1))\n self.writer.add_scalar('basis_layer:' + str(layer_idx) + '/stats/val/reconstruction_loss_norm',\n rec_loss_norm_sum[layer_idx],\n global_step=len(self.train_loader.dataset) * (epoch + 1))\n self.writer.add_scalar('basis_layer:' + str(layer_idx) + '/stats/val/equivariance_loss',\n equiv_loss_sum[layer_idx],\n global_step=len(self.train_loader.dataset) * (epoch + 1))\n self.writer.add_scalar('basis_layer:' + str(layer_idx) + '/stats/val/equivariance_loss_norm',\n equiv_loss_norm_sum[layer_idx],\n global_step=len(self.train_loader.dataset) * (epoch + 1))\n\n # 2 compute equivariance images, basis_plot, orthogonolaity_plot\n with torch.no_grad():\n data_shape_len = len(data.shape)\n rand_idx = np.random.randint(0, data.shape[0])\n one_image_subset = data[[rand_idx]].repeat(BATCH_SIZE, *[1] * (data_shape_len - 1))\n tranformation_indices = torch.tensor(\n [i % self.transformer.group.nr_group_elems for i in range(BATCH_SIZE)], dtype=torch.float,\n device=data.device)\n self.transformer.set_elements_sample(tranformation_indices)\n data = self.transformer.apply_sample_action_to_input(one_image_subset)\n\n model.forward_reconstruction(input=data, epoch=epoch, batch_idx=-1, trainer_basis=self.trainer_basis)\n\n def load_from_basis(self, model, list_paths_to_basis):\n self.serializer.load_model_basis(model, list_paths_to_basis)\n model = model.to(self.device)\n return model\n","repo_name":"NichitaDiaconu/Learning-to-Convolve","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":44460,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"8615791575","text":"# my code\n\nn = int(input())\nlists = list(map(int, input().split()))\n\narr = list(sorted(set(lists)))\ndictionary = {arr[i]: i for i in range(len(arr))}\n\nfor j in lists:\n print(dictionary[j], end = ' ')\n","repo_name":"inni-iii/Algorithm","sub_path":"coding with python/baekjoon/18870.py","file_name":"18870.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"33915596718","text":"\"\"\"\n整数数组 nums 按升序排列,数组中的值 互不相同 。\n\n在传递给函数之前,nums 在预先未知的某个下标 k(0 <= k < nums.length)上进行了 旋转,使数组变为 [nums[k], nums[k+1], ..., nums[n-1], nums[0], nums[1], ..., nums[k-1]](下标 从 0 开始 计数)。例如, [0,1,2,4,5,6,7] 在下标 3 处经旋转后可能变为 [4,5,6,7,0,1,2] 。\n\n给你 旋转后 的数组 nums 和一个整数 target ,如果 nums 中存在这个目标值 target ,则返回它的下标,否则返回 -1 。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/search-in-rotated-sorted-array\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\n\n\ndef search(nums: list, target: int) -> int:\n \"\"\"\n 暴力解法\n :param nums: 输入数组\n :param target: 目标值\n :return: index\n \"\"\"\n if target in nums:\n return nums.index(target)\n return -1\n\n\ndef search_plus(nums: list, target: int) -> int:\n \"\"\"\n 二分查找,因为 list 是部分有序,因此不能直接使用二分查找法\n 平均的时间复杂度为:log(n)\n 最坏的时间复杂度为:O(n)\n :param nums: 输入数组\n :param target: 目标值\n :return: index\n \"\"\"\n left, right = 0, len(nums)\n while left <= right:\n mid_index = (left + right) // 2\n if nums[mid_index] == target:\n return mid_index\n elif nums[mid_index] > target:\n \"\"\" \n 当前中间值大于目标值,但是目标值在 [left,mid_index] 这样的区间,\n 因此所可能存在的范围在 [left,mid_index_1] 区间内 \n \"\"\"\n if nums[left] <= target < nums[mid_index]:\n right = mid_index - 1\n else:\n left = mid_index + 1\n else:\n \"\"\" 同理 \"\"\"\n if nums[mid_index] < target <= nums[right]:\n left += 1\n else:\n right -= 1\n return -1\n\n\nif __name__ == '__main__':\n # nums = [0, 1, 2, 4, 5, 6, 7]\n # target = 4\n nums = [4, 5, 6, 7, 0, 1, 2]\n target = 0\n\n index = search_plus(nums, target)\n print('index pos', index)\n","repo_name":"Zswdhy/LeetCode","sub_path":"100/33.搜索旋转排序数组.py","file_name":"33.搜索旋转排序数组.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19147720052","text":"# -*- coding: utf-8 -*-\n\n# Есть файл с протоколом регистраций пользователей на сайте - registrations.txt\n# Каждая строка содержит: ИМЯ ЕМЕЙЛ ВОЗРАСТ, разделенные пробелами\n# Например:\n# Василий test@test.ru 27\n#\n# Надо проверить данные из файла, для каждой строки:\n# - присутсвуют все три поля\n# - поле имени содержит только буквы\n# - поле емейл содержит @ и .\n# - поле возраст является числом от 10 до 99\n#\n# В результате проверки нужно сформировать два файла\n# - registrations_good.log для правильных данных, записывать строки как есть\n# - registrations_bad.log для ошибочных, записывать строку и вид ошибки.\n#\n# Для валидации строки данных написать метод, который может выкидывать исключения:\n# - НЕ присутсвуют все три поля: ValueError\n# - поле имени содержит НЕ только буквы: NotNameError (кастомное исключение)\n# - поле емейл НЕ содержит @ и .(точку): NotEmailError (кастомное исключение)\n# - поле возраст НЕ является числом от 10 до 99: ValueError\n# Вызов метода обернуть в try-except.\nimport os.path\n\n\ndef check_file(line):\n name, email, age = line.split(' ')\n age = int(age)\n if name is None or email is None or age is None:\n raise ValueError('ValueError')\n elif not name.isalpha():\n raise BaseException('NotNameError')\n elif not ('@' or '.') in email:\n raise BaseException('NotEmailError')\n elif not 10 <= age <= 99:\n raise ValueError('ValueError')\n\n else:\n # with open('registrations_good.log', mode='w', encoding='utf8') as log_good:\n # log_good.write(f'{line}\\n')\n log_good = open('registrations_good.log', 'a', encoding='utf8')\n log_good.write(f'{line}\\n')\n log_good.close()\n\n\ncounter = 0\n# log_good_is_exist = os.path.exists('registrations_good.log')\n# 
log_bad_is_exist = os.path.exists('registrations_bad.log')\n# if log_good_is_exist == True and log_bad_is_exist == True:\n# log_good_is_exist = os.path.\nif os.path.isfile('registrations_good.log') and os.path.isfile('registrations_bad.log'):\n os.remove('registrations_good.log')\n os.remove('registrations_bad.log')\n print(\"success\")\nelse:\n print(\"Files doesn't exists!\")\nwith open('registrations.txt', mode='r', encoding='utf8') as file_registrations:\n for line in file_registrations:\n counter += 1\n line = line[:-1]\n try:\n check_file(line)\n ###ЗАПИСЬ\n except ValueError as exc:\n if 'unpack' in exc.args[0]:\n print(f'Не хватает операндов {exc.args}')\n log_bad = open('registrations_bad.log', 'a', encoding='utf8')\n log_bad.write(f'{counter} {line} - {exc.args}\\n') # (Не присутствуют все три поля)\n log_bad.close()\n else:\n print(f'Не входит в возрастные рамки {exc} в строке {line}')\n log_bad = open('registrations_bad.log', 'a', encoding='utf8')\n log_bad.write(f'{counter} {line} - {exc.args}\\n')\n log_bad.close()\n except BaseException as exc:\n print(f'Исключение типа {exc.args}')\n log_bad = open('registrations_bad.log', 'a', encoding='utf8')\n log_bad.write(f'{counter} {line} - {exc.args}\\n')\n log_bad.close()\n # except BaseException as exc:\n # print(f'Исключение типа {exc.args}')\n # log_bad = open('registrations_bad.log', 'a', encoding='utf8')\n # log_bad.write(f'{line} - {exc.args}\\n ')\n # log_bad.close()\n","repo_name":"AhhaerDeLacum/Python-Course-","sub_path":"lesson_010/03_registration_log.py","file_name":"03_registration_log.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71291934563","text":"import os,sys\n\nproject_name = \"face_eyeballs\"\nweights_save = \"/WORKING/modelSale/face_eyeballs/weights/\"\nclassList = { '0':0, 'eye':1, 'nose':2, 'mouth':3, 'face':4, 'head':5, 'body':6 }\ncfgFolder = \"/WORKING/modelSale/face_mask_eyeball/aug_20220613/cfg_train\"\n\n'''\nclassList = { 'D00':0, 'D10':1, 'D20':2, 'D21':3, 'D30':4 ,'D31':5, 'D40':6, 'D41':7, 'D42':8, 'D99':9 }\ncfgFolder = \"/WORKING/WORKS/road_defects_2022_04/aug_20220408/cfg_train\"\n\nclassList = { \"balaclava_ski_mask\":0, \"eyeglasses\":1, \"face_no_mask\":2, \"face_other_covering\":3, \"face_shield\":4, \\\n \"face_with_mask\":5, \"face_with_mask_incorrect\":6, \"gas_mask\":7, \"goggles\":8, \"hair_net\":9, \"hat\":10, \\\n \"helmet\":11, \"hijab_niqab\":12, \"hood\":13, \"mask_colorful\":14, \"mask_surgical\":15, \"other\":16, \\\n \"scarf_bandana\":17, \"sunglasses\":18, \"turban\":19 }\n'''\ndark_home = \"/home/chtseng/frameworks/darknet\"\nyolofastest_home = \"/home/chtseng/frameworks/darknet\"\nyolov5_home = \"/home/chtseng/frameworks/yolov5\"\nyolor_home = \"/home/chtseng/frameworks/yolor\"\n\ncfgs = {\n \"yolov3\": [\"cfg/yolov3/yolov3.cfg\", \"pretrained/yolov3/darknet53.conv.74\", '608_9', 64, 32, 3],\n \"yolov3-tiny\": [\"cfg/yolov3/yolov3-tiny.cfg\", \"pretrained/yolov3/yolov3-tiny.conv.15\", '416_6', 70, 2, 3],\n \"yolov4-tiny\": [\"cfg/yolov4/yolov4-tiny.cfg\", \"pretrained/yolov4/yolov4-tiny.conv.29\", '416_6', 64, 1, 3],\n \"yolov4-p6\": [\"cfg/yolov4/yolov4-p6.cfg\", 'pretrained/yolov4-p6.conv.289', '1280_12', 64, 64, 4],\n \"yolov4\": [\"cfg/yolov4/yolov4.cfg\", \"pretrained/yolov4/yolov4.conv.137\", '608_9', 64, 64, 3],\n \"yolo-fastest\": [\"cfg/yolo-fastest/yolo-fastest-1.1.cfg\", \"pretrained/yolo-fastest/yolo-fastest.conv.109\", '320_6', 160, 2, 3],\n 
\"yolo-fastest-xl\": [\"cfg/yolo-fastest/yolo-fastest-1.1-xl.cfg\", \"pretrained/yolo-fastest/yolo-fastest-xl.conv.109\", '320_6', 120, 2, 3],\n \"yolov5n\": [\"cfg/yolov5/yolov5n.yaml\", \"yolov5n.pt\", '640_9', -1, 1, 3],\n \"yolov5s\": [\"cfg/yolov5/yolov5s.yaml\", \"yolov5s.pt\", '640_9', -1, 1, 3],\n \"yolov5m\": [\"cfg/yolov5/yolov5m.yaml\", \"yolov5m.pt\", '640_9', -1, 1, 3],\n \"yolov5l\": [\"cfg/yolov5/yolov5l.yaml\", \"yolov5l.pt\", '640_9', -1, 1, 3],\n \"yolov5x\": [\"cfg/yolov5/yolov5x.yaml\", \"yolov5x.pt\", '640_9', -1, 1, 3],\n #\"yolov5s-p6_640\": [\"cfg/yolov5/yolov5s6.yaml\", \"yolov5s6.pt\", '640_12', 64, 1, 3],\n #\"yolov5s-p6_960\": [\"cfg/yolov5/yolov5s6.yaml\", \"yolov5s6.pt\", '960_12', 48, 1, 3],\n \"yolov5x-p6\": [\"cfg/yolov5/yolov5x6.yaml\", \"yolov5x6.pt\", '1280_12', -1, 1, 3],\n #\"yolor_csp\": [\"cfg/yolor/yolor_csp.cfg\", \"pretrained/yolor/yolor_csp.pt\", \"640_9\", 64, 16, 3],\n #\"yolor_csp_x_star\": [\"cfg/yolor/yolor_csp_x.cfg\", \"pretrained/yolor/yolor_csp_x_star.pt\", \"640_9\", 66, 33, 3],\n \"yolor_p6\": [\"cfg/yolor/yolor_p6.cfg\", \"pretrained/yolor/yolor-p6.pt\", \"1280_12\", 66, 66, 3],\n \"yolor_w6\": [\"cfg/yolor/yolor_w6.cfg\", \"pretrained/yolor/yolor-w6.pt\", \"1280_12\", 66, 66, 3],\n #\"yolor_yolov4_p6\": [\"cfg/yolor/yolor_p6.cfg\", '', \"640_12\", 66, 66, 4],\n #\"yolor_yolov4_p7\": [\"cfg/yolor/yolor_p6.cfg\", '', \"640_20\", 66, 66, 4]\n}\n\nyolo_config = {\n '320_6': \"13, 9, 34, 25, 79, 97, 139,146, 182,239, 290,295\",\n '416_6': \"17, 12, 44, 33, 103,126, 181,190, 237,311, 377,383\",\n '320_9': \"10, 8, 28, 13, 19, 31, 48, 33, 84, 98, 108,179, 162,138, 192,247, 293,296\",\n '416_9': \"13, 10, 36, 17, 24, 40, 62, 43, 110,127, 140,233, 211,179, 249,322, 381,385\",\n '512_9': \"16, 13, 45, 21, 30, 49, 76, 53, 135,157, 172,287, 260,220, 307,396, 469,474\",\n '608_9': \"19, 15, 53, 24, 36, 58, 91, 63, 160,186, 204,341, 309,261, 365,470, 557,563\",\n '640_9': \"20, 16, 56, 26, 38, 61, 96, 67, 171,198, 258,309, 432,367, 340,549, 583,598\",\n '960_9': \"29, 24, 84, 39, 56, 92, 143,100, 253,294, 323,538, 487,413, 576,742, 879,889\",\n '1280_9': \"39, 32, 112, 51, 75,123, 191,133, 337,391, 430,717, 650,550, 768,989, 1173,1185\",\n '640_12': \"18, 15, 43, 23, 40, 63, 86, 36, 98, 91, 162,190, 188,333, 288,242, 298,396, 467,384, 379,580, 596,602\",\n '960_12': \"26, 23, 64, 35, 60, 94, 129, 54, 144,135, 246,275, 271,472, 432,377, 407,681, 643,546, 615,870, 901,897\",\n '1280_12': \"32, 28, 73, 42, 68,113, 141, 60, 130,174, 231,106, 317,390, 575,499, 431,742, 846,747, 712,1121, 1176,1196\",\n '1536_12': \"39, 34, 87, 50, 82,135, 170, 72, 156,209, 278,129, 381,468, 691,599, 518,890, 1015,896, 855,1346, 1411,1435\"\n}\n\n#yolotiny_config = {\n# '320': \"7, 17, 11, 23, 20, 37, 53, 39, 85, 48, 170,105\",\n# '416': \"9, 22, 15, 30, 27, 48, 68, 51, 110, 62, 221,137\",\n#}\n\n#---------------------------------------------------------------------\n\nclassNum = len(classList)\n#filterNum = (classNum + 5) * 3\n\n# yolov3: 608, yolov3-tiny:416, yolov4:608, yolov4-tiny:416, yolo-fastest:320,\n# yolo-fastest-xl:320, yolov4x-mish:640, yolov4-csp:512, yolov4-cspx-p7:1536\n# [CFG FILE, PRE-TRAINED WEIGHTS, SIZE, BATCH, DIVISION-BATCH, MASKS]\ncfgs_total = {\n \"yolov3\": [\"cfg/yolov3/yolov3.cfg\", \"pretrained/yolov3/darknet53.conv.74\", '608_9', 64, 32, 3],\n \"yolov3-tiny\": [\"cfg/yolov3/yolov3-tiny.cfg\", \"pretrained/yolov3/yolov3-tiny.conv.15\", '416_6', 66, 2, 3],\n \"yolov3-spp\": [\"cfg/yolov3/yolov3-spp.cfg\", 
\"pretrained/yolov3/yolov3-spp.weights\", '608_9', 64, 32, 3],\n \"yolov4\": [\"cfg/yolov4/yolov4.cfg\", \"pretrained/yolov4/yolov4.conv.137\", '608_9', 64, 64, 3],\n \"yolov4-tiny\": [\"cfg/yolov4/yolov4-tiny.cfg\", \"pretrained/yolov4/yolov4-tiny.conv.29\", '416_6', 72, 1, 3],\n \"yolo-fastest\": [\"cfg/yolo-fastest/yolo-fastest-1.1.cfg\", \"pretrained/yolo-fastest/yolo-fastest.conv.109\", '320_6', 160, 2, 3],\n \"yolo-fastest-xl\": [\"cfg/yolo-fastest/yolo-fastest-1.1-xl.cfg\", \"pretrained/yolo-fastest/yolo-fastest-xl.conv.109\", '320_6', 120, 2, 3],\n \"yolov4x-mish\": [\"cfg/yolov4/yolov4x-mish.cfg\", '', '640_9', 64, 64, 3],\n \"yolov4-csp\": [\"cfg/yolov4/yolov4-csp.cfg\", '', '640_9', 64, 64, 3],\n \"yolov4-p5\": [\"cfg/yolov4/yolov4-p5.cfg\", 'pretrained/yolov4-p5.conv.232', '896_12', 64, 64, 4],\n \"yolov4-p6\": [\"cfg/yolov4/yolov4-p6.cfg\", 'pretrained/yolov4-p6.conv.289', '1280_12', 64, 64, 4],\n \"yolov5n\": [\"cfg/yolov5/yolov5n.yaml\", \"yolov5n.pt\", '640_9', 128, 1, 3],\n \"yolov5s\": [\"cfg/yolov5/yolov5s.yaml\", \"yolov5s.pt\", '640_9', 64, 1, 3],\n \"yolov5m\": [\"cfg/yolov5/yolov5m.yaml\", \"yolov5m.pt\", '640_9', 24, 1, 3],\n \"yolov5l\": [\"cfg/yolov5/yolov5l.yaml\", \"yolov5l.pt\", '640_9', 12, 1, 3],\n \"yolov5x\": [\"cfg/yolov5/yolov5x.yaml\", \"yolov5x.pt\", '640_9', 8, 1, 3],\n \"yolov5s-p6_640\": [\"cfg/yolov5/yolov5s6.yaml\", \"yolov5s6.pt\", '640_12', 48, 1, 3],\n \"yolov5s-p6_960\": [\"cfg/yolov5/yolov5s6.yaml\", \"yolov5s6.pt\", '960_12', 48, 1, 3],\n \"yolov5s-p6\": [\"cfg/yolov5/yolov5s6.yaml\", \"yolov5s6.pt\", '1280_12', 48, 1, 3],\n \"yolov5m-p6\": [\"cfg/yolov5/yolov5m6.yaml\", \"yolov5m6.pt\", '1280_12', 32, 1, 3],\n \"yolov5l-p6\": [\"cfg/yolov5/yolov5l6.yaml\", \"yolov5l6.pt\", '1280_12', 24, 1, 3],\n \"yolov5x-p6\": [\"cfg/yolov5/yolov5x6.yaml\", \"yolov5x6.pt\", '1280_12', 12, 1, 3],\n \"yolor_csp\": [\"cfg/yolor/yolor_csp.cfg\", \"pretrained/yolor/yolor_csp.pt\", \"640_9\", 64, 16, 3],\n \"yolor_csp_star\": [\"cfg/yolor/yolor_csp.cfg\", \"pretrained/yolor/yolor_csp_star.pt\", \"640_9\", 64, 16, 3],\n \"yolor_csp_x\": [\"cfg/yolor/yolor_csp_x.cfg\", \"pretrained/yolor/yolor_csp_x.pt\", \"640_9\", 66, 22, 3],\n \"yolor_csp_x_star\": [\"cfg/yolor/yolor_csp_x.cfg\", \"pretrained/yolor/yolor_csp_x_star.pt\", \"640_9\", 66, 33, 3],\n \"yolor_p6\": [\"cfg/yolor/yolor_p6.cfg\", \"pretrained/yolor/yolor_p6.pt\", \"1280_12\", 66, 66, 3],\n \"yolor_w6\": [\"cfg/yolor/yolor_w6.cfg\", \"pretrained/yolor/yolor_w6.pt\", \"1280_12\", 66, 66, 3],\n \"yolor_yolov4_csp\": [\"cfg/yolor/yolov4_csp.cfg\", '', \"640_9\", 66, 33, 3],\n \"yolor_yolov4_csp_x\": [\"cfg/yolor/yolov4_csp_x.cfg\", '', \"640_9\", 66, 66, 3],\n \"yolor_yolov4_p6\": [\"cfg/yolor/yolov4_p6.cfg\", '', \"1280_16\", 66,66, 4],\n \"yolor_yolov4_p7\": [\"cfg/yolor/yolov4_p7.cfg\", '', \"1536_20\", 66,66, 4]\n}\n\npwd = os.getcwd()\n\n#make dataset yaml for YOLOV5\nwith open('cfg/data_yolov5.yaml') as file:\n dataset_content = file.read()\nfile.close\n\nclass_txt = '['\nfor i, cname in enumerate(classList):\n class_txt += \"'{}'\".format(cname)\n if i<(len(classList)-1): class_txt += ', '\nclass_txt += ']'\n\ndataset_content = dataset_content.replace(\"{TRAIN_LIST}\", os.path.join(cfgFolder,'train.txt'))\ndataset_content = dataset_content.replace(\"{TEST_LIST}\", os.path.join(cfgFolder,'test.txt'))\ndataset_content = dataset_content.replace(\"{CLASSES}\", str(classNum))\ndataset_content = dataset_content.replace(\"{CLASS_LIST}\", class_txt)\n\nfile = open(os.path.join(cfgFolder, 'ds_yolov5.yaml'), 
\"w\")\nfile.write(dataset_content)\nfile.close\n#---- end\n\n#make dataset yaml for YOLOR\nwith open('cfg/yolor_data.yaml') as file:\n dataset_content = file.read()\nfile.close\n\nclass_txt = '['\nfor i, cname in enumerate(classList):\n class_txt += \"'{}'\".format(cname)\n if i<(len(classList)-1): class_txt += ', '\nclass_txt += ']'\n\ndataset_content = dataset_content.replace(\"{TRAIN_LIST}\", os.path.join(cfgFolder,'train.txt'))\ndataset_content = dataset_content.replace(\"{TEST_LIST}\", os.path.join(cfgFolder,'test.txt'))\ndataset_content = dataset_content.replace(\"{CLASSES}\", str(classNum))\ndataset_content = dataset_content.replace(\"{CLASS_LIST}\", class_txt)\n\nfile = open(os.path.join(cfgFolder, 'ds_yolor.yaml'), \"w\")\nfile.write(dataset_content)\nfile.close\n#---- end\n\n\ntfile = open( os.path.join(cfgFolder, 'train_cmd.txt'), 'w')\n\nfor cfg_name in cfgs:\n if(cfg_name[:6] == 'yolov5'):\n anchors = yolo_config[cfgs[cfg_name][2]]\n anch_list = anchors.split(',')\n\n anchors1, anchors2, anchors3, anchors4 = \"\", \"\", \"\", \"\"\n for a in range(0,6):\n anchors1 += anch_list[a]\n if a<6: anchors1 += ','\n for a in range(6,12):\n anchors2 += anch_list[a]\n if a<12: anchors2 += ','\n for a in range(12,18):\n anchors3 += anch_list[a]\n if a<18: anchors3 += ','\n if len(anch_list) > 18:\n for a in range(18,24):\n anchors4 += anch_list[a]\n if a<24: anchors4 += ','\n\n with open(cfgs[cfg_name][0]) as file:\n file_content = file.read()\n file.close\n\n #file_content = open( cfgs[cfg_name][0], 'w')\n\n file_updated = file_content.replace(\"{CLASSES}\", str(classNum))\n file_updated = file_updated.replace(\"{ANCHOR1}\", str(anchors1))\n file_updated = file_updated.replace(\"{ANCHOR2}\", str(anchors2))\n file_updated = file_updated.replace(\"{ANCHOR3}\", str(anchors3))\n if len(anch_list) >= 18:\n file_updated = file_updated.replace(\"{ANCHOR4}\", str(anchors4))\n\n cfg_file = cfg_name + '.yaml'\n path_project = os.path.join( weights_save, project_name )\n path_project_name = os.path.join(path_project, cfg_name)\n\n exec_cmd = \" cd {}\\n $(which python) train.py \\\\\\n --data {} \\\\\\n --imgsz {} \\\\\\n --batch {} \\\\\\n --epochs 300 \\\\\\n --project {} \\\\\\n --name {} \\\\\\n --device {} \\\\\\n --weights {}\".format( \\\n yolov5_home, os.path.join(cfgFolder, 'ds_yolov5.yaml'), cfgs[cfg_name][2].split('_')[0], \\\n cfgs[cfg_name][3], path_project, path_project_name, '{GPU}', cfgs[cfg_name][1])\n\n #exec_cmd += \" --freeze 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14\"\n\n else:\n with open(cfgs[cfg_name][0]) as file:\n file_content = file.read()\n file.close\n\n batch = cfgs[cfg_name][3]\n div = cfgs[cfg_name][4]\n\n #if(cfg_name in [\"yolov3-tiny\", \"yolov4-tiny\", \"yolo-fastest\", \"yolo-fastest-xl\"]):\n # anch = yolotiny_config[str(cfgs[cfg_name][2])]\n\n #else:\n anch = yolo_config[str(cfgs[cfg_name][2])]\n filterNum = (classNum + 5) * cfgs[cfg_name][5]\n\n file_updated = file_content.replace(\"{BATCH}\", str(batch))\n file_updated = file_updated.replace(\"{SUBDIVISIONS}\", str(div))\n file_updated = file_updated.replace(\"{SIZE}\", str(cfgs[cfg_name][2].split('_')[0]))\n file_updated = file_updated.replace(\"{FILTERS}\", str(filterNum))\n file_updated = file_updated.replace(\"{CLASSES}\", str(classNum))\n file_updated = file_updated.replace(\"{ANCHORS}\", anch)\n\n cfg_file = cfg_name +'.cfg'\n\n if (cfg_name in [\"yolo-fastest\", \"yolo-fastest-xl\"]):\n exec_cmd = \"{}/darknet detector train \\\\\\n {} \\\\\\n {} \\\\\\n {} \\\\\\n -dont_show \\\\\\n -mjpeg_port {} \\\\\\n 
-clear \\\\\\n -gpus {}\".format(\\\n yolofastest_home, os.path.join(cfgFolder,'obj.data'), os.path.join(cfgFolder,cfg_name+'.cfg'), os.path.join(pwd,cfgs[cfg_name][1]), '{DARKNET_PORT}', '{GPU}')\n\n elif(cfg_name[:5] == 'yolor'):\n if cfgs[cfg_name][2].split('_')[0] in ['640','512']:\n hyp_file = os.path.join(pwd, 'cfg', 'yolor', 'hyp.scratch.640.yaml')\n elif cfgs[cfg_name][2].split('_')[0] == '1280':\n hyp_file = os.path.join(pwd, 'cfg', 'yolor', 'hyp.scratch.1280.yaml')\n\n if cfgs[cfg_name][1] != '':\n weights_file = os.path.join(pwd,cfgs[cfg_name][1])\n else:\n weights_file = ''\n\n exec_cmd = \"cd {}\\\\\\n $(which python) train.py --batch-size {} --img {} {} --data {} --cfg {} --weights '{}' --device {} --name {} --hyp {} --epochs {}\".format(\\\n yolor_home, cfgs[cfg_name][3], cfgs[cfg_name][2].split('_')[0], cfgs[cfg_name][2].split('_')[0], os.path.join(cfgFolder, 'ds_yolor.yaml'), \\\n os.path.join(cfgFolder,cfg_name+'.cfg'), weights_file, '{GPU}', cfg_name, hyp_file, 300)\n\n else:\n exec_cmd = \"{}/darknet detector train \\\\\\n {} \\\\\\n {} \\\\\\n {} \\\\\\n -dont_show \\\\\\n -mjpeg_port {} \\\\\\n -clear \\\\\\n -gpus {}\".format(\\\n dark_home, os.path.join(cfgFolder,'obj.data'), os.path.join(cfgFolder,cfg_name+'.cfg'), os.path.join(pwd,cfgs[cfg_name][1]), '{DARKNET_PORT}', '{GPU}' )\n\n\n file = open(os.path.join(cfgFolder, cfg_file), \"w\")\n file.write(file_updated)\n file.close\n\n\n print(\"-----------------------------------------\")\n print(\" Command for training {} model\".format(cfg_name.upper()))\n print(\"-----------------------------------------\")\n print(exec_cmd)\n print('')\n\n tfile.write(\"---------------------------------------------------------------------\\n\")\n tfile.write(\" [{} model] \\n\".format(cfg_name.upper()))\n tfile.write(exec_cmd + '\\n\\n')\n\ntfile.close()\n","repo_name":"ch-tseng/Make_YOLO_Train","sub_path":"pretrained/4_make_model.py","file_name":"4_make_model.py","file_ext":"py","file_size_in_byte":14470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"16183528285","text":"from cgi import test\r\nimport random\r\nimport unittest\r\nfrom unittest.mock import MagicMock\r\nfrom Asset import Asset\r\nfrom Bank import Bank\r\nfrom Command import SellCommand\r\nfrom CommandQueue import CommandQueue\r\nfrom Command import Command\r\nfrom Command import BuyCommand\r\nfrom MessageManager import MessageManager\r\nfrom StockExchange import StockExchange\r\nfrom User import User\r\n\r\ndef assert_not_called_with(self, *args, **kwargs):\r\n try:\r\n self.assert_called_with(*args, **kwargs)\r\n except AssertionError:\r\n return\r\n raise AssertionError('Expected %s to not have been called.' 
% self._format_mock_call_signature(args, kwargs))\r\n\r\nMagicMock.assert_not_called_with = assert_not_called_with\r\n\r\ndef create_user() -> User:\r\n return User(random.randrange(1, 5000), \"Eggbert\")\r\n\r\ndef create_asset(name: str = None) -> Asset:\r\n if (name != None):\r\n return Asset(random.randrange(1, 5000), name)\r\n else:\r\n return Asset(random.randrange(1, 5000), str(random.randrange(1, 5000)))\r\n\r\ndef create_sell_command(time_remaining : int = 2, user : User = create_user(), asset : Asset = create_asset(), price: int = 40) -> SellCommand:\r\n return SellCommand(random.randrange(1, 5000), time_remaining, user, asset, price)\r\n\r\ndef create_buy_command(time_remaining : int = 2, user : User = create_user(), asset : Asset = create_asset(), max_price: int = 500) -> BuyCommand:\r\n return BuyCommand(random.randrange(1, 5000), time_remaining, user, asset, max_price)\r\n\r\ndef create_command_queue(commands : list[Command]) -> CommandQueue:\r\n commandQueue : CommandQueue = CommandQueue()\r\n commandQueue.get_commands = MagicMock(return_value = commands)\r\n commandQueue.deduct_time = MagicMock()\r\n commandQueue.delete_command = MagicMock()\r\n return commandQueue\r\n\r\ndef create_bank(balance : int = 700, has_asset : bool = True):\r\n bank : Bank = Bank()\r\n bank.get_balance = MagicMock(return_value = balance)\r\n bank.has_asset = MagicMock(return_value = has_asset)\r\n bank.transfer = MagicMock()\r\n bank.transfer_asset = MagicMock()\r\n return bank\r\n\r\nclass StockExchangeTests(unittest.TestCase):\r\n def test_sell_with_no_buyers(self):\r\n my_sell_command : SellCommand = create_sell_command()\r\n\r\n commandQueue : CommandQueue = create_command_queue([my_sell_command])\r\n bank : Bank = create_bank()\r\n messageManager : MessageManager = MessageManager(None)\r\n\r\n stockExchange : StockExchange = StockExchange(commandQueue, bank, messageManager)\r\n stockExchange.process()\r\n\r\n commandQueue.deduct_time.assert_called_with(my_sell_command)\r\n bank.transfer.assert_not_called()\r\n bank.transfer_asset.assert_not_called()\r\n commandQueue.delete_command.assert_not_called()\r\n \r\n def test_buy_with_no_sellers(self):\r\n my_buy_command : BuyCommand = create_buy_command()\r\n\r\n commandQueue : CommandQueue = create_command_queue([my_buy_command])\r\n bank : Bank = create_bank()\r\n messageManager : MessageManager = MessageManager(None)\r\n\r\n stockExchange : StockExchange = StockExchange(commandQueue, bank, messageManager)\r\n stockExchange.process()\r\n\r\n commandQueue.deduct_time.assert_called_with(my_buy_command)\r\n bank.transfer.assert_not_called()\r\n bank.transfer_asset.assert_not_called()\r\n commandQueue.delete_command.assert_not_called()\r\n\r\n def test_buying_and_selling_happy_path(self):\r\n asset : Asset = create_asset(\"My Stock\")\r\n buyer : User = create_user()\r\n seller : User = create_user()\r\n my_buy_command : BuyCommand = create_buy_command(max_price=500, asset=asset, user=buyer)\r\n my_sell_command : SellCommand = create_sell_command(price=40, asset=asset, user=seller)\r\n\r\n commandQueue : CommandQueue = create_command_queue([my_buy_command, my_sell_command])\r\n bank : Bank = create_bank()\r\n messageManager : MessageManager = MessageManager(None)\r\n\r\n stockExchange : StockExchange = StockExchange(commandQueue, bank, messageManager)\r\n stockExchange.process()\r\n\r\n commandQueue.delete_command.assert_any_call(my_buy_command)\r\n commandQueue.delete_command.assert_any_call(my_sell_command)\r\n bank.transfer.assert_any_call(buyer, 
seller, 40)\r\n bank.transfer_asset.assert_any_call(seller, buyer, asset)\r\n \r\n def test_buying_and_selling_different_assets(self):\r\n asset_a : Asset = create_asset()\r\n asset_b : Asset = create_asset()\r\n buyer : User = create_user()\r\n seller : User = create_user()\r\n my_buy_command : BuyCommand = create_buy_command(max_price=500, asset=asset_a, user=buyer)\r\n my_sell_command : SellCommand = create_sell_command(price=40, asset=asset_b, user=seller)\r\n\r\n commandQueue : CommandQueue = create_command_queue([my_buy_command, my_sell_command])\r\n bank : Bank = create_bank()\r\n messageManager : MessageManager = MessageManager(None)\r\n\r\n stockExchange : StockExchange = StockExchange(commandQueue, bank, messageManager)\r\n stockExchange.process()\r\n\r\n commandQueue.delete_command.assert_not_called()\r\n bank.transfer.assert_not_called()\r\n bank.transfer_asset.assert_not_called()\r\n\r\n def test_lowest_selling_bid_gets_first_priority(self):\r\n asset : Asset = create_asset()\r\n buyer : User = create_user()\r\n low_seller : User = create_user()\r\n high_seller : User = create_user()\r\n my_buy_command : BuyCommand = create_buy_command(max_price=500, asset=asset, user=buyer)\r\n low_sell_command : SellCommand = create_sell_command(price=40, asset=asset, user=low_seller)\r\n high_sell_command : SellCommand = create_sell_command(price=80, asset=asset, user=high_seller)\r\n\r\n commandQueue : CommandQueue = create_command_queue([my_buy_command, low_sell_command, high_sell_command])\r\n bank : Bank = create_bank()\r\n messageManager : MessageManager = MessageManager(None)\r\n\r\n stockExchange : StockExchange = StockExchange(commandQueue, bank, messageManager)\r\n stockExchange.process()\r\n\r\n commandQueue.delete_command.assert_any_call(my_buy_command)\r\n commandQueue.delete_command.assert_any_call(low_sell_command)\r\n commandQueue.delete_command.assert_not_called_with(high_sell_command)\r\n bank.transfer.assert_any_call(buyer, low_seller, 40)\r\n bank.transfer.assert_not_called_with(buyer, high_seller, 80)\r\n bank.transfer_asset.assert_any_call(low_seller, buyer, asset)\r\n bank.transfer_asset.assert_not_called_with(high_seller, buyer, asset)\r\n\r\n def test_lowest_buying_bid_gets_first_priority(self):\r\n asset : Asset = create_asset()\r\n low_buyer : User = create_user()\r\n high_buyer : User = create_user()\r\n seller : User = create_user()\r\n low_buy_command : BuyCommand = create_buy_command(max_price=50, asset=asset, user=low_buyer)\r\n high_buy_command : BuyCommand = create_buy_command(max_price=70, asset=asset, user=high_buyer)\r\n sell_command : SellCommand = create_sell_command(price=40, asset=asset, user=seller)\r\n\r\n commandQueue : CommandQueue = create_command_queue([low_buy_command, high_buy_command, sell_command])\r\n bank : Bank = create_bank()\r\n messageManager : MessageManager = MessageManager(None)\r\n\r\n stockExchange : StockExchange = StockExchange(commandQueue, bank, messageManager)\r\n stockExchange.process()\r\n\r\n commandQueue.delete_command.assert_any_call(sell_command)\r\n commandQueue.delete_command.assert_any_call(low_buy_command)\r\n commandQueue.delete_command.assert_not_called_with(high_buy_command)\r\n bank.transfer.assert_any_call(low_buyer, seller, 40)\r\n bank.transfer.assert_not_called_with(high_buyer, seller, 80)\r\n bank.transfer_asset.assert_any_call(seller, low_buyer, asset)\r\n bank.transfer_asset.assert_not_called_with(seller, high_buyer, asset)\r\n\r\nif __name__ == '__main__':\r\n 
unittest.main()","repo_name":"UnironicHeyMoon/HMSE","sub_path":"StockExchangeTests.py","file_name":"StockExchangeTests.py","file_ext":"py","file_size_in_byte":8062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28829210217","text":"from http import HTTPStatus\nfrom uuid import UUID\n\nfrom fastapi import Depends, HTTPException\nfrom fastapi.security import HTTPBasicCredentials\nfrom starlette.status import HTTP_401_UNAUTHORIZED\n\nfrom fastapi import Response\nfrom fastapi.security import HTTPBasic\nfrom funcy import first, invoke\nfrom peewee_async import execute\n\nfrom app.database import db_manager\nfrom app.db_models import User\nfrom app.responses import APIResponse\nfrom .utils import (\n # User,\n response_model, get_or_404, set_attrs,\n)\nfrom ..models.user import EnvelopedListOfUsersResponse, EnvelopedUserResponse, UserCreateRequest, UserUpdateRequest, \\\n UserResponse\n\nsecurity = HTTPBasic()\n\n\nasync def get_current_username(credentials: HTTPBasicCredentials = Depends(security)):\n user = await get_or_404(User.select(), email=credentials.username)\n\n if credentials.password != user.password:\n raise HTTPException(\n status_code=HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect email or password\",\n headers={\"WWW-Authenticate\": \"Basic\"},\n )\n return user.id\n\n\n@response_model(EnvelopedListOfUsersResponse)\nasync def get_users() -> Response:\n users = await execute(User.select())\n return APIResponse(invoke(map(UserResponse.from_orm, users), 'dict'))\n\n\n@response_model(EnvelopedUserResponse)\nasync def get_user(user_id: UUID = Depends(get_current_username)) -> Response:\n user = await get_or_404(User.select(), id=user_id)\n return APIResponse(\n UserResponse.from_orm(user).dict(), status_code=HTTPStatus.OK\n )\n\n@response_model(EnvelopedUserResponse, status_code=HTTPStatus.CREATED)\nasync def create_user(data: UserCreateRequest) -> Response:\n user_id = await execute(User.insert(**data.dict()))\n user = first(await execute(User.filter(id=user_id)))\n return APIResponse(\n UserResponse.from_orm(user).dict(), status_code=HTTPStatus.CREATED\n )\n\n\n@response_model(EnvelopedUserResponse, status_code=HTTPStatus.OK)\nasync def update_user(user_id: UUID, data: UserUpdateRequest) -> Response:\n user = await get_or_404(User.select(), id=user_id)\n\n update_data = data.dict(exclude_unset=True)\n\n course = set_attrs(user, **update_data)\n await db_manager.update(course)\n return APIResponse(UserResponse.from_orm(course).dict())\n\n\n@response_model(status_code=HTTPStatus.NO_CONTENT)\nasync def delete_user(user_id: UUID) -> Response:\n await get_or_404(User.select(), id=user_id)\n await execute(User.delete().where(User.id == user_id))\n return APIResponse(status_code=HTTPStatus.NO_CONTENT)","repo_name":"MissiaL/dating-backend","sub_path":"api/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19157664852","text":"__author__ = 'Joseph'\n\n\nlist_a = [1, 3, 5, 3, 3, 2, 3, 3, 3, 3]\n\n\ndef majority(a):\n if len(a) == 0:\n return None\n if len(a) == 1:\n return a[0]\n half = int(len(a)/2)\n left = majority(a[0:half])\n right = majority(a[half:])\n if left == right:\n return left\n left_count = len([x for x in a if x == left])\n if left_count > half + 1:\n return left\n right_count = len([x for x in a if x == right])\n if right_count > half + 1:\n return right\n return \"No majority 
element\"\n\n\ndef majority_linear(a):\n x = prune(a)\n count = len([y for y in a if y == x])\n if count > len(a)/2:\n return x\n else:\n return \"No majority element\"\n\n\ndef prune(s):\n n = len(s)\n if n == 1:\n return s[0]\n if n % 2 != 0:\n n -= 1\n s1 = s[::2]\n s2 = s[1::2]\n result = []\n for i in range(0, n/2):\n if s1[i] == s2[i]:\n result.append(s1[i])\n return prune(result)\n","repo_name":"trixr4kdz/cmsi282","sub_path":"homework3/Problem_223.py","file_name":"Problem_223.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34914759556","text":"import socket\n\nHOST = '192.168.125.1'\nPORT = 1025\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n server_address = (HOST, PORT)\n print('connecting to %s port ' + str(server_address))\n s.connect(server_address)\n print(\"Connected\")\n\ntry:\n while True:\n msg = input('Client: ')\n s.sendall(msg.encode())\n\n if msg == \"quit\":\n break\n\n data = s.recv(1024)\n print('Server: ', data.decode(\"utf8\"))\nfinally:\n print(\"End\")\n","repo_name":"nguyenkhangduy298/ABB_robot_object_detection_vision","sub_path":"TCPConnection/SampleClient.py","file_name":"SampleClient.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36151527406","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 1 15:04:33 2020\r\n\r\n@author: lockd\r\n\r\n BUBBLE SORT\r\n\r\n\"\"\"\r\n\r\ndef Bubble_sort(l): #Taking list as a parameter \r\n \r\n n = len(l)\r\n \r\n for i in range(n):\r\n #range(0,3)=0,1,2\r\n for j in range(0, n-1-i): #n-1 because if we have 3 elements in a list, then we will compare 1st element two times(with 2nd and 3rd number) and in the next iteration of i, the last element won't be compared\r\n if(l[j]>l[j+1]):\r\n temp = l[j]\r\n l[j] = l[j+1]\r\n l[j+1] = temp\r\n \r\nl = [66,23,2,12,9,0,56,5,98,3]\r\nBubble_sort(l)\r\n\r\nfor i in l:\r\n print(i)\r\n \r\n \r\n\r\n","repo_name":"RuchiRaina3/My-Python-Projects","sub_path":"Projects/Searching-Sorting/Bubble Sort.py","file_name":"Bubble Sort.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15493952162","text":"import kivy\n\nfrom kivy.app import App\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\nfrom kivy.uix.screenmanager import ScreenManager\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.clock import Clock\nfrom kivy.uix.scrollview import ScrollView\n\nfrom bluetoothcube.cubedisplay import CubeDisplay # noqa: F401\n\n\nclass Hideable(kivy.event.EventDispatcher):\n hidden = kivy.properties.BooleanProperty(False)\n\n def __init__(self):\n super().__init__()\n self.bind(hidden=lambda w, v: self.hide() if v else self.show())\n\n def hide(self):\n if hasattr(self, 'saved_attrs'):\n return # Already hidden\n self.saved_attrs = (self.opacity, self.disabled)\n self.opacity, self.disabled = (0, True)\n\n def show(self):\n if not hasattr(self, 'saved_attrs'):\n return # Not hidden\n self.opacity, self.disabled = self.saved_attrs\n del self.saved_attrs\n\n\nclass TimerButton(Button):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.timer = App.get_running_app().timer\n self.just_stopped = False\n\n def on_press(self):\n self.just_stopped = False\n if self.timer.running:\n 
self.timer.stop()\n self.just_stopped = True\n\n def on_release(self):\n if not self.timer.running and not self.just_stopped:\n self.timer.start()\n\n\nclass PrimeButton(Button):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.timer = App.get_running_app().timer\n\n def on_press(self):\n if self.timer.primed:\n self.timer.unprime()\n else:\n self.timer.prime()\n\n\n# Uses the timer to display measured time.\nclass TimeDisplay(Label):\n bcolor = kivy.properties.ListProperty([1, 1, 1, 1])\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.updateevent = None\n\n app = App.get_running_app()\n\n self.timer = app.timer\n self.timehistory = app.timehistory\n\n self.timer.bind(\n running=self.on_timer_running_changed,\n primed=lambda timer, primed: self.update_bg_color()\n )\n self.timehistory.bind(\n last_time=lambda th, lt: self.update_display(),\n on_time_invalidated=lambda th: self.clear())\n\n def on_timer_running_changed(self, timer, running):\n if running:\n self.updateevent = Clock.schedule_interval(\n lambda dt: self.update_display(), 0.1)\n else:\n Clock.unschedule(self.updateevent)\n self.update_display()\n\n def update_display(self):\n if self.timer.running:\n v = self.timer.get_time()\n precision = 1\n else:\n if self.timehistory.last_time:\n v = self.timehistory.last_time.time\n else:\n v = 0\n precision = 2\n\n self.text = f\"{v:0.{precision}f}\"\n\n if v >= 100:\n self.time_text_ratio = 0.25\n else:\n self.time_text_ratio = 0.38\n\n def update_bg_color(self):\n if self.timer.primed:\n self.parent.bcolor = [0.4, 0, 0, 1]\n else:\n self.parent.bcolor = [0, 0, 0, 1]\n\n def clear(self):\n self.text = \"0.0\"\n\n\n# TODO: This widget is now a bare Label - we could make it richer by using a\n# Grid and utilizing partial updates.\nclass AnalysisDisplay(Label):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.analyzer = App.get_running_app().analyzer\n self.timer = App.get_running_app().timer\n self.timehistory = App.get_running_app().timehistory\n\n self.analyzer.bind(\n current_stage=lambda a, cs: self.update_display())\n self.timer.bind(\n on_solve_started=self.on_solve_started,\n on_solve_ended=self.on_solve_ended,\n running=lambda t, r: self.update_display())\n self.timehistory.bind(\n last_time=lambda th, lt: self.update_display())\n\n self.updateevent = None\n self.update_display()\n\n def on_solve_started(self, timer):\n self.updateevent = Clock.schedule_interval(\n lambda dt: self.update_display(), 0.1)\n\n def on_solve_ended(self, timer):\n if self.updateevent:\n Clock.unschedule(self.updateevent)\n\n def update_display(self):\n text = f\"Using {self.analyzer.method} analyzer.\\n\"\n\n if self.timer.running:\n stages = self.analyzer.get_stage_times()\n else:\n lt = self.timehistory.last_time\n if lt and lt.meta and 'stage_times' in lt.meta:\n text += \"Last solve:\\n\"\n stages = lt.meta['stage_times']\n else:\n stages = []\n\n for i, v in enumerate(stages):\n stage_name, t = v\n precision = 1 if i+1 == len(stages) else 2\n text += f\"[b]{stage_name}[/b]: {t:.0{precision}f}\\n\"\n\n self.text = text\n\n\n# Created dynamically as cubes are discovered.\nclass CubeButton(AnchorLayout):\n button = kivy.properties.ObjectProperty(None)\n\n\nclass CubeStateDisplay(Label):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cube = App.get_running_app().cube\n self.cube.bind(on_state_changed=self.on_cube_state_changed)\n\n def on_cube_state_changed(self, cube, new_state):\n self.text = 
'\n'.join(new_state.get_representation_strings())\n\n\nclass HideableLabel(Label, Hideable):\n    pass\n\n\nclass HideableButton(Button, Hideable):\n    pass\n\n\n# A scrollview that makes mouse wheel up/down events behave like left/right.\nclass ScrollViewLR(ScrollView):\n    def on_scroll_start(self, touch, check_children=True):\n        # Translate up/down scroll events to left/right.\n        if hasattr(touch, 'button'):\n            if touch.button == 'scrollup':\n                touch.button = 'scrollleft'\n            if touch.button == 'scrolldown':\n                touch.button = 'scrollright'\n        # Call original implementation.\n        super().on_scroll_start(touch, check_children)\n\n\nclass LastTime(BoxLayout):\n    lt = kivy.properties.ObjectProperty(\n        None, allownone=True, force_dispatch=True)\n\n\nclass BluetoothCubeRoot(ScreenManager):\n    pass\n","repo_name":"rafalcieslak/bluetoothcube","sub_path":"bluetoothcube/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":6314,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"}
{"seq_id":"16783497813","text":"garo, sero = map(int, input().split())\ns = int(input())\nloc = [list(map(int, input().split())) for _ in range(s)] # 1: up, 2: down, 3: left, 4: right\nD_dir, D_loc = map(int, input().split())\n\nlst = [[0] * (garo + 1) for _ in range(sero + 1)]\n\ns_lst = []\nfor k in loc:\n    if k[0] == 1:\n        s_lst.append([0, k[1]])\n    elif k[0] == 2:\n        s_lst.append([sero, k[1]])\n    elif k[0] == 3:\n        s_lst.append([k[1], 0])\n    elif k[0] == 4:\n        s_lst.append([k[1], garo])\n# print(s_lst)\n\nfor i in range(sero + 1):\n    for j in range(garo + 1):\n        if i == 0 or i == sero or j == 0 or j == garo:\n            lst[i][j] = 1\n\nif D_dir == 1:\n    target = [0, D_loc]\nelif D_dir == 2:\n    target = [sero, D_loc]\nelif D_dir == 3:\n    target = [D_loc, 0]\nelif D_dir == 4:\n    target = [D_loc, garo]\n\n# print(s_lst)\n# print(*lst, sep=\"\\n\")\n\ndi = [0, 1, 0, -1] # right, down, left, up\ndj = [1, 0, -1, 0]\nfinal = 0\n\nfor idx in s_lst:\n    i = idx[0]\n    j = idx[1]\n    if i == 0:\n        dr = 0\n    elif i == sero:\n        dr = 2\n    elif j == 0:\n        dr = 3\n    elif j == garo:\n        dr = 1\n\n    cnt = 0\n    while True:\n        ni, nj = i + di[dr], j + dj[dr]\n        if (0 <= ni <= sero) and (0 <= nj <= garo) and (lst[ni][nj]) != 0:\n            i, j = ni, nj\n            cnt += 1\n            if [ni, nj] == target:\n                break\n        else: # turn to the next direction\n            dr = (dr + 1) % 4 # 0-1-2-3-0-1-2-...\n\n    final = final + min((garo * 2 + sero * 2) - cnt, cnt)\n\n    # print(final)\nprint(final)\n","repo_name":"yyytae0/algorithm-training","sub_path":"baekjoon/2564_new.py","file_name":"2564_new.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"35168047263","text":"'''\r\nHomework 2, Exercise 5\r\nTyler Schumacher\r\nSeptember 2, 2019\r\nGuessing a number\r\n'''\r\nimport random\r\n\r\nguessesUsed = 0 #Initialized\r\n\r\n#Introduction\r\nprint('Hello!')\r\nprint('You have 7 chances to guess my number.')\r\nprint('My number is between 1 and 33')\r\n\r\nnumber = random.randint(1, 33) #Random number between 1 and 33 is chosen\r\nlow = 1 #Initialized\r\nhigh = 33\r\nguess = random.randint(1, 33) #Random guess\r\nwhile guessesUsed < 7: #While less than 7 guesses\r\n    print('Take a guess!')\r\n    guess = (low + high) // 2 #Takes guess based off previous guess\r\n    print(guess)\r\n    guess = int(guess)\r\n\r\n    guessesUsed = guessesUsed + 1\r\n\r\n    if guess < number: #Guess is too low\r\n        print('Your guess is too low!')\r\n        low = guess + 1\r\n    elif guess > number: #Guess is too high\r\n        print('Your guess is too high!')\r\n        high = guess\r\n    elif 
guess == number: #Correct guess\r\n break\r\n\r\nif guess == number: #Correct guess\r\n guessesUsed = str(guessesUsed) #Converted to string\r\n number = str(number) #Converted to string\r\n print('Nice job!')\r\n print('You guessed ' + number + ' in ' + guessesUsed + ' guesses!')\r\nelse: #Did not guess in under 7 guesses\r\n number = str(number) #Converted to string\r\n print('You were not able to guess my number in 7 guesses.')\r\n print('My number was ' + number)\r\n","repo_name":"tschuma3/Python-Fundamentals","sub_path":"Homework 2/hm2_Tyler_Schumacher_ex_5.py","file_name":"hm2_Tyler_Schumacher_ex_5.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73228715363","text":"import json\nimport itertools as it\n\n\ndef main():\n with open(\"genres_10_mil.json\") as f:\n genres = json.load(f)\n for g1, g2 in it.combinations(genres.keys(), 2):\n intersect = set(genres[g1]).intersection(genres[g2])\n if len(intersect) > 0:\n print(g1, g2)\n print(intersect)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"georgejdanforth/ml-final-project","sub_path":"src/tag_genres/compare_genres.py","file_name":"compare_genres.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17798523581","text":"\"\"\"\nThis script exports checks which files and test methods have changed. If the only thing\nthat changed in the PR are test methods, only execute those.\n\nTo do so, it creates bash script with \"SKIP_\" variables and \"CIV_CONFIG_FILE\" that\nis a file with the configuration of CIV in yaml format.\n\"\"\"\nimport os\nimport subprocess\nimport sys\nimport yaml\n\nfrom pprint import pprint\n\n\ndef get_files_changed():\n os.system('git remote add upstream https://github.com/osbuild/cloud-image-val.git')\n os.system('git fetch upstream')\n files_changed_cmd = ['git', 'diff', '--name-only', 'HEAD', 'upstream/main']\n files_changed_raw = subprocess.run(files_changed_cmd, stdout=subprocess.PIPE)\n\n if files_changed_raw.stdout == b'' or files_changed_raw.stderr is not None:\n print('ERROR: git diff command failed or there are no changes in the PR')\n exit()\n\n return str(files_changed_raw.stdout)[2:-3].split('\\\\n')\n\n\ndef lines_into_list(file_name):\n list = []\n with open(file_name, 'r') as diff:\n file_started = False\n\n # Skip the diff header\n for line in diff:\n if not file_started:\n if line[0:2] == '@@':\n file_started = True\n continue\n\n list.append(line.rstrip())\n\n return list\n\n\ndef changed_file_to_diff_list(file_changed):\n # get whole sript diff to list (useful for debugging)\n file_changed_underscore = file_changed.replace('/', '_').replace('.', '_')\n os.system(f'git diff -U10000 --output=/tmp/diff_{file_changed_underscore} HEAD upstream/main {file_changed}')\n\n # Read file into list\n return lines_into_list(f'/tmp/diff_{file_changed_underscore}')\n\n\ndef find_method_name(direction, line_num, diff):\n if direction == 'above':\n step = -1\n stop = 0\n elif direction == 'below':\n step = 1\n stop = len(diff)\n else:\n print(f'direction has to be \"above\" or \"below\", not {direction}')\n exit()\n\n for i in range(line_num, stop, step):\n raw_line = diff[i][1:].strip()\n if raw_line[0:3] == 'def':\n method = raw_line[4:].split('(')[0]\n return method\n elif raw_line[0:5] == 'class':\n print(f'A class was found before a function, the filter cannot be applied. 
Class: {raw_line}')\n return None\n\n\ndef get_method_from_changed_line(line_num, diff):\n raw_line = diff[line_num][1:].strip()\n\n if raw_line[0:3] == 'def':\n method = find_method_name('above', line_num + 1, diff)\n elif raw_line[0:1] == '@':\n method = find_method_name('below', line_num, diff)\n else:\n method = find_method_name('above', line_num, diff)\n\n return method\n\n\ndef get_modified_methods():\n modified_methods = set()\n test_dirs = ['test_suite/cloud/', 'test_suite/generic/']\n\n files_changed = get_files_changed()\n print('--- Files changed:')\n print(*files_changed, sep='\\n')\n\n for file_changed in files_changed:\n # Check if file is a test suite file\n if test_dirs[0] not in file_changed and test_dirs[1] not in file_changed:\n print(f'{file_changed} is not a test suite file, filter cannot be applied')\n return None\n\n diff = changed_file_to_diff_list(file_changed)\n for line_num, line in enumerate(diff):\n if line[0:1] in ['+', '-']:\n method = get_method_from_changed_line(line_num, diff)\n\n if method is None:\n return None\n elif method[0:4] != 'test':\n print(f'The method \"{method}\" is not a test')\n return None\n else:\n modified_methods.add(method)\n\n return modified_methods\n\n\ndef write_vars_file(vars, vars_file_path):\n with open(vars_file_path, 'w+') as vars_file:\n for var in vars:\n if vars[var] is not None:\n vars_file.write(f'export {var}=\"{vars[var]}\"\\n')\n\n\ndef get_modified_methods_str():\n modified_methods = get_modified_methods()\n if modified_methods is None:\n return None\n\n print('--- Modified methods:')\n print(*list(modified_methods), sep='\\n')\n return ' or '.join(list(modified_methods))\n\n\ndef get_skip_vars():\n skip_vars = {'skip_aws': 'true', 'skip_azure': 'true', 'skip_gcp': 'true'}\n files_changed = get_files_changed()\n for file_changed in files_changed:\n if 'test_suite/generic/' in file_changed:\n skip_vars = {'skip_aws': 'false', 'skip_azure': 'false', 'skip_gcp': 'false'}\n return skip_vars\n elif file_changed == 'test_suite/cloud/test_aws.py':\n skip_vars['skip_aws'] = 'false'\n elif file_changed == 'test_suite/cloud/test_azure.py':\n skip_vars['skip_azure'] = 'false'\n elif file_changed == 'test_suite/cloud/test_gcp.py':\n skip_vars['skip_gcp'] = 'false'\n\n return skip_vars\n\n\ndef write_config_file(config_path, civ_config):\n with open(config_path, 'w+') as config_file:\n yaml.dump(civ_config, config_file)\n\n\nif __name__ == '__main__':\n vars_file_path = sys.argv[1]\n vars = {}\n\n if os.environ['CI_COMMIT_REF_SLUG'] != 'main':\n skip_vars = get_skip_vars()\n modified_methods_str = get_modified_methods_str()\n else:\n modified_methods_str = None\n\n civ_config = {'resources_file': '/tmp/resource-file.json',\n 'output_file': '/tmp/report.xml',\n 'environment': 'automated',\n 'tags': {'Workload': 'CI Runner',\n 'Job_name': 'In_CI_Cloud_Test:' + os.environ['CI_JOB_NAME'],\n 'Project': 'CIV',\n 'Branch': os.environ['CI_COMMIT_REF_SLUG'],\n 'Pipeline_id': os.environ['CI_PIPELINE_ID'],\n 'Pipeline_source': os.environ['CI_PIPELINE_SOURCE']},\n 'debug': True,\n 'include_markers': 'not pub',\n 'test_filter': modified_methods_str}\n\n # If modified_methods_str is different than None, we might need to skip some clouds\n # If it's None, just run CIV in all clouds, no skipping\n if modified_methods_str:\n vars['SKIP_AWS'] = skip_vars['skip_aws']\n vars['SKIP_AZURE'] = skip_vars['skip_azure']\n vars['SKIP_GCP'] = skip_vars['skip_gcp']\n else:\n vars['SKIP_AWS'] = 'false'\n vars['SKIP_AZURE'] = 'false'\n vars['SKIP_GCP'] = 
'false'\n\n print('--- SKIP_:')\n [print(key, ': ', value) for key, value in vars.items()]\n\n config_path = '/tmp/civ_config.yaml'\n vars['CIV_CONFIG_FILE'] = config_path\n\n write_config_file(config_path, civ_config)\n print('--- civ_config:')\n pprint(civ_config)\n\n write_vars_file(vars, vars_file_path)\n","repo_name":"osbuild/cloud-image-val","sub_path":"schutzbot/get_civ_config.py","file_name":"get_civ_config.py","file_ext":"py","file_size_in_byte":6797,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"8917025906","text":"import json\nimport os\nimport uuid\nfrom dataclasses import asdict, dataclass\n\nimport aiofiles\nimport aiofiles.os\nfrom dask import dataframe as dd\nfrom django.conf import settings\nfrom django.http import HttpRequest, HttpResponse\nfrom django.shortcuts import render\n\nfrom . import forms\n\n# Create your views here.\n\n\n@dataclass\nclass Candle:\n id: int\n banknifty: str\n date: int\n time: str\n open: float\n high: float\n low: float\n close: float\n volume: int\n\n\nasync def read_csv(csv_file_name):\n csv_file_path = os.path.join(settings.MEDIA_ROOT, csv_file_name)\n\n dtype = {\n \"BANKNIFTY\": str,\n \"DATE\": int,\n \"TIME\": str,\n \"OPEN\": float,\n \"HIGH\": float,\n \"LOW\": float,\n \"CLOSE\": float,\n \"VOLUME\": int,\n }\n\n df = dd.read_csv(csv_file_path, dtype=dtype)\n return df\n\n\nasync def get_in_timeframe(df, timeframe):\n timeframe_df = df.head(timeframe)\n candles: list[Candle] = []\n for i, record in timeframe_df.iterrows():\n banknifty = record[\"BANKNIFTY\"]\n date = record[\"DATE\"]\n time = record[\"TIME\"]\n open = record[\"OPEN\"]\n high = record[\"HIGH\"]\n low = record[\"LOW\"]\n close = record[\"CLOSE\"]\n volume = record[\"VOLUME\"]\n candle = Candle(i, banknifty, date, time, open, high, low, close, volume)\n candles.append(candle)\n\n banknifty = candles[0].banknifty\n date = candles[0].date\n time = candles[0].time\n open = candles[0].open\n high = max(candles, key=lambda candle: candle.high).high\n low = min(candles, key=lambda candle: candle.low).low\n close = candles[-1].close\n volume = candles[-1].volume\n\n candle = Candle(\n timeframe + 1, banknifty, date, time, open, high, low, close, volume\n )\n candle_dict = asdict(candle)\n return candle_dict\n\n\nasync def save_csv_file(csv_file):\n csv_file_name = str(uuid.uuid4()) + \".csv\"\n await aiofiles.os.makedirs(settings.MEDIA_ROOT, exist_ok=True)\n csv_file_path = os.path.join(settings.MEDIA_ROOT, csv_file_name)\n async with aiofiles.open(csv_file_path, \"wb\") as file:\n for chunk in csv_file.chunks():\n await file.write(chunk)\n\n return csv_file_name\n\n\nasync def save_json(csv_file_name: str, candle_dict: dict):\n json_file_name = csv_file_name.replace(\".csv\", \".json\")\n json_file_path = os.path.join(settings.MEDIA_ROOT, json_file_name)\n async with aiofiles.open(json_file_path, \"w\") as file:\n await file.write(json.dumps(candle_dict))\n return json_file_name\n\n\nasync def hello(request: HttpRequest):\n form = forms.CsvForm()\n json_file_name = \"\"\n if request.method == \"POST\":\n form = forms.CsvForm(request.POST, request.FILES)\n if form.is_valid():\n csv_file = form.cleaned_data[\"csv_file\"]\n timeframe = form.cleaned_data[\"timeframe\"]\n csv_file_name = await save_csv_file(csv_file)\n df = await read_csv(csv_file_name)\n candle_dict = await get_in_timeframe(df, timeframe)\n json_file_name = await save_json(csv_file_name, candle_dict)\n return render(\n request, \"index.html\", 
context={\"form\": form, \"json_file\": json_file_name}\n )\n","repo_name":"Deshdeepak1/TradingProject","sub_path":"MainApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29078882568","text":"import threading, requests, webbrowser, configparser\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport datetime\n\nserver_address = (\"127.0.0.1\", 5000)\nevent_get_token = threading.Event()\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\n\nauthorization_base_url = (\n \"https://connect.deezer.com/oauth/auth.php?perms=manage_library\"\n)\nredirect_uri = \"http://localhost:5000/callback\"\n\napp_id = config[\"DEEZER\"][\"DEEZER_APP_ID\"]\nsecret = config[\"DEEZER\"][\"DEEZER_SECRET\"]\n\nauth_URL = f\"{authorization_base_url}&app_id={app_id}&redirect_uri={redirect_uri}\"\n\naccess_token = \"\"\n\n\nclass DeezerOAuthHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n try:\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n\n self.wfile.write(\n bytes(\"Callback received\", \"utf-8\")\n )\n self.wfile.write(bytes(\"\", \"utf-8\"))\n self.wfile.write(bytes(\"
You can close this tab.
\", \"utf-8\"))\n self.wfile.write(bytes(\"\", \"utf-8\"))\n\n if \"/callback\" in self.path:\n code = self.path.split(\"?\")[1].split(\"=\")[1]\n # save the token in a txt file so we can terminate the server thread\n r = requests.get(\n f\"https://connect.deezer.com/oauth/access_token.php?app_id={app_id}&secret={secret}&code={code}\"\n )\n access_token = r.text.split(\"=\")[1].split(\"&\")[0]\n token_file = open(\"deezer_token.txt\", \"w\")\n line = f\"{access_token}&{datetime.datetime.now().timestamp()}\"\n token_file.write(line)\n token_file.close()\n event_get_token.set()\n return\n except Exception as e:\n print(e)\n\n def log_message(self, format, *args):\n pass\n\n\ndef get_token():\n try:\n server = HTTPServer(server_address, DeezerOAuthHandler)\n thread1 = threading.Thread(name=\"server\", target=server.serve_forever)\n thread1.start()\n\n webbrowser.open(auth_URL, new=0)\n\n event_get_token.wait(10)\n server.shutdown()\n server.server_close()\n\n except KeyboardInterrupt:\n exit()\n\n\nif __name__ == \"__main__\":\n get_token()\n","repo_name":"nocfer/spotify-to-deezer","sub_path":"deezer/DeezerOAuthHandler.py","file_name":"DeezerOAuthHandler.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"2486126363","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef is_above_the_break(x, y):\n if y < 95:\n return False\n if x ** 2 + y ** 2 < 225 ** 2:\n return False\n\n return True\n\n\ndef is_in_paint(x, y):\n if abs(x) > 70:\n return False\n if y > 150:\n return False\n if x ** 2 + y ** 2 < 50 ** 2 and y > 0:\n return False\n\n return True\n\n\ndef is_right_corner(x, y):\n if x > 220 and y < 95:\n return True\n return False\n\n\ndef is_left_corner(x, y):\n if x < -220 and y < 95:\n return True\n return False\n\n\ndef is_resticted_area(x, y):\n if x ** 2 + y ** 2 <= 50 ** 2 and y > 0:\n return True\n return False\n\n\ndef get_zone_(row):\n x = row[\"xLegacy\"]\n y = row[\"yLegacy\"]\n\n if is_above_the_break(x, y):\n return \"Above the Break 3\"\n elif is_in_paint(x, y):\n return \"In The Paint (Non-RA)\"\n elif is_right_corner(x, y):\n return \"Right Corner 3\"\n elif is_left_corner(x, y):\n return \"Left Corner 3\"\n elif is_resticted_area(x, y):\n return \"Restricted Area\"\n else:\n return \"Mid-Range\"\n\n\ndef clean_league_avg(league_avg):\n data = league_avg[\"resultSets\"][0][\"rowSet\"]\n df = pd.DataFrame(data, columns=league_avg[\"resultSets\"][0][\"headers\"])\n\n df = df.groupby(\"SHOT_ZONE_BASIC\").agg({\"FGA\": \"sum\", \"FGM\": \"sum\"})\n\n df.loc[\"Above the Break 3\", \"FGA\"] += df.loc[\"Backcourt\", \"FGA\"]\n df[\"FG%\"] = df[\"FGM\"] / df[\"FGA\"]\n df.drop(\"Backcourt\", inplace=True)\n\n return df.loc[:, \"FG%\"]\n\n\ndef add_text(ax, zone, league_avg, percent):\n points = [(-22, 250), (-22, 100), (-250, 0), (-22, 170), (-22, 0), (200, 0)]\n zone = zone[\"Shot Type\"].unstack()\n for i in range(len(zone)):\n text = \" \" + str(int(zone[\"Made Shot\"][i])) + \"/\" + str(\n int(zone[\"Missed Shot\"][i]) + int(zone[\"Made Shot\"][i]))\n text += '\\nPA: ' + str(round(percent[i] * 100, 1)) + \"%\\n\"\n text += \"LA: {:.2f}%\".format(float(league_avg[percent.index.values[i]]) * 100)\n ax.text(points[i][0], points[i][1], text, bbox=dict(facecolor='white', 
alpha=0.7))\n","repo_name":"EVanwormhoudt/NBAprojectPython","sub_path":"src/nba/visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39418317205","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport functools\nimport simplejson as json\nimport math\n\nwith requests.Session() as session:\n\t\n\t# get config properties\n\twith open(\"config.json\") as rawConfig:\n\t\tconfig = json.load(rawConfig)\n\n\t# create session\n#\tsession.headers['accept'] = config[\"session\"][\"accept\"]\n\tsession.headers['cookie'] = config[\"session\"][\"cookie\"] \n\t\t\n\tdata = session.get('https://' + config[\"slack-workspace\"] + '.slack.com/customize/emoji?page=1', allow_redirects=True).text\n\n\t# prepare to parse data\n\tsoup = BeautifulSoup(data, \"html.parser\")\n\n\t# find how many pages there are to scrape\n\tmaxPage = int(functools.reduce(lambda a,b : a if int(a[\"data-pagination\"]) > int(b[\"data-pagination\"]) else b, soup.find_all(\"a\", {\"data-pagination\": re.compile(r\".*\")}))[\"data-pagination\"])\n\n\t# scrape all emotes\n\tprint(\"Progress:\")\n\tdataOut = {\"count\": 0, \"fails\": 0, \"emojis\": {}, \"failed-urls\": [], \"workspace\": config[\"slack-workspace\"]}\n\tfor i in range(1, maxPage+1):\n\t\tprint(\">\", int(math.floor(i/(maxPage+1) * 100)), \"%\")\n\t\tdata = session.get('https://'+config[\"slack-workspace\"]+'.slack.com/customize/emoji?page='+str(i), allow_redirects=True).text\n\t\tsoup = BeautifulSoup(data, \"html.parser\")\n\n\t\tfor elem in soup.find_all(\"span\", {\"data-original\": re.compile(r\".*\")}):\n\t\t\tdataOut[\"count\"] += 1\n\t\t\t\n\t\t\ttry:\n\t\t\t\t#print(re.match(r'.*\\/(.*)\\/.*', elem[\"data-original\"]).group(1))\n\t\t\t\tdataOut[\"emojis\"][re.match(r'.*\\/(.*)\\/.*', elem[\"data-original\"]).group(1)] = elem[\"data-original\"]\n\t\t\texcept:\n\t\t\t\tdataOut[\"failed-urls\"].append(elem[\"data-original\"])\n\t\t\t\tdataOut[\"fails\"] += 1\t\t\t\t\n\n\tprint(\"You pulled\", dataOut[\"count\"], \"emojis from\", config[\"slack-workspace\"])\n\t\n\ttry:\n\t\twith open(config[\"outFile\"], \"w\") as outfile:\n\t\t\toutfile.write(json.dumps(dataOut, indent=4))\n\t\toutfile.close()\n\t\tprint(\"Successfully wrote to\", config[\"outFile\"])\n\texcept:\n\t\tprint(\"Could not write to\", config[\"outFile\"])\n\n","repo_name":"rammom/bulk-slack-emoji-download","sub_path":"buildData.py","file_name":"buildData.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29395979828","text":"'''\nCreated on Nov 17, 2016\n\n@author: songbo\n'''\n\nimport os\nimport time\nimport datetime\nimport argparse\nimport xlsxwriter\nimport shutil\nimport openpyxl\nimport xlrd\n# import pylab as pl\n\ntry:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n\nfrom tools.utility.constants import *\nfrom configs.androidConfig import appVersion, phoneVersion, buildVersion, deviceNet, mtuanAPPVersion\n\n# pl.mpl.rcParams['font.sans-serif'] = ['SimHei']\n\nclass Parser(object):\n def __init__(self):\n self.xmlTypeDict = {\n TYPE_CPU : 'cpuUsage',\n TYPE_MEMORY : 'ramUsage',\n TYPE_TX : 'netUpFlow',\n TYPE_RX : 'netDownFlow',\n TYPE_TEMPERATURE : 'temperature',\n TYPE_TRAFFIC : 'traffic'\n }\n\n def txtParser(self, txtPath, type):\n performanceData = list()\n dataFile = 
open(txtPath, mode='r', encoding='utf-8')\n try:\n allLines = dataFile.readlines()\n for line in allLines:\n performanceData.append(line)\n except Exception as e:\n raise FileExistsError(e)\n finally:\n dataFile.close()\n\n return self._getTxtData(performanceData, type)\n\n def _getTxtData(self, performanceData, type):\n dataList = list()\n try:\n if type == TYPE_FPS:\n for line in performanceData:\n value = str(line).split(' ')\n if(len(value) > 1):\n dataList.append(value)\n else:\n for line in performanceData:\n value = str(line).split(':')\n if(len(value) > 1):\n dataList.append(value[1])\n except Exception as e:\n print(str(e))\n finally:\n return dataList\n\n def xmlParser(self, xmlPath, type):\n doc = ET.parse(xmlPath)\n root = doc.getroot()\n\n nodes = root.findall('performance')\n\n dataList = []\n if type == TYPE_TRAFFIC:\n dataList_rx = []\n dataList_tx = []\n for child in nodes:\n data_rx = self._getXmlData(child, self.xmlTypeDict[TYPE_RX])\n data_tx = self._getXmlData(child, self.xmlTypeDict[TYPE_TX])\n dataList_rx.append(float(data_rx))\n dataList_tx.append(float(data_tx))\n dataList.append(round(sum(dataList_rx) + sum(dataList_tx), 2))\n else:\n for child in nodes:\n data = self._getXmlData(child, self.xmlTypeDict[type])\n dataList.append(data)\n return dataList\n\n def _getXmlData(self, node, type):\n data = node.find(type).text\n if type == self.xmlTypeDict[TYPE_MEMORY]:\n data = round(float(data) / 1024, 2)\n return data if data else 0\n\n\nclass DataHandler(object):\n def __init__(self):\n self.rsPath = ''\n self.testCase = ''\n self.type = ''\n self.parser = Parser()\n\n def handle(self, rsPath, testCase, type):\n self.rsPath = rsPath\n self.testCase = testCase\n self.type = type\n\n return self._getPerfData()\n\n def _getPerfData(self):\n ffanDate = []\n mtuanDate = []\n if self.type == TYPE_TRAFFIC and (self.testCase == CASE_COLD_BOOT or self.testCase == CASE_WARM_BOOT):\n FFanTxtPath = os.path.join(os.path.join(os.path.join(self.rsPath, FFAN), CASE_FOLDER_LIST[self.testCase]),\n TRAFFIC_FILE)\n MTUANTxtPath = os.path.join(os.path.join(os.path.join(self.rsPath, MTUAN), CASE_FOLDER_LIST[self.testCase]),\n TRAFFIC_FILE)\n if os.path.exists(FFanTxtPath) and os.path.exists(MTUANTxtPath):\n ffanDate = self.parser.txtParser(FFanTxtPath, self.type)\n mtuanDate = self.parser.txtParser(MTUANTxtPath, self.type)\n else:\n print('Can not find %s test performance results!!!' % self.testCase)\n else:\n if self.type == TYPE_COLD_BOOT or self.type == TYPE_WARM_BOOT:\n FFanTxtPath = os.path.join(os.path.join(os.path.join(self.rsPath, FFAN), CASE_FOLDER_LIST[self.testCase]),\n BOOTTIME_FILE)\n MTUANTxtPath = os.path.join(os.path.join(os.path.join(self.rsPath, MTUAN), CASE_FOLDER_LIST[self.testCase]),\n BOOTTIME_FILE)\n if os.path.exists(FFanTxtPath) and os.path.exists(MTUANTxtPath):\n ffanDate = self.parser.txtParser(FFanTxtPath, self.type)\n mtuanDate = self.parser.txtParser(MTUANTxtPath, self.type)\n else:\n print('Can not find %s test performance results!!!' 
% self.testCase)\n            elif self.type == TYPE_FPS:\n                FFanTxtPath = os.path.join(os.path.join(os.path.join(self.rsPath, FFAN), CASE_FOLDER_LIST[self.testCase]),\n                                           FPS_FILE)\n                MTuanTxtPath = os.path.join(os.path.join(os.path.join(self.rsPath, MTUAN), CASE_FOLDER_LIST[self.testCase]),\n                                            FPS_FILE)\n                if os.path.exists(FFanTxtPath) and os.path.exists(MTuanTxtPath):\n                    ffanDate = self.parser.txtParser(FFanTxtPath, self.type)\n                    mtuanDate = self.parser.txtParser(MTuanTxtPath, self.type)\n                else:\n                    print('Can not find %s test performance results!!!' % self.testCase)\n            else:\n                FFanXmlPath = os.path.join(os.path.join(os.path.join(self.rsPath, FFAN), CASE_FOLDER_LIST[self.testCase]), PERF_FILE)\n                MTuanXmlPath = os.path.join(os.path.join(os.path.join(self.rsPath, MTUAN), CASE_FOLDER_LIST[self.testCase]), PERF_FILE)\n                if os.path.exists(FFanXmlPath) and os.path.exists(MTuanXmlPath):\n                    ffanDate = self.parser.xmlParser(FFanXmlPath, self.type)\n                    mtuanDate = self.parser.xmlParser(MTuanXmlPath, self.type)\n                else:\n                    print('Can not find %s test performance results!!!' % self.testCase)\n\n        return ffanDate, mtuanDate\n\n\n# Decorator that fetches the FFan/MTuan performance data for a test case before running the wrapped report handler\ndef dataHandle(type):\n    def handler(func):\n        def wrapper(cls, testCase):\n            print('Process %s performance data!!!' % type)\n            dataHandler = DataHandler()\n            cls.dataList[FFAN], cls.dataList[MTUAN] = dataHandler.handle(rsPath=cls.rsPath,\n                                                                         testCase=testCase,\n                                                                         type=type)\n            if cls.dataList[FFAN] and cls.dataList[MTUAN]:\n                cls.dataLength = cls._dataLength(cls.dataList[FFAN], cls.dataList[MTUAN])\n                func(cls, testCase)\n        return wrapper\n    return handler\n\n\nclass Handler(object):\n    def __init__(self):\n        self.rsPath = ''\n        self.reportPath = ''\n        self.workbook = ''\n        self.dataLength = 0\n\n        # performance test result lists\n        self.dataList = dict()\n\n        self.dataList[FFAN] = ''\n        self.dataList[MTUAN] = ''\n\n    def handle(self, rsPath):\n        self.rsPath = rsPath\n        self._mkReportDir()\n        try:\n            for testCase in CASE_LIST:\n                if os.path.exists(os.path.join(os.path.join(self.rsPath, FFAN), CASE_FOLDER_LIST[testCase])) and \\\n                        os.path.exists(os.path.join(os.path.join(self.rsPath, MTUAN), CASE_FOLDER_LIST[testCase])):\n                    report = os.path.join(self.reportPath, EXCEL_REPORT_FILE % CASE_FOLDER_LIST[testCase])\n                    self.workbook = xlsxwriter.Workbook(report)\n                    if testCase == CASE_FPS:\n                        self._fpsHandle(testCase)\n                    else:\n                        self._trafficHandle(testCase)\n                        if testCase == CASE_WARM_BOOT:\n                            self._warmBootHandle(testCase)\n                        elif testCase == CASE_COLD_BOOT:\n                            self._coldBootHandle(testCase)\n                        else:\n                            self._cpuHandle(testCase)\n                            self._memoryHandle(testCase)\n                            self._rxHandle(testCase)\n                            self._txHandle(testCase)\n                            self._temperatureHandle(testCase)\n                else:\n                    print('Missing %s test cases in result path.' 
% testCase)\n        except Exception as e:\n            print(e)\n        finally:\n            self.workbook.close()\n\n    def _mkReportDir(self):\n        '''\n        Create the report directory, which holds the excel reports and the performance chart images\n        '''\n        self.reportPath = os.path.join(self.rsPath, 'report')\n        if os.path.exists(self.reportPath):\n            timestamp = os.path.getmtime(self.reportPath)\n            date = datetime.datetime.fromtimestamp(timestamp).strftime('%Y_%m_%d_%H_%M_%S')\n            newDirName = 'report_' + date\n\n            newLogDir = os.path.join(self.rsPath, newDirName)\n            try:\n                os.renames(self.reportPath, newLogDir)\n            except Exception as e:\n                raise IOError('Modify the [%s] directory name failed with following error: \\n'\n                              '%s' % (self.reportPath, e))\n\n        try:\n            os.makedirs(self.reportPath)\n        except Exception as e:\n            raise IOError('Create the [%s] directory failed with following error: \\n'\n                          '%s' % (self.reportPath, e))\n\n    @staticmethod\n    def _dataLength(x, y):\n        return len(x) if len(x) > len(y) else len(y)\n\n    @dataHandle(TYPE_CPU)\n    def _cpuHandle(self, testCase):\n        # Generate the CPU perf sheet\n        self._createExcelReport(u'CPU 性能', self.workbook, TYPE_CPU, u'次数', u'cpu使用率(%)')\n\n    @dataHandle(TYPE_MEMORY)\n    def _memoryHandle(self, testCase):\n        # Generate the memory perf sheet\n        self._createExcelReport(u'内存性能', self.workbook, TYPE_MEMORY, u'次数', u'内存(Mb)')\n\n    @dataHandle(TYPE_RX)\n    def _rxHandle(self, testCase):\n        # Generate the rx perf sheet\n        self._createExcelReport(u'下行速率', self.workbook, TYPE_RX, u'次数', u'下行速率(KBps)')\n\n    @dataHandle(TYPE_TX)\n    def _txHandle(self, testCase):\n        # Generate the tx perf sheet\n        self._createExcelReport(u'上行速率', self.workbook, TYPE_TX, u'次数', u'上行速率(KBps)')\n\n    @dataHandle(TYPE_TEMPERATURE)\n    def _temperatureHandle(self, testCase):\n        # Generate the battery temperature perf sheet\n        self._createExcelReport(u'电池温度', self.workbook, TYPE_TEMPERATURE, u'次数', u'电池温度(℃)')\n\n    @dataHandle(TYPE_COLD_BOOT)\n    def _coldBootHandle(self, testCase):\n        # Generate the cold boot performance excel\n        self._createExcelReport(u'冷启动性能', self.workbook, TYPE_COLD_BOOT, u'次数', u'启动时间(ms)')\n\n    @dataHandle(TYPE_WARM_BOOT)\n    def _warmBootHandle(self, testCase):\n        # Generate the warm boot performance excel\n        self._createExcelReport(u'热启动性能', self.workbook, TYPE_WARM_BOOT, u'次数', u'启动时间(ms)')\n\n    @dataHandle(TYPE_FPS)\n    def _fpsHandle(self, testCase):\n        self._createExcelReport(u'OverDraw和FPS 性能', self.workbook, TYPE_FPS, u'帧/秒', 'OverDraw', 'FPS')\n\n    @dataHandle(TYPE_TRAFFIC)\n    def _trafficHandle(self, testCase):\n        self._createExcelReport(u'流量统计', self.workbook, TYPE_TRAFFIC, u'', u'流量统计(Kb)')\n\n    def _createExcelReport(self, title, workbook, key, *args):\n        worksheet = workbook.add_worksheet(title)\n\n        format_title = workbook.add_format()  # format object for header cells\n        format_title.set_border(1)  # 1px cell border\n        format_title.set_bg_color('#cccccc')  # cell background color '#cccccc'\n        format_title.set_align('center')  # center-aligned cell content\n        format_title.set_bold()  # bold cell content\n\n        format_ave = workbook.add_format()  # format object for data cells\n        format_ave.set_border(1)  # 1px cell border\n\n        if len(args) == 2 and key == TYPE_TRAFFIC:\n            worksheet.write(1, 1, args[1], format_title)\n            worksheet.write(2, 1, FFAN_APP, format_title)\n            worksheet.write(3, 1, MTUAN_APP, format_title)\n            worksheet.write(1, 2, '', format_title)\n            worksheet.write(2, 2, float(self.dataList[FFAN][0]), format_ave)\n            worksheet.write(3, 2, float(self.dataList[MTUAN][0]), format_ave)\n            chart = workbook.add_chart({'type': 'column'})\n            chart.add_series({'values': [title, 2, 2, 2, 2],\n                              'name': [title, 2, 1]})\n            chart.add_series({'values': [title, 3, 2, 3, 2],\n                              'name': [title, 3, 1]})\n            
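# Title and axis labels go on before the column chart is embedded at cell F15, stretched 2x horizontally.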
chart.set_title({'name': title})\n chart.set_y_axis({'name': args[1]})\n worksheet.insert_chart('F15', chart, {'x_scale': 2, 'y_scale': 1})\n elif len(args) == 2 and (key == TYPE_COLD_BOOT or key == TYPE_WARM_BOOT):\n dataFFanList = [float(data) for data in self.dataList[FFAN]]\n dataMTuanList = [float(data) for data in self.dataList[MTUAN]]\n maxFFanData = max(dataFFanList)\n maxMTuanData = max(dataMTuanList)\n minFFanData = min(dataFFanList)\n minMTuanData = min(dataMTuanList)\n averageFFanData = round(float(sum(dataFFanList) / len(dataFFanList)), 2)\n averageMTuanData = round(float(sum(dataMTuanList) / len(dataMTuanList)), 2)\n worksheet.write(14, 1, u'统计', format_title)\n worksheet.write(14, 2, FFAN_APP, format_title)\n worksheet.write(14, 3, MTUAN_APP, format_title)\n worksheet.write(15, 1, u'最大值', format_title)\n worksheet.write(16, 1, u'最小值', format_title)\n worksheet.write(17, 1, u'平均值', format_title)\n worksheet.write(15, 2, maxFFanData, format_ave)\n worksheet.write(16, 2, minFFanData, format_ave)\n worksheet.write(17, 2, averageFFanData, format_ave)\n worksheet.write(15, 3, maxMTuanData, format_ave)\n worksheet.write(16, 3, minMTuanData, format_ave)\n worksheet.write(17, 3, averageMTuanData, format_ave)\n\n if len(self.dataList[FFAN]) > len(self.dataList[MTUAN]):\n for _ in range(0, len(self.dataList[FFAN]) - len(self.dataList[MTUAN])):\n self.dataList[MTUAN].append('0')\n else:\n for _ in range(0, len(self.dataList[MTUAN]) - len(self.dataList[FFAN])):\n self.dataList[FFAN].append('0')\n\n worksheet.write(1, 1, args[1], format_title)\n worksheet.write(2, 1, FFAN_APP, format_title)\n worksheet.write(3, 1, MTUAN_APP, format_title)\n for row in range(0, self.dataLength):\n worksheet.write(1, row + 2, row+1, format_title)\n worksheet.write(2, row + 2, float(self.dataList[FFAN][row]), format_ave)\n worksheet.write(3, row + 2, float(self.dataList[MTUAN][row]), format_ave)\n\n\n\n chart = workbook.add_chart({'type': 'column'})\n chart.add_series({'categories': [title, 1, 2, 1, row+2],\n 'values': [title, 2, 2, 2, row+2],\n 'name': [title, 2, 1]})\n chart.add_series({'categories': [title, 1, 2, 1, row+2],\n 'values': [title, 3, 2, 3, row+2],\n 'name': [title, 3, 1]})\n chart.set_title({'name': title})\n chart.set_y_axis({'name': args[1]})\n worksheet.insert_chart('F15', chart, {'x_scale': 2, 'y_scale': 1.5})\n elif len(args) == 2:\n dataFFanList = [float(data) for data in self.dataList[FFAN]]\n dataMTuanList = [float(data) for data in self.dataList[MTUAN]]\n maxFFanData = max(dataFFanList)\n maxMTuanData = max(dataMTuanList)\n minFFanData = min(dataFFanList)\n minMTuanData = min(dataMTuanList)\n averageFFanData = round(float(sum(dataFFanList) / len(dataFFanList)), 2)\n averageMTuanData = round(float(sum(dataMTuanList) / len(dataMTuanList)), 2)\n worksheet.write(14, 1, u'统计', format_title)\n worksheet.write(14, 2, FFAN_APP, format_title)\n worksheet.write(14, 3, MTUAN_APP, format_title)\n worksheet.write(15, 1, u'最大值', format_title)\n worksheet.write(16, 1, u'最小值', format_title)\n worksheet.write(17, 1, u'平均值', format_title)\n worksheet.write(15, 2, maxFFanData, format_ave)\n worksheet.write(16, 2, minFFanData, format_ave)\n worksheet.write(17, 2, averageFFanData, format_ave)\n worksheet.write(15, 3, maxMTuanData, format_ave)\n worksheet.write(16, 3, minMTuanData, format_ave)\n worksheet.write(17, 3, averageMTuanData, format_ave)\n\n if len(self.dataList[FFAN]) > len(self.dataList[MTUAN]):\n for _ in range(0, len(self.dataList[FFAN]) - len(self.dataList[MTUAN])):\n 
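# Pad the shorter app's series with '0' placeholders so both rows span the same number of columns.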
self.dataList[MTUAN].append('0')\n else:\n for _ in range(0, len(self.dataList[MTUAN]) - len(self.dataList[FFAN])):\n self.dataList[FFAN].append('0')\n\n worksheet.write(1, 1, args[1], format_title)\n worksheet.write(2, 1, FFAN_APP, format_title)\n worksheet.write(3, 1, MTUAN_APP, format_title)\n for row in range(0, self.dataLength):\n worksheet.write(1, row + 2, row + 1, format_title)\n worksheet.write(2, row + 2, float(self.dataList[FFAN][row]), format_ave)\n worksheet.write(3, row + 2, float(self.dataList[MTUAN][row]), format_ave)\n\n chart = workbook.add_chart({'type': 'line'})\n chart.add_series({'categories': [title, 1, 2, 1, row + 2],\n 'values': [title, 2, 2, 2, row + 2],\n 'name': [title, 2, 1]})\n chart.add_series({'categories': [title, 1, 2, 1, row + 2],\n 'values': [title, 3, 2, 3, row + 2],\n 'name': [title, 3, 1]})\n chart.set_title({'name': title})\n chart.set_x_axis({'name' : args[0]})\n chart.set_y_axis({'name': args[1]})\n worksheet.insert_chart('F15', chart, {'x_scale': 3.5, 'y_scale': 1.5})\n else:\n worksheet.write(1, 1, args[1], format_title)\n worksheet.write(2, 1, FFAN_APP, format_title)\n worksheet.write(3, 1, MTUAN_APP, format_title)\n worksheet.write(1, 7, args[2], format_title)\n worksheet.write(2, 7, FFAN_APP, format_title)\n worksheet.write(3, 7, MTUAN_APP, format_title)\n for data in self.dataList[FFAN]:\n if data[0] == 'Mine':\n worksheet.write(1, 2, data[0], format_title)\n worksheet.write(1, 8, data[0], format_title)\n worksheet.write(2, 2, float(data[1]), format_ave)\n worksheet.write(2, 8, float(data[2]), format_ave)\n elif data[0] == 'Dashboard':\n worksheet.write(1, 3, data[0], format_title)\n worksheet.write(1, 9, data[0], format_title)\n worksheet.write(2, 3, float(data[1]), format_ave)\n worksheet.write(2, 9, float(data[2]), format_ave)\n elif data[0] == 'BenefitsLife':\n worksheet.write(1, 4, data[0], format_title)\n worksheet.write(1, 10, data[0], format_title)\n worksheet.write(2, 4, float(data[1]), format_ave)\n worksheet.write(2, 10, float(data[2]), format_ave)\n else:\n worksheet.write(1, 5, data[0], format_title)\n worksheet.write(1, 11, data[0], format_title)\n worksheet.write(2, 5, float(data[1]), format_ave)\n worksheet.write(2, 11, float(data[2]), format_ave)\n for data in self.dataList[MTUAN]:\n if data[0] == 'Mine':\n worksheet.write(3, 2, float(data[1]), format_ave)\n worksheet.write(3, 8, float(data[2]), format_ave)\n elif data[0] == 'Dashboard':\n worksheet.write(3, 3, float(data[1]), format_ave)\n worksheet.write(3, 9, float(data[2]), format_ave)\n worksheet.write(3, 4, 0, format_ave)\n worksheet.write(3, 5, 0, format_ave)\n worksheet.write(3, 10, 0, format_ave)\n worksheet.write(3, 11, 0, format_ave)\n\n chart = workbook.add_chart({'type': 'column'})\n chart.add_series({'categories': [title, 1, 2, 1, 5],\n 'values': [title, 2, 2, 2, 5],\n 'name': [title, 2, 1]})\n chart.add_series({'categories': [title, 1, 2, 1, 5],\n 'values': [title, 3, 2, 3, 5],\n 'name': [title, 3, 1]})\n chart.set_title({'name': 'OverDraw 性能'})\n chart.set_y_axis({'name': args[0]})\n worksheet.insert_chart('B14', chart, {'x_scale': 1.5, 'y_scale': 1})\n chart = workbook.add_chart({'type': 'column'})\n chart.add_series({'categories': [title, 1, 8, 1, 11],\n 'values': [title, 2, 8, 2, 11],\n 'name': [title, 2, 7]})\n chart.add_series({'categories': [title, 1, 8, 1, 11],\n 'values': [title, 3, 8, 3, 11],\n 'name': [title, 3, 7]})\n chart.set_title({'name': 'FPS 性能'})\n chart.set_y_axis({'name': args[0]})\n worksheet.insert_chart('N14', chart, {'x_scale': 1.5, 
'y_scale': 1})\n\nclass DataSummary(object):\n def __init__(self):\n self.rsPath = ''\n self.testCase = ''\n\n def handle(self, rsPath):\n self.rsPath = rsPath\n self.attachmentPath = os.path.join(self.rsPath, 'attachment')\n if not os.path.exists(self.attachmentPath):\n os.makedirs(self.attachmentPath)\n\n def getLogData(self,testCase):\n self.testCase = testCase\n return self._parserLogData()\n\n def _parserLogData(self):\n i = 0\n caseErrorInfo = {'ANR': 0, 'JRTERROR': 0, 'JRTCRASH': 0, 'APPDIED': 0, 'SYSTEMERROR': 0}\n tmpFile = os.path.join(self.logPath, '%s.txt') % self.testCase\n cmdFind = 'find %s -name \"%s*.log\" > %s' % (self.logPath, self.testCase, tmpFile)\n os.system(cmdFind)\n if os.path.exists(tmpFile):\n logCasePath = open(tmpFile, 'r')\n logPaths = logCasePath.readlines()\n logCasePath.close()\n if logPaths != []:\n for logPath in logPaths:\n caseLogFile = logPath[:-1]\n if os.path.exists(caseLogFile):\n logInfo = open(caseLogFile, 'r')\n logLines = logInfo.readlines()\n logInfo.close()\n for logLine in logLines:\n if logLine.find(\"anr\") != -1:\n caseErrorInfo['ANR'] += 1\n elif logLine.find(\"crash\") != -1:\n caseErrorInfo['JRTCRASH'] += 1\n elif logLine.find(\"Reading a NULL string not supported here.\") != -1:\n i += 1\n caseErrorInfo['JRTERROR'] += 1\n elif logLine.find(\"Got null root node from accessibility - Retrying...\") != -1:\n caseErrorInfo['JRTERROR'] += 1\n elif logLine.find(\"died\") != -1:\n caseErrorInfo['APPDIED'] += 1\n elif logLine.find(\"system error\") != -1:\n caseErrorInfo['SYSTEMERROR'] += 1\n caseErrorInfo['JRTERROR'] = caseErrorInfo['JRTERROR'] - (i-1)\n\n if os.path.exists(tmpFile):\n os.remove(tmpFile)\n\n return caseErrorInfo\n\nclass PerformanceSummary(object):\n def __init__(self):\n self.rsPath = ''\n self.attachmentPath = ''\n self.workbook = ''\n self.cpuData = {'dianying': {'max':'100%', 'min':'0%', 'anv':'50%', 'top':u'第1s', '40%':u'1s', '60%':u'1s', '80%':u'1s', '100%':u'1s', 'rst':u'正常'}, \n 'meishi': {'max':'100%', 'min':'0%', 'anv':'50%', 'top':u'第1s', '40%':u'1s', '60%':u'1s', '80%':u'1s', '100%':u'1s', 'rst':u'正常'},\n 'dingdan': {'max':'100%', 'min':'0%', 'anv':'50%', 'top':u'第1s', '40%':u'1s', '60%':u'1s', '80%':u'1s', '100%':u'1s', 'rst':u'正常'},\n 'denglu': {'max':'100%', 'min':'0%', 'anv':'50%', 'top':u'第1s', '40%':u'1s', '60%':u'1s', '80%':u'1s', '100%':u'1s', 'rst':u'正常'}}\n self.memoryData = {'dianying': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '60M':u'1s', '80M':u'1s', '100M':u'1s', 'rst':u'正常'},\n 'meishi': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '60M':u'1s', '80M':u'1s', '100M':u'1s', 'rst':u'正常'},\n 'dingdan': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '60M':u'1s', '80M':u'1s', '100M':u'1s', 'rst':u'正常'},\n 'denglu': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '60M':u'1s', '80M':u'1s', '100M':u'1s', 'rst':u'正常'}}\n self.txData = {'dianying': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '10K':u'1s', '20K':u'1s', '40K':u'1s', '60K':u'1s', '80K':u'1s', '100K':u'1s','rst':u'正常'},\n 'meishi': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '10K':u'1s', '20K':u'1s', '40K':u'1s', '60K':u'1s', '80K':u'1s', '100K':u'1s','rst':u'正常'},\n 'dingdan': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '10K':u'1s', '20K':u'1s', '40K':u'1s', '60K':u'1s', '80K':u'1s', '100K':u'1s','rst':u'正常'},\n 'denglu': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '10K':u'1s', '20K':u'1s', '40K':u'1s', '60K':u'1s', '80K':u'1s', '100K':u'1s','rst':u'正常'}}\n self.rxData = {'dianying': {'max':'100', 
'min':'0', 'anv':'50', 'top':u'第1s', '20K':u'1s', '40K':u'1s', '60K':u'1s', '80K':u'1s', '100K':u'1s','rst':u'正常'},\n 'meishi': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '20K':u'1s', '40K':u'1s', '60K':u'1s', '80K':u'1s', '100K':u'1s','rst':u'正常'},\n 'dingdan': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '20K':u'1s', '40K':u'1s', '60K':u'1s', '80K':u'1s', '100K':u'1s','rst':u'正常'},\n 'denglu': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '20K':u'1s', '40K':u'1s', '60K':u'1s', '80K':u'1s', '100K':u'1s','rst':u'正常'}}\n self.batteryTemperatureData = {'dianying': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '60℃':u'1s', '80℃':u'1s', 'rst':u'正常'},\n 'meishi': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '60℃':u'1s', '80℃':u'1s', 'rst':u'正常'},\n 'dingdan': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '60℃':u'1s', '80℃':u'1s', 'rst':u'正常'},\n 'denglu': {'max':'100', 'min':'0', 'anv':'50', 'top':u'第1s', '60℃':u'1s', '80℃':u'1s', 'rst':u'正常'}}\n self.fpsData = {'max':'10', 'min':'0', 'anv':'50', '16ms':u'否', '20ms':u'否', 'rst':u'正常'}\n self.coldBootData = {'num':'10', 'max':'9999', 'min':'1000', 'anv':u'5555', 'top':u'第1次', 'rst':u'正常'}\n self.warmBootData = {'num':'10', 'max':'1000', 'min':'0', 'anv':u'500', 'top':u'第1次', 'rst':u'正常'}\n\n self.ffanAppVersion = appVersion\n self.mtuanAppVersion = mtuanAPPVersion\n self.phoneVersion = phoneVersion\n self.buildVersion = buildVersion\n self.deviceNet = deviceNet\n\n def performanceSummary(self, rsPath):\n self.caseData = {u'CPU 性能':['0'], u'内存性能':['0'], u'上行速率':['0'], u'下行速率':['0'], u'电池温度':['0']}\n self.rsPath = rsPath\n try:\n resourcesDirectory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + \"/resources/\"\n xlsFileTemplate = os.path.join(resourcesDirectory, 'templatePerformance.xlsx')\n self.attachmentPath = os.path.join(self.rsPath, 'attachment')\n if not os.path.exists(self.attachmentPath):\n os.makedirs(self.attachmentPath)\n shutil.copy(xlsFileTemplate, self.attachmentPath)\n reportPath = os.path.join(self.rsPath, 'report')\n if os.path.exists(reportPath):\n dianYingFile = os.path.join(reportPath, 'dianying_performance_result.xlsx')\n meiShiHuiFile = os.path.join(reportPath, 'meishihui_performance_result.xlsx')\n dingDanFile = os.path.join(reportPath, 'wodedingdan_performance_result.xlsx')\n dengLuFile = os.path.join(reportPath, 'wodedenglu_performance_result.xlsx')\n fpsFile = os.path.join(reportPath, 'fps_performance_result.xlsx')\n coldBootFile = os.path.join(reportPath, 'coldboot_performance_result.xlsx')\n warmBootFile = os.path.join(reportPath, 'warmboot_performance_result.xlsx')\n excelList = {'dianying':dianYingFile, 'meishi':meiShiHuiFile, 'dingdan':dingDanFile, 'denglu':dengLuFile, 'fps':fpsFile, 'coldboot':coldBootFile, 'warmboot':warmBootFile}\n for (case,excelFile) in excelList.items():\n if os.path.exists(excelFile):\n data = xlrd.open_workbook(excelFile)\n if excelFile in (dianYingFile, meiShiHuiFile, dingDanFile, dengLuFile):\n sheetList = [u'CPU 性能', u'内存性能', u'上行速率', u'下行速率', u'电池温度']\n caseTime = 0\n for sheetTemp in sheetList:\n table = data.sheet_by_name(sheetTemp)\n if sheetTemp == u'CPU 性能':\n self.cpuData[case]['max'] = table.cell(15,2).value\n self.cpuData[case]['min'] = table.cell(16,2).value\n self.cpuData[case]['anv'] = table.cell(17,2).value\n cpuOver40 = 0\n cpuOver60 = 0\n cpuOver80 = 0\n cpuOver100 = 0\n cpuTop = []\n cpuTempData = table.row_values(2)\n for q in range(2, len(cpuTempData)):\n if (40 < cpuTempData[q] and cpuTempData[q] < 60) or 
(cpuTempData[q] == 60):\n cpuOver40 += 1\n elif (60 < cpuTempData[q] and cpuTempData[q] < 80) or (cpuTempData[q] == 80):\n cpuOver60 += 1\n elif (80 < cpuTempData[q] and cpuTempData[q] < 100) or (cpuTempData[q] == 100):\n cpuOver80 += 1\n elif 100 < cpuTempData[q]:\n cpuOver100 += 1\n if cpuTempData[q] == self.cpuData[case]['max']:\n cpuTop.append(q-1)\n self.cpuData[case]['40%'] = str(cpuOver40) + 's'\n self.cpuData[case]['60%'] = str(cpuOver60) + 's'\n self.cpuData[case]['80%'] = str(cpuOver80) + 's'\n self.cpuData[case]['100%'] = str(cpuOver100) + 's'\n if len(cpuTop) > 1:\n cpuTopTotal = ''\n for r in range(len(cpuTop)):\n if r == 0:\n cpuTopTotal = str(cpuTop[r])\n else:\n cpuTopTotal = cpuTopTotal + '、' + str(cpuTop[r])\n self.cpuData[case]['top'] = u'第' + cpuTopTotal + 's'\n else:\n self.cpuData[case]['top'] = u'第' + str(cpuTop[0]) + 's'\n cpuTotalTime = len(cpuTempData) - 2\n for dd in range(len(cpuTempData), 2, -1):\n if cpuTempData[dd-1] == 0:\n cpuTotalTime -= 1\n if (cpuOver60 > cpuTotalTime * 0.4) and ((cpuOver60 < cpuTotalTime * 0.6) or (cpuOver60 == cpuTotalTime * 0.6)):\n self.cpuData[case]['rst'] = u'偏高'\n elif cpuOver60 > cpuTotalTime * 0.6:\n self.cpuData[case]['rst'] = u'高'\n else:\n self.cpuData[case]['rst'] = u'正常'\n caseTime = cpuTotalTime\n elif sheetTemp == u'内存性能':\n self.memoryData[case]['max'] = table.cell(15,2).value\n self.memoryData[case]['min'] = table.cell(16,2).value\n self.memoryData[case]['anv'] = table.cell(17,2).value\n memoryOver60 = 0\n memoryOver80 = 0\n memoryOver100 = 0\n memoryOver120 = 0\n memoryTop = []\n memoryTempData = table.row_values(2)\n for t in range(2, len(memoryTempData)):\n if (60 < memoryTempData[t] and memoryTempData[t] < 80) or (memoryTempData[t] == 80):\n memoryOver60 += 1\n elif (80 < memoryTempData[t] and memoryTempData[t] < 100) or (memoryTempData[t] == 100):\n memoryOver60 += 1\n memoryOver80 += 1\n elif (100 < memoryTempData[t] and memoryTempData[t] < 100) or (memoryTempData[t] == 120):\n memoryOver60 += 1\n memoryOver80 += 1\n memoryOver100 += 1\n elif 120 < memoryTempData[t]:\n memoryOver60 += 1\n memoryOver80 += 1\n memoryOver100 += 1\n memoryOver120 += 1\n if memoryTempData[t] == self.memoryData[case]['max']:\n memoryTop.append(t-1)\n self.memoryData[case]['60M'] = str(memoryOver60) + 's'\n self.memoryData[case]['80M'] = str(memoryOver80) + 's'\n self.memoryData[case]['100M'] = str(memoryOver100) + 's'\n if len(memoryTop) > 1:\n memoryTopTotal = ''\n for u in range(len(memoryTop)):\n if u == 0:\n memoryTopTotal = str(memoryTop[u])\n else:\n memoryTopTotal = memoryTopTotal + '、' + str(memoryTop[u])\n self.memoryData[case]['top'] = u'第' + memoryTopTotal + u's'\n else:\n self.memoryData[case]['top'] = u'第' + str(memoryTop[0]) + u's'\n memoryTotalTime = len(memoryTempData) - 2\n for cc in range(len(memoryTempData), 2, -1):\n if memoryTempData[cc-1] == 0:\n memoryTotalTime -= 1\n if (memoryOver80 > memoryTotalTime * 0.5) and ((memoryOver120 < memoryTotalTime * 0.5) or (memoryOver120 == memoryTotalTime * 0.5)):\n self.memoryData[case]['rst'] = u'偏高'\n elif memoryOver120 > memoryTotalTime * 0.5:\n self.memoryData[case]['rst'] = u'高'\n else:\n self.memoryData[case]['rst'] = u'正常'\n elif sheetTemp == u'上行速率':\n self.txData[case]['max'] = table.cell(15,2).value\n self.txData[case]['min'] = table.cell(16,2).value\n self.txData[case]['anv'] = table.cell(17,2).value\n txOver10 = 0\n txOver20 = 0\n txOver40 = 0\n txOver60 = 0\n txOver80 = 0\n txOver100 = 0\n txTop = []\n txTempData = table.row_values(2)\n for v in range(2, 
len(txTempData)):\n if (10 < txTempData[v] and txTempData[v] < 20) or (txTempData[v] == 20):\n txOver10 += 1\n elif (20 < txTempData[v] and txTempData[v] < 40) or (txTempData[v] == 40):\n txOver10 += 1\n txOver20 += 1\n elif (40 < txTempData[v] and txTempData[v] < 60) or (txTempData[v] == 60):\n txOver10 += 1\n txOver20 += 1\n txOver40 += 1\n elif (60 < txTempData[v] and txTempData[v] < 80) or (txTempData[v] == 80):\n txOver10 += 1\n txOver20 += 1\n txOver40 += 1\n txOver60 += 1\n elif (80 < txTempData[v] and txTempData[v] < 100) or (txTempData[v] == 100):\n txOver10 += 1\n txOver20 += 1\n txOver40 += 1\n txOver60 += 1\n txOver80 += 1\n elif 100 < txTempData[v]:\n txOver10 += 1\n txOver20 += 1\n txOver40 += 1\n txOver60 += 1\n txOver80 += 1\n txOver100 += 1\n if txTempData[v] == self.txData[case]['max']:\n txTop.append(v-1)\n self.txData[case]['10K'] = str(txOver10) + 's'\n self.txData[case]['20K'] = str(txOver20) + 's'\n self.txData[case]['40K'] = str(txOver40) + 's'\n self.txData[case]['60K'] = str(txOver60) + 's'\n self.txData[case]['80K'] = str(txOver80) + 's'\n self.txData[case]['100K'] = str(txOver100) + 's'\n if len(txTop) > 1:\n txTopTotal = ''\n for w in range(len(txTop)):\n if w == 0:\n txTopTotal = str(txTop[w])\n else:\n txTopTotal = txTopTotal + '、' + str(txTop[w])\n self.txData[case]['top'] = u'第' + txTopTotal + u's'\n else:\n self.txData[case]['top'] = u'第' + str(txTop[0]) + u's'\n if (txOver40 > caseTime * 0.5) and ((txOver60 < caseTime * 0.5) or (txOver60 == caseTime * 0.5)):\n self.txData[case]['rst'] = u'偏高'\n elif txOver60 > caseTime * 0.5:\n self.txData[case]['rst'] = u'高'\n else:\n self.txData[case]['rst'] = u'正常'\n elif sheetTemp == u'下行速率':\n self.rxData[case]['max'] = table.cell(15,2).value\n self.rxData[case]['min'] = table.cell(16,2).value\n self.rxData[case]['anv'] = table.cell(17,2).value\n rxOver20 = 0\n rxOver30 = 0\n rxOver40 = 0\n rxOver60 = 0\n rxOver80 = 0\n rxOver100 = 0\n rxTop = []\n rxTempData = table.row_values(2)\n for x in range(2, len(rxTempData)):\n if (20 < rxTempData[x] and rxTempData[x] < 30) or (rxTempData[x] == 30):\n rxOver20 += 1\n elif (30 < rxTempData[x] and rxTempData[x] < 40) or (rxTempData[x] == 40):\n rxOver20 += 1\n rxOver30 += 1\n elif (40 < rxTempData[x] and rxTempData[x] < 60) or (rxTempData[x] == 60):\n rxOver20 += 1\n rxOver40 += 1\n elif (60 < rxTempData[x] and rxTempData[x] < 80) or (rxTempData[x] == 80):\n rxOver20 += 1\n rxOver40 += 1\n rxOver60 += 1\n elif (80 < rxTempData[x] and rxTempData[x] < 100) or (rxTempData[x] == 100):\n rxOver20 += 1\n rxOver40 += 1\n rxOver60 += 1\n rxOver80 += 1\n elif 100 < rxTempData[x]:\n rxOver20 += 1\n rxOver40 += 1\n rxOver60 += 1\n rxOver80 += 1\n rxOver100 += 1\n if rxTempData[x] == self.rxData[case]['max']:\n rxTop.append(x-1)\n self.rxData[case]['20K'] = str(rxOver20) + 's'\n self.rxData[case]['40K'] = str(rxOver40) + 's'\n self.rxData[case]['60K'] = str(rxOver60) + 's'\n self.rxData[case]['80K'] = str(rxOver80) + 's'\n self.rxData[case]['100K'] = str(rxOver100) + 's'\n if len(rxTop) > 1:\n rxTopTotal = ''\n for y in range(len(rxTop)):\n if y == 0:\n rxTopTotal = str(rxTop[y])\n else:\n rxTopTotal = rxTopTotal + '、' + str(rxTop[y])\n self.rxData[case]['top'] = u'第' + rxTopTotal + u's'\n else:\n self.rxData[case]['top'] = u'第' + str(rxTop[0]) + u's'\n if (rxOver20 > caseTime * 0.4) and ((rxOver30 < caseTime * 0.4) or (rxOver30 == caseTime * 0.4)):\n self.rxData[case]['rst'] = u'偏高'\n elif rxOver30 > caseTime * 0.4:\n self.rxData[case]['rst'] = u'高'\n else:\n self.rxData[case]['rst'] = 
elif sheetTemp == u'电池温度':\n                        self.batteryTemperatureData[case]['max'] = table.cell(15,2).value\n                        self.batteryTemperatureData[case]['min'] = table.cell(16,2).value\n                        self.batteryTemperatureData[case]['anv'] = table.cell(17,2).value\n                        batteryTemperatureOver37 = 0\n                        batteryTemperatureOver38 = 0\n                        batteryTemperatureOver60 = 0\n                        batteryTemperatureOver80 = 0\n                        batteryTemperatureTop = []\n                        batteryTemperatureTempData = table.row_values(2)\n                        for aa in range(2, len(batteryTemperatureTempData)):\n                            if (60 < batteryTemperatureTempData[aa] and batteryTemperatureTempData[aa] < 80) or (batteryTemperatureTempData[aa] == 80):\n                                batteryTemperatureOver37 += 1\n                                batteryTemperatureOver38 += 1\n                                batteryTemperatureOver60 += 1\n                            elif 80 < batteryTemperatureTempData[aa]:\n                                batteryTemperatureOver37 += 1\n                                batteryTemperatureOver38 += 1\n                                batteryTemperatureOver60 += 1\n                                batteryTemperatureOver80 += 1\n                            if batteryTemperatureTempData[aa] == self.batteryTemperatureData[case]['max']:\n                                batteryTemperatureTop.append(aa-1)\n                        self.batteryTemperatureData[case]['60℃'] = str(batteryTemperatureOver60) + 's'\n                        self.batteryTemperatureData[case]['80℃'] = str(batteryTemperatureOver80) + 's'\n                        if len(batteryTemperatureTop) > 1:\n                            batteryTemperatureTopTotal = ''\n                            for bb in range(len(batteryTemperatureTop)):\n                                if bb == 0:\n                                    batteryTemperatureTopTotal = str(batteryTemperatureTop[bb])\n                                elif len(batteryTemperatureTop) > 3:\n                                    batteryTemperatureTopTotal = str(batteryTemperatureTop[0]) + '、' + str(batteryTemperatureTop[1]) + '...'\n                                else:\n                                    batteryTemperatureTopTotal = batteryTemperatureTopTotal + '、' + str(batteryTemperatureTop[bb])\n                            self.batteryTemperatureData[case]['top'] = u'第' + batteryTemperatureTopTotal + u's'\n                        else:\n                            self.batteryTemperatureData[case]['top'] = u'第' + str(batteryTemperatureTop[0]) + u's'\n                        batteryTemperatureTotalTime = len(batteryTemperatureTempData) - 2\n                        for ee in range(len(batteryTemperatureTempData), 2, -1):\n                            if batteryTemperatureTempData[ee-1] == 0:\n                                batteryTemperatureTotalTime -= 1\n                        if (batteryTemperatureOver37 > batteryTemperatureTotalTime * 0.5) and ((batteryTemperatureOver38 < batteryTemperatureTotalTime * 0.5) or (batteryTemperatureOver38 == batteryTemperatureTotalTime * 0.5)):\n                            self.batteryTemperatureData[case]['rst'] = u'偏高'\n                        elif batteryTemperatureOver38 > batteryTemperatureTotalTime * 0.5:\n                            self.batteryTemperatureData[case]['rst'] = u'高'\n                        else:\n                            self.batteryTemperatureData[case]['rst'] = u'正常'\n            elif excelFile == fpsFile:\n                table = data.sheet_by_name(u'OverDraw和FPS 性能')\n                tempFpsData = []\n                over16 = u'否'\n                over20 = u'否'\n                temp16 = {0: u'否', 1: u'是'}\n                temp20 = {0: u'否', 1: u'是'}\n                mine = table.cell(2,8).value\n                tempFpsData.append(mine)\n                dashboard = table.cell(2,9).value\n                tempFpsData.append(dashboard)\n                huilife = table.cell(2,10).value\n                tempFpsData.append(huilife)\n                ffantong = table.cell(2,11).value\n                tempFpsData.append(ffantong)\n                for i in range(len(tempFpsData)):\n                    # the flags are sticky: once any sample exceeds a threshold the answer stays u'是'\n                    if tempFpsData[i] > 20:\n                        over16 = temp16[1]\n                        over20 = temp20[1]\n                    elif (16 < tempFpsData[i] < 20) or tempFpsData[i] == 20:\n                        over16 = temp16[1]\n                self.fpsData['max'] = max(tempFpsData)\n                self.fpsData['min'] = min(tempFpsData)\n                self.fpsData['anv'] = round(float(sum(tempFpsData) / len(tempFpsData)), 2)\n                self.fpsData['16ms'] = over16\n                self.fpsData['20ms'] = over20\n                if over16 == u'是' and over20 == u'是':\n                    self.fpsData['rst'] = u'高'\n                elif over16 == u'是' and over20 == u'否':\n                    self.fpsData['rst'] = u'偏高'\n                
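# 16 ms and 20 ms roughly correspond to the frame-time budgets of 60 fps and 50 fps; only exceeding both downgrades the verdict to u'高'.\n                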
else:\n                    self.fpsData['rst'] = u'正常'\n            elif excelFile == coldBootFile:\n                if len(data.sheets()) > 1:\n                    table = data.sheet_by_name(u'冷启动性能')\n                    self.coldBootData['num'] = 10\n                    self.coldBootData['max'] = table.cell(15,2).value\n                    self.coldBootData['min'] = table.cell(16,2).value\n                    self.coldBootData['anv'] = table.cell(17,2).value\n                    coldTop = []\n                    coldTempData = table.row_values(2)\n                    coldMid = 0\n                    coldBig = 0\n                    for j in range(2, len(coldTempData)):\n                        if (coldTempData[j] > 4000 and coldTempData[j] < 6000) or (coldTempData[j] == 6000):\n                            coldMid += 1\n                        elif coldTempData[j] > 6000:\n                            coldBig += 1\n                        if coldTempData[j] == self.coldBootData['max']:\n                            coldTop.append(j-1)\n                    if len(coldTop) > 1:\n                        for s in range(len(coldTop)):\n                            if s == 0:\n                                coldTopTotal = str(coldTop[s])\n                            else:\n                                coldTopTotal = coldTopTotal + '、' + str(coldTop[s])\n                        self.coldBootData['top'] = u'第' + coldTopTotal + u'次'\n                    else:\n                        self.coldBootData['top'] = u'第' + str(coldTop[0]) + u'次'\n                    # check the stricter band first; with the old order a max above 6000 was always caught by the > 4000 branch and could never be rated u'高'\n                    if self.coldBootData['max'] > 6000 and coldBig > 3:\n                        self.coldBootData['rst'] = u'高'\n                    elif self.coldBootData['max'] > 4000 and coldMid > 3:\n                        self.coldBootData['rst'] = u'偏高'\n                    else:\n                        self.coldBootData['rst'] = u'正常'\n            elif excelFile == warmBootFile:\n                if len(data.sheets()) > 1:\n                    table = data.sheet_by_name(u'热启动性能')\n                    self.warmBootData['num'] = 10\n                    self.warmBootData['max'] = table.cell(15,2).value\n                    self.warmBootData['min'] = table.cell(16,2).value\n                    self.warmBootData['anv'] = table.cell(17,2).value\n                    warmTop = []\n                    warmTempData = table.row_values(2)\n                    warmMid = 0\n                    warmBig = 0\n                    for k in range(2, len(warmTempData)):\n                        if (warmTempData[k] > 2000 and warmTempData[k] < 4000) or (warmTempData[k] == 4000):\n                            warmMid += 1\n                        elif warmTempData[k] > 4000:\n                            warmBig += 1\n                        if warmTempData[k] == self.warmBootData['max']:\n                            warmTop.append(k-1)\n                    if len(warmTop) > 1:\n                        for n in range(len(warmTop)):\n                            if n == 0:\n                                warmTopTotal = str(warmTop[n])\n                            else:\n                                warmTopTotal = warmTopTotal + '、' + str(warmTop[n])\n                        self.warmBootData['top'] = u'第' + warmTopTotal + u'次'\n                    else:\n                        self.warmBootData['top'] = u'第' + str(warmTop[0]) + u'次'\n                    if self.warmBootData['max'] > 4000 and warmBig > 3:\n                        self.warmBootData['rst'] = u'高'\n                    elif self.warmBootData['max'] > 2000 and warmMid > 3:\n                        self.warmBootData['rst'] = u'偏高'\n                    else:\n                        self.warmBootData['rst'] = u'正常'\n            # NOTE: xlsFile is only bound here at the end of the try block; an exception above would make the finally clause fail\n            xlsFile = os.path.join(self.attachmentPath, 'templatePerformance.xlsx')\n        except Exception as e:\n            print(e)\n        finally:\n            self._writeExcel(xlsFile)\n\n    def _writeExcel(self, file='file.xls'):\n\n        try:\n            wb = openpyxl.load_workbook(file)\n            for sheet_name in PERFORMANCE_REPORT_SHEET:\n                ws = wb.get_sheet_by_name(sheet_name)\n                if sheet_name == u'测试环境':\n                    ws['C4'] = self.phoneVersion\n                    ws['C5'] = self.buildVersion\n                    ws['C9'] = self.ffanAppVersion\n                    ws['C10'] = self.mtuanAppVersion\n                    ws['A14'] = self.deviceNet\n                elif sheet_name == u'CPU 性能':\n                    ws['B3'] = self.cpuData['dianying']['max']\n                    ws['C3'] = self.cpuData['dianying']['min']\n                    ws['D3'] = self.cpuData['dianying']['anv']\n                    ws['E3'] = self.cpuData['dianying']['top']\n                    ws['F3'] = self.cpuData['dianying']['40%']\n                    ws['G3'] = self.cpuData['dianying']['60%']\n                    ws['H3'] = self.cpuData['dianying']['80%']\n                    ws['I3'] = self.cpuData['dianying']['100%']\n                    ws['J3'] = self.cpuData['dianying']['rst']\n                    ws['B4'] = self.cpuData['meishi']['max']\n                    ws['C4'] = self.cpuData['meishi']['min']\n                    ws['D4'] = self.cpuData['meishi']['anv']\n                    ws['E4'] = self.cpuData['meishi']['top']\n                    ws['F4'] = self.cpuData['meishi']['40%']\n                    ws['G4'] = self.cpuData['meishi']['60%']\n                    ws['H4'] = self.cpuData['meishi']['80%']\n                    ws['I4'] = self.cpuData['meishi']['100%']\n                    ws['J4'] = self.cpuData['meishi']['rst']\n                    ws['B5'] = self.cpuData['dingdan']['max']\n                    ws['C5'] = self.cpuData['dingdan']['min']\n                    ws['D5'] = self.cpuData['dingdan']['anv']\n                    ws['E5'] = self.cpuData['dingdan']['top']\n                    ws['F5'] = self.cpuData['dingdan']['40%']\n                    ws['G5'] = self.cpuData['dingdan']['60%']\n                    ws['H5'] = self.cpuData['dingdan']['80%']\n                    ws['I5'] = self.cpuData['dingdan']['100%']\n                    ws['J5'] = self.cpuData['dingdan']['rst']\n                    ws['B6'] = self.cpuData['denglu']['max']\n                    ws['C6'] = self.cpuData['denglu']['min']\n                    ws['D6'] = self.cpuData['denglu']['anv']\n                    ws['E6'] = self.cpuData['denglu']['top']\n                    ws['F6'] = self.cpuData['denglu']['40%']\n                    ws['G6'] = self.cpuData['denglu']['60%']\n                    ws['H6'] = self.cpuData['denglu']['80%']\n                    ws['I6'] = self.cpuData['denglu']['100%']\n                    ws['J6'] = self.cpuData['denglu']['rst']\n                elif sheet_name == u'内存性能':\n                    ws['B3'] = self.memoryData['dianying']['max']\n                    ws['C3'] = self.memoryData['dianying']['min']\n                    ws['D3'] = self.memoryData['dianying']['anv']\n                    ws['E3'] = self.memoryData['dianying']['top']\n                    ws['F3'] = self.memoryData['dianying']['60M']\n                    ws['G3'] = self.memoryData['dianying']['80M']\n                    ws['H3'] = self.memoryData['dianying']['100M']\n                    ws['I3'] = self.memoryData['dianying']['rst']\n                    ws['B4'] = self.memoryData['meishi']['max']\n                    ws['C4'] = self.memoryData['meishi']['min']\n                    ws['D4'] = self.memoryData['meishi']['anv']\n                    ws['E4'] = self.memoryData['meishi']['top']\n                    ws['F4'] = self.memoryData['meishi']['60M']\n                    ws['G4'] = self.memoryData['meishi']['80M']\n                    ws['H4'] = self.memoryData['meishi']['100M']\n                    ws['I4'] = self.memoryData['meishi']['rst']\n                    ws['B5'] = self.memoryData['dingdan']['max']\n                    ws['C5'] = self.memoryData['dingdan']['min']\n                    ws['D5'] = self.memoryData['dingdan']['anv']\n                    ws['E5'] = self.memoryData['dingdan']['top']\n                    ws['F5'] = self.memoryData['dingdan']['60M']\n                    ws['G5'] = self.memoryData['dingdan']['80M']\n                    ws['H5'] = self.memoryData['dingdan']['100M']\n                    ws['I5'] = self.memoryData['dingdan']['rst']\n                    ws['B6'] = self.memoryData['denglu']['max']\n                    ws['C6'] = self.memoryData['denglu']['min']\n                    ws['D6'] = self.memoryData['denglu']['anv']\n                    ws['E6'] = self.memoryData['denglu']['top']\n                    ws['F6'] = self.memoryData['denglu']['60M']\n                    ws['G6'] = self.memoryData['denglu']['80M']\n                    ws['H6'] = self.memoryData['denglu']['100M']\n                    ws['I6'] = self.memoryData['denglu']['rst']\n                elif sheet_name == u'上行速率':\n                    ws['B3'] = self.txData['dianying']['max']\n                    ws['C3'] = self.txData['dianying']['min']\n                    ws['D3'] = self.txData['dianying']['anv']\n                    ws['E3'] = self.txData['dianying']['top']\n                    ws['F3'] = self.txData['dianying']['10K']\n                    ws['G3'] = self.txData['dianying']['20K']\n                    ws['H3'] = self.txData['dianying']['40K']\n                    ws['I3'] = self.txData['dianying']['60K']\n                    ws['J3'] = self.txData['dianying']['80K']\n                    ws['K3'] = self.txData['dianying']['100K']\n                    ws['L3'] = self.txData['dianying']['rst']\n                    ws['B4'] = self.txData['meishi']['max']\n                    ws['C4'] = self.txData['meishi']['min']\n                    ws['D4'] = self.txData['meishi']['anv']\n                    ws['E4'] = self.txData['meishi']['top']\n                    ws['F4'] = self.txData['meishi']['10K']\n                    ws['G4'] = self.txData['meishi']['20K']\n                    ws['H4'] = self.txData['meishi']['40K']\n                    ws['I4'] = self.txData['meishi']['60K']\n                    ws['J4'] = self.txData['meishi']['80K']\n                    ws['K4'] = self.txData['meishi']['100K']\n                    ws['L4'] = self.txData['meishi']['rst']\n                    ws['B5'] = self.txData['dingdan']['max']\n                    ws['C5'] = self.txData['dingdan']['min']\n                    ws['D5'] = self.txData['dingdan']['anv']\n                    ws['E5'] = self.txData['dingdan']['top']\n                    ws['F5'] = self.txData['dingdan']['10K']\n                    ws['G5'] = self.txData['dingdan']['20K']\n                    ws['H5'] = self.txData['dingdan']['40K']\n                    ws['I5'] = self.txData['dingdan']['60K']\n                    ws['J5'] = self.txData['dingdan']['80K']\n                    ws['K5'] = self.txData['dingdan']['100K']\n                    ws['L5'] = self.txData['dingdan']['rst']\n                    ws['B6'] = self.txData['denglu']['max']\n                    ws['C6'] = self.txData['denglu']['min']\n                    ws['D6'] = self.txData['denglu']['anv']\n                    ws['E6'] = self.txData['denglu']['top']\n                    ws['F6'] = self.txData['denglu']['10K']\n                    ws['G6'] = self.txData['denglu']['20K']\n                    ws['H6'] = self.txData['denglu']['40K']\n                    ws['I6'] = self.txData['denglu']['60K']\n                    ws['J6'] = self.txData['denglu']['80K']\n                    ws['K6'] = self.txData['denglu']['100K']\n                    ws['L6'] = self.txData['denglu']['rst']\n                elif sheet_name == u'下行速率':\n                    ws['B3'] = self.rxData['dianying']['max']\n                    ws['C3'] = self.rxData['dianying']['min']\n                    ws['D3'] = self.rxData['dianying']['anv']\n                    ws['E3'] = self.rxData['dianying']['top']\n                    ws['F3'] = self.rxData['dianying']['20K']\n                    ws['G3'] = self.rxData['dianying']['40K']\n                    ws['H3'] = self.rxData['dianying']['60K']\n                    ws['I3'] = self.rxData['dianying']['80K']\n                    ws['J3'] = self.rxData['dianying']['100K']\n                    ws['K3'] = self.rxData['dianying']['rst']\n                    ws['B4'] = self.rxData['meishi']['max']\n                    ws['C4'] = self.rxData['meishi']['min']\n                    ws['D4'] = self.rxData['meishi']['anv']\n                    ws['E4'] = self.rxData['meishi']['top']\n                    ws['F4'] = self.rxData['meishi']['20K']\n                    ws['G4'] = self.rxData['meishi']['40K']\n                    ws['H4'] = self.rxData['meishi']['60K']\n                    ws['I4'] = self.rxData['meishi']['80K']\n                    ws['J4'] = self.rxData['meishi']['100K']\n                    ws['K4'] = self.rxData['meishi']['rst']\n                    ws['B5'] = self.rxData['dingdan']['max']\n                    ws['C5'] = self.rxData['dingdan']['min']\n                    ws['D5'] = self.rxData['dingdan']['anv']\n                    ws['E5'] = self.rxData['dingdan']['top']\n                    ws['F5'] = self.rxData['dingdan']['20K']\n                    ws['G5'] = self.rxData['dingdan']['40K']\n                    ws['H5'] = self.rxData['dingdan']['60K']\n                    ws['I5'] = self.rxData['dingdan']['80K']\n                    ws['J5'] = self.rxData['dingdan']['100K']\n                    ws['K5'] = self.rxData['dingdan']['rst']\n                    ws['B6'] = self.rxData['denglu']['max']\n                    ws['C6'] = self.rxData['denglu']['min']\n                    ws['D6'] = self.rxData['denglu']['anv']\n                    ws['E6'] = self.rxData['denglu']['top']\n                    ws['F6'] = self.rxData['denglu']['20K']\n                    ws['G6'] = self.rxData['denglu']['40K']\n                    ws['H6'] = self.rxData['denglu']['60K']\n                    ws['I6'] = self.rxData['denglu']['80K']\n                    ws['J6'] = self.rxData['denglu']['100K']\n                    ws['K6'] = self.rxData['denglu']['rst']\n                elif sheet_name == u'电池温度':\n                    ws['B3'] = self.batteryTemperatureData['dianying']['max']\n                    ws['C3'] = self.batteryTemperatureData['dianying']['min']\n                    ws['D3'] = self.batteryTemperatureData['dianying']['anv']\n                    ws['E3'] = self.batteryTemperatureData['dianying']['top']\n                    ws['F3'] = self.batteryTemperatureData['dianying']['60℃']\n                    ws['G3'] = self.batteryTemperatureData['dianying']['80℃']\n                    ws['H3'] = self.batteryTemperatureData['dianying']['rst']\n                    ws['B4'] = self.batteryTemperatureData['meishi']['max']\n                    ws['C4'] = self.batteryTemperatureData['meishi']['min']\n                    ws['D4'] = self.batteryTemperatureData['meishi']['anv']\n                    ws['E4'] = self.batteryTemperatureData['meishi']['top']\n                    ws['F4'] = self.batteryTemperatureData['meishi']['60℃']\n                    ws['G4'] = self.batteryTemperatureData['meishi']['80℃']\n                    ws['H4'] = self.batteryTemperatureData['meishi']['rst']\n                    ws['B5'] = self.batteryTemperatureData['dingdan']['max']\n                    ws['C5'] = self.batteryTemperatureData['dingdan']['min']\n                    ws['D5'] = self.batteryTemperatureData['dingdan']['anv']\n                    ws['E5'] = self.batteryTemperatureData['dingdan']['top']\n                    ws['F5'] = self.batteryTemperatureData['dingdan']['60℃']\n                    ws['G5'] = self.batteryTemperatureData['dingdan']['80℃']\n                    ws['H5'] = self.batteryTemperatureData['dingdan']['rst']\n                    ws['B6'] = self.batteryTemperatureData['denglu']['max']\n                    ws['C6'] = self.batteryTemperatureData['denglu']['min']\n                    ws['D6'] = self.batteryTemperatureData['denglu']['anv']\n                    ws['E6'] = self.batteryTemperatureData['denglu']['top']\n                    ws['F6'] = self.batteryTemperatureData['denglu']['60℃']\n                    ws['G6'] = self.batteryTemperatureData['denglu']['80℃']\n                    ws['H6'] = self.batteryTemperatureData['denglu']['rst']\n                elif sheet_name == u'FPS 性能':\n                    ws['B2'] = self.fpsData['max']\n                    ws['B3'] = self.fpsData['min']\n                    ws['B4'] = self.fpsData['anv']\n                    ws['B5'] = self.fpsData['16ms']\n                    ws['B6'] = self.fpsData['20ms']\n                    ws['B7'] = self.fpsData['rst']\n                elif sheet_name == u'冷启动性能':\n                    ws['B2'] = self.coldBootData['num']\n                    ws['B3'] = self.coldBootData['max']\n                    ws['B4'] = self.coldBootData['min']\n                    ws['B5'] = self.coldBootData['anv']\n                    ws['B6'] = self.coldBootData['top']\n                    ws['B7'] = self.coldBootData['rst']\n                elif sheet_name == u'热启动性能':\n                    ws['B2'] = self.warmBootData['num']\n                    ws['B3'] = self.warmBootData['max']\n                    ws['B4'] = self.warmBootData['min']\n                    ws['B5'] = self.warmBootData['anv']\n                    ws['B6'] = self.warmBootData['top']\n                    ws['B7'] = self.warmBootData['rst']\n            xlsFile = os.path.join(self.attachmentPath, u'飞凡竞品性能评测报告(%s).xlsx' % time.strftime(\"%Y%m%d\"))\n            wb.save(xlsFile)\n            if os.path.exists(file):\n                os.remove(file)\n        except:\n            print(\"no sheet in %s named %s\" % (file, sheet_name))\n\nclass SendMail(object):\n    def __init__(self):\n        pass\n\n    def mail(self):\n        pass\n\ndef parse_command():\n    '''\n    Parse the log path command-line argument.\n    '''\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-l', '--log_path', action='store', default='.',\n                        dest='log_path', help='Setup log path, default is current execution directory.')\n    args = parser.parse_args()\n    rsPath = os.path.abspath(args.log_path)\n    return rsPath\n\nif __name__ == \"__main__\":\n    rspath = parse_command()\n    handler = Handler()\n    handler.handle(rspath)\n","repo_name":"liu111xiao111/UItest","sub_path":"tools/performanceHandler.py","file_name":"performanceHandler.py","file_ext":"py","file_size_in_byte":72134,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"12558193766","text":"import sys\nimport argparse\nfrom joblib import Parallel, delayed\nimport numpy as np\nimport datetime\nimport pickle\nimport os\nimport learning_algorithm as la\nimport source_task_creation as stc\nimport simulation_classes as sc\nimport gym\nimport envs\n\n\nsys.path.append(\"../\")\n\n\ndef main():\n\n    # General env properties\n    env_tgt = gym.make('LQG1D-v0')\n    env_src = gym.make('LQG1D-v0')\n    param_space_size = 1\n    state_space_size = 1\n    env_param_space_size = 3\n    episode_length = 20\n    gaussian_transition = True\n\n    env_param = sc.EnvParam(env_tgt, param_space_size, state_space_size, env_param_space_size, episode_length, gaussian_transition)\n\n    mean_initial_param = -0.1 * np.ones(param_space_size)\n    variance_initial_param = 0\n    variance_action = 0.1\n\n    simulation_param = sc.SimulationParam(mean_initial_param, variance_initial_param, variance_action, arguments.batch_size,\n                                          arguments.iterations, arguments.gamma, None, arguments.learning_rate, arguments.ess_min,\n                                          \"Yes\" if arguments.adaptive else \"No\", arguments.n_min, use_adam=arguments.use_adam)\n\n    # 
Source tasks\n pis = [[-0.1], [-0.15], [-0.2], [-0.25], [-0.3], [-0.35], [-0.4], [-0.45]]\n if arguments.random_src:\n A = np.random.uniform(0.6, 1.4, arguments.n_source_models)\n B = np.random.uniform(0.8, 1.2, arguments.n_source_models)\n else:\n A = np.array(arguments.src_A)\n B = np.array(arguments.src_B)\n envs = [[A[i], B[i], 0.09] for i in range(A.shape[0])]\n print(envs)\n policy_params = []\n env_params = []\n\n for e in envs:\n for p in pis:\n policy_params.append(p)\n env_params.append(e)\n\n policy_params = np.array(policy_params)\n env_params = np.array(env_params)\n\n n_config_cv = policy_params.shape[0]\n\n data = stc.sourceTaskCreationSpec(env_src, episode_length, arguments.n_source_samples, arguments.gamma, variance_action,\n policy_params, env_params, param_space_size, state_space_size, env_param_space_size)\n\n stats = {}\n for estimator in estimators:\n stats[estimator] = []\n\n for estimator in estimators:\n\n print(estimator)\n\n # Create a new dataset object\n source_dataset = sc.SourceDataset(*data, n_config_cv)\n\n off_policy = 0 if estimator in [\"GPOMDP\", \"REINFORCE\", \"REINFORCE-BASELINE\"] else 1\n\n name = estimator\n\n if estimator.endswith(\"SR\"):\n # Create a fake dataset for the sample-reuse algorithm\n data_sr = stc.sourceTaskCreationSpec(env_src, episode_length, 1, arguments.gamma, variance_action,\n np.array([[-0.1]]), np.array([[1.0, 1.0, 0.09]]), param_space_size,\n state_space_size, env_param_space_size)\n source_dataset = sc.SourceDataset(*data_sr, 1)\n name = estimator[:-3]\n\n result = la.learnPolicy(env_param, simulation_param, source_dataset, name, off_policy=off_policy,\n model_estimation=0, dicrete_estimation=0,\n model_estimator=None, verbose=not arguments.quiet)\n\n stats[estimator].append(result)\n\n return stats\n\n\ndef run(id, seed):\n\n # Set the random seed\n np.random.seed(seed)\n\n print(\"Starting run {0}\".format(id))\n\n results = main()\n\n print(\"Done run {0}\".format(id))\n\n # Log the results\n with open(\"{0}/{1}.pkl\".format(folder, id), 'wb') as output:\n pickle.dump(results, output)\n\n return results\n\n\n# Command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--iterations\", default=100, type=int)\nparser.add_argument(\"--learning_rate\", default=8e-6, type=float)\nparser.add_argument(\"--gamma\", default=0.99, type=float)\nparser.add_argument(\"--batch_size\", default=10, type=int)\nparser.add_argument(\"--ess_min\", default=20, type=int)\nparser.add_argument(\"--n_min\", default=5, type=int)\nparser.add_argument(\"--adaptive\", default=False, action='store_true')\nparser.add_argument(\"--use_adam\", default=False, action='store_true')\nparser.add_argument(\"--random_src\", default=True, action='store_false')\nparser.add_argument(\"--n_source_samples\", default=10, type=int)\nparser.add_argument(\"--src_A\", default=None, nargs=\"+\", type=float)\nparser.add_argument(\"--src_B\", default=None, nargs=\"+\", type=float)\nparser.add_argument(\"--n_source_models\", default=5, type=int)\nparser.add_argument(\"--n_jobs\", default=1, type=int)\nparser.add_argument(\"--n_runs\", default=1, type=int)\nparser.add_argument(\"--quiet\", default=False, action='store_true')\n\n# Read arguments\narguments = parser.parse_args()\n\nestimators = [\"PD-IS\", \"MIS-CV-BASELINE\", \"PD-MIS-CV-BASELINE\", \"PD-MIS-CV-BASELINE_SR\", \"GPOMDP\"]\n\n# Base folder where to log\nfolder = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\nos.mkdir(folder)\n\n# Save arguments\nwith 
open(\"{0}/params.txt\".format(folder), 'w') as f:\n for key, value in vars(arguments).items():\n f.write(\"{0}: {1}\\n\".format(key, value))\n\n# Seeds for each run\nseeds = [np.random.randint(1000000) for _ in range(arguments.n_runs)]\n\nif arguments.n_jobs == 1:\n results = [run(id, seed) for id, seed in zip(range(arguments.n_runs), seeds)]\nelse:\n results = Parallel(n_jobs=arguments.n_jobs, backend='loky')(delayed(run)(id, seed) for id, seed in zip(range(arguments.n_runs), seeds))\n\nwith open('{0}/results1.pkl'.format(folder), 'wb') as output:\n pickle.dump(results, output)\n\n################################################\n\nprint(folder)\n","repo_name":"AndreaTirinzoni/transfer-policy-search","sub_path":"scripts/run_lqr_ideal.py","file_name":"run_lqr_ideal.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"73741033121","text":"from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom . import forms\nfrom . import views\n\napp_name = \"portal\"\n\n\nurlpatterns = [\n path(\"\", views.login_redirect, name=\"login_redirect\"),\n path(\"dashboard_redirect/\", views.dashboard_redirect, name=\"dashboard_redirect\"),\n path(\n \"login/\",\n auth_views.LoginView.as_view(\n template_name=\"public/login.html\", authentication_form=forms.LoginForm\n ),\n name=\"login\",\n ),\n path(\"logout/\", auth_views.LogoutView.as_view(), name=\"logout\"),\n]\n","repo_name":"zagdiablo/eduwall-backend-django","sub_path":"portal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74823356960","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1, l2):\n\n head_l1 = l1\n head_l2 = l2\n\n # 遍历链表\n temp = []\n\n head = ListNode(0)\n cur = head\n\n \n move = 0\n while head_l1 != None and head_l2 != None:\n\n\n sum_data = head_l1.val + head_l2.val + move\n\n \n de = sum_data%10\n\n temp.append(de)\n\n head.next = ListNode(de)\n\n head = head.next\n \n move = sum_data//10\n\n\n head_l1 = head_l1.next\n head_l2 = head_l2.next\n\n if head_l1 == None:\n\n while head_l2:\n\n sum_data = head_l2.val + move\n\n de = sum_data%10\n\n\n temp.append(sum_data)\n head_l2 = head_l2.next\n head.next = ListNode(de)\n\n move = sum_data//10\n\n\n\n head = head.next\n else:\n\n while head_l1:\n\n sum_data = head_l1.val + move\n\n de = sum_data%10\n\n temp.append(de)\n\n head_l1 = head_l1.next\n\n head.next = ListNode(de)\n move = sum_data//10\n\n head = head.next\n\n if move >= 1:\n head.next = ListNode(move)\n head = head.next\n\n return cur.next\n\n\n\n\n\n\n\n\n","repo_name":"YuanyuanChenCircle/leetcode_notes","sub_path":"two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26775701812","text":"from typing import Iterable, Union\n\nfrom meutils.docarray_ import Document, DocumentArray\nfrom meutils.docarray_.array.storage.base.seqlike import BaseSequenceLikeMixin\n\n\nclass SequenceLikeMixin(BaseSequenceLikeMixin):\n \"\"\"Implement sequence-like methods for DocumentArray with Redis as storage\"\"\"\n\n def __eq__(self, other):\n \"\"\"Compare this object to the other, returns True if and only if 
other\n        has the same type as self and other has the same meta information\n\n        :param other: the other object to check for equality\n        :return: ``True`` if other is equal to self\n        \"\"\"\n        # two DocumentArrays are considered the same if they have the same client meta data\n        return (\n            type(self) is type(other)\n            and self._client.client_info() == other._client.client_info()\n            and self._config == other._config\n        )\n\n    def __len__(self):\n        \"\"\"Return the length of :class:`DocumentArray` that uses Redis as storage\n\n        :return: the length of this :class:`DocumentArrayRedis` object\n        \"\"\"\n        if self._list_like:\n            return len(self._offset2ids)\n        try:\n            lua_script = f'return #redis.pcall(\"keys\", \"{self._config.index_name}:*\")'\n            cmd = self._client.register_script(lua_script)\n            return cmd()\n        except:\n            return 0\n\n    def __contains__(self, x: Union[str, 'Document']):\n        \"\"\"Check if ``x`` is contained in this :class:`DocumentArray` with Redis storage\n\n        :param x: the id of the document to check or the document object itself\n        :return: True if ``x`` is contained in self\n        \"\"\"\n        if isinstance(x, str):\n            return self._doc_id_exists(x)\n        elif isinstance(x, Document):\n            return self._doc_id_exists(x.id)\n        else:\n            return False\n\n    def __repr__(self):\n        \"\"\"Return the string representation of :class:`DocumentArrayRedis` object\n        :return: string representation of this object\n        \"\"\"\n        return f'<DocumentArray[Redis] (length={len(self)}) at {id(self)}>'\n\n    def _upload_batch(self, batch_of_docs: Iterable['Document']):\n        pipe = self._client.pipeline()\n        for doc in batch_of_docs:\n            payload = self._document_to_redis(doc)\n            pipe.hset(self._doc_prefix + doc.id, mapping=payload)\n        pipe.execute()\n\n    def _extend(self, docs: Iterable['Document']):\n        da = DocumentArray(docs)\n        for batch_of_docs in da.batch(self._config.batch_size):\n            self._upload_batch(batch_of_docs)\n            if self._list_like:\n                self._offset2ids.extend(batch_of_docs[:, 'id'])\n","repo_name":"yuanjie-ai/MeUtils","sub_path":"meutils/other/docarray/array/storage/redis/seqlike.py","file_name":"seqlike.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
+{"seq_id":"20036490665","text":"\"\"\"\nDatagram reading logic for conversion from .kmall to .xyz, with a temporary .pings file in between.\nThe logic is derived from the file \"doItAllKmallNoSeabedImageNoTide.py\".\n\"\"\"\nimport os\nimport struct\nclear = lambda: os.system('cls')\ntimes = []\ntides = []\nnoTides = -1\nmin_e = min_n = 99.0\nmax_e = max_n = 0.0\nstart_t = stop_t = -1\nlastidt = 0\nstdstr = \"\"\n\n# Process one depth datagram, #MRZ\n# lengtha and chunk are from processDatagram, see below\n# millisec is decoded from the header, so I send it in as a parameter here\ndef process_MRZ_data(millisec, lengtha, chunk, outputIO):\n\tglobal min_e\n\tglobal min_n\n\tglobal max_e\n\tglobal max_n\n\tglobal start_t\n\tglobal stop_t\n\tglobal stdstr\n\tXY_DECIMALS = 8\n\t\n\t# Headersize is 4 bytes smaller than in the header file, remember that the 4\n\t# bytes with the length have been dropped\n\theadersize = 1 + 1 + 1 + 1 + 1 + 1 + 2 + 4 + 4\n\tpartitionsize = 2 + 2\n\tcommonsize = 2 + 2 + 8\n\tcommon = struct.Struct('HHBBBBBBBB')\n\tnumBytesCmnPart, pingCnt, rxFansPerPing, rxFanIndex, swathsPerPing, swathAlongPosition, \\\n\ttxTransducerInd, rxTransducerInd, numRxTransducers, algorithmType = common.unpack_from(chunk, headersize + partitionsize)\n\tpinginfo_size = 2 + 2 + 4 + 1 + 1 + 1 + 1 + 1 + 1 + 2 + 11 * 4 + 2 + 2 + 1 + 1 + 2 + 4 + 4 + 4 + 4 + 2 + 2 + 4 + 2 + 2 + 6 * 4 
+ 1 + 1 + 1 + 1 + 8 + 8 + 4 + 8\n\tpinginfo = struct.Struct('HHfBBBBBBHfffffffffffhhBBHIfffHHfHHffffffBBBBddf')\n\tnumBytesInfoData, padding0, pingRate_Hz, beamSpacing, depthMode,\\\n\tsubDepthMode, distanceBtwSwath, detectionMode, pulseForm, \\\n\tpadding01, frequencyMode_Hz, freqRangeLowLim_Hz, \\\n\tfreqRangeHighLim_Hz, maxTotalTxPulseLength_sec, \\\n\tmaxEffTxPulseLength_sec, maxEffTxBandWidth_Hz, \\\n\tabsCoeff_dBPerkm, portSectorEdge_deg, \\\n\tstarbSectorEdge_deg, portMeanCov_deg, \\\n\tstarbMeanCov_deg, portMeanCov_m, \\\n\tstarbMeanCov_m, modeAndStabilisation, \\\n\truntimeFilter1, runtimeFilter2,\\\n\tpipeTrackingStatus, transmitArraySizeUsed_deg,\\\n\treceiveArraySizeUsed_deg, transmitPower_dB,\\\n\tSLrampUpTimeRemaining, padding1,\\\n\tyawAngle_deg, numTxSectors, numBytesPerTxSector,\\\n\theadingVessel_deg, soundSpeedAtTxDepth_mPerSec,\\\n\ttxTransducerDepth_m, z_waterLevelReRefPoint_m, \\\n\tx_txTransducerArm_SCS_m, y_txTransducerArm_SCS_m,\\\n\tlatLongInfo, posSensorStatus, attitudeSensorStatus,\\\n\tpadding2, latitude_deg, longitude_deg,\\\n\tellipsoidHeightReRefPoint_m = pinginfo.unpack_from(chunk, headersize + partitionsize + commonsize)\n\t\n\t# Fix of a bug in Python, where binary alignments are not correct\n\tlatlon = struct.Struct(\"d\")\n\tklat = latlon.unpack_from(chunk, headersize + partitionsize + commonsize + 124)\n\tklon = latlon.unpack_from(chunk, headersize + partitionsize + commonsize + 124 + 8)\n\tellheight = struct.Struct(\"f\")\n\tellipsheight = ellheight.unpack_from(chunk, headersize + partitionsize + commonsize + 124 + 8 + 8)\n\tlatitude_deg = klat[0]\n\tlongitude_deg = klon[0]\n\tellipsoidHeightReRefPoint_m = ellipsheight[0]\n\t\n\t# Pointer offset to sectorInfo\n\tsectorInfo_offset = headersize + partitionsize + commonsize + pinginfo_size\n\t# Changed from version 0\n\tsectorInfo = struct.Struct('BBBBfffffffBBHfff')\n\tsectorInfo_size = 1 + 1 + 1 + 1 + 7 * 4 + 1 + 1 + 2 + 4 + 4 + 4\n\ti = 0\n\twhile (i < numTxSectors):\n\t\ttxSectorNumb, txArrNumber, txSubArray, padding0,\\\n\t\tsectorTransmitDelay_sec, tiltAngleReTx_deg,\\\n\t\ttxNominalSourceLevel_dB, txFocusRange_m,\\\n\t\tcentreFreq_Hz, signalBandWidth_Hz, \\\n\t\ttotalSignalLength_sec, pulseShading, signalWaveForm,\\\n\t\tpadding1, highVoltageLevel_dB, sectorTrackingCorr_dB, effectiveSignalLength_sec = sectorInfo.unpack_from(chunk, sectorInfo_offset + i * sectorInfo_size)\n\t\ti+=1\n\n\trxInfo_offset = sectorInfo_offset + numTxSectors * sectorInfo_size\n\trxInfo = struct.Struct('HHHHffffHHHH')\n\trxInfo_size = 2 + 2 + 2 + 2 + 4 + 4 + 4 + 4 + 2 + 2 + 2 + 2\n\tnumBytesRxInfo, numSoundingsMaxMain, numSoundingsValidMain, numBytesPerSounding, \\\n\tWCSampleRate, seabedImageSampleRate, BSnormal_dB, BSoblique_dB, \\\n\textraDetectionAlarmFlag, numExtraDetections, numExtraDetectionClasses, \\\n\tnumBytesPerClass = rxInfo.unpack_from(chunk, rxInfo_offset)\n\textraDetClassInfo_offset = rxInfo_offset + rxInfo_size\n\textraDetectionSize = 2 + 1 + 1\n\textraDetectionStruct = struct.Struct('HBB')\n\tsounding_offset = extraDetClassInfo_offset + numExtraDetectionClasses * extraDetectionSize\n\tsoundingStruct = struct.Struct('HBBBBBBBBHffffffHHffffffffffffffffffHHHH')\n\tsounding_size = 2 + 8 + 2 + 6 * 4 + 2 + 2 + 18 * 4 + 4 * 2\n\n # Offset to seabed image\n\tseabedImageStart = sounding_offset + (sounding_size * (numSoundingsMaxMain + numExtraDetections))\n\tseabedStruct = struct.Struct('h')\n\tsbed_len = lengtha + 4 - seabedImageStart - 4\n\ttot_no_sbed = sbed_len / 2\n\tverify_length = tot_no_sbed * 
2\n\tlenStruct = struct.Struct('I')\n\tdgmlenver = seabedImageStart + sbed_len\n\tdgmlen = lenStruct.unpack_from(chunk,dgmlenver - 4)[0] # should be 4 more then lengtha\n\n\toutputstr = f\"\\n%.{XY_DECIMALS}f %.{XY_DECIMALS}f %.2f %.2f %d\\n\" % (latitude_deg, longitude_deg, \n\t\tellipsoidHeightReRefPoint_m, z_waterLevelReRefPoint_m, millisec)\n\toutputIO.write(outputstr)\n\tsbed_start = seabedImageStart # This is the pointer to the start of the seabed image for current beam\n\tno_sbed_found = 0\n\ti = 0\n\tstdstr = \"\"\n\twhile(i < numSoundingsMaxMain):\n\t\tsoundingIndex, txSectorNumb, detectionType, \\\n\t\tdetectionMethod, rejectionInfo1, rejectionInfo2, \\\n\t\tpostProcessingInfo, detectionClass, detectionConfidenceLevel, \\\n\t\tpadding, rangeFactor, qualityFactor, \\\n\t\tdetectionUncertaintyVer_m, detectionUncertaintyHor_m, \\\n\t\tdetectionWindowLength_sec, echoLength_sec, \\\n\t\tWCBeamNumb, WCrange_samples, WCNomBeamAngleAcross_deg, \\\n\t\tmeanAbsCoeff_dBPerkm, reflectivity1_dB, reflectivity2_dB, \\\n\t\treceiverSensitivityApplied_dB, sourceLevelApplied_dB, \\\n\t\tBScalibration_dB, TVG_dB, beamAngleReRx_deg, \\\n\t\tbeamAngleCorrection_deg, twoWayTravelTime_sec, \\\n\t\ttwoWayTravelTimeCorrection_sec, deltaLatitude_deg, \\\n\t\tdeltaLongitude_deg, z_reRefPoint_m, y_reRefPoint_m, \\\n\t\tx_reRefPoint_m, beamIncAngleAdj_deg, realTimeCleanInfo, \\\n\t\tSIstartRange_samples, SIcentreSample, \\\n\t\tSInumSamples = soundingStruct.unpack_from(chunk, sounding_offset + i * sounding_size)\n\t\ti+=1\n\t\t\t\n\t\t# THIS IS IT. This is where we output xyz-points\n\t\t# Depths are referred to the reference point. To get it to the waterline,\n\t\t# SUBSTRACT the distance from\n\t\t# Error estimates are also available: detectionUncertaintyVer_m and\n\t\t# detectionUncertaintyHor_m\n\t\twaterlevel = z_reRefPoint_m - z_waterLevelReRefPoint_m\n\t\tplat = latitude_deg + deltaLatitude_deg\n\t\tplon = longitude_deg + deltaLongitude_deg\n\t\toutputstr = f\" %.{XY_DECIMALS}f %.{XY_DECIMALS}f %.2f %.2f\" % (deltaLatitude_deg, deltaLongitude_deg, \n\t\t\tz_reRefPoint_m, reflectivity1_dB)\n\t\toutputIO.write(outputstr)\n\t\tn = float(latitude_deg)\n\t\te = float(longitude_deg)\n\t\tt = int(millisec)\n\t\tif (start_t < 0 or t < start_t):\n\t\t\tstart_t = t\n\t\tif (t > stop_t):\n\t\t\tstop_t = t\n\t\tif (min_e > e):\n\t\t\tmin_e = e\n\t\tif (min_n > n):\n\t\t\tmin_n = n\n\t\tif (e > max_e):\n\t\t\tmax_e = e\n\t\tif (n > max_n):\n\t\t\tmax_n = n\t\n\n# Datagram processing follows the official structure given by Kongsberg\ndef processDatagram(lengtha, chunk, outputIO):\n\theader_without_length = struct.Struct('ccccBBHII')\n\tdgm_type0, dgm_type1, dgm_type2, dgm_type3, dgm_version, sysid, emid, sec, nsec = header_without_length.unpack_from(chunk, 0)\n\tdgm_type = dgm_type0 + dgm_type1 + dgm_type2 + dgm_type3\n\t\t\n\t# Decode time\n\tnanosec = sec\n\tnanosec *= 1E9\n\tnanosec += nsec\n\tmillisec = nanosec\n\tmillisec /= 1E6\t\n\t\n\t# Decode datagram type/version\n\tstrk = dgm_type.decode()\n\tif (strk == '#MRZ'):\n\t\tassert dgm_version == 3, \"Wrong version of datagram, see Kongsberg library for KMALL files for original implementation\"\n\t\tif (dgm_version == 3):\n\t\t\tprocess_MRZ_data(millisec, lengtha, chunk, outputIO)","repo_name":"kwigulaker/EchoLocation","sub_path":"EM2040/utils/kmall_to_xyz/process_datagram.py","file_name":"process_datagram.py","file_ext":"py","file_size_in_byte":7714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} 
+{"seq_id":"27740395118","text":"import unittest\nimport os\nfrom pathlib import Path\nfrom fastapi.testclient import TestClient\nfrom tracking_api.app import fastapi_app,check_file\n\nclient = TestClient(fastapi_app)\n\n\nclass TestAPI(unittest.TestCase):\n\n def test_no_file(self):\n Path('/tmp/ok').touch()\n self.assertEqual(check_file(), True)\n\n def test_ping_200(self):\n \"\"\"\n Test to validate that the ping returns 200 when the file is there.\n \"\"\"\n response = client.get(\"/ping\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), \"OK\")\n\n def test_ping_503(self):\n \"\"\"\n Test to validate that the ping returns 200 when the file is there.\n \"\"\"\n os.remove('/tmp/ok')\n # Unset the cache for testing\n check_file.cache_clear()\n response = client.get(\"/ping\")\n self.assertEqual(response.status_code, 503)\n # Cleanup for next test run\n self.assertEqual(response.json(), \"Service Unavailable\")\n\n\n\n def test_image_200(self):\n \"\"\"\n Test to validate that the gif is returned.\n \"\"\"\n response = client.get(\"/img\")\n self.assertEqual(response.status_code, 200)\n\n def test_random_fails(self):\n \"\"\"\n Test to validating unknown endpoints return 404\n \"\"\"\n response = client.get(\"/random\")\n self.assertEqual(response.status_code, 404)\n\n","repo_name":"bilbobx182/sojern_interview","sub_path":"tests/test_integration_tracking_api.py","file_name":"test_integration_tracking_api.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15538086993","text":"N, X = map(int, input().split())\nS = list(input())\n\nscore = X\nfor i in S:\n if i == \"x\" and score > 0:\n score -= 1\n if i == \"o\":\n score += 1\n\nprint(score)\n","repo_name":"Kenshiro-Tanaka/AtCoder","sub_path":"ABC184/abc184_b.py","file_name":"abc184_b.py","file_ext":"py","file_size_in_byte":174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71907783842","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\nfrom techism2.models import Event, Setting\nfrom django.core.cache import cache\nfrom django.core.mail import send_mail\n\ntags_cache_key = \"tags\"\n\n\ndef get_event_query_set():\n \"Gets a base query set with all non-archived and published events\"\n return __get_base_event_query_set().filter(archived__exact=False)\n\ndef get_archived_event_query_set():\n \"Gets a base query set with all archived and published events\"\n return __get_base_event_query_set().filter(archived__exact=True)\n\ndef __get_base_event_query_set():\n return Event.objects.filter(published__exact=True)\n\ndef get_tags():\n # Note: no synchronization, propably not possible on GAE\n tags = cache.get(tags_cache_key)\n \n if tags:\n return tags\n else:\n tags = update_tags_cache()\n return tags\n\ndef update_tags_cache():\n tags = __fetch_tags()\n cache.set(tags_cache_key, tags, 1800) # expire after 30 min\n return tags\n\ndef __fetch_tags():\n dict_list = get_event_query_set().values('tags')\n tags = dict() \n for dictionary in dict_list:\n for tag_list in dictionary.itervalues():\n if tag_list:\n for tag in tag_list:\n if tag not in tags:\n tags[tag] = 0\n tags[tag] += 1\n return tags\n\ndef send_event_review_mail(event):\n from_setting, _ = Setting.objects.get_or_create(name='event_review_mail_from', defaults={'value': u'x'})\n to_setting, _ = Setting.objects.get_or_create(name='event_review_mail_to', defaults={'value': u'x'})\n 
subject = u'[Techism] Neues Event - bitte prüfen'\n    message_details = u'Titel: %s\\n\\nBeschreibung: %s\\n\\n' % (event.title, event.description)\n    message_urls = u'Login-Url: %s\\n\\nEvent-Url: %s\\n\\n' % (get_secure_url()+\"/accounts/login/\", get_secure_url()+\"/admin/techism2/event/\")\n    message = message_details + message_urls\n    fr = from_setting.value\n    to = to_setting.value.split(',')\n    send_mail(subject, message, fr, to, fail_silently=False)\n\ndef get_secret_key():\n    secret_key_setting, _ = Setting.objects.get_or_create(name='SECRET_KEY', defaults={'value': u'none'})\n    secret_key = secret_key_setting.value\n    return secret_key\n\ndef get_secure_url():\n    secure_url_setting, _ = Setting.objects.get_or_create(name='secure_url', defaults={'value': u'none'})\n    secure_url = secure_url_setting.value\n    return secure_url\n\n","repo_name":"gimler/techism2","sub_path":"techism2/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"20576392433","text":"from django.db.models.signals import post_delete, post_save\nfrom .models import Profile, Follower\nfrom django.contrib.auth.models import User\n\n\n\ndef createProfile(sender, instance, created, **kwargs):\n    # IMPORTANT: when a user is created, a profile is generated for it\n    if created: # created is a flag (True only for a newly created user)\n        user = instance\n        profile = Profile.objects.create(\n            # auto-fill the profile from the user that triggered the signal\n            user = user, # connect the triggering user to its profile (user after the equals sign is instance)\n            username = user.username,\n            email = user.email,\n            name = user.first_name\n        )\n\ndef createFollower(sender, instance, created, **kwargs):\n    if created:\n        user = instance\n        follower = Follower.objects.create(\n            user = user,\n            name = user.name\n        )\n    \n\n\npost_save.connect(createProfile, sender=User)\n# sender must be Profile here: connecting with sender=Follower would retrigger this handler on every Follower it creates\npost_save.connect(createFollower, sender=Profile)","repo_name":"rohandevray/socialnetwork","sub_path":"users/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"24498525351","text":"from collections import defaultdict\nimport copy\n\n\ndef input(file_name):\n    with open(file_name) as f:\n        for line in f:\n            yield line.rstrip().split(\"-\")\n\n\ndef small_full(path):\n    for n, v in path.items():\n        if v == 2 and n.islower() and n not in [\"start\", \"end\"]:\n            return True\n    return False\n\n\ndef dfs(g, node, path):\n    path[node] += 1\n    if node == \"end\":\n        return 1\n    sum = 0\n    for adj in g[node]:\n        if adj.islower() and path[adj] == 2 and adj != \"end\":\n            continue\n        if adj.islower() and path[adj] == 1 and adj != \"end\" and small_full(path):\n            continue\n        sum += dfs(g, adj, copy.deepcopy(path))\n    return sum\n\n\ndef main():\n    g = defaultdict(lambda: set())\n    for l, r in input(\"test3.txt\"):\n        if l != \"end\" and r != \"start\":\n            g[l].add(r)\n        if r != \"end\" and l != \"start\":\n            g[r].add(l)\n\n    n_paths = 0\n    for adj in g[\"start\"]:\n        path = defaultdict(lambda: 0)\n        path[\"start\"] = 1\n        n_paths += dfs(g, adj, path)\n    print(n_paths)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"markokristian/aoc2021","sub_path":"12/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"22937679880","text":"__author__ = 
'FRANKCHUKY'\r\n\r\n\r\nfrom django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path('', views.index),\r\n path('user', views.user),\r\n path('adduser', views.add_user, name='adduser'),\r\n path('addtask', views.add_task, name='addtask'),\r\n path('detail', views.detail),\r\n path('movetask', views.move_task, name='movetask'),\r\n path('edittask/', views.move_task, name='edittask'),\r\n path('user/view/', views.user_profile),\r\n\r\n]\r\n","repo_name":"franko4don/myscrumy","sub_path":"nwanze-franklin/django-nwanzescrumy/nwanzescrumy/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29918891556","text":"import ast\nimport numpy as np\nimport openai\nimport os\nimport pandas as pd\nimport pickle\nimport sys\nimport tiktoken\nfrom cli import parser\nfrom process_markdown import process_markdown_folder\nfrom tenacity import (retry, stop_after_attempt, wait_random_exponential)\n\n# CONFIGS #\nDF_FILENAME = \"embeddings.csv\"\nQUERY_CACHE_FILENAME = \"query_cache.pickle\"\nOPENAI_EMBEDDINGS_MODEL = \"text-embedding-ada-002\"\nMAX_TOKENS = 8191 # Max number of tokens for OpenAI's Embeddings api\nEMBEDDING_ENCODING = \"cl100k_base\"\n\nopenai.api_key = os.environ.get(\"OPENAI_API_KEY\")\n\ndef get_num_tokens(text, encoding_name=EMBEDDING_ENCODING):\n \"\"\"Returns the number of tokens in a string\n \"\"\"\n encoding = tiktoken.get_encoding(encoding_name)\n num_tokens = len(encoding.encode(text))\n return num_tokens\n\n\ndef truncate_tokens(text, encoding_name=EMBEDDING_ENCODING, max_tokens=MAX_TOKENS):\n \"\"\"Truncates the tokens in a string to have the max_tokens\n \"\"\"\n encoding = tiktoken.get_encoding(encoding_name)\n return encoding.encode(text)[:max_tokens]\n\n\n@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))\ndef get_embedding_from_api(text):\n return openai.Embedding.create(\n model=OPENAI_EMBEDDINGS_MODEL,\n input=text\n )[\"data\"][0][\"embedding\"]\n\n\ndef embed(data):\n \"\"\"Obtains embeddings from OpenAI Embeddings Api\n Returns a data frame with the generated embeddings\n \"\"\"\n responses = []\n\n for item in data:\n # Add title to text for additional context\n text = f\"{item['Title']}. {item['Text']}\"\n num_tokens = get_num_tokens(text)\n if num_tokens > MAX_TOKENS:\n text = truncate_tokens(text)\n try:\n embedding = get_embedding_from_api(text)\n item[\"Embedding\"] = embedding\n responses.append(item)\n except Exception as e:\n print(f\"Error fetching embedding from api: \", e)\n continue\n\n df = pd.DataFrame(responses)\n return df\n\n\ndef generate_data_embeddings(folder_path):\n \"\"\"Generates embeddings for text data and saves them to a csv data file.\n \"\"\"\n texts = process_markdown_folder(folder_path)\n print(\"done processing .MD files...\")\n print(\"generating embeddings. it may take a while...\")\n df = embed(texts)\n print(\"done generating embeddings, saving df to file...\")\n df.to_csv(DF_FILENAME)\n\n\ndef cosine_similarity(v1, v2):\n \"\"\"Computes the cosine similarity between two vectors:\n cosine similarity = (A . B)/(||A||*||B||)\n where:\n A . 
B is the dot product of vectors A and B\n ||A|| is the L2 norm (euclidean norm) of vector A\n ||B|| is the L2 norm (euclidean norm) of vector B\n \"\"\"\n dot_product = np.dot(v1, v2)\n norm_v1 = np.linalg.norm(v1)\n norm_v2 = np.linalg.norm(v2)\n return dot_product / (norm_v1 * norm_v2)\n\n\ndef search(query):\n \"\"\"Performs semantic search with embeddings.\n Returns the similarity value associated to each record in descending order of relevance.\n The search index data frame loaded from csv file is also returned.\n Similarity is obtained using cosine similarity distance between embeddings.\n \"\"\"\n # Load search index\n try:\n df = pd.read_csv(DF_FILENAME)\n search_index = df.to_dict(orient='records')\n except FileNotFoundError as e:\n print(f\"{e}. Run again with flag -p set to True to process files\")\n sys.exit(1)\n\n # Load query cache if it exists. Otherwise create it.\n try:\n with open(QUERY_CACHE_FILENAME, 'rb') as f:\n query_cache = pickle.load(f)\n except OSError:\n query_cache = {}\n\n if query in query_cache:\n query_vector = query_cache[query]\n else:\n query_vector = get_embedding_from_api(query)\n query_cache[query] = query_vector\n with open(QUERY_CACHE_FILENAME, 'wb') as f:\n pickle.dump(query_cache, f)\n\n similarities = []\n for record in search_index:\n similarities.append(\n cosine_similarity(ast.literal_eval(record[\"Embedding\"]), query_vector)\n )\n result = pd.Series(similarities, index=df.index).sort_values(ascending=False)\n return result, df\n\n\ndef print_results(results, df, num_results):\n length = num_results if num_results <= len(results) else len(results)\n\n print(\"\\n----------\")\n for i in results[:length].index:\n print(f\"{df.iloc[i]['Text']}\\n\")\n print(\"----------\\n\")\n\n\ndef main():\n args = parser.parse_args()\n\n if args.preprocess == True:\n if args.folderpath is None:\n raise ValueError(\"Folder path not defined. Use flag -f with path to folder\")\n generate_data_embeddings(args.folderpath)\n\n if args.query is not None:\n results, df = search(args.query)\n print_results(results, df, args.num)\n return\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ilee38/llm-semantic-search","sub_path":"semantic_search.py","file_name":"semantic_search.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2985013758","text":"\"\"\"\nAn image is represented by an m x n integer grid image where image[i][j] represents the pixel value of the image.\n\nYou are also given three integers sr, sc, and newColor. You should perform a flood fill on the image starting from the pixel image[sr][sc].\n\nTo perform a flood fill, consider the starting pixel, plus any pixels connected 4-directionally to the starting pixel of the same color as \nthe starting pixel, plus any pixels connected 4-directionally to those pixels (also with the same color), and so on. 
Replace the color of \nall of the aforementioned pixels with newColor.\n\nReturn the modified image after performing the flood fill.\n\nImage of Flood Fill Example #1\nExamples:\n\nInput: \nimage = [[1,1,1],\n [1,1,0],\n [1,0,1]\n ],\n \n sr = 1, sc = 1, newColor = 2\n\nOutput: [[2,2,2],\n [2,2,0],\n [2,0,1]\n ]\n\nExplanation: From the center of the image with position (sr, sc) = (1, 1) (i.e., the red pixel), all pixels connected by a path of the \nsame color as the starting pixel (i.e., the blue pixels) are colored with the new color.\nNote the bottom corner is not colored 2, because it is not 4-directionally connected to the starting pixel.\n\nInput: image = [[0,0,0],\n [0,0,0]\n ], \n sr = 0, sc = 0, newColor = 2\n\nOutput: [[2,2,2],\n [2,2,2]\n ]\n\nU:\n bao: Can we modify the input array or should we need to create another array?\n What about a different color? - Keep the same.\n far: Are we limited to the 4 directions of traversal?\n What if a color is the same color as being landed on?\n jes: \n kev: Driving.\n tin: What if there is an empty node? - > Is there an \"X\" to designate a hole in the 2D array?\n xin: What happens if the input is empty?\nM:\n DFS\n Helper Function - If the position is valid? -> Run DFS\nP:\n * Track the starting color that we land on\n * Loop thru each cell that is 4-D adjacent\n * Run DFS on each cell if it is not in the visited set\n * Compare to check if the color is the same as the starting color\n * If it is the same, then change > recursive call on neighbor cell\n * Else if the boundary is OOB, return\nI:\nR:\nE:\n\n\"\"\"\n\n\ndef solution(image, sr=1, sc=1, new_color=2):\n\n visited = set()\n\n rows = len(image)\n cols = len(image[0])\n\n def dfs(row, col):\n visited.add((row, col))\n starting_color = image[row][col]\n\n image[row][col] = new_color\n\n possible_dir = [(row, col + 1), (row + 1, col), (row - 1, col), (row, col - 1)]\n\n for next_row, next_col in possible_dir:\n if (\n next_row in range(rows)\n and next_col in range(cols)\n and (next_row, next_col) not in visited\n ):\n if image[next_row][next_col] != starting_color:\n continue\n\n dfs(next_row, next_col)\n\n dfs(sr, sc)\n print(visited)\n return image\n\n\nif __name__ == \"__main__\":\n # test1 = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n test1 = [[1, 1, 1], [1, 1, 0], [1, 0, 1]]\n\n print(solution(test1))\n","repo_name":"kev-odin/laughing-waddle","sub_path":"week_8/week8_session1a.py","file_name":"week8_session1a.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43186966919","text":"import os\r\nimport xml.dom.minidom\r\n\r\ndef add_label(data_dir1, data_dir2, label_name_list):\r\n AnnoPath = data_dir1\r\n Annolist = []\r\n Annolist1 = os.listdir(AnnoPath)\r\n for Anno in Annolist1:\r\n if Anno.split('.')[-1] == 'xml':\r\n Annolist.append(Anno) \r\n \r\n for Anno in Annolist:\r\n label_name_list1 = label_name_list.copy()\r\n xmlfile = AnnoPath + Anno\r\n xmlfile_write = data_dir2 + Anno\r\n \r\n try:\r\n DOMTree = xml.dom.minidom.parse(xmlfile)\r\n except:\r\n f = open(xmlfile, \"r\")\r\n r = f.read()\r\n text = str(r.encode('utf-8'), encoding = \"utf-8\")\r\n DOMTree = xml.dom.minidom.parseString(text)\r\n \r\n try:\r\n DOMTree_write = xml.dom.minidom.parse(xmlfile_write)\r\n except:\r\n f_write = open(xmlfile_write, \"r\")\r\n r_write = f_write.read()\r\n text_write = str(r_write.encode('utf-8'), encoding = \"utf-8\")\r\n DOMTree_write = 
xml.dom.minidom.parseString(text_write)\r\n\r\n \r\n collection = DOMTree.documentElement\r\n objectlist = collection.getElementsByTagName(\"object\")\r\n \r\n collection_write = DOMTree_write.documentElement\r\n objectlist_write = collection_write.getElementsByTagName(\"object\")\r\n for index in range(len(objectlist_write)):\r\n objects = objectlist_write[index]\r\n namelist = objects.getElementsByTagName('name')\r\n idlist = objects.getElementsByTagName('id')\r\n objectname = namelist[0].childNodes[0].data\r\n idname = idlist[0].childNodes[0].data\r\n if objectname + idname in label_name_list1:\r\n label_name_list1.remove(objectname + idname)\r\n tab_text = DOMTree.createTextNode('\\t')\r\n collection_write.appendChild(tab_text)\r\n collection_write.appendChild(objects)\r\n tab_text = DOMTree.createTextNode('\\n')\r\n collection_write.appendChild(tab_text)\r\n \r\n for index in range(len(objectlist)):\r\n objects = objectlist[index]\r\n\r\n namelist = objects.getElementsByTagName('name')\r\n idlist = objects.getElementsByTagName('id')\r\n objectname = namelist[0].childNodes[0].data\r\n idname = idlist[0].childNodes[0].data\r\n if objectname + idname in label_name_list1:\r\n tab_text = DOMTree.createTextNode('\\t')\r\n collection_write.appendChild(tab_text) \r\n collection_write.appendChild(objects)\r\n tab_text = DOMTree.createTextNode('\\n')\r\n collection_write.appendChild(tab_text)\r\n \r\n with open(xmlfile_write, 'w', encoding='utf-8') as f_write: \r\n DOMTree_write.writexml(f_write, encoding='utf-8') \r\n\r\nif __name__ == '__main__':\r\n ### This code is developed for dense annotaion in DarkLabel.\r\n ### You can first annotate one instance in a seperate dir, and then use the code to add the annotate in another dir.\r\n data_dir1 = './DJI_0323_2_11/00/' ## dir for annotation\r\n data_dir2 = './DJI_0323_2/00/' ## dir for annotation add (dense annotations)\r\n label_name_list = ['pedestrian0','plane1'] ## name list of added annotations\r\n add_label(data_dir1, data_dir2, label_name_list) ### add anntations 'pedestrian0' and 'plane1' in \".xml\" of data_dir1 to \".xml\" of data_dir2.\r\n \r\n \r\n \r\n ","repo_name":"ICCV2023ID10503/RGBT-Tiny","sub_path":"codes/auxiliary_codes/add_specific_label.py","file_name":"add_specific_label.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40820638409","text":"'''\r\nCSC373 Fall 2020\r\nAssignment 6\r\nUniversity of Toronto Mississauga\r\n'''\r\n\r\n# Do NOT add any \"import\" statements\r\n# Do NOT use Python dictionaries\r\n\r\n\r\n# Constants\r\nDIRS = 4\r\n# For searching coordinates\r\nX = 0\r\nY = 1\r\n# DIR\r\nUP = 0\r\nRIGHT = 1\r\nDOWN = 2\r\nLEFT = 3\r\n# TYPES of EDGE\r\nTURN = 't'\r\nMOVE = 'm'\r\n\r\n# COST MATRIX REFERENCE\r\nCOST = 0 # Where cost of the path is held in cost matrix\r\nSTART = 1 # Starting move type in cost matrix\r\nFINAL = 2 # Final move type in cost matrix\r\n\r\n\r\ndef longest_travel_time(grid_size, a, b, c, M):\r\n '''\r\n Pre: integer grid_size, positive floats a, b, c, 2D-array M\r\n Post: return the longest travel time between any two configurations in the city\r\n '''\r\n n = grid_size*grid_size\r\n # Create cost matrix of 2d grid\r\n # 1 2\r\n # 3 4\r\n # [1u, 1r, 1d, 1l, 2u,2r,2d, ..., 4u, 4r, 4d, 4l]\r\n cost = [[[float('inf'), -1, -1] for index in range(n*DIRS)] if M[ind_to_coord(node, grid_size)[X]][ind_to_coord(node, grid_size)[Y]] ==\r\n 1 else None for node in range(n*DIRS)]\r\n # 
setup_cost_matrix\r\n    for x in range(grid_size):\r\n        for y in range(grid_size):\r\n            if M[x][y] == 1:\r\n                base = coord_to_ind(x, y, 0, grid_size)\r\n                for direction in range(DIRS):\r\n                    index = coord_to_ind(x, y, direction, grid_size)\r\n                    cw = (direction + 1) % DIRS\r\n                    ccw = (direction - 1) % DIRS\r\n                    cost[index][base + cw] = [c, TURN, TURN]\r\n                    cost[index][base + ccw] = [c, TURN, TURN]\r\n                    # blocked neighbours keep the [inf, -1, -1] shape so the relaxation guards below stay type-safe\r\n                    if direction == UP and y != 0:\r\n                        cost[index][coord_to_ind(x, y-1, direction, grid_size)] = [a+b, MOVE, MOVE] if M[x][y-1] == 1 else [float('inf'), -1, -1]\r\n                    elif direction == RIGHT and x != grid_size - 1:\r\n                        cost[index][coord_to_ind(x+1, y, direction, grid_size)] = [a+b, MOVE, MOVE] if M[x+1][y] == 1 else [float('inf'), -1, -1]\r\n                    elif direction == DOWN and y != grid_size - 1:\r\n                        cost[index][coord_to_ind(x, y+1, direction, grid_size)] = [a+b, MOVE, MOVE] if M[x][y+1] == 1 else [float('inf'), -1, -1]\r\n                    elif direction == LEFT and x != 0:\r\n                        cost[index][coord_to_ind(x-1, y, direction, grid_size)] = [a+b, MOVE, MOVE] if M[x-1][y] == 1 else [float('inf'), -1, -1]\r\n\r\n    # Floyd-Warshall style relaxation over (cell, heading) configurations\r\n    for outer_index in range(n*DIRS):\r\n        outer = ind_to_coord(outer_index, grid_size)\r\n        if M[outer[X]][outer[Y]] == 1:\r\n            for center_index in range(n*DIRS):\r\n                center = ind_to_coord(center_index, grid_size)\r\n                if M[center[X]][center[Y]] == 1 and cost[outer_index][center_index] != [float('inf'), -1, -1]:\r\n                    for inner_index in range(n*DIRS):\r\n                        inner = ind_to_coord(inner_index, grid_size)\r\n                        # Comparison only matters if the tile is driveable\r\n                        if M[inner[X]][inner[Y]] == 1 and cost[center_index][inner_index] != [float('inf'), -1, -1]:\r\n                            # the b penalty is saved only when the first leg ends with a move and the second leg starts with one\r\n                            if cost[outer_index][center_index][FINAL] == MOVE and cost[center_index][inner_index][START] == MOVE:\r\n                                if cost[outer_index][center_index][COST] + cost[center_index][inner_index][COST] - b < cost[outer_index][inner_index][COST]:\r\n                                    cost[outer_index][inner_index][COST] = cost[outer_index][center_index][COST] + \\\r\n                                        cost[center_index][inner_index][COST] - b\r\n                                    cost[outer_index][inner_index][START] = cost[outer_index][center_index][START]\r\n                                    cost[outer_index][inner_index][FINAL] = cost[center_index][inner_index][FINAL]\r\n                            elif cost[outer_index][center_index][COST] + cost[center_index][inner_index][COST] < cost[outer_index][inner_index][COST]:\r\n                                cost[outer_index][inner_index][COST] = cost[outer_index][center_index][COST] + \\\r\n                                    cost[center_index][inner_index][COST]\r\n                                cost[outer_index][inner_index][START] = cost[outer_index][center_index][START]\r\n                                cost[outer_index][inner_index][FINAL] = cost[center_index][inner_index][FINAL]\r\n\r\n    longest = 0\r\n    # Find the longest saved cost (compare the scalar COST field, not the whole entry)\r\n    for outer in range(n*DIRS):\r\n        for inner in range(n*DIRS):\r\n            if cost[outer] is not None and cost[outer][inner][COST] > longest and cost[outer][inner][COST] != float('inf'):\r\n                longest = cost[outer][inner][COST]\r\n    return longest\r\n\r\n\r\ndef ind_to_coord(index, grid_size):\r\n    x = (index//(DIRS)) % grid_size\r\n    y = (index//(DIRS))//grid_size\r\n    dir = index % DIRS\r\n    return (x, y, dir)\r\n\r\n\r\ndef coord_to_ind(x, y, dir, grid_size):\r\n    '''\r\n    Pre: Integer cell coordinates x and y plus a direction, identifying one configuration.\r\n    '''\r\n    ind = x*(DIRS)\r\n    ind += y*(DIRS)*grid_size\r\n    return ind + dir\r\n\r\n\r\nif __name__ == '__main__':\r\n    # some small test cases\r\n    # Case 1\r\n    time = longest_travel_time(3, 1.0, 1.0, 1.0, [[1, 1, 1], [1, 0, 1],\r\n                                                  [1, 1, 1]])\r\n    assert time == 
11.0\r\n","repo_name":"Alshadex/dotfiles","sub_path":"bin/longest_travel_time_b.py","file_name":"longest_travel_time_b.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73435383523","text":"import torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torchscope import scope\nfrom torchvision import models\n\nfrom mobilefacenet import MobileFaceNet\n\n\nclass FECNet(nn.Module):\n def __init__(self):\n super(FECNet, self).__init__()\n filename = 'mobilefacenet.pt'\n model = MobileFaceNet()\n model.load_state_dict(torch.load(filename))\n self.model = model\n self.relu = nn.PReLU()\n self.fc = nn.Linear(128, 16)\n\n def forward(self, input):\n x = self.model(input)\n x = self.relu(x)\n x = self.fc(x)\n x = F.normalize(x)\n return x\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n batch_size = x.shape[0]\n return x.view(batch_size, -1)\n\n\nclass DepthwiseSeparableConv(nn.Module):\n def __init__(self, nin, nout, kernel_size, padding, bias=False):\n super(DepthwiseSeparableConv, self).__init__()\n self.depthwise = nn.Conv2d(nin, nin, kernel_size=kernel_size, padding=padding, groups=nin, bias=bias)\n self.pointwise = nn.Conv2d(nin, nout, kernel_size=1, bias=bias)\n\n def forward(self, x):\n out = self.depthwise(x)\n out = self.pointwise(out)\n return out\n\n\nclass RankNetMobile(nn.Module):\n def __init__(self, pretrained=True):\n super(RankNetMobile, self).__init__()\n # mobilenet = models.mobilenet_v2(pretrained=True)\n\n filename = 'mobilefacenet.pt'\n model = MobileFaceNet()\n if pretrained:\n model.load_state_dict(torch.load(filename))\n\n self.model = model\n self.dropout = nn.Dropout(0.8)\n # self.relu = nn.LeakyReLU(0.2, inplace=True)\n self.fc = nn.Linear(128, 16)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, input1, input2, input3):\n e1 = self.predict(input1)\n e2 = self.predict(input2)\n e3 = self.predict(input3)\n d12 = F.pairwise_distance(e1, e2, p=2)\n d13 = F.pairwise_distance(e1, e3, p=2)\n # d23 = F.pairwise_distance(e2, e3, p=2)\n return self.sigmoid(d12 - d13)\n\n def predict(self, input):\n x = self.model(input)\n x = self.dropout(x)\n # x = self.relu(x)\n x = self.fc(x)\n x = F.normalize(x, dim=1)\n return x\n\n\nclass ResNetEmotionModel(nn.Module):\n def __init__(self):\n super(ResNetEmotionModel, self).__init__()\n resnet = models.resnet50(pretrained=True)\n # Remove linear layer\n modules = list(resnet.children())[:-1]\n self.features = nn.Sequential(*modules)\n # building last several layers\n self.fc = nn.Linear(2048, 16)\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n x = F.normalize(x)\n return x\n\n\nclass ResNetRankModel(nn.Module):\n def __init__(self, pretrained=True):\n super(ResNetRankModel, self).__init__()\n resnet = models.resnet50(pretrained=True)\n # Remove linear layer\n modules = list(resnet.children())[:-1]\n self.features = nn.Sequential(*modules)\n self.fc = nn.Linear(2048, 16)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, input1, input2, input3):\n e1 = self.predict(input1)\n e2 = self.predict(input2)\n e3 = self.predict(input3)\n d12 = F.pairwise_distance(e1, e2, p=2)\n d13 = F.pairwise_distance(e1, e3, p=2)\n d23 = F.pairwise_distance(e2, e3, p=2)\n\n return self.sigmoid(d12 - (d13 + d23) / 2)\n\n def predict(self, input):\n x = self.features(input)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n x = F.normalize(x)\n return x\n\n\nif __name__ == \"__main__\":\n model 
= ResNetEmotionModel()\n scope(model, input_size=(3, 224, 224))\n","repo_name":"foamliu/Facial-Expression-Embedding","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"671597620","text":"from django import forms\nfrom .models import DataSetFile\n\nclass UploadForm(forms.ModelForm):\n file_type = forms.ChoiceField(choices=(\n ('Service', 'Service Totals'),\n ('Bill', 'Bill Totals')\n ))\n\n class Meta:\n model = DataSetFile\n fields = ('csv_file', 'file_type')\n","repo_name":"OpenUpSA/municipal-data","sub_path":"household/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"54"} +{"seq_id":"73460055520","text":"import numpy as np\nimport cv2\nimport vrep\nfrom scene import Scene\nfrom matplotlib import pyplot as plt\nfrom tool import * \nimport time\n\nimport os\n\n# vrep\nip = '127.0.0.1'\nport = 19997\ncam_num = 3\n\ntrain_size = 50\nsample_size = 20\niteration=100\nscore_threshold=0.02\nscalar_threshold = 0.0005\n\ndef get_sub_mat(qa, qa_prime, qb, qb_prime):\n mat = np.zeros((6,8))\n # equation 1\n mat[:3, 0] = qa[1:] - qb[1:]\n mat[:3, 1:4] = cross_mat(qa[1:] + qb[1:])\n # equation 2\n mat[3:, 0] = qa_prime[1:] - qb_prime[1:]\n mat[3:, 1:4] = cross_mat(qa_prime[1:] + qb_prime[1:])\n mat[3:, 4] = qa[1:] - qb[1:]\n mat[3:, 5:] = cross_mat(qa[1:] + qb[1:])\n return mat\n\n# solve AX=XB by dual quaternion\ndef dual_quaternion_approach(motionAs, motionBs):\n size = motionAs.shape[0]\n T = []\n for j in range(size):\n motionA = motionAs[j]\n motionB = motionBs[j]\n ra, ta = mat_to_r_t(motionA)\n rb, tb = mat_to_r_t(motionB)\n qa, qa_prime = rot2dualquat(ra, ta)\n qb, qb_prime = rot2dualquat(rb, tb)\n T.append(get_sub_mat(qa, qa_prime, qb, qb_prime))\n T = np.concatenate(T)\n\n U, s, V = np.linalg.svd(T)\n idx1, idx2 = np.argsort(s)[:2].tolist()\n v7 = V[idx1]\n v8 = V[idx2]\n \n u1 = v7[:4]\n v1 = v7[4:]\n u2 = v8[:4]\n v2 = v8[4:]\n\n a = np.dot(u1,v1)\n b = np.dot(u1,v2) + np.dot(u2,v1)\n c = np.dot(u2,v2)\n \n s1 = (-b + np.sqrt(b*b-4*a*c)) / (2*a)\n s2 = (-b - np.sqrt(b*b-4*a*c)) / (2*a)\n\n x1 = s1**2 * np.dot(u1,u1) + 2*s1*np.dot(u1,u2) + np.dot(u2,u2)\n x2 = s2**2 * np.dot(u1,u1) + 2*s2*np.dot(u1,u2) + np.dot(u2,u2)\n (x,s) = (x1,s1) if x1 >= x2 else (x2,s2)\n\n lambda2 = np.sqrt(1/x)\n lambda1 = s * lambda2\n\n q = lambda1 * u1 + lambda2 * u2\n q_ = lambda1 * v1 + lambda2 * v2\n\n r_ba, t_ba = dualquat2r_t(q, q_)\n return r_t_to_mat(r_ba, t_ba), s\n\ndef get_error(T_world_cam, motionAs, motionBs, score_threshold):\n motionAs_ = np.matmul(T_world_cam, motionBs)\n motionAs_ = np.matmul(motionAs_, np.linalg.inv(T_world_cam))\n error = np.linalg.norm(motionAs - motionAs_) / np.linalg.norm(motionAs)\n tAs_ = motionAs_[:, :3, 3]\n tAs = motionAs[:, :3, 3]\n error = np.linalg.norm(tAs_ - tAs) / np.linalg.norm(tAs)\n return error\n\ndef ransac_for_calibration(motionAs, motionBs, sample_size=20, iteration=10, score_threshold=0.002, show=False):\n best_error = np.inf\n best_result = None\n\n for i in range(iteration): \n sample_idxs = np.random.randint(0,motionAs.shape[0],size=(1,sample_size))\n sampled_motionAs = motionAs[sample_idxs.ravel().tolist()]\n sampled_motionBs = motionBs[sample_idxs.ravel().tolist()]\n \n result, singular_values = dual_quaternion_approach(sampled_motionAs, sampled_motionBs)\n error = get_error(result, 
motionAs, motionBs, score_threshold)\n\n if error < best_error:\n best_error = error\n best_result = result\n if show:\n print(\"iter \", i, \"error: \", error)\n return best_result, best_error\n\ndef get_motion(A, B, scalar_threshold=0.0005, train_size=20, show=False):\n size = A.shape[0]\n motionAs = []\n motionBs = []\n for i in range(size):\n Ai = A[i]\n Bi = B[i]\n\n for j in range(i+1,size):\n Aj = A[j]\n Bj = B[j]\n\n motionA = np.matmul(np.linalg.inv(Aj), Ai)\n motionB = np.matmul(Bj, np.linalg.inv(Bi))\n ra, ta = mat_to_r_t(motionA)\n rb, tb = mat_to_r_t(motionB)\n qa, qa_prime = rot2dualquat(ra, ta)\n qb, qb_prime = rot2dualquat(rb, tb)\n\n # check scalar be equivalent\n diff_scalar = np.abs(qa[0]-qb[0])\n diff_scalar_ = np.abs(qa_prime[0]-qb_prime[0])\n # if show:\n # print(j, diff_scalar, diff_scalar_)\n if(diff_scalar < scalar_threshold and diff_scalar_ < scalar_threshold):\n motionAs.append(motionA)\n motionBs.append(motionB)\n shuffle_idxs = [i for i in range(len(motionAs))]\n np.random.shuffle(shuffle_idxs)\n if show:\n print('valid motion size: ', len(motionAs))\n print('train size: ', train_size)\n return np.stack(motionAs)[shuffle_idxs][:train_size], np.stack(motionBs)[shuffle_idxs][:train_size]\n\ndef hand_eye_calibration(A, B, sample_size=20, iteration=10, score_threshold=0.002, scalar_threshold=0.0005, train_size=20, show=False):\n motionAs, motionBs = get_motion(\n A,\n B,\n scalar_threshold=scalar_threshold, \n train_size=train_size, \n show=show\n )\n T_world_cam, error = ransac_for_calibration(\n motionAs, \n motionBs, \n sample_size, \n iteration, \n score_threshold, \n show=show\n )\n if show:\n print('best error:', error)\n return T_world_cam\n\ndef main():\n input_path = \"./calibration\"\n cam_paths = [os.path.join(input_path,'camera{:d}'.format(i)) for i in range(cam_num)]\n\n scene = Scene(ip, port)\n T_world_cams = scene.get_cam_matrixs()\n theta = np.pi\n l = np.array([0,0,1])\n q = np.array([np.cos(theta/2)]+(np.sin(theta/2) * l).tolist())\n r = quat2rot(q)\n cam_fix = r_t_to_mat(r, np.zeros(3))\n T_world_cams = [np.matmul(T_world_cams[i],cam_fix) for i in range(cam_num)]\n\n for i in range(cam_num):\n print('camera ',i)\n cam_path = cam_paths[i]\n T_world_end = np.loadtxt(os.path.join(cam_path, 'world_end.txt')).reshape((-1,4,4))\n T_cam_obj = np.loadtxt(os.path.join(cam_path, 'cam_obj.txt')).reshape((-1,4,4))\n\n # hand eye calibration\n T_world_cam = hand_eye_calibration(\n np.linalg.inv(T_world_end)[::-1], \n T_cam_obj[::-1], \n sample_size=sample_size, \n iteration=iteration, \n score_threshold=score_threshold,\n scalar_threshold=scalar_threshold,\n train_size=train_size,\n show=True\n )\n\n # check X\n print('final check')\n thetaj, nj, tj = mat_to_theta_n_t(T_world_cam)\n thetaj_, nj_, tj_ = mat_to_theta_n_t(T_world_cams[i])\n print(thetaj, nj, tj)\n print(thetaj_, nj_, tj_)\n print(\n \"theta:{:6f} n:{:6f} t:{:6f}\".format(\n np.linalg.norm(thetaj-thetaj_), \n np.linalg.norm(nj-nj_)/np.linalg.norm(nj), \n np.linalg.norm(tj-tj_)/np.linalg.norm(tj)\n )\n )\n print('-----------------------------------------------------')\n\n np.savetxt(os.path.join(cam_path, 'world_cam.txt'),T_world_cam)\n\nif __name__ == \"__main__\":\n main()","repo_name":"djy89/hand-eye-calibration","sub_path":"calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"1693959016","text":"from logging import error\n\nimport scrapy\nimport 
datetime as dt\n\n\nclass IdSpider(scrapy.Spider):\n name = \"id\"\n fic_page_addr = \"http://fanfics.me/fic\"\n start_urls = [\n # \"http://fanfics.me/find?section=find&fandom1=2&fandom2=0&pers1=0&pers2=0&pers3=0&pers4=0&pers5=0&pers6=0&size=0&reit=0&status=1&date=0&translate=0&original_language=0&reit1=3#fics\",\n # \"http://fanfics.me/find?section=find&fandom1=2&fandom2=0&pers1=0&pers2=0&pers3=0&pers4=0&pers5=0&pers6=0&size=0&reit=0&status=1&date=0&translate=0&original_language=0&reit1=1#fics\",\n # \"http://fanfics.me/find?section=find&fandom1=2&fandom2=0&pers1=0&pers2=0&pers3=0&pers4=0&pers5=0&pers6=0&size=0&reit=0&status=1&date=0&translate=0&original_language=0&reit1=2#fics\",\n \"http://fanfics.me/find?section=find&fandom1=2&fandom2=0&pers1=0&pers2=0&pers3=0&pers4=0&pers5=0&pers6=0&size=0&reit=0&status=1&date=0&translate=0&original_language=0&reit1=4#fics\"\n ]\n\n def parse(self, response):\n for fic_page in response.css('.FicTable').css('.FicTable_Title a').xpath('@href').extract():\n yield response.follow(fic_page, self.parse_metadata)\n\n paginator = response.css(\"div.paginator\")[0]\n current_page = int(paginator.css(\"span.this\").xpath(\"text()\").extract_first())\n next_page = paginator.xpath(\"span/a[text() = '{}']/@href\".format(current_page + 1)).extract_first()\n\n print('seen {} fics on page {}; next page is {}'.format(len(response.css('.FicTable')), current_page, next_page))\n if next_page is not None:\n yield response.follow(next_page, self.parse, dont_filter=True)\n else:\n import random\n with open(\"last_page_{}.html\".format(''.join(random.choices(\"abcdefghijklmn\", k=4))), \"wb\") as f:\n f.write(response.body)\n\n def parse_metadata(self, response):\n try:\n table = response.css(\".FicHead\")[0]\n except IndexError:\n return []\n fic_id = response.url[len(self.fic_page_addr):]\n published = response.css(\"#fic_info_content_stat table tr\").xpath(\n \"./td[contains(., \\\"Опубликован:\\\")]/parent::tr/td[last()]/text()\").extract_first()\n last_update = response.css(\"#fic_info_content_stat table tr\").xpath(\n \"./td[contains(., \\\"Изменен:\\\")]/parent::tr/td[last()]/text()\").extract_first()\n category = table.css(\"h1>span::text\").extract_first().replace(\"(\", \"\").replace(\")\", \"\")\n views = response.css(\"#fic_info_content_stat table tr\").xpath(\n \"./td[contains(., \\\"Просмотров:\\\")]/parent::tr/td[last()]/text()\").extract_first()\n subscribers = response.css(\"#fic_info_content_stat table tr\").xpath(\n \"./td[contains(., \\\"Читателей:\\\")]/parent::tr/td[last()]/span/text()\").extract_first()\n recommendations = response.css(\"#fic_info_content_stat table tr\").xpath(\n \"./td[contains(., \\\"Рекомендаций:\\\")]/parent::tr/td[last()]/text()\").extract_first()\n comments = response.css(\"#fic_info_content_stat table tr\").xpath(\n \"./td[contains(., \\\"Комментариев:\\\")]/parent::tr/td[last()]/text()\").extract_first()\n\n metadata = {\n \"id\": fic_id,\n \"title\": table.css(\"h1::text\").extract_first(),\n \"published\": dt.datetime.strptime(published, \"%d.%m.%Y\").date() if published else None,\n \"last_update\": dt.datetime.strptime(last_update, \"%d.%m.%Y\").date() if last_update else None,\n \"category\": category,\n \"views\": int(views.replace(\" \", \"\")),\n \"subscribers\": int(subscribers),\n \"recommendations\": int(recommendations),\n \"comments\": int(comments)\n }\n\n for field in table.css(\"div.tr\"):\n title = field.css(\".title\").extract_first()\n content = field.css(\".content\")\n if \"Рейтинг\" in title:\n 
metadata[\"rating\"] = content.css('::text').extract_first()\n elif \"Автор\" in title:\n metadata[\"authors\"] = content.xpath(\"./span/@data-show-member\").extract()\n elif \"Переводчик\" in title:\n metadata[\"translators\"] = content.xpath(\"./span/@data-show-member\").extract()\n elif \"Оригинал\" in title:\n orig_content_table = content.css(\"table.translation_info tr\")\n for orig_field in orig_content_table:\n orig_title = orig_field.css(\"td.first\").extract_first()\n if \"Название\" in orig_title:\n metadata[\"original_title\"] = orig_field.css(\"td.second::text\").extract_first()\n elif \"Автор\" in orig_title:\n metadata[\"authors\"] = orig_field.css(\"td.second a::text\").extract()\n elif \"Язык\" in orig_title:\n metadata[\"original_language\"] = orig_field.css(\"td.second::text\").extract_first()\n else:\n pass\n elif \"Бет\" in title:\n metadata[\"betas\"] = content.xpath(\"./span/@data-show-member\").extract()\n elif \"Жанр\" in title:\n genre = content.xpath(\"text()\").extract_first()\n metadata[\"genre\"] = genre.split(\", \") if genre is not None else []\n elif \"События\" in title:\n metadata[\"events\"] = content.css(\"a::text\").extract()\n elif \"Предупреждение\" in title:\n metadata[\"warnings\"] = content.xpath(\"text()\").extract_first().split(\", \")\n elif \"Размер\" in title:\n size_cat = content.css('::text').extract_first()\n size_cat = 'small' if 'Мини' in size_cat else 'medium' if 'Миди' in size_cat else 'large'\n size_kb = content.css('#FicSize::text').extract_first()\n size_kb = int(size_kb.split()[0]) if size_kb else 0\n metadata['size_cat'] = size_cat\n metadata['size_kb'] = size_kb\n elif \"Персонажи\" in title:\n characters = content.xpath('a[contains(@href, \"paring\")]/text()').extract()\n characters = [pair.split('/') for pair in characters]\n characters.extend(content.xpath('a[contains(@href, \"character\")]/text()').extract())\n metadata['characters'] = characters\n elif not ((\"Фандом\" in title) or (\"Статус\" in title)):\n error(\"Unmanadeg data at {}: {}\".format(response.url, title))\n\n yield metadata\n","repo_name":"stacymiller/andan2018_vis","sub_path":"2019/scraping/spiders/id_spider.py","file_name":"id_spider.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73747927841","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 8 23:54:54 2018\n\n@author: thomasdrayton\n\"\"\"\n\nimport pandas as pd\nimport os\nimport glob\n\n\nindir =\"/Users/thomasdrayton/Desktop/FYP/InformationExtraction\"\nos.chdir(indir)\n\nfileList = glob.glob(\"*.csv\") # putting all the csv files into list\n\n\ndf_list = []\n\nfor file in fileList:\n df = pd.read_csv(file)\n df_list.append(df)\n \n\n\ndf_subset = pd.concat(df_list,axis=0)\ndf_subset.drop(df_subset.columns[0],axis=1, inplace = True)\n\ndf_subset.set_index(['eng test','window'], drop = True, inplace = True)\n\n#df_subset = df_subset.transpose()\n\n\nprint(df_subset)\n#%%\n#df_n_IPvib = df_subset.drop(['tfuelv_f','toilv_f','tgtv_f','p30v_f'],axis=1)\n# =============================================================================\n# outfile = \"/Users/thomasdrayton/Desktop/subset_ready_for_labelling.csv\"\n# df_subset.to_csv(outfile)\n# =============================================================================\n\n\n \n 
","repo_name":"bittahbandit/FYP-Scripts","sub_path":"dbcreation_from_feat_extrction_files.py","file_name":"dbcreation_from_feat_extrction_files.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39851568602","text":"from ravi.Ravi import *\n\nwood_from_every_tree = 1\nmetal_from_every_mine = 1\n\nwood_for_bow = 2\nmetal_for_bow = 1\nwood_for_sword = 1\nmetal_for_sword = 2\n\ntree_cutting_duration = 1\nmine_digging_duration = 1\nbow_making_duration = 1\nsword_making_duration = 1\n\nclass_player = EntityClass()\nclass_player.addProperty(\"WOOD\", int, 0)\nclass_player.addProperty(\"METAL\", int, 0)\nclass_player.addProperty(\"BOW\", int, 0)\nclass_player.addProperty(\"SWORD\", int, 0)\n\nclass_world = EntityClass()\nclass_world.addProperty(\"TREE\", int, 6)\nclass_world.addProperty(\"MINE\", int, 4)\nclass_world.addProperty(\"REMAINING_DAYS\", int, 5)\n\ncontext = NarrativeContext()\ncontext.addEntity(\"PLAYER\", class_player)\ncontext.addEntity(\"WORLD\", class_world)\n\n\n# =================================================== PreConditions ====================================================\n\n\ndef can_cut_tree(s: NarrativeState) -> bool:\n return s.getValue(\"WORLD\", \"TREE\") > 0 and \\\n s.getValue(\"WORLD\", \"REMAINING_DAYS\") >= tree_cutting_duration\n\n\ndef can_dig_mine(s: NarrativeState) -> bool:\n return s.getValue(\"WORLD\", \"MINE\") > 0 and \\\n s.getValue(\"WORLD\", \"REMAINING_DAYS\") >= mine_digging_duration\n\n\ndef can_make_bow(s: NarrativeState) -> bool:\n return s.getValue(\"PLAYER\", \"WOOD\") >= wood_for_bow and \\\n s.getValue(\"PLAYER\", \"METAL\") >= metal_for_bow and \\\n s.getValue(\"WORLD\", \"REMAINING_DAYS\") >= bow_making_duration\n\n\ndef can_make_sword(s: NarrativeState) -> bool:\n return s.getValue(\"PLAYER\", \"WOOD\") >= wood_for_sword and \\\n s.getValue(\"PLAYER\", \"METAL\") >= metal_for_sword and \\\n s.getValue(\"WORLD\", \"REMAINING_DAYS\") >= sword_making_duration\n\n\n# ======================================================= Actions ======================================================\n\n\ndef cut_tree(s: NarrativeState) -> NarrativeState:\n newValue = s.getValue(\"PLAYER\", \"WOOD\") + wood_from_every_tree\n s.setValue(\"PLAYER\", \"WOOD\", newValue)\n\n newValue = s.getValue(\"WORLD\", \"REMAINING_DAYS\") - tree_cutting_duration\n s.setValue(\"WORLD\", \"REMAINING_DAYS\", newValue)\n return s\n\n\ndef dig_mine(s: NarrativeState) -> NarrativeState:\n newValue = s.getValue(\"PLAYER\", \"METAL\") + metal_from_every_mine\n s.setValue(\"PLAYER\", \"METAL\", newValue)\n\n newValue = s.getValue(\"WORLD\", \"REMAINING_DAYS\") - mine_digging_duration\n s.setValue(\"WORLD\", \"REMAINING_DAYS\", newValue)\n return s\n\n\ndef make_bow(s: NarrativeState) -> NarrativeState:\n newValue = s.getValue(\"PLAYER\", \"WOOD\") - wood_for_bow\n s.setValue(\"PLAYER\", \"WOOD\", newValue)\n\n newValue = s.getValue(\"PLAYER\", \"METAL\") - metal_for_bow\n s.setValue(\"PLAYER\", \"METAL\", newValue)\n\n newValue = s.getValue(\"PLAYER\", \"BOW\") + 1\n s.setValue(\"PLAYER\", \"BOW\", newValue)\n\n newValue = s.getValue(\"WORLD\", \"REMAINING_DAYS\") - bow_making_duration\n s.setValue(\"WORLD\", \"REMAINING_DAYS\", newValue)\n\n return s\n\n\ndef make_sword(s: NarrativeState) -> NarrativeState:\n newValue = s.getValue(\"PLAYER\", \"WOOD\") - wood_for_sword\n s.setValue(\"PLAYER\", \"WOOD\", newValue)\n\n newValue = s.getValue(\"PLAYER\", \"METAL\") - 
metal_for_sword\n s.setValue(\"PLAYER\", \"METAL\", newValue)\n\n newValue = s.getValue(\"PLAYER\", \"SWORD\") + 1\n s.setValue(\"PLAYER\", \"SWORD\", newValue)\n\n newValue = s.getValue(\"WORLD\", \"REMAINING_DAYS\") - sword_making_duration\n s.setValue(\"WORLD\", \"REMAINING_DAYS\", newValue)\n\n return s\n\n\n# ====================================================== Choices =======================================================\n\n\nch_cut_tree = NarrativeChoice(can_cut_tree, cut_tree,\n \"CUT A TREE TO GET +%d WOOD (%d DAYS)\" \\\n % (wood_from_every_tree, tree_cutting_duration))\n\nch_dig_mine = NarrativeChoice(can_dig_mine, dig_mine,\n \"DIG A MINE TO GET +%d METAL (%d DAYS)\" \\\n % (metal_from_every_mine, mine_digging_duration))\n\nch_make_bow = NarrativeChoice(can_make_bow, make_bow,\n \"SPEND %d WOOD and %d METAL TO MAKE A BOW (%d DAYS)\" \\\n % (wood_for_bow, metal_for_bow, bow_making_duration))\n\nch_make_sword = NarrativeChoice(can_make_sword, make_sword,\n \"SPEND %d WOOD and %d METAL TO MAKE A SWORD (%d DAYS)\" \\\n % (wood_for_sword, metal_for_sword, sword_making_duration))\n\n# =================================================== Assertions =======================================================\n\nassertion_1 = NarrativeAssertion(\n \"Number of WOOD should never become negative\",\n lambda m: len(filterStates(lambda s: s.getValue(\"PLAYER\", \"WOOD\") < 0,\n statesOf(m)))\n == 0\n)\n\nassertion_2 = NarrativeAssertion(\n \"Number of METAL should never become negative\",\n lambda m: len(filterStates(lambda s: s.getValue(\"PLAYER\", \"METAL\") < 0,\n statesOf(m)))\n == 0\n)\n\ndef calculate_success_Ratio(model: NarrativeModel) -> float:\n successPathCount = len(pathsFromTo(model,\n model.initialStates,\n filterStates(successCondition, statesOf(model))))\n\n failPathCount = len(pathsFromTo(model,\n model.initialStates,\n filterStates(failCondition, statesOf(model))))\n\n return successPathCount / (successPathCount + failPathCount)\n\n\nassertion_3 = NarrativeAssertion(\n \"5 percent < success ratio < 8 percent\",\n lambda m: 0.05 < calculate_success_Ratio(m) < 0.08\n)\n\n\n# =============================================== Termination Conditions ===============================================\n\n\ndef failCondition(s: NarrativeState) -> bool:\n return s.getValue(\"WORLD\", \"REMAINING_DAYS\") == 0\n\n\ndef successCondition(s: NarrativeState) -> bool:\n return s.getValue(\"PLAYER\", \"BOW\") >= 1 and \\\n s.getValue(\"PLAYER\", \"SWORD\") >= 1\n\n\n# ================================================== Narration Setting =================================================\n\n\ninitial_states = {NarrativeState(context)}\nterm_conditions = {failCondition, successCondition}\nchoices = [ch_cut_tree, ch_dig_mine,ch_make_bow, ch_make_sword]\nassertions = [assertion_1, assertion_2, assertion_3]\nsettings: NarrationSetting = NarrationSetting(initial_states=initial_states,\n termination_conditions=term_conditions,\n choices=choices,\n assertions=assertions)\n\nmodel: NarrativeModel = generateNarrativeModel(setting=settings, max_depth=math.inf)\n\n# ======================================== Narration Generation and Proof Checking =====================================\n\nmodel.validateAssertions()\nprint(\"SUCCESS RATIO: \", calculate_success_Ratio(model))\nprint(\"TERMINABLE:\", model.hasAbsoluteTermination())\nmodel.runNarration(True, NarrativeState(context))\n#model.drawNarrationGraph(show_state=False, 
show_choices=True)\n","repo_name":"SACHAM0RA/RaviFramework","sub_path":"examples/SampleNarration2.py","file_name":"SampleNarration2.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27820844817","text":"from sentence_transformers import SentenceTransformer, util\nfrom elasticsearch import Elasticsearch, helpers\nimport streamlit as st\nimport pandas as pd\nfrom PIL import Image\n\n#@st.cache(allow_output_mutation=True)\n\nes = Elasticsearch(['https://localhost:9200'],ca_certs=False, verify_certs=False, basic_auth=('elastic', 'polina1518'))\nmodel = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1')\n#('multi-qa-distilbert-dot-v1')\n\n\ndef run():\n    #image = Image.open('IMDB.PNG')\n    #st.image(image, caption='') \n    \n    st.title('IMDB semantic search')\n    ranker = st.sidebar.radio('Options:', [\"Lexical\", \"Semantic\"], index=0)\n    st.text('')\n    input_text = []\n    comment = st.text_input('Enter the movie text to search for!')\n    input_text.append(comment)\n    \n    df_result = pd.DataFrame()\n    df_result['Titulo'] = ''\n    df_result['Descripcion'] = ''\n    df_result['Link'] = ''\n\n    if st.button('SEARCH'):\n        with st.spinner('Searching ......'):\n            if input_text[0] != '':\n                #result = []\n                print('INPUT:', input_text)\n                question_embedding = model.encode(input_text[0])\n                if ranker == 'Lexical':\n                    print('Lexical search....')\n                    bm25 = es.search(index=\"quora\", body={\"query\": {\"match\": {\"field_traduccion\": input_text[0] }}})\n                    for hit in bm25['hits']['hits'][0:5]:\n                        xtitulo = hit['_source']['field_titulo']\n                        xlink = hit['_source']['field_link']\n                        xdescripcion = hit['_source']['field_traduccion']\n                        df_result = pd.concat([df_result, pd.DataFrame([{'Titulo': xtitulo, 'Descripcion': xdescripcion, 'Link': xlink}])], ignore_index=True)\n                        #result.append(hit['_source']['question'])\n                else:\n                    print('Semantic search....')\n                    sem_search = es.search(index=\"quora\", body={\n                        \"query\": {\n                            \"script_score\": {\n                                \"query\": {\n                                    \"match_all\": {}\n                                },\n                                \"script\": {\n                                    \"source\": \"cosineSimilarity(params.queryVector, doc['field_traduccion_vector']) + 1.0\",\n                                    \"params\": {\n                                        \"queryVector\": question_embedding.tolist()  # numpy arrays are not JSON-serializable\n                                    }\n                                }\n                            }\n                        }\n                    })\n                    for hit in sem_search['hits']['hits'][0:5]:\n                        xtitulo = hit['_source']['field_titulo']\n                        xlink = hit['_source']['field_link']\n                        xdescripcion = hit['_source']['field_traduccion']\n                        df_result = pd.concat([df_result, pd.DataFrame([{'Titulo': xtitulo, 'Descripcion': xdescripcion, 'Link': xlink}])], ignore_index=True)\n                        #result.append(hit['_source']['question'])\n            \n            #for i in df_result:\n            st.dataframe(df_result)\n            #st.success(f\"{str(i)}\")\n\nif __name__ == '__main__':\n    #model_embedding, client = load_es()\n    run()\n","repo_name":"Polindem/streamlit-elastic","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33762323268","text":"# messy code, I know\r\nwhile True:\r\n\r\n    aa = input(\" What do you need? 
(+, -, *, /, **): \")\r\n    if (aa != '+') and (aa != '-') and (aa != '*') and (aa != '/') and (aa != '**'):\r\n        print('\\n What are you even typing?') # About \\n : it inserts a blank line between outputs so nothing runs together\r\n    else:\r\n        a = (input(\"\\n Enter the first number \"))\r\n        if a.isdigit(): # Checks whether the STRING is all digits: the string \"1e2\" gives False, while '123' gives True \r\n            a = float(a) # now give the string a numeric type so there are no errors\r\n        else: # otherwise the string contains a letter\r\n            test = False # a flag purely to stop the loop; could not think of a better way\r\n            while test != True:\r\n                a = input('\\n The calculator does not work with letters, enter a number: ')\r\n                test = a.isdigit() # if the string again contains letters then test = False; if there are none, it is True and the loop ends\r\n            a = float(a) # type conversion, otherwise strings get concatenated and 2+2 gives 22 =)\r\n\r\n        # Exactly the same as with a\r\n\r\n        b = (input(\"\\n Enter the second number \"))\r\n        if b.isdigit(): # the same as with a, only for b\r\n            b = float(b) \r\n        else:\r\n            test = False\r\n            while test != True:\r\n                b = input('\\n The calculator does not work with letters, enter a number: ')\r\n                test = b.isdigit()\r\n            b = float(b)\r\n\r\n        if aa == \"+\":\r\n            c = a + b\r\n            print(\"\\n Result \" + str(c))\r\n        elif aa == \"-\":\r\n            c = a - b\r\n            print(\"\\n Result \" + str(c))\r\n        elif aa == \"*\":\r\n            c = a * b\r\n            print(\"\\n Result \" + str(c))\r\n        elif aa == \"/\":\r\n            c = a / b\r\n            print(\"\\n Result \" + str(c))\r\n        elif aa == \"**\":\r\n            c = a ** b\r\n            print(\"\\n Result \" + str(c))\r\n\r\n","repo_name":"D2MIN/D2MIN","sub_path":"kalkulyator.py","file_name":"kalkulyator.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23243839414","text":"# 2022 enrolled students by department for the Electronics & IT, Business, and Social Sciences colleges\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport pandas as pd\n\n\nfile_path = 'C:/Users/kiw/github_15zd/data_mining/data/충북권 대학 등록금.csv' \ndf = pd.read_csv(file_path, encoding='utf-8')\n\nfontpath = 'C:/Windows/Fonts/malgunsl.ttf'\nfont_name = fm.FontProperties(fname=fontpath, size=50).get_name()\nplt.rc('font', family=font_name)\n\ndata_his = df['등록금']\n\nplt.hist(data_his, bins = [3000000, 4000000, 5000000, 6000000, 7000000, 8000000], edgecolor='white')\n\n# Draw the histogram\nplt.title('Tuition histogram for universities in the Chungbuk region')\nplt.xlabel('Tuition (million KRW)')\nplt.ylabel('Number of schools')\n\n# Show the plot\nplt.show()\n\n\n\n\n\n","repo_name":"kiw331/data_mining","sub_path":"ch4_ex2.py","file_name":"ch4_ex2.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26743669416","text":"import gym\nimport gym_deepline\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom gym_deepline.agents.DDQNatml_weighted_q import *\nimport os\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nprim_list = ['GaussianNBClassifier', 'BernoulliNBClassifier', 'MultinomialNB', 'DecisionTreeClassifier',\n             'ExtraTreesClassifier', 'RF_classifier', 'GradientBoostingClassifier', 'KNeighborsClassifierPrim',\n             'LinearSVC', 'LogisticRegression', 'XGBClassifier', 'FastICA', 'MaxAbsScaler', 'MinMaxScaler',\n             'Normalizer', 'PCA_Randomized', 'RobustScaler', 'StandardScaler', 'imputer', 'OneHotEncoder',\n             'NumericData', 'ImputerMedian', 'ImputerOneHotEncoderPrim', 'UnivariateSelectChiFWE',\n             'f_classifFWE', 'f_classifPercentile', 'VarianceThreshold', 'UnivariateSelectChiPercentile',\n             'RFE_RandomForest', 
'QuantileTransformer',\n             'MajorityVoting', 'RandomForestMeta', 'RandomForestRegressorMeta', 'AdaBoostClassifierMeta',\n             'ExtraTreesMetaClassifier', 'GradientBoostingClassifierMeta', 'XGBClassifierMeta',\n             'KBinsDiscretizerOrdinal', 'RandomTreesEmbedding', 'KernelPCA', 'UnivariateSelectChiKbest',\n             'mutual_info_classifKbest'\n             ]\n\n\ndef train_deepline(env, log_dir, datasets_indices, total_timesteps):\n    env.set_env_params(prim_list, lj_list=datasets_indices, embedd_size=15, log_pipelines=True)\n    info = env.state_info\n    env = AtmlMonitor(env, log_dir, allow_early_resets=True)\n    env = DummyVecEnv([lambda: env])\n    kwargs = dict(layers=[256, 128, 64, 8], state_info=info)\n    model = DqnAtml(CustomPolicy, env, verbose=1, policy_kwargs=kwargs, prioritized_replay=True,\n                    learning_rate=0.00005, gamma=0.98)\n    env.envs[0].env.observation.model = model\n\n    print('Start Training')\n    model.learn(total_timesteps=total_timesteps, log_interval=100)\n    model.save(log_dir + \"/last_model\")\n    return model\n\n\ndef test_deepline(env, model, datasets_idx):\n    obs = env.reset()\n    env.set_env_params(prim_list, datasets_idx, embedd_size=15, log_pipelines=True)\n    env.observation.model = model\n    x_train = env.observation.X_train.copy(deep=True)\n    y_train = env.observation.Y_train.copy()\n    x_test = env.observation.X_test.copy(deep=True)\n    y_test = env.observation.Y_test.copy()\n    model.set_env(env)\n\n    ds = env.observation.learning_job.name\n    print('Testing dataset: {}'.format(ds))\n\n    done = False\n    while not done:\n        action, _states = model.predict(obs, deterministic=False)\n\n        obs, reward, done, info = env.step(action)\n        env.render()\n        if done:\n            env.observation.pipe_run.produce(x_test)\n            score = env.observation.pipe_run.learning_job.metric.evaluate(y_test.copy(), env.observation.pipe_run.produce_outputs['predictions'])\n            print('Score: {}'.format(score))\n\n\nif __name__ == '__main__':\n    log_dir = 'logs/'\n    env = gym.make('deepline-v0')\n\n    if not os.path.exists(log_dir):\n        os.makedirs(log_dir)\n\n    train_indices = list(range(45))\n    test_indices = [46]\n\n    num_training_steps = 150  # change to 50,000-150,000 for better results!\n    model = train_deepline(env, log_dir, train_indices, num_training_steps)\n    test_deepline(env, model, test_indices)\n\n\n\n","repo_name":"yuvalhef/gym-deepline","sub_path":"gym_deepline/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"37091816908","text":"import pytest\nimport time\nimport math\nfrom selenium.webdriver.common.by import By\n\n\n@pytest.mark.parametrize('link', [\"236895\", \"236896\", \"236897\", \"236898\", \"236899\", \"236903\", \"236904\", \"236905\"])\ndef test_answer(browser, link):\n    input_result = math.log(int(time.time()))\n    url = f\"https://stepik.org/lesson/{link}/step/1/\"\n    browser.get(url)\n    browser.implicitly_wait(10)\n    field_for_input = browser.find_element(By.TAG_NAME, 'textarea')\n    field_for_input.send_keys(input_result)\n    button = browser.find_element(By.CLASS_NAME, 'submit-submission')\n    button.click()\n\n    feedback = browser.find_element(By.CLASS_NAME, 'smart-hints__hint').text\n\n    assert feedback == 'Correct!', 'Isn\\'t correct'\n","repo_name":"zokm/stepik-auto-tests-course","sub_path":"part3/p3_l6_s3_test_parametrize.py","file_name":"p3_l6_s3_test_parametrize.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32541402144","text":"# Databricks notebook source\n# MAGIC 
%md \n# MAGIC ##### Access ADLS Using SAS token\n# MAGIC\n# MAGIC Note that the recommended way to access ADLS from Databricks is by using an AAD Service Principal backed by an Azure Key Vault Databricks secret scope.\n# MAGIC\n# MAGIC Here, for simplicity, we use a SAS token.\n# MAGIC\n# MAGIC Note: Replace the storage account and container names with your own. \n\n# COMMAND ----------\n\nstorage_account = 'asastoremcw303474'\ncontainer_name = 'labs-303474'\ndata_folder_name = 'FlightsDelays'\n\n# COMMAND ----------\n\n# Set up the Spark configuration for the SAS token in order to access Azure Data Lake\nspark.conf.set(f\"fs.azure.account.auth.type.{storage_account}.dfs.core.windows.net\", \"SAS\")\nspark.conf.set(f\"fs.azure.sas.token.provider.type.{storage_account}.dfs.core.windows.net\", \"org.apache.hadoop.fs.azurebfs.sas.FixedSASTokenProvider\")\nspark.conf.set(f\"fs.azure.sas.fixed.token.{storage_account}.dfs.core.windows.net\", \"sp=racwlmeo&st=2023-09-07T14:17:14Z&se=2023-11-30T23:17:14Z&spr=https&sv=2022-11-02&sr=c&sig=jyWEvg%2FzLmK9J%2BOxIp%2B8QSCKYpVmNPfKNcNIo68Rh6E%3D\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ##### ABFS protocol\n# MAGIC\n# MAGIC The ABFS protocol (Azure Blob File System) is the Azure Blob storage driver for Hadoop that Spark requires.\n# MAGIC ABFS is part of Apache Hadoop and is included in many of the commercial distributions of Hadoop. It is the recommended protocol to use today when working with ADLS Gen2.\n# MAGIC\n# MAGIC The objects in ADLS are represented as URIs with the following URI schema:\n# MAGIC\n# MAGIC _abfs[s]://<container_name>@<account_name>.dfs.core.windows.net/<path>/<file_name>_\n# MAGIC \n# MAGIC If you add an **_'s'_** at the end (abfss) then the ABFS Hadoop client driver will ALWAYS use Transport Layer Security (TLS) irrespective of the authentication method chosen.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ##### dbutils\n# MAGIC\n# MAGIC Databricks utility tool\n# MAGIC\n# MAGIC You can do many operations with dbutils; for more details see [dbutils](https://learn.microsoft.com/en-us/azure/databricks/dev-tools/databricks-utils)\n# MAGIC\n# MAGIC Run `dbutils.fs.help()` for the full list of functions.\n# MAGIC\n# MAGIC In this example we list the content of the ADLS folder _FlightsDelays_ in the ADLS container:\n\n# COMMAND ----------\n\n#display(dbutils.fs.ls(\"/mnt/sandboxes/FlightsDelays/\"))\n\ndisplay(dbutils.fs.ls(f\"abfss://{container_name}@{storage_account}.dfs.core.windows.net/FlightsDelays/\"))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ##### Initial data exploration\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Load the file into a Spark DataFrame (pySpark)\n\n# COMMAND ----------\n\ndf = spark.read.csv(f'abfss://{container_name}@{storage_account}.dfs.core.windows.net/FlightsDelays/FlightDelaysWithAirportCodes.csv', header=True)\n\n# COMMAND ----------\n\n# Useful display method \ndisplay(df)\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC Run Spark operations on the DataFrame we've just created\n\n# COMMAND ----------\n\n# Print the schema of the DataFrame \ndf.printSchema()\n\n# COMMAND ----------\n\n# Infer the schema with Spark's inferSchema=\"true\" option\ndf = spark.read.csv(f'abfss://{container_name}@{storage_account}.dfs.core.windows.net/FlightsDelays/FlightDelaysWithAirportCodes.csv', header=True, inferSchema=\"true\")\n\n# COMMAND ----------\n\n# let's check the updated schema in the new DataFrame....\ndf.printSchema()\n\n# COMMAND ----------\n\n# count the number of records in the file\ndisplay(df.count())\n\n# COMMAND ----------\n\n# Using filter in DataFrame. 
Count total number of delays\ndisplay(df.filter(df[\"DepDel15\"] == 1).count())\n\n# COMMAND ----------\n\n# Check how many flights per month. Sort the output by clicking the \"count\" header.\nres = df.groupBy(\"Month\").count()\ndisplay(res)\n\n# COMMAND ----------\n\n# distribution of values for our target column DepDel15\ndisplay(df.groupBy(\"DepDel15\").count())\n\n# COMMAND ----------\n\n# Check how many flights per month, sorted by count\n# We can see that July is the busiest month\nfrom pyspark.sql import functions as F\n# Group the data by the Month column, count the number of rows in each group and sort it\nres = df.groupBy(\"Month\").agg(F.count(\"*\").alias(\"Count\")).sort(F.desc(\"Count\"))\ndisplay(res)\n\n# COMMAND ----------\n\n# Example of Spark SQL. Count how many flights per origin airport\n# load the dataframe into a temporary view\ndf.createOrReplaceTempView(\"FLIGHTS_DATA\")\n\n# Use Spark SQL to get the number of flights for each origin airport code\nmost_loaded_airport = spark.sql(\"\"\"\n    SELECT OriginAirportName, Month, COUNT(*) as flights\n    FROM FLIGHTS_DATA\n    GROUP BY OriginAirportName,Month\n    ORDER BY flights DESC\n\"\"\")\n\n# Show the result\ndisplay(most_loaded_airport)\n\n# COMMAND ----------\n\n# count how many delays per origin airport (top 10)\nfrom pyspark.sql import functions as F\n\n# Calculate the number of delays per origin airport\ndelay_counts = df.groupBy(\"OriginAirportName\").agg({\"DepDel15\": \"sum\"})\n# Rename the autogenerated column to something human readable\ndelay_counts = delay_counts.withColumnRenamed(\"sum(DepDel15)\", \"DelayCount\")\n# limit(10) must come after the sort, otherwise an arbitrary 10 rows would be taken\ndisplay(delay_counts.sort(F.desc(\"DelayCount\")).limit(10)) \n\n# COMMAND ----------\n\n# Count all cases of double delay: in Origin and Destination \nfrom pyspark.sql import functions as F\n\n# Filter only the rows where DepDel15 is equal to 1 (delayed flights)\ndelayed_flights = df.filter(df[\"DepDel15\"] == 1)\n\n# Group the data by OriginAirportName and DestAirportName and count the number of delayed flights\nresult = delayed_flights.groupBy(\"OriginAirportName\", \"DestAirportName\").agg(F.count(\"DepDel15\").alias(\"Delay Count\")).sort(F.desc(\"Delay Count\"))\n\n# Show the result\ndisplay(result)\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC ##### Data Quality exploration\n\n# COMMAND ----------\n\n# MAGIC %md Check the number of null values in DepDel15; this column indicates a departure delay of at least 15 minutes.\n# MAGIC\n# MAGIC If we want to use this field for delay prediction we should fix those records with null values. 
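\n# MAGIC A minimal sketch of two common fixes follows; imputing 0 (i.e. \"not delayed\") is an assumption made for illustration only, not a modeling recommendation.\n\n# COMMAND ----------\n\n# Hedged sketch: two possible ways to handle the null DepDel15 records\n# Option 1: drop the rows with a missing label\ndf_clean = df.dropna(subset=[\"DepDel15\"])\n# Option 2: impute the missing label with 0 (= \"not delayed\"; an illustrative assumption)\ndf_imputed = df.fillna({\"DepDel15\": 0})\ndisplay(df_imputed.groupBy(\"DepDel15\").count())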
\n\n# COMMAND ----------\n\ndf = spark.read.csv(f'abfss://{container_name}@{storage_account}.dfs.core.windows.net/FlightsDelays/FlightDelaysWithAirportCodes.csv', header=True, inferSchema=\"true\")\ndisplay(df)\n\n# COMMAND ----------\n\n# simple check of missing or invalid values with distinct for the target field: DepDel15\ndisplay(df.select('DepDel15').distinct())\n\n# COMMAND ----------\n\n# check the percentage of missing values for our target field: DepDel15\nfrom pyspark.sql.functions import col\n\npercentage_nulls_in_DepDel15 = df.filter(col(\"DepDel15\").isNull()).count() / df.count() * 100\nprint(f\"{percentage_nulls_in_DepDel15} % null values in DepDel15 column\") \n\n# COMMAND ----------\n\n# let's do it more simply with dbutils\ndbutils.data.summarize(df)\n\n# COMMAND ----------\n\ndbutils.data.help(\"summarize\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC\n# MAGIC #### Exercise \n# MAGIC Run the same analysis for the FlightWeatherWithAirportCode.csv file.\n# MAGIC Start from cell number 6\n","repo_name":"vladfeigin/azuredatabrickstraining","sub_path":"day1/1-Data exploration with pySpark -1.py","file_name":"1-Data exploration with pySpark -1.py","file_ext":"py","file_size_in_byte":6858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70252577121","text":"import socket\n\n# Instantiate a socket object\nsk = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\n# # First argument\n# socket.AF_INET ipv4\n# socket.AF_INET6 ipv6\n# # Second argument\n# socket.SOCK_STREAM TCP (connection-oriented): modest throughput needs but safe and reliable, e.g. phone calls, sending files\n# socket.SOCK_DGRAM UDP (connectionless): high throughput needs, lower integrity needs, e.g. live streams, voice\n\nip_port = (\"127.0.0.1\",9000) # tuple; 127.0.0.1 means localhost; the port is user-defined (pick one above the low reserved range, e.g. above 5000)\nsk.bind(ip_port) # bind the port number and IP address; without binding the port is random and clients cannot find the server\nprint(\"Start listening\")\nsk.listen(5) # queue up to 5 pending requests, similar to a pool\nprint(\"accept\")\nconn,addr = sk.accept() # take a request from the queue; returns the tuple (conn, addr) (connection, address)\nprint(\"Receiving content...\")\ndata = conn.recv(1024).decode() # receive the content and decode it\nprint(data)\nsk.close()","repo_name":"champion-yang/pythonSocket","sub_path":"python/day01/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28841734920","text":"\"\"\"This module contains the implementation of the pdf documents ingestor.\"\"\"\nimport os\nimport random\nimport subprocess\nimport tempfile\nfrom typing import List\n\nfrom .exceptions import UnsupportedQuotesSource\nfrom .ingestor_interface import IngestorInterface\nfrom .quote_model import QuoteModel\n\n\nclass PdfIngestor(IngestorInterface):\n    \"\"\"Define the implementation of the ingestor.\"\"\"\n\n    supported_extensions: List[str] = ['pdf']\n\n    @classmethod\n    def parse(cls, path: str) -> List[QuoteModel]:\n        \"\"\"Define the parse algorithm.\n\n        Arguments:\n            path -- the path to the document to ingest\n\n        Return:\n            a list of `QuoteModel` objects\n        \"\"\"\n        if not cls.can_ingest(path):\n            raise UnsupportedQuotesSource(\n                f'Cannot ingest quotes document: {path}.')\n\n        tmp = f'{tempfile.gettempdir()}/{random.randint(0,1000000)}.txt'\n        subprocess.call(\n            ['pdftotext', path, tmp])\n\n        file_ref = open(tmp, \"r\")\n        quotes = []\n        for line in file_ref.readlines():\n            line = line.strip('\\n\\r').strip()\n            if len(line) > 0:\n                parsed = line.split('-')\n                new_quote = QuoteModel(\n                    parsed[0].strip().strip('\"'), parsed[1].strip().strip('\"'))\n                quotes.append(new_quote)\n\n        file_ref.close()\n        os.remove(tmp)\n        return quotes
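\n\n\n# A minimal usage sketch (illustrative only): './quotes.pdf' is a hypothetical path,\n# 'pdftotext' must be installed, and each PDF line is expected to look like: \"quote\" - author.\n# A typical call from elsewhere in the package would be:\n#     quotes = PdfIngestor.parse('./quotes.pdf')\n#     for quote in quotes:\n#         print(quote)\n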
","repo_name":"vladflore/intermediate-python-nanodegree-capstone_2","sub_path":"QuoteEngine/pdf_ingestor.py","file_name":"pdf_ingestor.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43507259776","text":"from django.shortcuts import render, redirect\nfrom .forms import ReviewForm\nfrom .models import Review\nfrom .models import *\nfrom django.http import HttpResponse\n\n\ndef body(request):\n    return render(request, 'body.html')\n\n\ndef index(request):\n    value = Pizza.objects.all()\n    value2 = Menu.objects.all()\n\n    context = {\n        'value': value,\n        'value2': value2\n    }\n    return render(request, 'about.html', context)\n\n\ndef reviews(request):\n    if request.method == 'POST':\n        form = ReviewForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect('reviews')\n    else:\n        form = ReviewForm()\n\n    reviews = Review.objects.all()\n\n    return render(request, 'users.html', {'form': form, 'reviews': reviews})\n\n\ndef menu(request):\n    value2 = Menu.objects.all()\n    context = {\n        'value': value2\n    }\n    return render(request, 'about.html', context)\n\n\ndef regis(request):\n    if request.method == 'POST':\n        pizza_name = request.POST.get('pizza_name')\n        quantity = request.POST.get('quantity')\n        delivery_address = request.POST.get('delivery_address')  # similarly for delivery_address\n        phone_number = request.POST.get('phone_number')\n\n        PizzaOrder.objects.create(\n            pizza_name=pizza_name,\n            quantity=quantity,\n            delivery_address=delivery_address,\n            phone_number=phone_number\n        )\n\n        return HttpResponse('Your order will be delivered soon, stay in touch!')\n    return render(request, 'delivery.html')","repo_name":"talasbek0/Django_proect","sub_path":"proect_pill/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"45001678514","text":"import pandas as pd\n\ndef changeControlVariable(originalControlWindow, controlVariable):\n\n    control_df = pd.DataFrame()\n    for variable in controlVariable:\n\n        if(variable == 'distribution'):\n            #distribution: distribution is neutral if the minimum distribution is reached\n            #formula = (distribution(t) - min(distribution)) / min(distribution)\n            #therefore, if the minimum distribution applies to the entire dataset, the feature is 0 for all weeks\n\n            #create series of 0 for each element in scope\n            distribution_df = pd.Series([0 for x in range(len(originalControlWindow))]).rename('distribution')\n\n            #merge with existing controlFrame\n            control_df = pd.concat([control_df,distribution_df], axis=1)\n\n        if(variable == 'covid'):\n            #create series of 0 for each element in scope\n            covid_df = pd.Series([0 for x in range(len(originalControlWindow))]).rename('covid')\n            #merge with existing controlFrame\n            control_df = pd.concat([control_df,covid_df], axis=1)\n\n        if(variable == 'promotion'):\n            #create series of 0 for each element in scope\n            promotion_df = pd.Series([0 for x in range(len(originalControlWindow))]).rename('promotion')\n            #merge with existing controlFrame\n            control_df = pd.concat([control_df,promotion_df], axis=1)\n\n        if(variable == 'epros'):\n            #create series of 0 for each element in scope\n            epros_df = pd.Series([0 for x in range(len(originalControlWindow))]).rename('epros')\n            #merge with existing controlFrame\n            control_df = pd.concat([control_df,epros_df], axis=1)\n\n        if(variable == 'off_trade_visibility'):\n            #create series of 0 for 
each element in scope\n            off_trade_df = pd.Series([0 for x in range(len(originalControlWindow))]).rename('off_trade_visibility')\n            #merge with existing controlFrame\n            control_df = pd.concat([control_df,off_trade_df], axis=1)\n\n\n    return control_df","repo_name":"alex0267/marketing_mix_model_product","sub_path":"BUSINESS_OUTPUT/changeControlVariable.py","file_name":"changeControlVariable.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8833044042","text":"from ea import DE, DEcrossover\nfrom math import sqrt\nimport tempfile\n\nimport pytest\n\nfrom mock import Mock\n\nfrom numpy.random import rand, seed\n\nfrom numpy import copy, all\n\ndef sphere(x):\n    return sqrt((x*x).sum())\n\n@pytest.fixture\ndef output():\n    tmpfile = tempfile.NamedTemporaryFile(\"w\", delete=True)\n    return tmpfile.name\n\n@pytest.fixture\ndef funinfo():\n    return {'lower': -5, 'upper': 5, 'threshold': 0, 'best': 0}\n\n@pytest.mark.parametrize(\"dim\", \n[2]#, 5, 10]\n)\ndef test_DE(output, funinfo, dim):\n    evals = dim*1000\n    (fitness, best, evals) = DE.DE(sphere, funinfo, dim, evals, name_output=output, debug=False)\n    assert sphere(best) == fitness\n\n@pytest.mark.parametrize(\"dim\", \n[2, 5]\n)\ndef test_evals(output, funinfo, dim):\n    fun_fitness = Mock()\n    fun_fitness.side_effect = sphere\n    evals = dim*1000\n    assert fun_fitness.call_count == 0\n    (fitness, best, realevals) = DE.DE(fun_fitness, funinfo, dim, evals, name_output=output, run=1, debug=False)\n    assert sphere(best) == fitness\n    assert realevals - evals < 60\n    assert realevals == fun_fitness.call_count\n\n@pytest.mark.de\n@pytest.mark.parametrize(\"dim\", \n[2, 5]\n)\ndef test_population_improves(output, funinfo, dim):  # renamed: this test previously shadowed test_evals above\n    evals = dim*100\n    popsize = 10\n    population = (rand(dim*popsize)*10-5).reshape((popsize, dim))\n    previous_population = copy(population)\n    result = DE.DE(sphere, funinfo, dim, evals, name_output=output, run=1, debug=False, population=population)\n    assert sphere(result.solution) == result.fitness\n    \n    # Check the population improves\n    for i in range(popsize):\n        assert not all(population[i] == previous_population[i])\n        assert sphere(population[i]) <= sphere(previous_population[i])\n\n@pytest.mark.de\n@pytest.mark.parametrize(\"dim\", \n[2, 5]\n)\ndef test_evals_continue(output, funinfo, dim):\n    evals = dim*100\n    popsize = 10\n    first_population = (rand(dim*popsize)*10-5).reshape((popsize, dim))\n    myseed = 12345679\n    seed(myseed)\n    population = copy(first_population)\n    total_evals = 0\n    result = DE.DE(sphere, funinfo, dim, evals, name_output=output, run=1, debug=False, population=population)\n    assert sphere(result.solution) == result.fitness\n    total_evals += result.evaluations\n    second_population = copy(population)\n    result = DE.DE(sphere, funinfo, dim, evals, name_output=output, run=1, debug=False, 
population=population)\n    total_evals += result.evaluations\n\n    # Check the population improves\n    for i in range(popsize):\n        assert not all(first_population[i] == second_population[i])\n        assert not all(population[i] == second_population[i])\n        assert sphere(first_population[i]) >= sphere(second_population[i])\n        assert sphere(second_population[i]) >= sphere(population[i])\n\n    # Check again with double the evaluations\n    final_population = copy(population)\n    population = copy(first_population)\n    seed(myseed)\n    result = DE.DE(sphere, funinfo, dim, total_evals-popsize, name_output=output, run=1, debug=False, population=population)\n    assert sphere(result.solution) == result.fitness\n\n    for i in range(popsize):\n        assert all(population[i] == final_population[i])\n","repo_name":"dmolina/shadeils","sub_path":"ea/ea/tests/test_de.py","file_name":"test_de.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"73678029602","text":"import calendar\nfrom datetime import date\nfrom functools import reduce\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom kso.ConcertCalendarEvent import SeasonCalendarEvent\nfrom otherViews import render_to_response_with_extra_data\nfrom models import Season\n\n\n# noinspection PyUnusedLocal\ndef downloadSeason(request, season_id, member):\n    season = get_object_or_404(Season, pk=season_id)\n    return SeasonCalendarEvent(season, member)()\n\n\ndef seasonDetail(request, season_id, member):\n    if season_id == \"next\":\n        futureSeasons = Season.objects.filter(endDate__gte=date.today()).order_by(\"startDate\")\n        if not futureSeasons:\n            raise Http404\n        if futureSeasons.count() <= 1:\n            raise Http404\n        season = futureSeasons[1]\n    elif season_id == \"this\":\n        futureSeasons = Season.objects.filter(endDate__gte=date.today()).order_by(\"startDate\")\n        if not futureSeasons:\n            raise Http404\n        season = futureSeasons[0]\n    else:\n        season = get_object_or_404(Season, pk=season_id)\n    htmlCalendar = calendar.HTMLCalendar()\n    htmlCalendar.setfirstweekday(calendar.SUNDAY)\n    firstConcert = season.concert_set.all().order_by(\"dateAndTime\")[0]\n    start = firstConcert.dateAndTime\n    if firstConcert.rehearsal_set.all():\n        start = firstConcert.rehearsal_set.all().order_by(\"start\")[0].start\n    months = [htmlCalendar.formatmonth(start.year + int((start.month + i - 1) / 12), (start.month + i - 1) % 12 + 1) for\n              i in range(12)]\n    td_join = lambda a, b: a + '</td><td>' + b\n    tr_join = lambda a, b: a + '</td></tr><tr><td>' + b\n    cal = reduce(tr_join, [reduce(td_join, months[i:i + 3]) for i in range(0, 12, 3)])\n    cal = '<table><tr><td>' + cal + '</td></tr></table>'\n    if member == \"m/\":\n        return render_to_response_with_extra_data(request, \"seasoncalendar.html\", {'calendar': cal, 'season': season},\n                                                  member)\n    return render_to_response_with_extra_data(request, \"seasondetail.html\", {'calendar': cal, 'season': season}, \"\")\n","repo_name":"davidmus/kso","sub_path":"ksodjango/kso/seasonViews.py","file_name":"seasonViews.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21475387808","text":"from .Lugar import Lugar\nfrom .Pais import Pais\nclass Continente(Lugar):\n    ListaPaises = []\n\n    def addPais(self, Nombre, Codigo, Coordenadas):\n        unPais = Pais()  # instantiate Pais instead of assigning the class itself\n        unPais.Nombre = Nombre\n        unPais.Codigo = Codigo\n        unPais.Coordenadas = Coordenadas\n        self.ListaPaises.append(unPais)\n\n    def delPais(self, Codigo):\n        for i in list(self.ListaPaises):  # iterate over a copy so removal is safe\n            if i.Codigo == Codigo:\n                self.ListaPaises.remove(i)\n\n    def modPais(self, Codigo, nombreAtributo, atributoNuevo):\n        for i in self.ListaPaises:\n            if i.Codigo == Codigo:\n                setattr(i, nombreAtributo, atributoNuevo)  # set the named attribute dynamically","repo_name":"EmpanadasDeLaBufe/ABSLOL","sub_path":"Clases/Continente.py","file_name":"Continente.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13758746077","text":"import networkx as nx\nimport igraph as ig\nimport argparse\nimport csv\nimport community.community_louvain as cl\nimport numpy as np\nimport ast\nfrom networkx.algorithms.community import modularity\n\ndef get_membership_list_from_file(membership_path):\n    membership = dict()\n    with open(membership_path) as f:\n        for line in f:\n            i, m = line.strip().split()\n            membership[int(i)] = m\n    return membership\n\n\ndef group_to_partition(partition):\n    part_dict = {}\n    for index, value in partition.items():\n        if value in part_dict:\n            part_dict[value].append(index)\n        else:\n            part_dict[value] = [index]\n    return part_dict.values()\n\n\ndef network_statistics(graph, ground_truth_membership=None, show_connected_components=False):\n    print('** Network statistics **')\n    node_count, edge_count, isolate_count = graph.number_of_nodes(), graph.number_of_edges(), len(\n        list(nx.isolates(graph)))\n    connected_components_sizes = [len(c) for c in sorted(nx.connected_components(graph), key=len, reverse=True)]\n    connected_component_num = nx.number_connected_components(graph)\n    max_connected_component = max(connected_components_sizes)\n    degrees = [d for n, d in graph.degree()]\n    min_degree, max_degree, mean_degree, median_degree = np.min(degrees), np.max(degrees), np.mean(degrees), np.median(\n        degrees)\n    print('#nodes, #edges, #singletons:', node_count, edge_count, isolate_count)\n    print('num connected comp:', connected_component_num)\n    print('max connected comp size:', max_connected_component)\n    if show_connected_components:\n        print(connected_components_sizes)\n    print('min, max, mean, median degree:', min_degree, max_degree, mean_degree, median_degree)\n    if ground_truth_membership:\n        print('ground truth partition statistics')\n        # membership list -> {node_index: label} dict expected by group_to_partition\n        partition_statistics(graph, group_to_partition(dict(enumerate(ground_truth_membership))))\n    return node_count, edge_count, isolate_count, connected_component_num, max_connected_component, min_degree, max_degree, mean_degree, median_degree\n\n\ndef partition_statistics(G, partition, show_cluster_size_dist=True):\n    print('\\n** Partition statistics **')\n    cluster_num = len(partition)\n    cluster_sizes = [len(c) for c in partition]\n    min_size, max_size, mean_size, median_size = np.min(cluster_sizes), np.max(cluster_sizes), np.mean(\n        cluster_sizes), np.median(cluster_sizes)\n    singletons = [c for c in partition if len(c) == 1]\n    singletons_num = len(singletons)\n    non_singleton_num = len(partition) - len(singletons)\n    modularity_score = modularity(G, partition)\n    coverage = (G.number_of_nodes() - len(singletons)) / G.number_of_nodes()\n\n    print('#clusters in partition:', cluster_num)\n    if show_cluster_size_dist and cluster_num < 100:\n        print('cluster sizes:')\n        print(sorted(cluster_sizes, reverse=True))\n    print('min, max, mean, median cluster sizes:', min_size, max_size, mean_size, median_size)\n    print('number of singletons:', singletons_num)\n    print('number of non-singleton clusters:', non_singleton_num)\n    print('modularity:', modularity_score)\n    print('coverage:', coverage)\n    return cluster_num, min_size, max_size, mean_size, median_size, 
singletons_num, non_singleton_num, modularity_score, coverage\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Threshold Consensus\")\n parser.add_argument(\"-n\", \"--edgelist\", type=str, required=True,\n help=\"Network edge-list file\")\n parser.add_argument(\"-m\", \"--membership\", type=str, required=True,\n help=\"Partition membership\")\n #parser.add_argument(\"-g\", \"--groundtruth\", type=str, required=False,\n # help=\"Ground-truth membership\")\n args = parser.parse_args()\n net = nx.read_edgelist(args.edgelist, nodetype=int)\n network_statistics(net)\n partition = get_membership_list_from_file(args.membership)\n partition = group_to_partition(partition)\n partition_statistics(net, partition)\n\n\n\n","repo_name":"ytabatabaee/consensus-clustering","sub_path":"evaluate_partition.py","file_name":"evaluate_partition.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36159134791","text":"# author: Carlina Kim, Karanpal Singh, Sukriti Trehan, Victor Cuspinera\n# date: 2020-06-22\n\n'''This script will read new comments file and will predict the themes and subthemes for the comments. \nThe output dataframe will be saved in the specified directory.\n\nThere are 2 parameters Input Path and Output Path where you want to write the file with theme and subtheme predictions.\n\nUsage: predict_new_comments.py --input_dir= --output_dir=\n\nExample:\n python src/models/predict_new_comments.py --input_dir=data/new_data/ --output_dir=data/new_data/\n\nOptions:\n--input_dir= Directory name for new comments excel file\n--output_dir= Directory for saving excel file with predicted themes and subthemes \n'''\n\nimport pandas as pd\nimport numpy as np\nimport keras\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nimport sys\nsys.path.append('src/data/')\nfrom preprocess import Preprocessing\n\nfrom docopt import docopt\n\nopt = docopt(__doc__)\n\ndef main(input_dir, output_dir):\n \"\"\"\n This function loads files from input_dir, makes theme and subtheme predictions\n based on the saved models and saves an excel file with predictions in the output_dir\n \"\"\"\n print(\"\\n--- START: predict_new_comment.py ---\\n\")\n\n print(\"**Loading the data**\")\n ## Reading new comments data\n try:\n new_comments = pd.read_excel(input_dir + '/new_comments.xlsx')\n except:\n print(\"File new_comments.xlsx not found.\\n\")\n print(\"--- END: predict_new_comments.py ---\\n\")\n return\n\n ## Load training data\n X_train = pd.read_excel('data/interim/question1_models/advance/X_train.xlsx')\n\n ## Load y_train and extract column names for themes and subthemes\n y_train = pd.read_excel('data/interim/question1_models/advance/y_train.xlsx')\n theme_names = y_train.rename(columns={'FEW':'FWE'}).iloc[:,:12].columns\n subthemes = y_train.iloc[:,12:-1].columns\n\n print('**Preprocessing: this step could take time, please be patient.**')\n X_train = Preprocessing().general(X_train['Comment'])\n new_comments_ppd = Preprocessing().general(new_comments['Comment'])\n new_comment_ppd_df = pd.DataFrame(new_comments_ppd, columns = ['Comment'])\n\n ## Get parameters\n print('**Computing the required parameters**')\n max_len = max(len(comment.split()) for comment in X_train)\n vect=Tokenizer()\n vect.fit_on_texts(X_train)\n\n encoded_new_comments = 
vect.texts_to_sequences(new_comments_ppd)\n padded_new_comments = pad_sequences(encoded_new_comments, maxlen=max_len, padding='post')\n\n ## Loading saved model\n print('**Loading the saved theme model**')\n theme_model = tf.keras.models.load_model('models/Theme_Model/theme_model')\n print(\"**Making the theme predictions**\")\n pred_themes_array = theme_model.predict(padded_new_comments)\n pred_themes_array = (pred_themes_array > 0.4)*1\n\n ## Making dataframe of prediction\n pred_themes = pd.DataFrame(pred_themes_array, columns=theme_names)\n\n print(\"**Theme predictions are successfully done. Predicting subthemes now.**\\n\")\n\n ## Creating dictionary with theme indices as keys predicted comment indices as values\n ind_dict = dict()\n for i in range(pred_themes_array.shape[1]):\n ind_dict[i] = np.where(pred_themes_array[:,i] == 1)[0]\n\n ## Creating 2d zero array of size (#comments x 62)\n zero_arrays = np.zeros((pred_themes_array.shape[0], 62))\n\n subtheme_pos = dict()\n\n count_i = 0\n\n for i in range(len(theme_names)):\n count_a = count_i\n for sublab in subthemes:\n if sublab.startswith(theme_names[i]):\n count_i += 1\n subtheme_pos[i] = range(count_a, count_i)\n\n\n ## Creating dictionary for theme names and theme indices\n theme_dict = dict()\n model_dict = dict()\n for i in range(len(theme_names)):\n model_dict[i] = str(theme_names[i]).lower() + '_model'\n theme_dict[i] = str(theme_names[i])\n\n ## Loop for predicting subthemes\n pred_subthemes = dict()\n pred_thresh = {0:0.4, 1:0.4, 2:0.3, 3:0.4, 4:0.5, 5:0.3, 6:0.4, 7:0.4, 8:0.4, 9:0.3, 10:0.3, 11:0.4}\n \n for i in list(ind_dict.keys()):\n \n print(\"**Predicting subthemes for comments classified as label\", theme_dict[i], \"**\")\n\n\t # subset comments for predicted label\n # print(\"comment_subsets\\n\", new_comments_ppd)\n comments_subset = new_comment_ppd_df.iloc[ind_dict[i]] ## MAY BE DOESN'T NEED ILOC\n\n\t # load respective train set for predicted label\n input_dir_1 = 'data/interim/subthemes/' + str(theme_dict[i])\n x_train = pd.read_excel(input_dir_1 + '/X_train_subset.xlsx')\n\n\t # Preprocessing comments and x_train\n print(\"**Preprocessing training set for this label. 
This may take a little time**\")\n x_train = Preprocessing().general(x_train['Comment'])\n # comments_subset = Preprocessing().general(comments_subset['Comment'])\n\n\t # Getting parameters\n print(\"**Getting the required parameters now**\")\n max_len = max(len(comment.split()) for comment in x_train)\n vect=Tokenizer()\n vect.fit_on_texts(x_train)\n\n\t # Padding comments\n encoded_docs_comments = vect.texts_to_sequences(comments_subset['Comment'])\n padded_docs_comments = pad_sequences(encoded_docs_comments, maxlen=max_len, padding='post')\n\n\t # loading model\n print(\"**Loading saved model for theme\", model_dict[i], \"**\")\n model = tf.keras.models.load_model('models/Subtheme_Models/' + model_dict[i])\n\n\t # Predictions\n print(\"**Predicting subthemes for comments**\")\n try:\n pred = model.predict(padded_docs_comments)\n pred = (pred > pred_thresh[i])*1\n pred_subthemes[i] = pred\n for j in range(pred_subthemes[i].shape[0]):\n zero_arrays[ind_dict[i][j], subtheme_pos[i]] += pred_subthemes[i][j]\n except:\n pass\n print(\"Predictions for subthemes of \", theme_dict[i], \"are completed!\")\n print('-----------------------------------')\n\n print(\"**Subtheme predictions are successfully done**\")\n subtheme_pred = pd.DataFrame(zero_arrays, columns=subthemes)\n\n final_pred = pd.concat([pd.Series(new_comments['Comment']), pred_themes, subtheme_pred], axis=1)\n final_pred.to_excel(output_dir + '/predictions.xlsx')\n print(\"**Predictions have been saved to\", output_dir, \"**\\n\")\n print(\"--- END: predict_new_comments.py ---\\n\")\n\n return\n\nif __name__ == \"__main__\":\n main(opt[\"--input_dir\"], opt[\"--output_dir\"])","repo_name":"singh-karanpal/Capstone","sub_path":"src/models/predict_new_comments.py","file_name":"predict_new_comments.py","file_ext":"py","file_size_in_byte":6686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
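The subtheme loop above turns each model's sigmoid outputs into 0/1 labels with a per-theme cutoff taken from pred_thresh. The same numpy comparison in isolation, with fabricated probabilities:

import numpy as np

probs = np.array([[0.35, 0.62, 0.10],
                  [0.45, 0.28, 0.51]])
labels = (probs > 0.4) * 1   # elementwise bool comparison, then bool -> 0/1
print(labels)                # [[0 1 0]
                             #  [1 0 1]]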
+{"seq_id":"3188272787","text":"import string\nimport nltk\nimport os\nimport pickle\nimport numpy as np \nimport pandas as pd \nimport scipy.sparse as sparse\n\nfrom src.data_loading import interactions\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn import preprocessing\nfrom nltk import SnowballStemmer\nfrom nltk.corpus import stopwords\nnltk.download('stopwords', quiet=True)\n\nstemmer = SnowballStemmer(\"english\")\n\n# TODO: replace these with the actual files\n\nTFIDF_FILE = os.path.join(interactions.ROOT, 'models', \"tfidf-vectorizer.pickle\")\nMETADATA_TFIDF = os.path.join(interactions.ROOT, 'data', 'metadata_tfidf.json')\nTFIDF_MATRIX = os.path.join(interactions.ROOT, 'data', 'sparse_matrix.npz')\nTOPIC_MODEL = os.path.join(interactions.ROOT, 'models', 'topic_model.pickle')\n\n\ndef tokenize(text):\n # translator that replaces punctuation with empty spaces\n translator = str.maketrans(string.punctuation, ' ' * len(string.punctuation)) \n return [stemmer.stem(i) for i in text.translate(translator).split()] \n\nstop_words = set([tokenize(word)[0] for word in stopwords.words('english')])\n\n\ndef train_tfidf_vectorizer(\n text_list, \n filename=TFIDF_FILE ,\n max_features=1000, stop_words=stop_words, **kwargs):\n \"\"\"Outputs tfidf transformed vectors from the input text\"\"\"\n assert not filename or not os.path.exists(filename), 'tfidf file already exists'\n \n vectorizer = TfidfVectorizer(\n analyzer='word', \n strip_accents='unicode',\n tokenizer=tokenize,\n stop_words=stop_words,\n max_features=max_features, **kwargs)\n \n tfidf_vectors = vectorizer.fit_transform(text_list)\n if filename: pickle.dump(vectorizer, open(filename, \"wb\"))\n return tfidf_vectors, vectorizer\n\n\ndef tfidf_transform(text_list, filename=TFIDF_FILE):\n \"\"\"load tfidf file from path \"\"\"\n with open(filename, 'rb') as f:\n vectorizer = pickle.load(f)\n return vectorizer.transform(text_list)\n\n\ndef training_pipeline(filename=METADATA_TFIDF, \n tfidf_filename=TFIDF_FILE,\n metadata_filename=interactions.METADATA_FILE, testing=False,**kwargs):\n \"\"\"convert metadata into TFIDF vectors and store tfidf vectorizer\"\"\"\n metadata = interactions.get_metadata(filename=metadata_filename)\n if testing:\n metadata = metadata[:100]\n filename = 'testing-' + filename\n tfidf_filename = 'testing-' + tfidf_filename\n \n metadata['agg_text'] = interactions.aggregate_text(metadata)\n tfidf_vectors, vectorizer = train_tfidf_vectorizer(\n metadata['agg_text'], \n filename=tfidf_filename, **kwargs)\n \n sparse.save_npz(TFIDF_MATRIX, tfidf_vectors)\n\n metadata['tfidf'] = tfidf_vectors.toarray().tolist()\n if filename and not os.path.exists(filename):\n metadata.to_json(filename)\n return metadata\n\n \ndef topic_generator(tfidf, num_topics=10, verbose=False, \n filename='topic_model.pickle', **kwargs):\n \"\"\"train topic model with TFIDF vectors\"\"\"\n assert not filename or not os.path.exists(filename), 'topic model already exists'\n # Fitting LDA model\n lda = LatentDirichletAllocation(\n n_components = num_topics, \n learning_method='online',\n random_state=42, **kwargs) #adjust n_components\n \n doctopic = lda.fit_transform(tfidf)\n if filename: pickle.dump(lda, open(filename, \"wb\"))\n return doctopic, lda\n\n\ndef topic_transform(tfidf_vectors, filename=TOPIC_MODEL):\n \"\"\"load topic model and transform tfidf vectors in topic vectors \"\"\"\n with open(filename, 'rb') as f:\n lda = pickle.load(f)\n return lda.transform(tfidf_vectors)\n\n\ndef get_topics(topic_model_filename=TOPIC_MODEL, tfidf_filename=TFIDF_FILE):\n \"\"\"load topic model, TFIDF from pickle and get topics\"\"\"\n with open(topic_model_filename, 'rb') as f: lda = pickle.load(f)\n with open(tfidf_filename, 'rb') as f: vectorizer = pickle.load(f)\n features = {v:k for k,v in vectorizer.vocabulary_.items()} \n\n # Displaying the top keywords in each topic\n ls_keywords = []\n for i,topic in enumerate(lda.components_):\n word_idx = np.argsort(topic)[::-1][:10]\n keywords = ', '.join(features[i] for i in word_idx)\n ls_keywords.append(keywords)\n print(i, keywords) \n return ls_keywords \n\n\ndef analyze_all_clusters(cluster_file=interactions.CLUSTER_FILE):\n pass \n\n\ndef generate_cluster_vector(cluster, filename=interactions.METADATA_FILE):\n '''\n Inputs:\n cluster: list of URLs\n filename: original metadata file \n ngram: number of ngrams to use \n num_topics: number of topics to generate \n i_min: minimum number of interactions\n n_len: threshold length of text to consider in the metadata set\n '''\n \n # extract tfidf vectors from 'metadata_tfidf.csv'\n pass \n ","repo_name":"kenzeng24/social-network-url-clustering","sub_path":"src/preprocessing/vectorize_text.py","file_name":"vectorize_text.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"12070905582","text":"def analyze_file(file):\n with open(file, \"r\") as f:\n n = int(f.readline())\n num1_main = 0\n num2_main = 0\n 
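# (Inferred intent) num1_main tracks the largest multiple of 7 seen so far and
# num2_main the largest non-multiple; each candidate is accepted only if its
# residue mod 160 differs from the other's current value.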
for i in range(n):\n number = int(f.readline())\n if number % 7 == 0:\n if number % 160 != num2_main % 160:\n num1_main = max(number, num1_main)\n else:\n if number % 160 != num1_main % 160:\n num2_main = max(number, num2_main)\n print(num1_main, num2_main)\n\n\nanalyze_file(\"Input_files_28129/28129_A.txt\")\nanalyze_file(\"Input_files_28129/28129_B.txt\")\n","repo_name":"KurmaevAmir/Preparation_for_the_EGE","sub_path":"Task 27/ege.sdamgia.ru/solution_28129.py","file_name":"solution_28129.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71293629921","text":"def merge(nums1, m, nums2, n):\n '''\n 88. Merge Sorted Array\n ======================\n Given two arrays nums1 and nums2, merge them into nums1.\n nums1 has extra space padded with zeros.\n\n Example\n -------\n >>> nums1 = [1, 2, 3, 0, 0, 0]\n >>> nums2 = [2, 5, 6]\n >>> merge(nums1, nums2)\n >>> all(nums1[i] <= nums1[i + 1] for i in range(len(nums1) - 1))\n True\n\n '''\n i = m - 1\n j = n - 1\n k = m + n - 1\n while i >= 0 and j >= 0:\n if nums1[i] > nums2[j]:\n nums1[k] = nums1[i]\n i -= 1\n k -= 1\n else:\n nums1[k] = nums2[j]\n j -= 1\n k -= 1\n if j >= 0:\n nums1[:k + 1] = nums2[:j + 1]\n return nums1\n\n","repo_name":"deehzee/algods","sub_path":"practice/leetcode/lc0088_merge_sorted_arrays.py","file_name":"lc0088_merge_sorted_arrays.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23101371696","text":"import datetime\nimport json\n\nfrom django.db.models import Sum\n\n\nfrom .models import Product\nfrom .models import Inventory\nfrom .models import InventoryLog\nfrom .models import Book\nfrom .models import BookLog\n\n\ndef invoice_data_validator(invoice_data):\n \n # Validate Invoice Info ----------\n\n # invoice-number\n try:\n invoice_number = int(invoice_data['invoice-number'])\n except:\n print(\"Error: Incorrect Invoice Number\")\n return \"Error: Incorrect Invoice Number\"\n\n # invoice date\n try:\n date_text = invoice_data['invoice-date']\n datetime.datetime.strptime(date_text, '%Y-%m-%d')\n except:\n print(\"Error: Incorrect Invoice Date\")\n return \"Error: Incorrect Invoice Date\"\n\n # Validate Customer Data ---------\n\n # customer-name\n if len(invoice_data['customer-name']) < 1 or len(invoice_data['customer-name']) > 200:\n print(\"Error: Incorrect Customer Name\")\n return \"Error: Incorrect Customer Name\"\n\n if len(invoice_data['customer-address']) > 600:\n print(\"Error: Incorrect Customer Address\")\n return \"Error: Incorrect Customer Address\"\n\n if len(invoice_data['customer-phone']) > 14:\n print(\"Error: Incorrect Customer Phone\")\n return \"Error: Incorrect Customer Phone\"\n if len(invoice_data['customer-gst']) != 15 and len(invoice_data['customer-gst']) != 0:\n print(\"Error: Incorrect Customer GST\")\n return \"Error: Incorrect Customer GST\"\n return None\n\n\ndef invoice_data_processor(invoice_post_data):\n print(invoice_post_data)\n processed_invoice_data = {}\n\n processed_invoice_data['invoice_number'] = invoice_post_data['invoice-number']\n processed_invoice_data['invoice_date'] = invoice_post_data['invoice-date']\n\n processed_invoice_data['customer_name'] = invoice_post_data['customer-name']\n processed_invoice_data['customer_address'] = invoice_post_data['customer-address']\n processed_invoice_data['customer_phone'] = invoice_post_data['customer-phone']\n 
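The merge routine in the record above fills nums1 from the back with two pointers, so nothing is overwritten before it is read; note that its docstring's doctest calls merge(nums1, nums2) and is missing the m and n length arguments. A corrected usage trace (assumes the merge defined above is in scope):

nums1 = [1, 2, 3, 0, 0, 0]
nums2 = [2, 5, 6]
merge(nums1, 3, nums2, 3)
print(nums1)   # [1, 2, 2, 3, 5, 6] -- written in place, largest values placed first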
processed_invoice_data['customer_gst'] = invoice_post_data['customer-gst']\n\n processed_invoice_data['vehicle_number'] = invoice_post_data['vehicle-number']\n\n if 'igstcheck' in invoice_post_data:\n processed_invoice_data['igstcheck'] = True\n else:\n processed_invoice_data['igstcheck'] = False\n\n processed_invoice_data['items'] = []\n processed_invoice_data['invoice_total_amt_without_gst'] = float(invoice_post_data['invoice-total-amt-without-gst'])\n processed_invoice_data['invoice_total_amt_sgst'] = float(invoice_post_data['invoice-total-amt-sgst'])\n processed_invoice_data['invoice_total_amt_cgst'] = float(invoice_post_data['invoice-total-amt-cgst'])\n processed_invoice_data['invoice_total_amt_igst'] = float(invoice_post_data['invoice-total-amt-igst'])\n processed_invoice_data['invoice_total_amt_with_gst'] = float(invoice_post_data['invoice-total-amt-with-gst'])\n\n\n invoice_post_data = dict(invoice_post_data)\n for idx, product in enumerate(invoice_post_data['invoice-product']):\n if product:\n print(idx, product)\n item_entry = {}\n item_entry['invoice_product'] = product\n item_entry['invoice_hsn'] = invoice_post_data['invoice-hsn'][idx]\n item_entry['invoice_unit'] = invoice_post_data['invoice-unit'][idx]\n item_entry['invoice_qty'] = int(invoice_post_data['invoice-qty'][idx])\n item_entry['invoice_rate_with_gst'] = float(invoice_post_data['invoice-rate-with-gst'][idx])\n item_entry['invoice_gst_percentage'] = float(invoice_post_data['invoice-gst-percentage'][idx])\n\n item_entry['invoice_rate_without_gst'] = float(invoice_post_data['invoice-rate-without-gst'][idx])\n item_entry['invoice_amt_without_gst'] = float(invoice_post_data['invoice-amt-without-gst'][idx])\n\n item_entry['invoice_amt_sgst'] = float(invoice_post_data['invoice-amt-sgst'][idx])\n item_entry['invoice_amt_cgst'] = float(invoice_post_data['invoice-amt-cgst'][idx])\n item_entry['invoice_amt_igst'] = float(invoice_post_data['invoice-amt-igst'][idx])\n item_entry['invoice_amt_with_gst'] = float(invoice_post_data['invoice-amt-with-gst'][idx])\n\n processed_invoice_data['items'].append(item_entry)\n\n print(processed_invoice_data)\n return processed_invoice_data\n\ndef update_products_from_invoice(invoice_data_processed, request):\n for item in invoice_data_processed['items']:\n new_product = False\n if Product.objects.filter(user=request.user,\n product_name=item['invoice_product'],\n product_hsn=item['invoice_hsn'],\n product_unit=item['invoice_unit'],\n product_gst_percentage=item['invoice_gst_percentage']).exists():\n product = Product.objects.get(user=request.user,\n product_name=item['invoice_product'],\n product_hsn=item['invoice_hsn'],\n product_unit=item['invoice_unit'],\n product_gst_percentage=item['invoice_gst_percentage'])\n else:\n new_product = True\n product = Product(user=request.user,\n product_name=item['invoice_product'],\n product_hsn=item['invoice_hsn'],\n product_unit=item['invoice_unit'],\n product_gst_percentage=item['invoice_gst_percentage'])\n product.product_rate_with_gst = item['invoice_rate_with_gst']\n product.save()\n\n if new_product:\n create_inventory(product)\n\n# ================== Inventory methods ====================\n\ndef create_inventory(product):\n if not Inventory.objects.filter(user=product.user, product=product).exists():\n new_inventory = Inventory(user=product.user, product=product)\n new_inventory.save()\n\ndef update_inventory(invoice, request):\n invoice_data = json.loads(invoice.invoice_json)\n for item in invoice_data['items']:\n product = 
Product.objects.get(user=request.user,\n product_name=item['invoice_product'],\n product_hsn=item['invoice_hsn'],\n product_unit=item['invoice_unit'],\n product_gst_percentage=item['invoice_gst_percentage'])\n inventory = Inventory.objects.get(user=product.user, product=product)\n change = int(item['invoice_qty'])*(-1)\n inventory_log = InventoryLog(user=product.user,\n product=product,\n date=datetime.datetime.now(),\n change=change,\n change_type=4,\n associated_invoice=invoice,\n description=\"Sale - Auto Deduct\")\n inventory_log.save()\n inventory.current_stock += change\n inventory.last_log = inventory_log\n inventory.save()\n\n\ndef remove_inventory_entries_for_invoice(invoice, user):\n inventory_logs = InventoryLog.objects.filter(user=user,\n associated_invoice=invoice)\n for inventory_log in inventory_logs:\n inventory_product = inventory_log.product\n inventory_log.delete()\n # update the inventory total\n inventory_obj = Inventory.objects.get(user=user, product=inventory_product)\n recalculate_inventory_total(inventory_obj, user)\n\n\ndef recalculate_inventory_total(inventory_obj, user):\n new_total = InventoryLog.objects.filter(user=user, product=inventory_obj.product).aggregate(Sum('change'))['change__sum']\n if not new_total:\n new_total = 0\n inventory_obj.current_stock = new_total\n inventory_obj.save()\n\n\n# ================ Book methods ===========================\n\ndef add_customer_book(customer):\n # check if customer already exists\n if Book.objects.filter(user=customer.user, customer=customer).exists():\n return\n book = Book(user=customer.user,\n customer=customer)\n book.save()\n\n\ndef auto_deduct_book_from_invoice(invoice):\n invoice_data = json.loads(invoice.invoice_json)\n\n book = Book.objects.get(user=invoice.user, customer=invoice.invoice_customer)\n\n book_log = BookLog(parent_book=book,\n date=invoice.invoice_date,\n change_type=1,\n change=(-1.0)*float(invoice_data['invoice_total_amt_with_gst']),\n associated_invoice=invoice,\n description=\"Purchase - Auto Deduct\")\n\n book_log.save()\n\n book.current_balance = book.current_balance + book_log.change\n book.last_log = book_log\n book.save()\n","repo_name":"ghoshbishakh/billgst","sub_path":"gstbillingapp/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9031,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"36184748843","text":"# Dillon J. Cooper\n# CS 240 Midterm\n\n# Exercise 1:\n\n\ndef upper_file(y, z):\n \"\"\" str, str -> Nonetype\n\n Given a string containing a source filename and a string containing a\\\n destination filename, opens the source file, reads and converts all the\\\n text to uppercase, and saves it to the destination file. 
Returns nothing.\n \"\"\"\n var = []\n with open(y, 'r') as inputfile:\n var = inputfile.read().upper()\n with open(z, 'w') as outputfile:\n for item in var:\n outputfile.write(item)\n\n\n# Exercise 2:\n\ndef csv_parse(d):\n \"\"\" str -> tuple\n Given a string containing a source filename, parses a CSV file, returning\\\n a tuple containing two lists.\n \"\"\"\n\n second_list = []\n tuple_1 = ()\n\n with open(d, 'r') as inputfile:\n var = inputfile.readline().strip().split(',')\n second_line = inputfile.readlines()\n for item in second_line:\n item = item.strip()\n item = item.split(',')\n second_list.append(item)\n tuple_1 = (var, second_list)\n return tuple_1\n\n\n# Exercise 3:\n\ndef common_birds(list_1, list_2):\n \"\"\" (list), (list) -> set()\n Given two lists of string containing bird names, returns a set of unique\\\n bird names (no duplicates) common to both bird watcher's lists.\n \"\"\"\n\n w = set()\n\n for item in list_1:\n if item in list_2:\n w.add(item)\n return w\n\n\n# Exercise 4:\n\ndef csv_parse_dict(d):\n \"\"\" str -> (list of dicts)\n Given a string containing a source filename, parses a CSV file, returning\\\n a list of dictionaries.\n \"\"\"\n\n a = []\n\n with open(d, 'r') as inputfile:\n var = inputfile.readline().strip().split(',')\n second_line = inputfile.readlines()\n for item in second_line:\n item = item.strip()\n item = item.split(',')\n b = dict()\n for idx, thing in enumerate(item):\n key = var[idx]\n b[key] = thing\n a.append(b)\n return a\n\n\n# Exercise 5:\n\ndef rot13(char):\n char = char.upper()\n if char.isalpha():\n return chr((((ord(char) - ord('A') + 13) % 26) + ord('A')))\n else:\n return char\n\n\ndef rot13str(l):\n \"\"\" str -> str\n Accepts a string as a parameter and returns an encrypted string.\n \"\"\"\n\n if len(l) == 1:\n return rot13(l)\n else:\n return rot13(l[0]) + rot13str(l[1:])\n","repo_name":"dilloncooper15/CS240","sub_path":"dcooper_midterm.py","file_name":"dcooper_midterm.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
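rot13 shifts each letter 13 places through the alphabet, so applying rot13str twice restores the (upper-cased) message; a quick check against the two functions above:

message = "Hello, World"
encrypted = rot13str(message)
print(encrypted)             # URYYB, JBEYQ -- non-letters pass through unchanged
print(rot13str(encrypted))   # HELLO, WORLD -- 13 + 13 = 26 brings letters back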
+{"seq_id":"29394723598","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport os\nimport time\nfrom unittest import TestCase\nfrom unittest import TestLoader\n\nimport HTMLTestRunner\n\nfrom cases.ios.ffan.common.clearAppData import ClearAppData\nfrom cases.ios.ffan.common.testPrepare import TestPrepare\nfrom configs.iosDriverConfig import IosDriverConfigs as IDC\nfrom driver.appium_driver import AppiumDriver\nfrom pages.ios.ffan.dashboard_page import DashboardPage\nfrom pages.ios.ffan.store_info_page import StoreInfoPage\nfrom pages.ios.ffan.stores_and_supermarkets_page import StoresAndSupermarketsPage\nfrom pages.ios.ffan.switch_city_page import SwitchCityPage\nfrom cases.logger import logger\n\n\nclass ShangChaoTestCase(TestCase):\n '''\n Author: Song Bo\n Routing-inspection checklist #Anonymous\n Automated test #Anonymous\n Launch the app and verify that the stores/supermarkets section displays correctly.\n '''\n\n @classmethod\n def setUpClass(cls):\n '''\n Initialize the Appium driver\n '''\n\n cls.driver = AppiumDriver(None,\n None,\n IDC.platformName,\n IDC.platformVersion,\n IDC.deviceName,\n IDC.driverUrl,\n IDC.bundleId,\n IDC.udid).getDriver()\n logger.info(\"Appium client init completed\")\n\n def setUp(self):\n self.logger = logger\n TestPrepare(self, self.driver, self.logger).prepare(False)\n\n def test_case(self):\n dashboardPage = DashboardPage(self, self.driver, self.logger)\n dashboardPage.validSelf()\n dashboardPage.clickOnStores()\n\n storesAndSupermarketsPage = StoresAndSupermarketsPage(self, self.driver, self.logger)\n storesAndSupermarketsPage.validSelf()\n tempText = storesAndSupermarketsPage.clickOnStoreOrSupermarket()\n\n storeInfoPage = StoreInfoPage(self, self.driver, self.logger)\n storeInfoPage.validKeywords(tempText)\n storeInfoPage.clickBackKey()\n\n storesAndSupermarketsPage.validSelf()\n storesAndSupermarketsPage.clickBackKey()\n\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == \"__main__\":\n suite = TestLoader().loadTestsFromTestCase(ShangChaoTestCase)\n now = time.strftime('%Y_%m_%d_%H_%M_%S')\n reportpath = os.getcwd()\n filename = os.path.join(reportpath, 'Feifan_automation_test_report_' + now + '.html')\n fp = open(filename, 'wb')\n runner = HTMLTestRunner.HTMLTestRunner(fp, 'Feifan_automation_test_report', 'Result for test')\n runner.run(suite)\n","repo_name":"liu111xiao111/UItest","sub_path":"cases/ios/ffan/routing_inspection_test_cases/shangChao.py","file_name":"shangChao.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"19513261391","text":"\n\nimport maya.cmds as cmds\nfrom .. import UI_Object\n\n########################################################################\nclass LoadUI(UI_Object.UI):\n\t\"\"\"\n\tloadUI command allows loading of a user interface created in Trolltech \n\tSome Qt classes have equivalents in Maya. If a widget's class is recognized, the Maya-equivalent will be created instead.\n\t\n\tAny dynamic properties on a widget which start with a '-' character will be treated as a MEL flag/value pair. Similarly, any which start with a '+' will be treated as a Python flag/value pair. Such pairs will be applied to the widget upon creation.\n\t\"\"\"\n\t#----------------------------------------------------------------------\n\tdef __init__(self, name=None, **kwargs):\n\t\tparent = None\n\t\tif kwargs.has_key(\"qtParent\"):\n\t\t\tparent = kwargs.pop(\"qtParent\")\n\t\t\t\n\t\tif name == None:\n\t\t\tname = cmds.loadUI(**kwargs)\n\t\t\tsuper(LoadUI, self).__init__(name, **dict(qtParent=parent))\n\t\t\t\n\t\telse:\n\t\t\tif cmds.loadUI(name, exists=True):\n\t\t\t\tsuper(LoadUI, self).__init__(name)\n\t\t\telse:\n\t\t\t\tname = cmds.loadUI(name, **kwargs)\n\t\t\t\tsuper(LoadUI, self).__init__(name, **dict(qtParent=parent))","repo_name":"SGSMarkNA/DML_Tools","sub_path":"DML_Maya/Maya_GUI/MiscUI/LoadUI.py","file_name":"LoadUI.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34465518726","text":"#\n# @lc app=leetcode id=1962 lang=python3\n#\n# [1962] Remove Stones to Minimize the Total\n#\n\n# @lc code=start\nfrom typing import List\n\nimport heapq\n\nclass Solution:\n def minStoneSum(self, piles: List[int], k: int) -> int:\n for i in range(len(piles)):\n piles[i] = -piles[i]\n\n heapq.heapify(piles)\n for i in range(k):\n curr_max = -heapq.heappop(piles)\n heapq.heappush(piles, -(curr_max - curr_max//2))\n\n return -sum(piles)\n\n# @lc code=end\n\n","repo_name":"nmg322/leetcode","sub_path":"1962.remove-stones-to-minimize-the-total.py","file_name":"1962.remove-stones-to-minimize-the-total.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
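minStoneSum above simulates a max-heap by pushing negated values into Python's min-heap heapq, so popping the most negative entry returns the largest pile each round; the trick in miniature:

import heapq

piles = [5, 4, 9]
neg = [-p for p in piles]
heapq.heapify(neg)                        # min-heap over negations = max-heap
largest = -heapq.heappop(neg)             # 9, the biggest pile
heapq.heappush(neg, -(largest - largest // 2))
print(sorted(-x for x in neg))            # [4, 5, 5]: the 9-pile lost 9 // 2 = 4 stones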
int(input(\"정수 입력 : \"))\nsum = 0\nwhile n :\n sum += n\t# sum = sum + n\n n -= 1\t\t# n-- 같은 증감 연산자가 없어서 명시적으로 1을 빼줘야 함 (복합 대입연산자)\nprint(sum)\t\t# n이 0이 되면 false가 되기 때문에 while문 빠져나올 수 있음\n\n\n# 📌 유효값 체크\nwhile True:\n age = int(input(\"나이를 입력 하세요 : \"))\n if 0 < age < 200: break # 정상적으로 값이 입력 되었으므로 반복문을 벗어 난다.\n else: print(\"나이 입력 범위를 벗어 났습니다.\")\n\n \n# 📌 for 요소 in 시퀀스\n\t# 자바의 향상된 for문과 동일, 시퀀스(리스트, 튜플, 문자열 등)의 각 요소를 순회할 때 사용\nfruits = [\"apple\", \"banana\", \"kiwi\"]\nfor e in fruits : \n print(e)\n\n\n# 📌 for 변수 in range(시작값, 종료값, 증감값)\n\t# 자바의 기본적인 for문과 동일, index사용, 숫자 범위를 순회할 때 사용\nn = int(input(\"정수 입력 : \"))\nsum = 0\nfor i in range(1, n+1) :\n sum += i\nprint(sum)\n\n\n# 📌 2중 for문 : 구구단 출력 \nfor i in range(2,10) :\t# 9단까지 돌리기! 미만 개념 항상 remember...✨\n\tfor j in range(1, 10) :\n\t\tprint(f\"{i} * {j} = {i*j}\")\n\tprint(\"-\"*25)\n\n\n# 📌 2중 for문과 조건문 활용하기 (별로인 예제...)\nn = int(input(\"정수 입력 : \"))\nfor i in range(0, n) :\t# 입력받은 갯수 만큼 순회\n\tfor j in range(0, n) :\n\t\tif j % 2 == 0 : print(f\"{j}는 짝수\")\n\t\telse : print(f\"{j}는 홀수\")\n\tprint()\n\n\n# ⭐️찍기 : 입력 받은 수 만큼 별 찍기\nn = int(input(\"별 찍기 정수 입력 : \"))\t# 행을 의미\nfor i in range(n) :\t# 하나만 넣으면 최종값, 0 부터 n 미만까지 💫\n\tfor j in range(i+1) : \n\t\tprint(\"*\", end=\"\")\t# 별을 옆으로 찍기 위해서 end=\"\"\n\tprint()\t# 내부 for문이 끝나면 줄바꿈\n\nn = int(input(\"reversed 별 찍기 : \"))\nfor i in range(n):\n for j in range(n-i):\n print(\"*\", end=\"\")\n print()\n\n\n# 📌 for문에서 continue 사용 \n\t# continue를 만나면 아래의 문장을 수행하지 않고 반복문의 조건문으로 이동\nn = int(input())\nfor i in range(n) :\n\tif i % 2 == 0 : continue\n\tprint(i)\n\n\n# 📌 for문을 역순으로 순회\nn = int(input())\nfor i in range(n, 0 - 1, -1) :\t# 미만의 개념이 있으니까 0까지 찍기 위해 0 - 1, 증감값 넣지 않으면 무한대로 돌 수 있음\n\tprint(f\"값 : {i}\")\n\n\n# 📌 for문으로 알파벳 출력 하기 : 파이썬은 유니코드 사용\n\t# ✨ chr : 유니코드값을 입력 받아 코드에 해당하는 문자를 출력\n\t# ✨ ord : 문자의 유니코드 값을 돌려주는 함수\nfor i in range(ord(\"A\"), ord(\"Z\")+1) : # 시작값은 65, 종료값은 90+1\n print(chr(i), end=\" \")\t# 유니코드 값을 입력 받아 문자로 돌려주기\n\n\n# 📌 학점 구하기 : 성적을 입력 받아 학점 출력 하기 (반복문 사용, 음수가 입력되면 종료, 100 보다 크면 재 입력 요구)\nwhile True :\t# 반복문 사용\n score = int(input(\"점수 입력 : \"))\n # 종료 조건\n if score < 0 : break\n\t# 재입력 요구 조건\n if score > 100:\n print(\"점수를 잘못 입력 하셨습니다.\")\n continue # 반복문으로 되돌아 가기\n if score >= 90 : grade = \"A\"\n elif score >= 80 : grade = \"B\"\n elif score >= 70 : grade = \"C\"\n elif score >= 60 : grade = \"D\"\n else : grade = \"F\"\n print(f\"{score}에 대한 학점은 \\\"{grade}\\\"입니다.\")\n \n\n\n\n\n\n","repo_name":"dekim0705/Python","sub_path":"230526_02_반복문.py","file_name":"230526_02_반복문.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24123025692","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport pymysql\nimport json\nimport time\nFs = 8000\ns = 10\n\ndef datareceive(): \n\tprint(\"Received 10 seconds data with 8000sps (fake)....\")\n\ty=[]\n\tfor i in range(Fs*s):\n\t\ttmpdata=\"%.4f\" % (random.random()-random.random())\n\t\ty.append(float(tmpdata))\n\n\tprint(\"Generated, Size:\",len(y))\n\tprint(\"saving to csv\")\n\tnp.savetxt(\"test.csv\", y,fmt=\"%.4f\", delimiter=\",\")\n\tprint(\"convert to byte string\")\n\tt=json.dumps(y)\n\n\n\n\tdb = pymysql.connect(host='192.168.123.100', port=3306, user='USERNAME', passwd='PASSWORD', db='SmartHealthyLossWeight', charset='utf8')\n\tcursor = db.cursor() \n\tcursor.execute('select RAW FROM RAWDATA')\n\tcursor.fetchall()\n\tlatestid=int(cursor.rowcount)+1\n\tprint(\"INSEART SQL 
ID:\",latestid)\n\n\tsql = \"INSERT INTO RAWDATA (ID, RAW) VALUES (%s, %s)\"\n\tval = (latestid, t)\n\tcursor.execute(sql, val)\n\tdb.commit()\n\tdb.close()\n\ttime.sleep(5)\n\n\n\n\n\n#print(bytedata)\n#print(np.fromstring(bytedata,dtype=float))\n\n","repo_name":"rddtw099999/THE-DETECTION-FOR-SENSE-OF-FULLNESS","sub_path":"Python_Project _Threading/Fake_Data_Receiver.py","file_name":"Fake_Data_Receiver.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23754186207","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.8.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # Diagram drawing\n\nimport numpy as np\nimport plotly.graph_objects as go\nimport drawSvg as draw\n\n\n# ## drawSvg\n#\n# The diagrams we use are based around the python module drawSvg which allows construction of svg diagrams straight from python. The main container for an image is always generated in the chapter file which is then filled with svg element groups which can pass on any arguments to its child elements (such as circles, lines etc.).\n\n# ## Utility functions\n#\n# ### Rotation matrix\n#\n# A function which returns a rotation matrix in the xy plane in two or three dimensions.\n#\n# :::{dropdown} `rot_mat(θ, dim=2)`\n# * `θ` rotation angle.\n# * `dim` dimension of desired matrix, either 2 or 3.\n# :::\n\ndef rot_mat(θ, dim=2):\n if dim == 2:\n return np.array([[np.cos(θ), -np.sin(θ)], [np.sin(θ), np.cos(θ)]])\n else:\n return np.array([[np.cos(θ), -np.sin(θ), 0], [np.sin(θ), np.cos(θ), 0], [0, 0, 1]])\n\n\n# ### Arrow\n#\n# Draws an arrow between two points.\n#\n# :::{dropdown} `arrow(start, end, stroke_width=0.1, stroke='black', **kwargs)`\n# * `start` starting point of the arrow.\n# * `end` end point of the arrow with arrowhead.\n# * `stroke_width` width of the arrow line.\n# * `stroke` color of the arrow.\n# :::\n\ndef arrow(start, end, stroke_width=0.1, stroke='black', **kwargs):\n start, end = np.array(start), np.array(end)\n Δx = 3\n my_arrow = draw.Marker(-1+Δx/4, -0.5, Δx/4, 0.5, scale=4, orient='auto')\n my_arrow.append(draw.Lines(-1+Δx/4, -0.5, -1+Δx/4, 0.5, Δx/4, 0, close=True, fill=stroke))\n p = draw.Path(stroke=stroke, stroke_width=stroke_width, fill='none',\n marker_end=my_arrow, **kwargs)\n t = 1 - stroke_width*Δx/np.linalg.norm(end-start)\n return p.M(*start).L(*(t*(end-start)+start))\n\n\n# ### Index\n#\n# Object which functions as the index of an atomic site. We overload the multiplication operator to allow for shorthand of $ia_1+ja_2$. 
\n#\n# ::::{dropdown} `Index`\n# * :::{dropdown} `__init__(i, j)`\n# * `i` first index.\n# * `j` second index.\n# :::\n# * :::{dropdown} `__mul__(other)`\n# * `other` a pair of two objects which we want to add with some integer weight, usually [$a_1$, $a_2$].\n# :::\n# ::::\n\nclass Index:\n \n\n def __init__(self, i, j):\n self.i = i\n self.j = j\n \n def __mul__(self, other):\n return self.i * other[0] + self.j * other[1]\n \n def __rmul__(self, other):\n return self * other\n\n\n# ## LatticeAtom\n#\n# We introduce a class which keeps track of the properties of individual atoms in the unit cell with methods:\n#\n# ::::{dropdown} `LatticeAtom`\n# * :::{dropdown} `__init__(position_in_unit_cell, name=None, atom_color='blue', atom_radius=None)`\n# * `position_in_unit_cell` the location of the atom in the unit cell.\n# * `name` letter e.g. C for carbon indicating the type of atom, if None no name included.\n# * `atom_color` color to draw atom in.\n# * `atom_radius` size of atom to plot, if None no bonds are plotted.\n# :::\n# * :::{dropdown} `draw_bonds(displacement, θ, **kwargs)`\n# draws atomic bonds around the atom.\n# * `displacement` translation with which to draw the bonds, usually a multiple of the lattice vectors.\n# * `θ` angle with which the lattice is rotated.\n# :::\n# * :::{dropdown} `draw_atom(displacement, θ, **kwargs)`\n# draws the atom.\n# * `displacement` translation with which to draw the atom, usually a multiple of the lattice vectors.\n# * `θ` angle with which the lattice is rotated.\n# :::\n# ::::\n\nclass LatticeAtom:\n \n \n def __init__(self, position_in_unit_cell, name=None, atom_color='blue', atom_radius=None): \n self.position = np.array(position_in_unit_cell)\n self.name = name\n self.atom_color = atom_color\n self.atom_radius = atom_radius\n self.bonds = []\n self.bond_style = [] #\n \n def draw_bonds(self, displacement, θ, **kwargs):\n group = draw.Group()\n origin = rot_mat(θ) @ (displacement + self.position)\n for bond in self.bonds:\n destination = rot_mat(θ) @ (displacement+bond)\n group.append(draw.Line(*origin, *destination, stroke='black', stroke_width=0.01, **kwargs))\n return group\n \n def draw_atom(self, displacement, θ, **kwargs):\n group = draw.Group()\n origin = rot_mat(θ) @ (displacement + self.position)\n gradient = draw.RadialGradient(*origin, self.atom_radius)\n gradient.addStop(0, 'white', 1)\n gradient.addStop(1, self.atom_color, 1)\n group.append(draw.Circle(*origin, self.atom_radius, stroke='black', stroke_width=0.01, fill=gradient, **kwargs))\n if self.name != None:\n group.append(draw.Text(self.name, self.atom_radius, *origin, text_anchor='middle', alignment_baseline=\"central\"))\n return group\n\n\n# ## Lattice\n#\n# We introduce a class which collects all LatticeAtom objects in the unit cell and expands the lattice to fit into a diagram of size $W\\times H$. 
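Before moving on to the Lattice container, here is the Index shorthand defined above in action (the lattice vectors are illustrative):

import numpy as np

a_1, a_2 = np.array([1.0, 0.0]), np.array([0.5, np.sqrt(3) / 2])
idx = Index(2, 1)
print(idx * [a_1, a_2])   # 2*a_1 + 1*a_2 = [2.5 0.8660254]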
\n#\n# ::::{dropdown} `Lattice`\n# * :::{dropdown} `__init__(a_1, a_2, W, H, θ=0)`\n# * `a_1` first lattice vector of 2d lattice.\n# * `a_2` second lattice vector of 2d lattice.\n# * `W` desired width of diagram.\n# * `H` desired height of diagram.\n# * `θ` rotation angle of lattice.\n# :::\n# * :::{dropdown} `add_atom(atom)`\n# * `atom` LatticeAtom object to be added to the unit cell.\n# :::\n# * :::{dropdown} `in_lattice(i, j, atom)`\n# checks if the position of `atom` plus $ia_1+ja_2$ lies within desired frame of the diagram.\n# * `i` first index.\n# * `j` second index.\n# * `atom` `LatticeAtom` object to be tested.\n# :::\n# * :::{dropdown} `NN(atom_1, atom_2, bond_list, **kwargs)`\n# designate two ```{python}LatticeAtom``` objects as nearest neighbors\n# * `atom_1` first `LatticeAtom` object.\n# * `atom_2` second `LatticeAtom` object.\n# :::\n# * :::{dropdown} `draw_lattice()`\n# returns svg group with all atoms and bonds in diagram frame. \n# :::\n# * :::{dropdown} `draw_lattice_vectors(vec_symbols=['a₁', 'a₂'], origin=(0, 0), centralize=True, stroke_width=0.1, color='black', **kwargs)`\n# returns svg group containing the two lattice vectors.\n# * `vec_symbols` list of two labels for the lattice vectors.\n# * `origin` coordinate from where to draw the lattice vectors.\n# * `centralize` if `True` translate the origin by $(a_1+a_2)/2$.\n# * `vec_symbols` list of two labels for the lattice vectors.\n# * `vec_symbols` list of two labels for the lattice vectors.\n# :::\n# * :::{dropdown} `draw_unit_cell(origin=(0, 0), **kwargs)`\n# returns svg group containing dashed lines showing the unit cells.\n# * `origin` coordinate from where to start the unit cell.\n# ::: \n# ::::\n\nclass Lattice:\n \n unit_cell_NN = [Index(i, j) for i in [-1, 0, 1] for j in [-1, 0, 1]]\n \n def __init__(self, a_1, a_2, W, H, θ=0):\n self.a = [np.array(a_1), np.array(a_2)]\n self.dim = len(a_1)\n self.W = W\n self.H = H\n self.unit_cell = []\n self.grid = []\n self.θ = -θ\n \n def add_atom(self, atom): \n N_1, N_2 = [1+min(int(self.W/np.abs(a[0]+0.00001)), int(self.H/np.abs(a[1]+0.00001))) for a in [rot_mat(self.θ, self.dim)@a for a in self.a]]\n self.unit_cell.append(atom)\n if atom.atom_radius == None:\n atom.atom_radius = min([np.linalg.norm(a) for a in self.a]) / min(N_1, N_2) / 5 \n self.grid.append([Index(i, j) for i in range(-N_1, N_1+1) for j in range(-N_2, N_2+1) if self.in_lattice(i, j, atom)])\n \n def in_lattice(self, i, j, atom):\n origin = np.abs(rot_mat(self.θ, self.dim) @ (atom.position+Index(i, j)*self.a))\n return np.all((origin-atom.atom_radius)[:2] < [self.W/2, self.H/2])\n \n def NN(self, atom_1, atom_2, bond_list, **kwargs):\n #atom_1.bond_style.append(kwargs)\n for bond in bond_list:\n atom_1.bonds.append(atom_2.position+Index(*bond)*self.a)\n if atom_1 != atom_2:\n #atom_2.bond_style.append(kwargs)\n #atom_2.bonds.append(-atom_1.bonds[-1])\n pass\n \n def draw_lattice(self):\n group = draw.Group()\n for i, atom in enumerate(self.unit_cell):\n for grid_point in self.grid[i]:\n group.append(atom.draw_bonds(grid_point*self.a, self.θ), z=0) \n group.append(atom.draw_atom(grid_point*self.a, self.θ), z=1)\n return group\n \n def draw_lattice_vectors(self, vec_symbols=['a₁', 'a₂'], origin=(0, 0), centralize=True, stroke_width=0.1, color='black',\n **kwargs):\n rot = rot_mat(self.θ)\n group = draw.Group()\n if centralize:\n origin += sum(self.a) / 2\n group.append(arrow(rot@origin, rot@(origin+self.a[0]), stroke_width=stroke_width, stroke=color, **kwargs))\n group.append(arrow(rot@origin, 
rot@(origin+self.a[1]), stroke_width=stroke_width, stroke=color, **kwargs))\n group.append(draw.Text(vec_symbols[0], stroke_width*10, *(rot@(origin+self.a[0])), fill=color))\n group.append(draw.Text(vec_symbols[1], stroke_width*10, *(rot@(origin+self.a[1])), fill=color))\n return group\n \n def draw_unit_cell(self, origin=(0, 0), **kwargs):\n rot = rot_mat(self.θ)\n group = draw.Group()\n for i in range(2):\n N = int(np.sqrt(self.H**2+self.W**2)/np.linalg.norm(self.a[1-i])) + 1\n for j in range(-N, N+1):\n vector = np.sqrt(self.H**2+self.W**2) * self.a[i]/np.linalg.norm(self.a[i])\n group.append(draw.Line(*(rot@(origin-vector+j*self.a[1-i])), *(rot@(origin+vector+j*self.a[1-i])), **kwargs))\n return group\n\n\n# ## Lattice3d\n\n# A child class of `Lattice` which supports 3d plotting to generate a plotly lattice.\n# ::::{dropdown} `Lattice3d`\n# * :::{dropdown} `gen_NN(R)`\n# generates a list of nearest neighbors for each atom in the unit cell within some radius $R$.\n# * `R` radius within which an atom is considered a nearest neighbor.\n# :::\n# * :::{dropdown} `draw_3d(fig, origin=(0, 0, 0))`\n# adds a lattice to a plotly figure.\n# * `fig` 3d plotly figure to which we wish to add a lattice.\n# * `origin` origin of the lattice.\n# :::\n# ::::\n\nclass Lattice3d(Lattice):\n \n def gen_NN(self, R):\n try:\n len(R)\n except:\n R = np.ones(len(self.unit_cell))*R\n for i in range(len(self.unit_cell)):\n for j in range(i, len(self.unit_cell)):\n for index in self.unit_cell_NN:\n ΔR = self.unit_cell[j].position+self.a*index-self.unit_cell[i].position\n if 10**-6 < np.linalg.norm(ΔR) < R[i]:\n self.unit_cell[i].bonds.append(self.unit_cell[j].position+self.a*index)\n if i != j:\n self.unit_cell[j].bonds.append(self.unit_cell[i].position-self.a*index)\n \n def draw_3d(self, fig, origin=(0, 0, 0)):\n for i, atom in enumerate(self.unit_cell):\n marker=dict(color=atom.atom_color)\n x, y, z = np.array([self.a*index+atom.position+origin for index in self.grid[i]]).T\n for bond in atom.bonds:\n for j in range(len(x)):\n x_2, y_2, z_2 = self.a*self.grid[i][j]+bond+origin\n fig.add_trace(go.Scatter3d(x=[x[j], x_2], y=[y[j], y_2], z=[z[j], z_2], \n mode='lines', marker=dict(color='black'), showlegend=False,\n name=atom.name))\n fig.add_trace(go.Scatter3d(x=x, y=y, z=z, mode='markers', marker=marker, showlegend=False, name=''))\n\n\n# ## Orbital\n#\n# A class to draw orbital diagrams.\n# ::::{dropdown} `Orbital`\n# * :::{dropdown} `lobe(color, rotate=0, translate=(0, 0), stroke=\"black\", **kwargs)`\n# a base lobe with which to construct more complex orbitals.\n# * `color`: color of the lobe.\n# * `rotate`: angle with which to rotate the lobe.\n# * `translate`: vector with which to translate the lobe.\n# * `stroke`: color of the border of lobe.\n# :::\n# * :::{dropdown} `circle(color, rotate=0, translate=(0, 0), stroke=\"black\", ellipse=False, **kwargs)`\n# a base circle with which to construct more complex orbitals.\n# * `color` color of the circle.\n# * `rotate` angle with which to rotate the circle.\n# * `translate` vector with which to translate the circle.\n# * `stroke` color of the border of circle.\n# * `ellipse` if * `True` the circle is deformed to an ellipse.\n# :::\n# * :::{dropdown} `d_xy(translate=(0, 0), rotate=0, neg_color=\"dodgerblue\", pos_color=\"red\", **kwargs)`\n# * `translate` vector with which to translate the orbital.\n# * `rotate` angle with which to rotate the orbital.\n# * `neg_color` color of the negative parts of the orbital. 
\n# * `pos_color` color of the positive parts of the orbital.\n# :::\n# * :::{dropdown} `d_z2(translate=(0, 0), rotate=0, neg_color=\"dodgerblue\", pos_color=\"red\", **kwargs)`\n# * `translate` vector with which to translate the orbital.\n# * `rotate` angle with which to rotate the orbital.\n# * `neg_color` color of the negative parts of the orbital. \n# * `pos_color` color of the positive parts of the orbital.\n# :::\n# * :::{dropdown} `d_x2y2(translate=(0, 0), rotate=0, neg_color=\"dodgerblue\", pos_color=\"red\", **kwargs)`\n# * `translate` vector with which to translate the orbital.\n# * `rotate` angle with which to rotate the orbital.\n# * `neg_color` color of the negative parts of the orbital. \n# * `pos_color` color of the positive parts of the orbital.\n# :::\n# ::::\n\nclass Orbital: \n \n \n def lobe(self, color, rotate=0, translate=(0, 0), stroke=\"black\", **kwargs):\n gradient = draw.RadialGradient(0, 1, 0.5)\n gradient.addStop(0, 'white', 0.7)\n gradient.addStop(np.sqrt(3), color, 0.7)\n transform = \"translate(\" + \" \".join([str(i) for i in translate]) + \")\\nrotate(\" + str(rotate) + \" 0 0)\"\n my_path = \"M 0,0 C \" + str(-np.sqrt(3)) + \",-2 \" + str(np.sqrt(3)) +\",-2 0,0 z\"\n return draw.Path(d=my_path, stroke=stroke, stroke_width=0.01, fill=gradient, transform=transform, **kwargs)\n \n def circle(self, color, rotate=0, translate=(0, 0), stroke=\"black\", ellipse=False, **kwargs):\n gradient = draw.RadialGradient(0, 0, 0.5)\n gradient.addStop(0, 'white', 0.7)\n gradient.addStop(np.sqrt(3), color, 0.7)\n transform = \"rotate(\" + str(rotate) + \" 0 0)\\ntranslate(\" + \" \".join([str(i) for i in translate]) + \")\"\n if ellipse:\n clip = draw.ClipPath()\n clip.append(draw.Ellipse(0, 0, 0.5, 0.125, transform=transform))\n return draw.Ellipse(0, 0, 1, 0.25, stroke=stroke, stroke_width=0.01, fill=gradient, transform=transform, **kwargs) \n else:\n return draw.Circle(0, 0, 0.5, stroke=stroke, stroke_width=0.01, fill=gradient, transform=transform, **kwargs)\n \n def d_xy(self, translate=(0, 0), rotate=0, neg_color=\"dodgerblue\", pos_color=\"red\",\n **kwargs):\n group = draw.Group(**kwargs)\n group.append(self.lobe(neg_color, rotate=85+rotate, translate=translate))\n group.append(self.lobe(pos_color, rotate=95+rotate, translate=translate))\n group.append(self.lobe(pos_color, rotate=275+rotate, translate=translate))\n group.append(self.lobe(neg_color, rotate=265+rotate, translate=translate))\n return group\n \n def d_z2(self, translate=(0, 0), rotate=0, neg_color=\"dodgerblue\", pos_color=\"red\",\n **kwargs):\n group = draw.Group(**kwargs)\n group.append(self.lobe(neg_color, rotate=180+rotate, translate=translate))\n group.append(self.circle(pos_color, ellipse=True, rotate=rotate, translate=translate))\n group.append(self.lobe(neg_color, rotate=rotate, translate=translate))\n return group\n \n def d_x2y2(self, translate=(0, 0), rotate=0, neg_color=\"dodgerblue\", pos_color=\"red\",\n **kwargs):\n group = draw.Group(**kwargs)\n group.append(self.lobe(neg_color, rotate=180+rotate, translate=translate))\n group.append(self.lobe(neg_color, rotate=rotate, translate=translate))\n group.append(self.lobe(pos_color, rotate=90+rotate, translate=translate))\n group.append(self.lobe(pos_color, rotate=270+rotate, translate=translate))\n return 
group\n\n\n","repo_name":"Hidde-Dijkstra/Hidde-Dijkstra.github.io","sub_path":"archive/code/moire/diagram_gen.py","file_name":"diagram_gen.py","file_ext":"py","file_size_in_byte":16932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35587751823","text":"import json\nfrom unittest.mock import patch, mock_open, MagicMock\n\nfrom io import StringIO\nfrom pytest import fixture\n\nfrom tests.fixtures import generic_app\nfrom vistas.core import preferences\n\ngeneric_app # Make the fixture import look used to IDEs\n\n@fixture(scope='function')\ndef preferences_cls():\n yield preferences.Preferences\n preferences.Preferences._app_preferences = None\n\n\n@patch('os.path.exists')\ndef test_singleton_makedirs(mock_exists, preferences_cls, generic_app):\n def run_test():\n with patch('{}.open'.format(preferences.__name__), mock_open(read_data='{}')):\n with patch('os.makedirs') as mock_makedirs:\n mock_exists.return_value = True\n preferences_cls.app()\n assert mock_makedirs.called is False\n preferences.Preferences._app_preferences = None\n\n with patch('os.makedirs') as mock_makedirs:\n mock_exists.return_value = False\n preferences_cls.app()\n assert mock_makedirs.called\n preferences.Preferences._app_preferences = None\n\n generic_app(run_test)\n\n\n@patch('os.path.exists', MagicMock(return_value=True))\ndef test_singleton_cache(preferences_cls, generic_app):\n def run_test():\n with patch('{}.open'.format(preferences.__name__), mock_open(read_data='{}')):\n prefs = preferences_cls.app()\n assert preferences_cls.app() == prefs\n\n generic_app(run_test)\n\n\n@patch('os.path.exists', MagicMock(return_value=False))\ndef test_save():\n with patch('{}.open'.format(preferences.__name__), mock_open(read_data='{}')) as open_mock:\n open_mock.return_value = StringIO()\n open_mock().close = MagicMock()\n\n prefs = preferences.Preferences('prefs.json')\n prefs['key'] = 'value'\n\n assert open_mock().getvalue() == json.dumps({'key': 'value'})\n\n\n@patch('os.path.exists', MagicMock(return_value=True))\ndef test_load():\n with patch('{}.open'.format(preferences.__name__), mock_open(read_data=json.dumps({'key': 'value'}))):\n prefs = preferences.Preferences('prefs.json')\n assert prefs['key'] == 'value'\n","repo_name":"VISTAS-IVES/pyvistas","sub_path":"source/tests/core/test_preferences.py","file_name":"test_preferences.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3557407972","text":"from .directory_object_py3 import DirectoryObject\n\n\nclass ServicePrincipal(DirectoryObject):\n \"\"\"Active Directory service principal information.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :param additional_properties: Unmatched properties from the message are\n deserialized this collection\n :type additional_properties: dict[str, object]\n :ivar object_id: The object ID.\n :vartype object_id: str\n :ivar deletion_timestamp: The time at which the directory object was\n deleted.\n :vartype deletion_timestamp: datetime\n :param object_type: Required. 
Constant filled by server.\n :type object_type: str\n :param account_enabled: whether or not the service principal account is\n enabled\n :type account_enabled: bool\n :param alternative_names: alternative names\n :type alternative_names: list[str]\n :ivar app_display_name: The display name exposed by the associated\n application.\n :vartype app_display_name: str\n :param app_id: The application ID.\n :type app_id: str\n :ivar app_owner_tenant_id:\n :vartype app_owner_tenant_id: str\n :param app_role_assignment_required: Specifies whether an\n AppRoleAssignment to a user or group is required before Azure AD will\n issue a user or access token to the application.\n :type app_role_assignment_required: bool\n :param app_roles: The collection of application roles that an application\n may declare. These roles can be assigned to users, groups or service\n principals.\n :type app_roles: list[~azure.graphrbac.models.AppRole]\n :param display_name: The display name of the service principal.\n :type display_name: str\n :param error_url: A URL provided by the author of the associated\n application to report errors when using the application.\n :type error_url: str\n :param homepage: The URL to the homepage of the associated application.\n :type homepage: str\n :param key_credentials: The collection of key credentials associated with\n the service principal.\n :type key_credentials: list[~azure.graphrbac.models.KeyCredential]\n :param logout_url: A URL provided by the author of the associated\n application to logout\n :type logout_url: str\n :ivar oauth2_permissions: The OAuth 2.0 permissions exposed by the\n associated application.\n :vartype oauth2_permissions:\n list[~azure.graphrbac.models.OAuth2Permission]\n :param password_credentials: The collection of password credentials\n associated with the service principal.\n :type password_credentials:\n list[~azure.graphrbac.models.PasswordCredential]\n :param preferred_token_signing_key_thumbprint: The thumbprint of preferred\n certificate to sign the token\n :type preferred_token_signing_key_thumbprint: str\n :param publisher_name: The publisher's name of the associated application\n :type publisher_name: str\n :param reply_urls: The URLs that user tokens are sent to for sign in with\n the associated application. The redirect URIs that the oAuth 2.0\n authorization code and access tokens are sent to for the associated\n application.\n :type reply_urls: list[str]\n :param saml_metadata_url: The URL to the SAML metadata of the associated\n application\n :type saml_metadata_url: str\n :param service_principal_names: A collection of service principal names.\n :type service_principal_names: list[str]\n :param service_principal_type: the type of the service principal\n :type service_principal_type: str\n :param tags: Optional list of tags that you can apply to your service\n principals. 
Not nullable.\n :type tags: list[str]\n \"\"\"\n\n _validation = {\n 'object_id': {'readonly': True},\n 'deletion_timestamp': {'readonly': True},\n 'object_type': {'required': True},\n 'app_display_name': {'readonly': True},\n 'app_owner_tenant_id': {'readonly': True},\n 'oauth2_permissions': {'readonly': True},\n }\n\n _attribute_map = {\n 'additional_properties': {'key': '', 'type': '{object}'},\n 'object_id': {'key': 'objectId', 'type': 'str'},\n 'deletion_timestamp': {'key': 'deletionTimestamp', 'type': 'iso-8601'},\n 'object_type': {'key': 'objectType', 'type': 'str'},\n 'account_enabled': {'key': 'accountEnabled', 'type': 'bool'},\n 'alternative_names': {'key': 'alternativeNames', 'type': '[str]'},\n 'app_display_name': {'key': 'appDisplayName', 'type': 'str'},\n 'app_id': {'key': 'appId', 'type': 'str'},\n 'app_owner_tenant_id': {'key': 'appOwnerTenantId', 'type': 'str'},\n 'app_role_assignment_required': {'key': 'appRoleAssignmentRequired', 'type': 'bool'},\n 'app_roles': {'key': 'appRoles', 'type': '[AppRole]'},\n 'display_name': {'key': 'displayName', 'type': 'str'},\n 'error_url': {'key': 'errorUrl', 'type': 'str'},\n 'homepage': {'key': 'homepage', 'type': 'str'},\n 'key_credentials': {'key': 'keyCredentials', 'type': '[KeyCredential]'},\n 'logout_url': {'key': 'logoutUrl', 'type': 'str'},\n 'oauth2_permissions': {'key': 'oauth2Permissions', 'type': '[OAuth2Permission]'},\n 'password_credentials': {'key': 'passwordCredentials', 'type': '[PasswordCredential]'},\n 'preferred_token_signing_key_thumbprint': {'key': 'preferredTokenSigningKeyThumbprint', 'type': 'str'},\n 'publisher_name': {'key': 'publisherName', 'type': 'str'},\n 'reply_urls': {'key': 'replyUrls', 'type': '[str]'},\n 'saml_metadata_url': {'key': 'samlMetadataUrl', 'type': 'str'},\n 'service_principal_names': {'key': 'servicePrincipalNames', 'type': '[str]'},\n 'service_principal_type': {'key': 'servicePrincipalType', 'type': 'str'},\n 'tags': {'key': 'tags', 'type': '[str]'},\n }\n\n def __init__(self, *, additional_properties=None, account_enabled: bool=None, alternative_names=None, app_id: str=None, app_role_assignment_required: bool=None, app_roles=None, display_name: str=None, error_url: str=None, homepage: str=None, key_credentials=None, logout_url: str=None, password_credentials=None, preferred_token_signing_key_thumbprint: str=None, publisher_name: str=None, reply_urls=None, saml_metadata_url: str=None, service_principal_names=None, service_principal_type: str=None, tags=None, **kwargs) -> None:\n super(ServicePrincipal, self).__init__(additional_properties=additional_properties, **kwargs)\n self.account_enabled = account_enabled\n self.alternative_names = alternative_names\n self.app_display_name = None\n self.app_id = app_id\n self.app_owner_tenant_id = None\n self.app_role_assignment_required = app_role_assignment_required\n self.app_roles = app_roles\n self.display_name = display_name\n self.error_url = error_url\n self.homepage = homepage\n self.key_credentials = key_credentials\n self.logout_url = logout_url\n self.oauth2_permissions = None\n self.password_credentials = password_credentials\n self.preferred_token_signing_key_thumbprint = preferred_token_signing_key_thumbprint\n self.publisher_name = publisher_name\n self.reply_urls = reply_urls\n self.saml_metadata_url = saml_metadata_url\n self.service_principal_names = service_principal_names\n self.service_principal_type = service_principal_type\n self.tags = tags\n self.object_type = 
'ServicePrincipal'\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/service_principal_py3.py","file_name":"service_principal_py3.py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"38520239182","text":"class ErrorMessage:\n def __init__(self, message_type=1):\n if message_type == 1:\n self.message = \"The operation cannot be performed.\"\n elif message_type == 2:\n self.message = \"This matrix doesn't have an inverse.\"\n\n def print(self):\n return print(self.message)\n\n\nclass Matrix:\n def __init__(self, values=None, nrows=0, ncols=0, size_message=\"Enter matrix size\", value_message=\"matrix\"):\n if values is None:\n values = []\n self.values = values\n self.nrows = nrows\n self.ncols = ncols\n if not values:\n self.get_matrix_size(size_message)\n self.get_matrix_values(value_message)\n elif nrows == ncols == 0:\n self.nrows = len(values)\n self.ncols = len(values[0])\n\n def get_matrix_size(self, matrix_message=\"Enter size of matrix\"):\n self.nrows, self.ncols = [int(x) if x.isdigit() else float(x) for x in input(matrix_message + \": \").split()]\n\n def get_matrix_values(self, matrix_message=\"matrix\"):\n print(\"Enter \" + matrix_message + \":\")\n self.values = [[int(x) if x.isdigit() else float(x) for x in input().split()] for _ in range(self.nrows)]\n\n def print(self):\n print(\"The result is: \")\n for row in self.values:\n print(*row)\n\n @staticmethod\n def sum_matrices(matrix1, matrix2):\n if matrix1.nrows == matrix2.nrows and matrix1.ncols == matrix2.ncols:\n values = [[a + b for a, b in zip(x, y)] for x, y in zip(matrix1.values, matrix2.values)]\n return Matrix(values)\n else:\n return ErrorMessage()\n\n @staticmethod\n def matrix_by_matrix(matrix1, matrix2):\n if matrix1.ncols == matrix2.nrows:\n values = [[sum([row[i] * matrix2.values[i][j] for i in range(matrix2.nrows)]) for j in range(matrix2.ncols)]\n for row in matrix1.values]\n return Matrix(values)\n else:\n return ErrorMessage()\n\n @staticmethod\n def matrix_by_constant(matrix, constant):\n return Matrix([[constant * x for x in row] for row in matrix.values])\n\n @staticmethod\n def diagonal_transpose(matrix, type_val=1):\n if type_val == 1:\n return Matrix([[matrix.values[j][i] for j in range(matrix.nrows)] for i in range(matrix.ncols)])\n else:\n return Matrix([[matrix.values[j][i] for j in range(matrix.nrows)[::-1]] for i in range(matrix.ncols)[::-1]])\n\n @staticmethod\n def line_transpose(matrix, type_val=3):\n if type_val == 3:\n return Matrix([row[::-1] for row in matrix.values])\n else:\n return Matrix([row for row in matrix.values[::-1]])\n\n @staticmethod\n def det(matrix):\n if matrix.nrows == matrix.ncols == 1:\n return matrix.values[0][0]\n if matrix.nrows == matrix.ncols == 2:\n return matrix.values[0][0] * matrix.values[1][1] - matrix.values[0][1] * matrix.values[1][0]\n out_det = sum([x * ((-1) ** (2 + i)) * Matrix.det(Matrix.minor(matrix, 0, i)) for i, x in enumerate(matrix.values[0])])\n return out_det\n\n @staticmethod\n def minor(matrix, row_index, col_index):\n return Matrix([row[:col_index] + row[col_index + 1:] for row in (matrix.values[:row_index] + matrix.values[row_index + 1:])])\n\n @staticmethod\n def adjoint(matrix):\n cof_values = list()\n for i in range(matrix.nrows):\n local_row = list()\n for j in range(matrix.ncols):\n minor = Matrix.minor(matrix, i, j)\n cof = ((-1) ** (2 + i + j)) * Matrix.det(minor)\n 
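# the +2 in the exponent is harmless: (-1) ** (2 + i + j) equals (-1) ** (i + j), the usual cofactor sign\n                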
local_row.append(cof)\n cof_values.append(local_row)\n return Matrix.diagonal_transpose(Matrix(cof_values), 1)\n\n @staticmethod\n def inverse(matrix):\n det_value = Matrix.det(matrix)\n if det_value == 0:\n return ErrorMessage(2)\n return Matrix.matrix_by_constant(Matrix.adjoint(matrix), 1 / det_value)\n\n\ndef input_constant():\n const = input(\"Enter constant: \")\n return int(const) if const.isdigit() else float(const)\n\n\ndef add_matrices():\n Matrix.sum_matrices(*input_matrices()).print()\n\n\ndef multiply_matrices():\n Matrix.matrix_by_matrix(*input_matrices()).print()\n\n\ndef input_matrices():\n matrix1 = Matrix(size_message=\"Enter size of first matrix\", value_message=\"first matrix\")\n matrix2 = Matrix(size_message=\"Enter size of second matrix\", value_message=\"second matrix\")\n return matrix1, matrix2\n\n\ndef input_matrix(message_type=0):\n if message_type == 0:\n return Matrix(size_message=\"Enter size of matrix\",\n value_message=\"matrix\")\n return Matrix(size_message=\"Enter matrix size\",\n value_message=\"matrix\")\n\n\ndef matrix_by_constant():\n Matrix.matrix_by_constant(input_matrix(), input_constant()).print()\n\n\ndef transpose_matrix():\n transpose_type = select_transpose_type()\n if transpose_type in range(1, 5):\n matrix = input_matrix(1)\n if transpose_type <= 2:\n Matrix.diagonal_transpose(matrix, transpose_type).print()\n else:\n Matrix.line_transpose(matrix, transpose_type).print()\n\n\ndef calculate_determinant():\n matrix = input_matrix(1)\n print(\"The result is:\")\n print(Matrix.det(matrix))\n\n\ndef inverse_matrix():\n Matrix.inverse(input_matrix(1)).print()\n\n\ndef perform_operation(menu_option):\n operations = [add_matrices, matrix_by_constant, multiply_matrices,\n transpose_matrix, calculate_determinant, inverse_matrix]\n if menu_option in range(len(operations) + 1):\n operations[menu_option - 1]()\n print(\"\\n\")\n\n\ndef select_from_menu():\n print(\"1. Add matrices\")\n print(\"2. Multiply matrix by constant\")\n print(\"3. Multiply matrices\")\n print(\"4. Transpose matrix\")\n print(\"5. Calculate a determinant\")\n print(\"6. Inverse matrix\")\n print(\"0. Exit\")\n return int(input(\"Your choice: \"))\n\n\ndef select_transpose_type():\n print(\"\")\n print(\"1. Main diagonal\")\n print(\"2. Side diagonal\")\n print(\"3. Vertical line\")\n print(\"4. 
Horizontal line\")\n    return int(input(\"Your choice: \"))\n\n\ndef main():\n    while True:\n        option = select_from_menu()\n        if option == 0:\n            break\n        perform_operation(option)\n\n\nmain()\n","repo_name":"alcala21/numeric_matrix_processor","sub_path":"processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":6333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"35887146130","text":"import cv2\nfrom matplotlib.animation import ArtistAnimation\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nfrom os.path import join\nimport pandas as pd\nimport time\nimport plotly.graph_objects as go\nfrom IPython.core.display import display, HTML, Image\n\n\nclass Timer:\n    def __init__(self, msg='Time elapsed'):\n        self.msg = msg\n    def __enter__(self):\n        self.start = time.time()\n        return self\n    def __exit__(self, *args):\n        self.end = time.time()\n        duration = self.end - self.start\n        print(f'{self.msg}: {duration:.2f}s')\n\n\nclass Event:\n    __slots__ = 't', 'x', 'y', 'p'\n    def __init__(self, t, x, y, p):\n        self.t = t\n        self.x = x\n        self.y = y\n        self.p = p\n    def __repr__(self):\n        return f'Event(t={self.t:.3f}, x={self.x}, y={self.y}, p={self.p})'\n\n\ndef normalize_image(image, percentile_lower=1, percentile_upper=99):\n    mini, maxi = np.percentile(image, (percentile_lower, percentile_upper))\n    if mini == maxi:\n        return 0 * image + 0.5  # gray image\n    return np.clip((image - mini) / (maxi - mini + 1e-5), 0, 1)\n\n\nclass EventData:\n    def __init__(self, event_list, width, height):\n        self.event_list = event_list\n        self.width = width\n        self.height = height\n\n    def add_frame_data(self, data_folder, max_frames=100):\n        timestamps = np.genfromtxt(join(data_folder, 'image_timestamps.txt'), max_rows=int(max_frames))\n        frames = []\n        frame_timestamps = []\n        with open(join(data_folder, 'image_timestamps.txt')) as f:\n            for line in f:\n                fname, timestamp = line.split(' ')\n                timestamp = float(timestamp)\n                frame = cv2.imread(join(data_folder, fname), cv2.IMREAD_GRAYSCALE)\n                if not (frame.shape[0] == self.height and frame.shape[1] == self.width):\n                    continue\n                frames.append(frame)\n                frame_timestamps.append(timestamp)\n                if timestamp >= self.event_list[-1].t:\n                    break\n        frame_stack = normalize_image(np.stack(frames, axis=0))\n        self.frames = [f for f in frame_stack]\n        self.frame_timestamps = frame_timestamps\n\n\ndef animate(images, fig_title=''):\n    fig = plt.figure(figsize=(0.1, 0.1))  # don't take up room initially\n    fig.suptitle(fig_title)\n    fig.set_size_inches(7.2, 5.4, forward=False)  # resize but don't update gui\n    ims = []\n    for image in images:\n        im = plt.imshow(normalize_image(image), cmap='gray', vmin=0, vmax=1, animated=True)\n        ims.append([im])\n    ani = ArtistAnimation(fig, ims, interval=50, blit=False, repeat_delay=1000)\n    plt.close(ani._fig)\n    return HTML(ani.to_html5_video())\n\n\ndef load_events(path_to_events, n_events=None):\n    print('Loading events...')\n    header = pd.read_csv(path_to_events, delim_whitespace=True, names=['width', 'height'],\n                         dtype={'width': int, 'height': int}, nrows=1)\n    width, height = header.values[0]\n    print(f'width, height: {width}, {height}')\n    event_pd = pd.read_csv(path_to_events, delim_whitespace=True, header=None,\n                           names=['t', 'x', 'y', 'p'],\n                           dtype={'t': np.float64, 'x': np.int16, 'y': np.int16, 'p': np.int8},\n                           engine='c', skiprows=1, nrows=n_events, memory_map=True)\n    event_list = []\n    for event in event_pd.values:\n        t, x, y, p = event\n        
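# event_pd.values upcasts every column to float64, so x and y are cast back to int and p is mapped to -1/+1\n        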
event_list.append(Event(t, int(x), int(y), -1 if p < 0.5 else 1))\n print('Loaded {:.2f}M events'.format(len(event_list) / 1e6))\n return EventData(event_list, width, height)\n\ndef plot_3d(event_data, n_events=-1):\n x, y, t, c = [], [], [], []\n for e in event_data.event_list[:int(n_events)]:\n x.append(e.x)\n y.append(e.y)\n t.append(e.t * 1e3)\n c.append('rgb(255,0,0)' if e.p == 1 else 'rgb(0,0,255)')\n fig = go.Figure(data=[go.Scatter3d(x=t, y=x, z=y, \n mode='markers',\n marker=dict(\n size=2,\n color=c, # set color to an array/list of desired values\n opacity=0.8\n ))])\n\n fig.update_layout(scene = dict(\n xaxis_title='Time (ms)',\n yaxis_title='X',\n zaxis_title='Y'))\n fig.update_yaxes(autorange=\"reversed\")\n\n fig.show()\n\ndef event_slice(event_data, start=0, duration_ms=30):\n events, height, width = event_data.event_list, event_data.height, event_data.width\n mask = np.zeros((height, width), dtype=np.int8)\n start_idx = int(start * (len(events) - 1))\n end_time = events[start_idx].t + duration_ms / 1000.0\n for e in events[start_idx:]:\n mask[e.y, e.x] = e.p\n if e.t >= end_time:\n break\n img_rgb = np.ones((height, width, 3), dtype=np.uint8) * 255\n img_rgb[mask == -1] = (255, 0, 0)\n img_rgb[mask == 1] = (0, 0, 255)\n fig = plt.figure(figsize=(7.2, 5.4))\n plt.imshow(img_rgb)\n","repo_name":"Tobias-Fischer/RVSS","sub_path":"Robotic_Vision/common/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"54"} +{"seq_id":"8192759543","text":"from django.urls import path\nfrom . import views\n\n\napp_name = 'users'\nurlpatterns = [\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"signup\", views.signup_view, name=\"signup\"),\n path(\"userModify\", views.userModify_view, name=\"userModify\"),\n path(\"mypage\", views.mypage_view, name=\"mypage\"),\n path(\"order//\", views.order_view, name=\"order\"),\n # path(\"orderDetail//\", views.orderDetail_view, name=\"orderDetail\"),\n path(\"orderDel//\", views.orderDel_view, name=\"orderDel\"),\n path(\"payinfo//\", views.payinfo_view, name=\"payinfo\"),\n path(\"pay//\", views.pay_view, name=\"pay\"),\n]","repo_name":"cruzey/tyshop01","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20102648400","text":"import time, requests\nfrom contextlib import contextmanager\nfrom datetime import timedelta, datetime\nfrom django.utils.timezone import localtime, now\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.firefox.webdriver import WebDriver\nfrom selenium.webdriver.support import select, expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom appointments.models import Customer\n\n\nclass AddAppointmentTests(StaticLiveServerTestCase):\n HOME_URI = '/appointments/home/'\n LOGIN_URI = '/appointments/login/'\n LOGOUT_URI = '/appointments/logout/'\n APPOINTMENT_URI = '/appointments/add/'\n\n @contextmanager\n def wait_for_page_load(self, timeout=10):\n old_page = self.selenium.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.selenium, 
timeout).until(\n EC.staleness_of(old_page)\n )\n\n @classmethod\n def setUpClass(cls):\n super(AddAppointmentTests, cls).setUpClass()\n cls.selenium = WebDriver()\n\n @classmethod\n def tearDownClass(cls):\n cls.selenium.quit()\n super(AddAppointmentTests, cls).tearDownClass()\n\n def setUp(self):\n customer = Customer.objects.create_user('temp_username', 'temporary@email.com', 'temp_password',\n first_name='My',\n middle_name='First', last_name='Customer')\n customer.save()\n\n self.selenium.get('%s%s' % (self.live_server_url, self.LOGIN_URI))\n\n with self.wait_for_page_load():\n login_button = self.selenium.find_element_by_class_name('btn-primary')\n username_field = self.selenium.find_element_by_id('inputUsername')\n password_field = self.selenium.find_element_by_id('inputPassword')\n\n ActionChains(self.selenium).send_keys_to_element(username_field, 'temp_username').send_keys_to_element(\n password_field, 'temp_password').click(login_button).perform()\n\n with self.wait_for_page_load():\n self.selenium.get('%s%s' % (self.live_server_url, self.APPOINTMENT_URI))\n\n def tearDown(self):\n with self.wait_for_page_load():\n self.selenium.get('%s%s' % (self.live_server_url, self.LOGOUT_URI))\n\n # No Pet and Veterinary Registration Features yet\n def create_test_data(self):\n pet_params = '?name=Doggy&breed=Pug&age=1'\n veterinary_physician_params = '?username=veterinary_physician&first_name=Dr&middle_name=Veterinary&last_name=Physician&email=cs2602015project@gmail.com'\n\n with self.wait_for_page_load():\n self.selenium.get(\n '%s%s%s%s' % (\n self.live_server_url, self.APPOINTMENT_URI, 'create_test_vet/', veterinary_physician_params))\n\n with self.wait_for_page_load():\n self.selenium.get('%s%s%s%s' % (\n self.live_server_url, self.APPOINTMENT_URI, 'create_test_pet/', pet_params))\n\n with self.wait_for_page_load():\n self.selenium.get(\n '%s%s' % (self.live_server_url, self.APPOINTMENT_URI))\n\n def test_add_appointment_page_is_accessible(self):\n # No HTTP Response yet on Selenium WebDriver\n response = requests.get(self.selenium.current_url)\n\n self.assertEqual(response.status_code, 200)\n\n def test_appointment_input_fields_are_present(self):\n try:\n pet_name_field = self.selenium.find_element_by_id('id_pet_name')\n pet_description_field = self.selenium.find_element_by_id('id_pet_description')\n visit_schedule_field = self.selenium.find_element_by_id('id_visit_schedule')\n visit_description_field = self.selenium.find_element_by_id('id_visit_description')\n veterinary_physician_field = self.selenium.find_element_by_id('id_veterinary_physician')\n\n self.assertEqual(pet_name_field.get_attribute('type'), 'select-one')\n self.assertEqual(pet_description_field.get_attribute('type'), 'textarea')\n self.assertEqual(visit_schedule_field.get_attribute('type'), 'text')\n self.assertEqual(visit_description_field.get_attribute('type'), 'textarea')\n self.assertEqual(veterinary_physician_field.get_attribute('type'),\n 'select-one')\n except NoSuchElementException as e:\n self.fail(e)\n\n def test_pet_owner_field_is_prefilled(self):\n self.create_test_data()\n pet_owner_name_field = self.selenium.find_element_by_id('id_pet_owner_name')\n\n self.assertEqual(pet_owner_name_field.get_attribute('value'), 'Customer, My First')\n self.assertTrue(pet_owner_name_field.get_attribute('readonly'))\n\n def test_has_date_and_time_picker_widget(self):\n datetime_picker_icon = self.selenium.find_element_by_class_name('glyphicon-calendar')\n\n 
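# clicking the calendar glyph should pop up the datetimepicker widget that the assertions below check\n        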
ActionChains(self.selenium).click(datetime_picker_icon).perform()\n\n try:\n datetime_picker_widget = WebDriverWait(self.selenium, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'bootstrap-datetimepicker-widget'))\n )\n active_day = datetime_picker_widget.find_element_by_class_name('active')\n\n ActionChains(self.selenium).click(active_day).perform()\n\n selected_datetime = self.selenium.find_element_by_id('id_visit_schedule').get_attribute('value')\n self.assertTrue(datetime_picker_widget.is_displayed())\n self.assertEqual(time.strftime('%m/%d/%Y %I: %p'),\n time.strftime('%m/%d/%Y %I: %p', time.strptime(selected_datetime, '%m/%d/%Y %I:%M %p')))\n except TimeoutException as e:\n self.fail('Unable to Execute Test Properly')\n\n def test_has_navigation_buttons(self):\n try:\n clear_button = self.selenium.find_element_by_id('reset-id-reset')\n submit_button = self.selenium.find_element_by_id('submit-id-submit')\n cancel_button = self.selenium.find_element_by_id('cancel-id-cancel')\n\n self.assertEqual(submit_button.get_attribute('type'), 'submit')\n self.assertEqual(clear_button.get_attribute('type'), 'reset')\n self.assertTrue(cancel_button.get_attribute('href') is not None)\n except NoSuchElementException as e:\n self.fail(e)\n\n def test_submit_button_redirects_to_add_appointment_page(self):\n submit_button = self.selenium.find_element_by_id('submit-id-submit')\n\n ActionChains(self.selenium).click(submit_button).perform()\n\n self.assertEquals(self.selenium.current_url, '%s%s' % (self.live_server_url, self.APPOINTMENT_URI))\n\n def test_clear_button_clears_input(self):\n clear_button = self.selenium.find_element_by_id('reset-id-reset')\n pet_description_field = self.selenium.find_element_by_id('id_pet_description')\n\n ActionChains(self.selenium).send_keys_to_element(pet_description_field, 'Test Text').click(\n clear_button).perform()\n\n self.assertEqual(pet_description_field.get_attribute('value'), '')\n\n # No Homepage capabilities yet; Redirects to Add Appointment Page\n def test_cancel_button_redirects_to_expected_page(self):\n cancel_button = self.selenium.find_element_by_id('cancel-id-cancel')\n\n ActionChains(self.selenium).click(cancel_button).perform()\n\n self.assertEquals(self.selenium.current_url, '%s%s' % (self.live_server_url, self.HOME_URI))\n\n def test_pet_description_field_length_constraint(self):\n sample_text = 'Sample Text' * 50\n submit_button = self.selenium.find_element_by_id('submit-id-submit')\n pet_description_field = self.selenium.find_element_by_id('id_pet_description')\n\n ActionChains(self.selenium).send_keys_to_element(pet_description_field, sample_text).click(\n submit_button).perform()\n\n try:\n new_pet_description_field = WebDriverWait(self.selenium, 10).until(\n EC.presence_of_element_located((By.ID, 'id_pet_description'))\n )\n\n self.assertTrue(len(sample_text) > 500)\n self.assertEqual(len(new_pet_description_field.get_attribute('value')), 500)\n except TimeoutException as e:\n self.fail('Unable to Execute Test Properly')\n\n def test_pet_description_field_format_constraint(self):\n sample_text = 'Text with ; character'\n submit_button = self.selenium.find_element_by_id('submit-id-submit')\n pet_description_field = self.selenium.find_element_by_id('id_pet_description')\n\n ActionChains(self.selenium).send_keys_to_element(pet_description_field, sample_text).click(\n submit_button).perform()\n\n try:\n pet_description_error = WebDriverWait(self.selenium, 10).until(\n EC.presence_of_element_located((By.ID, 
'error_1_id_pet_description'))\n )\n\n self.assertEqual(pet_description_error.find_element_by_tag_name('strong').text,\n 'No Special Characters are allowed in this field')\n except TimeoutException as e:\n self.fail('Unable to Execute Test Properly')\n\n def test_visit_description_field_length_constraint(self):\n sample_text = 'Sample Text' * 50\n submit_button = self.selenium.find_element_by_id('submit-id-submit')\n visit_description_field = self.selenium.find_element_by_id('id_visit_description')\n\n ActionChains(self.selenium).send_keys_to_element(visit_description_field, sample_text).click(\n submit_button).perform()\n\n try:\n new_visit_description_field = WebDriverWait(self.selenium, 10).until(\n EC.presence_of_element_located((By.ID, 'id_visit_description'))\n )\n\n self.assertTrue(len(sample_text) > 500)\n self.assertEqual(len(new_visit_description_field.get_attribute('value')), 500)\n except TimeoutException as e:\n self.fail('Unable to Execute Test Properly')\n\n def test_visit_description_field_format_constraint(self):\n sample_text = 'Text with ; character'\n submit_button = self.selenium.find_element_by_id('submit-id-submit')\n visit_description_field = self.selenium.find_element_by_id('id_visit_description')\n\n ActionChains(self.selenium).send_keys_to_element(visit_description_field, sample_text).click(\n submit_button).perform()\n\n try:\n visit_description_error = WebDriverWait(self.selenium, 10).until(\n EC.presence_of_element_located((By.ID, 'error_1_id_visit_description'))\n )\n\n self.assertEqual(visit_description_error.find_element_by_tag_name('strong').text,\n 'No Special Characters are allowed in this field')\n except TimeoutException as e:\n self.fail('Unable to Execute Test Properly')\n\n def test_scheduled_visit_field_past_time_validation(self):\n date_text = format(datetime.now() - timedelta(hours=1), '%m/%d/%Y %I:%M %p')\n submit_button = self.selenium.find_element_by_id('submit-id-submit')\n visit_schedule_field = self.selenium.find_element_by_name('visit_schedule')\n\n ActionChains(self.selenium).send_keys_to_element(visit_schedule_field, date_text).click(submit_button).perform()\n\n try:\n visit_schedule_error = WebDriverWait(self.selenium, 10).until(\n EC.presence_of_element_located((By.ID, 'error_1_id_visit_schedule'))\n )\n\n self.assertEqual(visit_schedule_error.find_element_by_tag_name('strong').text,\n 'Cannot Schedule an Appointment at this Date and Time')\n except TimeoutException as e:\n self.fail('Unable to Execute Test Properly')\n\n def test_scheduled_visit_field_lead_time_validation(self):\n date_text = format(localtime(now()) + timedelta(hours=1), '%m/%d/%Y %I:%M %p')\n submit_button = self.selenium.find_element_by_id('submit-id-submit')\n visit_schedule_field = self.selenium.find_element_by_name('visit_schedule')\n\n ActionChains(self.selenium).send_keys_to_element(visit_schedule_field, date_text).click(submit_button).perform()\n\n try:\n visit_schedule_error = WebDriverWait(self.selenium, 10).until(\n EC.presence_of_element_located((By.ID, 'error_1_id_visit_schedule'))\n )\n\n self.assertEqual(visit_schedule_error.find_element_by_tag_name('strong').text,\n 'Kindly give use at least 24 hrs. 
lead time to schedule your appointment.')\n except TimeoutException as e:\n self.fail('Unable to Execute Test Properly')\n\n def test_veterinary_physician_field_behavior(self):\n self.create_test_data()\n index = 0\n veterinary_physician_field = self.selenium.find_element_by_id('id_veterinary_physician')\n vet_option_fields = veterinary_physician_field.find_elements_by_tag_name('option')\n\n for i, option in enumerate(vet_option_fields):\n if (not option.get_attribute('selected')):\n index = i\n\n select_box = select.Select(veterinary_physician_field)\n select_box.select_by_value(vet_option_fields[index].get_attribute('value'))\n\n try:\n calendar_widget = WebDriverWait(self.selenium, 20).until(\n EC.visibility_of_element_located((By.ID, 'iframe_calendar'))\n )\n\n self.assertTrue(calendar_widget.get_attribute('src') is not None)\n self.assertTrue('display: none' not in calendar_widget.get_attribute('style'))\n except TimeoutException as e:\n self.fail('Unable to Execute Test Properly')\n\n def test_appointment_page_form_checking(self):\n webDriverWait = WebDriverWait(self.selenium, 10)\n submit_button = self.selenium.find_element_by_id('submit-id-submit')\n\n ActionChains(self.selenium).click(submit_button).perform()\n\n try:\n pet_name_error = webDriverWait.until(\n EC.presence_of_element_located((By.ID, 'error_1_id_pet_name'))\n )\n visit_schedule_error = webDriverWait.until(\n EC.presence_of_element_located((By.ID, 'error_1_id_visit_schedule'))\n )\n visit_description_error = webDriverWait.until(\n EC.presence_of_element_located((By.ID, 'error_1_id_visit_description'))\n )\n veterinary_physician_error = webDriverWait.until(\n EC.presence_of_element_located((By.ID, 'error_1_id_veterinary_physician'))\n )\n\n self.assertEqual(pet_name_error.find_element_by_tag_name('strong').text,\n 'This field is required.')\n self.assertEqual(visit_schedule_error.find_element_by_tag_name('strong').text,\n 'This field is required.')\n self.assertEqual(visit_description_error.find_element_by_tag_name('strong').text,\n 'This field is required.')\n self.assertEqual(veterinary_physician_error.find_element_by_tag_name('strong').text,\n 'This field is required.')\n except TimeoutException as e:\n self.fail('Unable to Execute Test Properly')\n\n def test_successful_appointment_scheduling(self):\n self.create_test_data()\n webDriverWait = WebDriverWait(self.selenium, 10)\n\n try:\n vet_index = 0\n pet_index = 0\n\n submit_button = self.selenium.find_element_by_id('submit-id-submit')\n pet_name_field = self.selenium.find_element_by_id('id_pet_name')\n pet_option_fields = pet_name_field.find_elements_by_tag_name('option')\n pet_description_field = self.selenium.find_element_by_id('id_pet_description')\n visit_schedule_field = self.selenium.find_element_by_id('id_visit_schedule')\n visit_description_field = self.selenium.find_element_by_id('id_visit_description')\n veterinary_physician_field = self.selenium.find_element_by_id('id_veterinary_physician')\n vet_option_fields = veterinary_physician_field.find_elements_by_tag_name('option')\n\n for i, option in enumerate(vet_option_fields):\n if (option.get_attribute('selected') is None):\n vet_index = i\n\n for i, option in enumerate(pet_option_fields):\n if (option.get_attribute('selected') is None):\n pet_index = i\n\n vet_select_box = select.Select(veterinary_physician_field)\n vet_select_box.select_by_value(vet_option_fields[vet_index].get_attribute('value'))\n\n pet_select_box = select.Select(pet_name_field)\n 
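# vet_index and pet_index were set above to the last option without a 'selected' attribute\n            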
pet_select_box.select_by_value(pet_option_fields[pet_index].get_attribute('value'))\n\n            current_datetime = format(datetime.now() + timedelta(hours=25), '%m/%d/%Y %I:%M %p')\n\n            actions = ActionChains(self.selenium)\n            actions.send_keys_to_element(pet_description_field, 'Siberian Husky')\n            actions.send_keys_to_element(visit_schedule_field, current_datetime)\n            actions.send_keys_to_element(visit_description_field, 'Checkup')\n            actions.click(submit_button)\n            actions.perform()\n\n            success_message = webDriverWait.until(\n                EC.presence_of_element_located((By.CLASS_NAME, 'alert-success'))\n            )\n\n            self.assertEqual(success_message.text,\n                             'Your request for an Appointment has been saved. Please wait for the Physician\\'s Confirmation via email.')\n        except TimeoutException as e:\n            self.fail('Unable to Execute Test Properly')\n","repo_name":"asabalon/cs260","sub_path":"functional_tests/tests_add_appointment.py","file_name":"tests_add_appointment.py","file_ext":"py","file_size_in_byte":17964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"25855958526","text":"from telebot.types import InlineKeyboardButton, InlineKeyboardMarkup\n\n\ndef request_gender():\n    \"\"\" Inline buttons for asking the user's gender \"\"\"\n    markup = InlineKeyboardMarkup()\n    markup.row_width = 2\n    markup.add(InlineKeyboardButton('Мужской', callback_data='мужской'),\n               InlineKeyboardButton('Женский', callback_data='женский'))\n\n    return markup\n","repo_name":"Munchen777/telegram_bot","sub_path":"keyboards/inline/request_gender_inline.py","file_name":"request_gender_inline.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"23122950812","text":"from django.test import TestCase\n\nfrom rest_framework.test import APIClient\nfrom rest_framework import status\n\nfrom airport.models import Aircraft, StateChangeLog\nfrom airport.serializers import StateChangeLogSerializer\n\n\ndef create_aircraft(call_sign, state='PARKED', type='AIRLINER', longitude=0,\n                    latitude=0, altitude=0, heading=0):\n    return Aircraft.objects.create(\n        call_sign=call_sign,\n        state=state,\n        type=type,\n        longitude=longitude,\n        latitude=latitude,\n        altitude=altitude,\n        heading=heading\n    )\n\n\ndef create_log(aircraft, from_state, to_state, outcome, description=\"\"):\n    return StateChangeLog.objects.create(\n        aircraft=aircraft,\n        from_state=from_state,\n        to_state=to_state,\n        outcome=outcome,\n        description=description\n    )\n\n\nclass StateChangeLogApiTests(TestCase):\n\n    def setUp(self):\n        self.client = APIClient()\n\n    def test_can_retrieve_logs(self):\n        aircraft1 = create_aircraft('A1', state='PARKED', type='AIRLINER')\n        aircraft2 = create_aircraft('A2', state='PARKED', type='AIRLINER')\n\n        log1 = create_log(aircraft1, 'PARKED', 'TAKE_OFF', 'ACCEPTED')\n        log2 = create_log(aircraft1, 'TAKE_OFF', 'LANDED', 'REJECTED', description='Not a valid state change')\n        log3 = create_log(aircraft2, 'PARKED', 'TAKE_OFF', 'ACCEPTED')\n\n        res = self.client.get('/api/state_logs/')\n\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n        serialized_log1 = StateChangeLogSerializer(log1)\n        serialized_log2 = StateChangeLogSerializer(log2)\n        serialized_log3 = StateChangeLogSerializer(log3)\n\n        self.assertIn(serialized_log1.data, res.data)\n        self.assertIn(serialized_log2.data, res.data)\n        self.assertIn(serialized_log3.data, res.data)\n\n    def test_can_limit_results_count_and_retrieve_latest(self):\n        aircraft1 = create_aircraft('A1', 
state='PARKED', type='AIRLINER')\n aircraft2 = create_aircraft('A2', state='PARKED', type='AIRLINER')\n\n log1 = create_log(aircraft1, 'PARKED', 'TAKE_OFF', 'ACCEPTED')\n log2 = create_log(aircraft1, 'TAKE_OFF', 'LANDED', 'REJECTED', description='Not a valid state change')\n log3 = create_log(aircraft2, 'PARKED', 'TAKE_OFF', 'ACCEPTED')\n log4 = create_log(aircraft1, 'TAKE_OFF', 'APPROACH', 'REJECTED', description='Not a valid state change')\n\n res = self.client.get('/api/state_logs/?limit=2')\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n serialized_log1 = StateChangeLogSerializer(log1)\n serialized_log2 = StateChangeLogSerializer(log2)\n serialized_log3 = StateChangeLogSerializer(log3)\n serialized_log4 = StateChangeLogSerializer(log4)\n\n self.assertNotIn(serialized_log1.data, res.data['results'])\n self.assertNotIn(serialized_log2.data, res.data['results'])\n self.assertIn(serialized_log3.data, res.data['results'])\n self.assertIn(serialized_log4.data, res.data['results'])\n","repo_name":"milorad-kukic/airport","sub_path":"app/airport/tests/test_state_change_logs_api.py","file_name":"test_state_change_logs_api.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19239136940","text":"import logging\n\nfrom telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update\nfrom telegram.ext import (\n Updater,\n CommandHandler,\n MessageHandler,\n Filters,\n ConversationHandler,\n CallbackContext,\n)\n\nfrom bot_info import BOT_TOKEN\n\n# Enable logging\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nFIRST, SECOND = range(2)\n\n\ndef start(update: Update, context: CallbackContext) -> int:\n \"\"\"Starts the conversation and asks the user if they've watched.\"\"\"\n reply_keyboard = [['Yes', 'Not Yet']]\n\n update.message.reply_text(\n \"\"\"Hi! My name is PanConBot. I'm a JoJo lover. \\n\\n\"\"\"\n \"\"\"Send /cancel to stop talking to me UwU.\\n\\n\"\"\"\n \"\"\"Have you watched JoJo's?\"\"\",\n reply_markup=ReplyKeyboardMarkup(\n reply_keyboard,\n one_time_keyboard=True,\n input_field_placeholder='Yes or Not Yet?'),\n )\n\n return FIRST\n\n\ndef watch(update: Update, context: CallbackContext) -> int:\n \"\"\"Stores the selected watch response and asks if wants to watch.\"\"\"\n reply_keyboard = [['Yes', 'Of course!']]\n\n user = update.message.from_user\n recieved_message = update.message.text\n logger.info(\"Has watched? of %s: %s\", user.first_name, recieved_message)\n if recieved_message == 'Yes':\n reply_message = 'Awesome! Do you want to watch it again?'\n elif recieved_message == 'Not Yet':\n reply_message = \"\"\"It's OK, Do you want to watch it?\"\"\"\n\n update.message.reply_text(reply_message,\n reply_markup=ReplyKeyboardMarkup(\n reply_keyboard,\n one_time_keyboard=True,\n input_field_placeholder='Si?',\n ))\n\n return SECOND\n\n\ndef info(update: Update, context: CallbackContext) -> int:\n \"\"\"Gives info on where to watch.\"\"\"\n\n user = update.message.from_user\n recieved_message = update.message.text\n logger.info(\"Will watch? 
of %s: %s\", user.first_name, recieved_message)\n    reply_message = \"\"\"Great!\n\n    You can watch parts 1-4 on:\n    https://www.netflix.com/cl/title/80179831\n\n    And part 5 on:\n    https://www.crunchyroll.com/es/jojos-bizarre-adventure\n    or just google it :v\n\n    ENJOY!\n    \"\"\"\n    update.message.reply_text(reply_message,\n                              reply_markup=ReplyKeyboardRemove())\n\n    return ConversationHandler.END\n\n\ndef cancel(update: Update, context: CallbackContext) -> int:\n    \"\"\"Cancels and ends the conversation.\"\"\"\n    user = update.message.from_user\n    logger.info(\"User %s canceled the conversation.\", user.first_name)\n    update.message.reply_text('Bye! I hope we can talk again some day.',\n                              reply_markup=ReplyKeyboardRemove())\n\n    return ConversationHandler.END\n\n\ndef main() -> None:\n    \"\"\"Run the bot.\"\"\"\n    # Create the Updater and pass it your bot's token.\n    updater = Updater(BOT_TOKEN)\n\n    # Get the dispatcher to register handlers\n    dispatcher = updater.dispatcher\n\n    # Add conversation handler with the states\n    conv_handler = ConversationHandler(\n        entry_points=[CommandHandler('start', start)],\n        states={\n            FIRST: [MessageHandler(Filters.regex('^(Yes|Not Yet)$'), watch)],\n            SECOND:\n            [MessageHandler(Filters.regex('^(Yes|Of course!)$'), info)]\n        },\n        fallbacks=[CommandHandler('cancel', cancel)],\n    )\n\n    dispatcher.add_handler(conv_handler)\n\n    # Start the Bot\n    updater.start_polling()\n\n    # Run the bot until you press Ctrl-C or the process receives SIGINT,\n    # SIGTERM or SIGABRT. This should be used most of the time, since\n    # start_polling() is non-blocking and will stop the bot gracefully.\n    updater.idle()\n\n\nif __name__ == '__main__':\n    main()","repo_name":"bastianfuenza/telegrambot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"9703775585","text":"import os\nfrom flask import jsonify, Blueprint, request\nfrom src.models import db\nfrom src.models.user_model import User, UserSchema\nfrom flask import current_app as app\nfrom sqlalchemy import exc\nfrom src.models import bcrypt\nfrom src.common.authentication import Auth\n\nuser_api = Blueprint('user_api', __name__)\nuser_schema = UserSchema()\n\n@user_api.route('/register', methods=['POST'])\ndef add_user():\n    user = User(request.json)\n    db.session.add(user)\n    try:\n        db.session.commit()\n    except exc.SQLAlchemyError as ex:\n        app.logger.debug(ex)\n        return \"Error encountred\", 500\n    return \"Successfully user registered\", 200\n\n@user_api.route('/user/', methods=['GET'])\n@Auth.auth_required\ndef get_user(username):\n    user = User.query.filter_by(username=username).first()\n    if not user:\n        return \"user not registered\", 404\n    return user_schema.jsonify(user), 200\n\n@user_api.route('/login', methods=['GET'])\ndef login():\n    auth = request.authorization\n    user = User.query.filter_by(username=auth.username).first()\n    if not user:\n        return \"user not registered\", 404\n    if bcrypt.check_password_hash(user.password, auth.password):\n        path = os.path.join(\"tmp\", \"cookies\", user.username)\n        if os.path.exists(path):\n            with open(path, 'r') as cookie:\n                c = cookie.readlines()\n                token = c[1].rstrip('\\n')\n            return jsonify({'message': 'user already logged in', 'token': token})\n        else:\n            token = Auth.generate_token(user.username)\n            with open(path, \"w\") as cookie:\n                cookie.write(user.username+'\\n')\n                cookie.write(token+'\\n')\n            return token\n    else:\n        return \"username or password incorrect\", 
404\n\n@user_api.route('/logout', methods=['GET'])\n@Auth.auth_required\ndef logout():\n token=None\n if \"x-api-key\" in request.headers:\n token = request.headers.get(\"x-api-key\", None)\n if not token:\n return \"token missing\", 403\n userinfo = Auth.decode_token(token)\n username = userinfo.get('username', None)\n path = os.path.join(\"tmp\", \"cookies\", username)\n try:\n os.remove(path)\n except OSError as ex:\n app.logger.error(ex)\n return 'cookie matching or remove error', 500\n return 'user logged out successfully', 200\n\n@user_api.route('/user/', methods=['PUT'])\n@Auth.auth_required\ndef update_user(username):\n user = User.query.filter_by(username=username).first()\n if not user:\n return \"user not registered\", 404\n for key, value in request.json.items():\n user.__setattr__(key, value)\n try:\n db.session.commit()\n except exc.SQLAlchemyError as ex:\n app.logger.debug(ex)\n return \"Error encountred\", 500\n return user_schema.jsonify(user), 200\n\n@user_api.route('/user/', methods=['DELETE'])\n@Auth.auth_required\ndef delete_user(username):\n user = User.query.filter_by(username=username).first()\n if not user:\n return \"user not registered\", 404\n try:\n db.session.delete(user)\n db.session.commit()\n except exc.SQLAlchemyError as ex:\n app.logger.debug(ex)\n return \"Error encountred\", 500\n return \"user deleted successfully\", 200","repo_name":"siddharthcurious/inventory-orm","sub_path":"src/controllers/user_controller.py","file_name":"user_controller.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70178251361","text":"import os\nimport platform\n\nimport cat_service\nimport subprocess\n\ndef main():\n print_header()\n folder = get_or_create_output_folder()\n print('Found or created folder: ' + folder)\n download_cats(folder)\n display_cats(folder)\n\n\n\ndef print_header():\n print('----------------')\n print(' CAT ')\n print('----------------')\n\n\ndef get_or_create_output_folder():\n #print(__name__)\n #print(__file__)\n folder = 'cat_pictures'\n full_path = os.path.abspath(os.path.join('.', folder))\n\n if not os.path.exists(full_path) or not os.path.isdir(full_path):\n print (\"Creating new directory at {}\". 
format(full_path))\n os.mkdir(full_path)\n\n return full_path\n\n\ndef download_cats(folder):\n print('Contacting server to download cats...')\n cat_count = 8\n for i in range(1, cat_count+1):\n name = 'lolcat {}'.format(i)\n cat_service.get_cat(folder, name)\n print('Downloading cat ' + name)\n\n print(\"Done\")\n\ndef display_cats(folder):\n #open folder\n print('Displaying cats in OS window')\n if platform.system() == 'Darwin':\n subprocess.call(['open', folder])\n elif platform.system() == 'Windows':\n subprocess.call(['explorer', folder])\n elif platform.system() == \"Linux\":\n subprocess.call(['xdg-open', folder])\n else:\n print(\"We don't support your operating system \" + platform.system())\n\n\nif __name__ == '__main__':\n main()","repo_name":"brettdavidcarpenter/PythonJumpstart10Apps","sub_path":"06_lolcat_factory/you_try/programBrett.py","file_name":"programBrett.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23198432273","text":"# IMPORTING PACKAGES\n\nimport pandas as pd # working with data\nimport numpy as np # working with arrays\nimport matplotlib.pyplot as plt # visualization\nimport seaborn as sb # visualization\nfrom mpl_toolkits.mplot3d import Axes3D # 3d plot\nfrom termcolor import colored as cl # text customization\n\nfrom sklearn.preprocessing import StandardScaler # data normalization\nfrom sklearn.cluster import KMeans # K-means algorithm\n\nplt.rcParams['figure.figsize'] = (20, 10)\nsb.set_style('whitegrid')\n\n# IMPORTING DATA\n\ndf = pd.read_csv('cust_seg.csv')\ndf.drop('Unnamed: 0', axis = 1, inplace = True)\ndf.set_index('Customer Id', inplace = True)\n\nprint(cl(df.head(), attrs = ['bold']))\n\n# DATA ANALYSIS\n\n# Age distribution\n\nprint(cl(df['Age'].describe(), attrs = ['bold']))\n\nsb.distplot(df['Age'], \n color = 'orange')\nplt.title('AGE DISTRIBUTION', \n fontsize = 18)\nplt.xlabel('Age', \n fontsize = 16)\nplt.ylabel('Frequency', \n fontsize = 16)\nplt.xticks(fontsize = 14)\nplt.yticks(fontsize = 14)\n\nplt.savefig('age_distribution.png')\nplt.show()\n\n# Credit card default cases\n\nsb.countplot(df['Defaulted'], \n palette = ['coral', 'deepskyblue'], \n edgecolor = 'darkgrey')\nplt.title('Credit card default cases(1) and non-default cases(0)', \n fontsize = 18)\nplt.xlabel('Default value', \n fontsize = 16)\nplt.ylabel('Number of People', \n fontsize = 16)\nplt.xticks(fontsize = 14)\nplt.yticks(fontsize = 14)\n\nplt.savefig('default_cases.png')\nplt.show()\n\n# Age vs Income\n\nsb.scatterplot('Age', 'Income', \n data = df, \n color = 'deepskyblue', \n s = 150, \n alpha = 0.6, \n edgecolor = 'b')\nplt.title('AGE / INCOME', \n fontsize = 18)\nplt.xlabel('Age', \n fontsize = 16)\nplt.ylabel('Income', \n fontsize = 16)\nplt.xticks(fontsize = 14)\nplt.yticks(fontsize = 14)\n\nplt.savefig('age_income.png')\nplt.show()\n\n# Years Employed vs Income\n\narea = df.DebtIncomeRatio **2\n\nsb.scatterplot('Years Employed', 'Income', \n data = df, \n s = area, \n alpha = 0.6, \n edgecolor = 'white', \n hue = 'Defaulted', \n palette = 'spring')\nplt.title('YEARS EMPLOYED / INCOME', \n fontsize = 18)\nplt.xlabel('Years Employed', \n fontsize = 16)\nplt.ylabel('Income', \n fontsize = 16)\nplt.xticks(fontsize = 14)\nplt.yticks(fontsize = 14)\nplt.legend(loc = 'upper left', fontsize = 14)\n\nplt.savefig('y_income.png')\nplt.show()\n\n# DATA PROCESSING\n\nX = df.values\nX = np.nan_to_num(X)\n\nsc = StandardScaler()\n\ncluster_data = 
sc.fit_transform(X)\nprint(cl('Cluster data samples : ', attrs = ['bold']), cluster_data[:5])\n\n# MODELING\n\nclusters = 3\nmodel = KMeans(init = 'k-means++', \n               n_clusters = clusters, \n               n_init = 12)\nmodel.fit(X)\n\nlabels = model.labels_\nprint(cl(labels[:100], attrs = ['bold']))\n\n# MODEL INSIGHTS\n\ndf['cluster_num'] = labels\nprint(cl(df.head(), attrs = ['bold']))\n\nprint(cl(df.groupby('cluster_num').mean(), attrs = ['bold']))\n\narea = np.pi * (df.Edu) ** 4\n\nsb.scatterplot('Age', 'Income', \n               data = df, \n               s = area, \n               hue = 'cluster_num', \n               palette = 'spring', \n               alpha = 0.6, \n               edgecolor = 'darkgrey')\nplt.title('AGE / INCOME (CLUSTERED)', \n          fontsize = 18)\nplt.xlabel('Age', \n           fontsize = 16)\nplt.ylabel('Income', \n           fontsize = 16)\nplt.xticks(fontsize = 14)\nplt.yticks(fontsize = 14)\nplt.legend(loc = 'upper left', fontsize = 14)\n\nplt.savefig('c_age_income.png')\nplt.show()\n\nfig = plt.figure(1)\nplt.clf()\nax = Axes3D(fig, \n            rect = [0, 0, .95, 1], \n            elev = 48, \n            azim = 134)\n\nplt.cla()\nax.scatter(df['Edu'], df['Age'], df['Income'], \n           c = df['cluster_num'], \n           s = 200, \n           cmap = 'spring', \n           alpha = 0.5, \n           edgecolor = 'darkgrey')\nax.set_xlabel('Education', \n              fontsize = 16)\nax.set_ylabel('Age', \n              fontsize = 16)\nax.set_zlabel('Income', \n              fontsize = 16)\n\nplt.savefig('3d_plot.png')\nplt.show()","repo_name":"Nikhil-Adithyan/Customer-Segmentation-with-K-Means","sub_path":"Customer_segmentation.py","file_name":"Customer_segmentation.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"70978533603","text":"import numpy as np\nimport os\nimport random\n\nfrom data_processing_utilities import _readin_data, _normalize\n\nscale = (0,1)\n\npath_in = \"benchmark_tests/SIM-G/\"\npath_out = \"benchmark_tests/SIM-G_subsampled/\"\n\nN_samples = 1000\n\nfiles = os.listdir(path_in)\nfiles.sort()\n\nfor _file in files: \n    X,Y = _readin_data(_file, path_in) \n    X,Y = _normalize(X,Y, scale)\n\n    if len(X) > N_samples:\n        print(_file)\n        data1 = list(X) \n        data2 = list(Y)\n        X_sub = []\n        Y_sub = []\n        for i in range(N_samples):\n            index = random.randrange(len(data1))\n            elem1 = data1[index]\n            elem2 = data2[index]\n            \n            del data1[index]\n            del data2[index]\n            X_sub.append(elem1)\n            Y_sub.append(elem2)\n        \n        p_out = path_out + _file[:-4] + \"_subsampled.txt\"\n        with open(p_out, \"w\") as f:\n            for x,y in zip(X_sub,Y_sub):\n                f.write(\"{:.10e} {:.10e}\\n\".format(x,y))\n    else:\n        cmd = \"cp \" + path_in + _file + \" \" + path_out\n        os.popen(cmd)\n","repo_name":"Cosmicstring/Bayesian-Causal-Inference-with-IFT","sub_path":"subsample.py","file_name":"subsample.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"33618679828","text":"import random\n\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.route(\"/omikuji\")\ndef hello():\n    omikuji_list = [\"大吉\", '吉', '凶']\n    result = random.choice(omikuji_list)\n    return render_template(\"omikuji.html\", result=result)\n\n@app.route(\"/dice\")\ndef dice_shuffle():\n    dice_list = range(1, 7)  # a standard die shows 1-6, so the range must cover all six faces\n    return str(random.choice(dice_list))\n\n@app.route(\"/members\")\ndef members():\n    members = [\"Bob\", \"Tom\", \"Ken\"]\n    return render_template(\"members.html\", members = members)\n\n\nif __name__ == '__main__':\n    
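# debug=True enables Flask's auto-reloader and interactive debugger; use it only for local development\n    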
app.run(debug=True)\n","repo_name":"Zackno23/flask_sample02","sub_path":"02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72950175523","text":"import gnuradio.blocks as blocks\nimport gnuradio.filter as filter\nfrom . import fddsm_preamble_detector_cc as preamble_detector_cc\nfrom gnuradio import gr\nimport numpy as np\nimport lpwan\n\n\nclass SpaRSe_synchronization_cc(gr.hier_block2):\n \"\"\"\n Performs time and frequency synchronization for SpaRSe based on the preamble.\n \"\"\"\n def __init__(self, samp_rate_hz, sps, SF, shr, filtered_preamble_code, alpha=1e-3, beta=5, time_gap_chips=11, max_offset_hz=0, max_num_filters=1, output_correlator_index=0):\n gr.hier_block2.__init__(self,\n \"SpaRSe_synchronization_cc\",\n gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature3(3, 3, gr.sizeof_gr_complex, gr.sizeof_gr_complex, gr.sizeof_float)) # Output signature\n\n self.delta_phi = lpwan.SpaRSe_utils.calculate_phase_increments(samp_rate_hz, SF, sps, max_offset_hz, max_num_filters)\n\n # Define blocks\n self.rotators = [blocks.rotator_cc(-phi) for phi in self.delta_phi]\n self.matched_filters = [filter.fft_filter_ccf(1, np.flipud(np.conj(filtered_preamble_code))) for i in xrange(len(self.delta_phi))]\n self.preamble_detector = preamble_detector_cc(shr, sps, SF, time_gap_chips, alpha, beta, self.delta_phi, output_correlator_index)\n self.skiphead = blocks.skiphead(gr.sizeof_gr_complex, sps * (SF + time_gap_chips) + 4) # the +4 is \"empirical\" but well tested for sps=4\n\n # Connect blocks with preamble detector and outputs\n for i in xrange(len(self.delta_phi)):\n self.connect(self, self.rotators[i], self.matched_filters[i], (self.preamble_detector, i))\n self.connect(self, self.skiphead, (self.preamble_detector, len(self.delta_phi)))\n for i in xrange(3):\n self.connect((self.preamble_detector, i), (self, i))\n\n def set_alpha(self, alpha):\n self.preamble_detector.set_alpha(alpha)\n\n def set_beta(self, beta):\n self.preamble_detector.set_beta(beta)\n\n def set_output_correlator(self, output_correlator):\n self.preamble_detector.set_output_correlator(output_correlator)\n\n","repo_name":"kit-cel/gr-lpwan","sub_path":"python/SpaRSe_synchronization_cc.py","file_name":"SpaRSe_synchronization_cc.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"54"} +{"seq_id":"5838530665","text":"\nfrom cryptounifier_python_sdk import MerchantAPI, WalletAPI\n\n# WalletAPI\nclient = WalletAPI('', '', 'btc')\n\n#balance = client.getBalance()\nbalance = client.validateAddresses([\"ubc\"])\nprint(balance)\n\n# depositAddresses = client.getDepositAddresses()\n# print(depositAddresses)\n\n# MerchantAPI\n# client_MerchantAPI = MerchantAPI('', '')\n\n# # invoice = client_MerchantAPI.createInvoice(['btc', 'bch', 'eth'])\n# # print(invoice)\n# invoiceInfo = client_MerchantAPI.invoiceInfo(\"\")\n# print(invoiceInfo)","repo_name":"srdante/cryptounifier_python_sdk","sub_path":"test/merchant-test.py","file_name":"merchant-test.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72542708963","text":"\nALL_ELEMENTS = -2\n\n\ndef get(url):\n return lambda chrome: chrome.get(url)\n\n\ndef xpath(xpath, index=0):\n if index != ALL_ELEMENTS:\n return lambda chrome: 
chrome.xpath(xpath)[index]\n    else:\n        return lambda chrome: chrome.xpath(xpath)\n\n\ndef click():\n    def _click(element):\n        element.click()\n        return element\n    return _click\n\n\ndef send_keys(string):\n    def _send_keys(element):\n        element.send_keys(string)\n        return element\n    return _send_keys\n\n\ndef return_text():\n    def _return_text(element):\n        print(element.text)\n        return element.text\n    return _return_text\n\n\nRETURN_TEXT = return_text\n\n\ndef do_actions(chrome, action_rows):\n    returns = []\n    for raw_action in action_rows:\n        action_func = parse_action(raw_action)\n        returns.append(action_func(chrome))\n    return returns\n\n\ndef parse_action(raw_action):\n    tupled_action = (raw_action,) if type(raw_action) is str else raw_action\n    first_string = tupled_action[0]\n    print(first_string)\n    if first_string.startswith(\"http\"):\n        return get(first_string)\n\n    if len(tupled_action) > 1 and type(tupled_action[1]) is int:\n        xpath_index = tupled_action[1]\n        rest_action = tupled_action[2:]\n    else:\n        xpath_index = 0\n        rest_action = tupled_action[1:]\n\n    element_find_func = xpath(first_string, index=xpath_index)\n    if len(rest_action) == 0:\n        action_func = click()\n    else:\n        if rest_action[0] is RETURN_TEXT:\n            action_func = return_text()\n        else:\n            action_func = send_keys(rest_action[0])\n    if xpath_index != ALL_ELEMENTS:\n        return lambda chrome: action_func(element_find_func(chrome))\n    else:\n        return lambda chrome: [action_func(element)\n                               for element in element_find_func(chrome)]\n","repo_name":"umihico/umihico_commons","sub_path":"_chrome_actions.py","file_name":"_chrome_actions.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"19308280705","text":"import pandas as pd \n\nlkmatch= pd.read_csv('/home/erika/Desktop/likeliest_match.csv')\ncolumns = lkmatch.columns\nsamples= [col for col in lkmatch if col.startswith('Sample')]\nanalID= [i.split('_', 1)[0] for i in samples]\nlID=[i.split('_', 1)[1] for i in samples]\nlaID=[i.split('_', 1)[0] for i in lID]\nlabID=[i.split('masslists', 1)[1] for i in laID]\n\nSampleName= [i.split('_', 1)[1] for i in lID]\nx = pd.Series(SampleName)\ny= x.str.split('_', expand=True)\nerikaName= y[3]\nerikaName= erikaName.tolist()\nSampleNames= pd.DataFrame({'SampleID':analID,'LabID': labID,'ErikaID': erikaName})\nSampleNames['ErikaID']= SampleNames['ErikaID'].astype('str')\nSampleNames['ErikaID'] = SampleNames['ErikaID'].apply(lambda x: 0 if len(x) < 5 else x)\nSampleNames['Site']= SampleNames['ErikaID'].str.slice(start=0, stop=2)\nSampleNames['Slope1']= SampleNames['ErikaID'].str.slice(start=2)\nSampleNames['Slope2']= SampleNames['ErikaID'].str.slice(start=2, stop=3)\nSampleNames['Depth1']= SampleNames['ErikaID'].str.slice(start=3)\nSampleNames['Slope'] = SampleNames['Slope1'].where(SampleNames['Slope1'].str.contains('ST'), SampleNames['Slope2'])\nSampleNames['Depth'] = SampleNames['Depth1'].where(SampleNames['Slope'].str.contains('ST'), SampleNames['Depth1'])\nSampleNames['Depth']= SampleNames['Depth'].astype('str')\nSampleNames['Depth'] = SampleNames['Depth'].apply(lambda x: 'Stream' if x.startswith('T') else x)\n\nSampleNames.drop(['Slope1', 'Slope2', 'Depth1'], axis=1, inplace=True)\nfdata= SampleNames.set_index('SampleID')\n#fdata.to_csv('/home/erika/Desktop/SampleNames.csv')\n\n\nedata_start = lkmatch[['mz']]\nedata_start.rename(columns={'mz': 'Mass'}, inplace=True)\nedata_end = lkmatch[samples]\nedata_end.columns = analID\nefinal0 = pd.DataFrame(edata_end)\nedata= 
efinal0.fillna(0)\nedata= edata.join(edata_start)\nedata= edata.set_index('Mass')\n\n\n\nemeta_cols = ['Mass','C','H','O','N','C13','S','P','Error','NeutralMass']\nemeta = lkmatch[['mz','C','H','O','N','C13','S','P','SE','reference']]\nemeta.rename(columns={'mz':'Mass','SE': 'Error','reference': 'NeutralMass'}, inplace=True)\nemeta= emeta.set_index('Mass')\n\n# Create a Pandas Excel writer using XlsxWriter as the engine.\nwriter = pd.ExcelWriter('/home/erika/Desktop/pandas_multiple.xlsx', engine='xlsxwriter')\n\n# Write each dataframe to a different worksheet.\nedata.to_excel(writer, sheet_name='e_data')\nfdata.to_excel(writer, sheet_name='f_data')\nemeta.to_excel(writer, sheet_name='e_meta')\n\n# Close the Pandas Excel writer and output the Excel file.\nwriter.save()","repo_name":"erikafreeman/FTICRMS-analyses","sub_path":"fileprep.py","file_name":"fileprep.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"74402800481","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\nchrome_driver_path = \"/home/cow/Documents/System/Development/chromedriver\"\n\ns = Service(chrome_driver_path)\n\ndriver = webdriver.Chrome(service=s)\n\ndriver.get(\"https://orteil.dashnet.org/experiments/cookie/\")\n\ncookie = driver.find_element(by=By.ID, value=\"cookie\")\n\n\n# check right panel\n\nseen = {}\nseconds = 0\nclicked = False\nseen_clicked = {}\nwhile True:\n    now = int(time.time())\n    cookie.click()\n    minute = seconds / 60\n\n    # start counting by seconds\n    if now not in seen:\n        seconds += 1\n        seen[now] = None\n\n    # make sure that right pane is only clicked once per 5 seconds\n    if seconds not in seen_clicked:\n        clicked = False\n\n    # every 5 seconds check the right panel\n    # click from bottom to top so that you can get most expensive first\n    if not seconds % 5 and not clicked:\n        store = driver.find_elements(\n            by=By.CSS_SELECTOR, value=\"#store div:not(.grayed)\"\n        )\n        store = store[::-1]\n        try:\n            for item in store:\n                item.click()\n\n        except:\n            store = driver.find_elements(\n                by=By.CSS_SELECTOR, value=\"#store div:not(.grayed)\"\n            )\n            store = store[::-1]\n\n        clicked = True\n        seen_clicked[seconds] = None\n\n    if minute == 5:\n        per_second = driver.find_element(by=By.ID, value=\"cps\")\n        print(per_second.text)\n","repo_name":"dave-cao/100-Days-of-Code-Python","sub_path":"day_48_Selenium/cookies.py","file_name":"cookies.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"29395186278","text":"# -*- coding: utf-8 -*-\n\nfrom api.api import API\nfrom pages.android.common.super_page import SuperPage\nfrom pages.android.ffan.square_food_category_page_configs import SquareFoodPageConfigs as SFPC\nfrom api.logger import logger\nfrom pages.logger import logger\n\n\nclass SquareFoodPage(SuperPage):\n    '''\n    Author: Liu Tao\n    Home => Plaza => Food Court (美食汇)\n    '''\n    def __init__(self, testcase, driver, logger):\n        super(SquareFoodPage, self).__init__(testcase, driver, logger)\n\n    def clickOnFindRestaurant(self):\n        '''\n        usage : open the Find Restaurant (找餐厅) page\n        '''\n        logger.info(\"Click 找餐厅 begin\")\n        API().clickElementByResourceId(self.testcase,\n                                       self.driver,\n                                       self.logger,\n                                       SFPC.resource_id_ll_find_restaurant_id,\n                                       SFPC.click_child_module_timeout)\n        logger.info(\"Click 找餐厅 end\")\n\n    def clickOnFindFavourable(self):\n        
'''\n        usage : open the Find Discounts (找优惠) page\n        '''\n        logger.info(\"Click 找优惠 begin\")\n        API().clickElementByResourceId(self.testcase,\n                                       self.driver,\n                                       self.logger,\n                                       SFPC.resource_id_ll_find_favourable_id,\n                                       SFPC.click_child_module_timeout)\n        logger.info(\"Click 找优惠 end\")\n\n    def clickOnQueue(self):\n        '''\n        usage : open the Smart Queue (智能排队) page\n        '''\n        logger.info(\"Click 智能排队 begin\")\n        API().clickElementByResourceId(self.testcase,\n                                       self.driver,\n                                       self.logger,\n                                       SFPC.resource_id_ll_intelligent_queuing_id,\n                                       SFPC.click_child_module_timeout)\n        logger.info(\"Click 智能排队 end\")\n\n    def clickOnStochastic(self):\n        '''\n        usage : open the Pick-for-You (帮你选) page\n        '''\n        logger.info(\"Click 帮你挑 begin\")\n        API().clickElementByResourceId(self.testcase,\n                                       self.driver,\n                                       self.logger,\n                                       SFPC.resource_id_ll_stochastic_id,\n                                       SFPC.click_child_module_timeout)\n        logger.info(\"Click 帮你挑 end\")\n\n    def validFindRestaurant(self):\n        '''\n        usage : check that the Find Restaurant page has loaded.\n        '''\n        logger.info(\"Check 找餐饮页面 begin\")\n        API().assertElementByResourceId(self.testcase,\n                                        self.driver,\n                                        self.logger,\n                                        SFPC.verify_find_restaurant_resourceID,\n                                        SFPC.verify_assert_timeout)\n        logger.info(\"Check 找餐饮页面 end\")\n\n    def validFindFavourable(self):\n        '''\n        usage : check that the Find Discounts page has loaded.\n        '''\n        logger.info(\"Check 找优惠页面 begin\")\n        API().assertElementByResourceId(self.testcase,\n                                        self.driver,\n                                        self.logger,\n                                        SFPC.verify_find_favourable_resourceID,\n                                        SFPC.verify_assert_timeout)\n        logger.info(\"Check 找优惠页面 end\")\n\n    def validQueue(self):\n        '''\n        usage : check that the Smart Queue page has loaded.\n        '''\n        logger.info(\"Check 智能排队页面 begin\")\n        API().assertElementByResourceId(self.testcase,\n                                        self.driver,\n                                        self.logger,\n                                        SFPC.verify_intelligent_queuing_resourceID,\n                                        SFPC.verify_assert_timeout)\n        logger.info(\"Check 智能排队页面 end\")\n\n    def validStochastic(self):\n        '''\n        usage : check that the Pick-for-You page has loaded.\n        '''\n        logger.info(\"Check 帮你挑页面 begin\")\n        API().assertElementByResourceId(self.testcase,\n                                        self.driver,\n                                        self.logger,\n                                        SFPC.verify_stochastic_resourceID,\n                                        SFPC.verify_assert_timeout)\n        logger.info(\"Check 帮你挑页面 end\")\n\n    def validSelf(self):\n        '''\n        usage : open the Food Court (美食汇) page and check that it has loaded\n        '''\n        logger.info(\"Check 美食汇页面 begin\")\n        API().assertElementByText(self.testcase,\n                                  self.driver,\n                                  self.logger,\n                                  SFPC.verify_text_food,\n                                  SFPC.verify_assert_timeout)\n        logger.info(\"Check 美食汇页面 end\")\n","repo_name":"liu111xiao111/UItest","sub_path":"pages/android/ffan/square_food_category_page.py","file_name":"square_food_category_page.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
{"seq_id":"44445728079","text":"print(\"=\"*19, \"CAFE\", \"=\"*19)\nprint(\"=\"*10, \"MASUKKAN JUMLAH PESANAN\", \"=\"*9)\nc= int(input(\"CAPPUCINO\\t DISKON 50%\\t Rp 25.000 : \"))\nv= int(input(\"VANILLA LATTE\\t DISKON 65%\\t Rp 22.000 : \"))\na= int(input(\"AMERICANO\\t DISKON 35% \\t Rp 20.000 : \"))\nb= int(input(\"BREWED COFFEE\\t DISKON 40%\\t Rp 20.000 : \"))\ncappucino= 25000*c\ndc=50/100*cappucino\nvanilla=22000*v\ndv=65/100*vanilla\namericano=20000*a\nda=35/100*americano\nbrewed=20000*b\ndb=40/100*brewed\nprint(\"=\"*17, \"TOTAL\", \"=\"*17)\nprint(f\"TOTAL CAPPUCINO\\t : Rp {round(dc)}\")\nprint(f\"TOTAL VANILLA LATTE\\t : Rp {round(dv)}\")\nprint(f\"TOTAL AMERICANO\\t : Rp {round(da)}\")\nprint(f\"TOTAL BREWED COFFEE\\t : Rp {round(db)}\")\n\n","repo_name":"ameliaetsa/ameliakiw","sub_path":"asep4.py","file_name":"asep4.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"22474582867","text":"import 
time\nfrom turtle import Screen, Turtle\nfrom paddle import Paddle\nfrom ball import Ball\nfrom scoreboard import Scoreboard\n\n\nturtle = Turtle()\nright_pad = Paddle((350, 0))\nleft_pad = Paddle((-350, 0))\nball = Ball()\nscoreboard = Scoreboard()\n\n\n# screen setup\nscreen = Screen()\nscreen.setup(width=800, height=600)\nscreen.bgcolor(\"black\")\nscreen.title(\"Pong\")\n# turn off animation; the screen is updated manually in the game loop\nscreen.tracer(0)\n\n# controlling paddles\nscreen.listen()\nscreen.onkey(right_pad.up, \"Up\")\nscreen.onkey(right_pad.down, \"Down\")\nscreen.onkey(left_pad.up, \"w\")\nscreen.onkey(left_pad.down, \"s\")\n\n\ngame_is_on = True\n\nwhile game_is_on:\n    screen.update()\n    time.sleep(ball.move_speed)\n    \n    # detect collision with wall and change direction\n    if ball.ycor() > 280 or ball.ycor() < -280:\n        # needs to bounce\n        ball.bounce_y()\n\n    # detect collision with paddle and change direction\n    if ball.distance(right_pad) < 50 and ball.xcor() > 320 or ball.distance(left_pad) < 50 and ball.xcor() < -320:\n        ball.bounce_x() \n\n    # detect when the Right Paddle misses the ball\n    if ball.xcor() > 380:\n        ball.reset_position()\n        scoreboard.left_point()\n\n    # detect when the Left Paddle misses the ball\n    if ball.xcor() < -380:\n        ball.reset_position()\n        scoreboard.right_point()\n\n\n    ball.move()\n\n\n\n\nscreen.exitonclick()\n","repo_name":"mohammedaliyy/pong-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11961454926","text":"# LEADERBOARDCYCLOPEPTIDESEQUENCING(Spectrum, N)\n# Leaderboard <- {0-peptide}\n# LeaderPeptide <- 0-peptide\n# while Leaderboard is non-empty\n# Leaderboard <- Expand(Leaderboard)\n# for each Peptide in Leaderboard\n# if Mass(Peptide) = ParentMass(Spectrum)\n# if Score(Peptide, Spectrum) > Score(LeaderPeptide, Spectrum)\n# LeaderPeptide <- Peptide\n# else if Mass(Peptide) > ParentMass(Spectrum)\n# remove Peptide from Leaderboard\n# Leaderboard <- Cut(Leaderboard, Spectrum, N)\n# output LeaderPeptide\n\n# Input: Integer N and a collection of integers Spectrum.\n\n# Output: LeaderPeptide after running LEADERBOARDCYCLOPEPTIDESEQUENCING(Spectrum, N).\n\n# Sample Input:\n# 10\n# 0 71 113 129 147 200 218 260 313 331 347 389 460\n\n# Sample Output:\n# 113-147-71-129\n\n# Alternate solution without using amino acids as letters.\n# Necessary for adaptation to nonstandard amino acids (Stepic 26-7).\n\n# peptides are now represented as lists of ints\n\nimport inout\n\nN = int(inout.infilelines[0].strip())\nspectrum = map(int, inout.infilelines[1].strip().split(' '))\n\namino_acids = [57,71,87,97,99,101,103,113,114,115,128,129,131,137,147,156,163,186]\t\t\n\ndef cyclic_spectrum(peptide):\n\tout_spectrum = [0, sum(peptide)]\n\n\tpeptide_2 = peptide + peptide\t# for easy cyclic access\n\tfor k in range(1, len(peptide)):\n\t\tfor n in range(len(peptide)):\n\t\t\tsubpep = peptide_2[n:n+k]\n\t\t\tout_spectrum.append(sum(subpep))\n\treturn sorted(out_spectrum)\n\ndef branch(peptides):\n\tout_peptides = []\n\tfor p in peptides:\n\t\tfor amino_acid in amino_acids:\n\t\t\tout_peptides.append(p + [amino_acid])\n\treturn out_peptides\n\ndef score(candidate, target):\n\timport collections\n\tc_spectrum = cyclic_spectrum(candidate)\n\tc_counter = collections.Counter(c_spectrum)\n\tt_counter = collections.Counter(target)\n\t\n\ts = 0\n\tfor mass in c_counter:\n\t\tif mass in t_counter:\n\t\t\ts = s + min(c_counter[mass],t_counter[mass])\n\t\n\treturn s\n\ndef cut(candidates, 
target, n):\n\tif len(candidates) <= n:\n\t\treturn candidates\n\t\t\n\tleaderboard = {}\n\tfor candidate in candidates:\n\t\ts = score(candidate, target)\n\t\tif s in leaderboard:\n\t\t\tleaderboard[s].append(candidate)\n\t\telse:\n\t\t\tleaderboard[s] = [candidate]\n\t\n\tsurvivors = []\n\tsurvivors_to_choose = n\t\t\n\tfor s in sorted(leaderboard.keys(), reverse=True):\n\t\tsurvivors.extend(leaderboard[s])\n\t\tsurvivors_to_choose = survivors_to_choose - len(leaderboard[s])\n\t\tif survivors_to_choose < 0:\n\t\t\tbreak\n\t\t\t\n\treturn survivors\n\t\n# I'm sure there's a better way to do this but I don't know enough Python yet\ndef mklist(item):\n\treturn [item]\n\t\t\ncandidates = map(mklist,amino_acids)\nwinner = ''\nwinner_score = 0\nwhile candidates:\n\tcandidates = branch(candidates)\n\tnew_candidates = []\n\tfor candidate in candidates:\n\t\tc_mass = sum(candidate)\n\t\tt_mass = spectrum[-1]\n\n\t\t# if the mass of the candidate peptide equals the mass of the target peptide\n\t\tif c_mass == t_mass:\n\t\t\tnew_candidates.append(candidate)\n\t\t\tc_score = score(candidate, spectrum)\n\t\t\tif c_score > winner_score:\n\t\t\t\twinner = candidate\n\t\t\t\twinner_score = c_score\n\t\telif c_mass < t_mass:\n\t\t\tnew_candidates.append(candidate)\n\t\t# else: the candidate mass is too large, so it does not go on to the next round\n\tcandidates = cut(new_candidates, spectrum, N) \n\t\ninout.output('-'.join(map(str,winner)))","repo_name":"jmthibault79/rosalind","sub_path":"textbook/Stepic-24-4-no-alpha.py","file_name":"Stepic-24-4-no-alpha.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"28785140009","text":"import sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\n# number of nodes, number of edges\nn,m = map(int, input().split())\n# start node number\nstart = int(input())\n\n# build the graph as an adjacency list\ngraph = [[] for i in range(n+1)]\n# visited flags\nvisited = [False] * (n+1)\n# shortest-distance table\ndistance = [INF] * (n+1)\n\n# read the edge information\nfor _ in range(m):\n    a,b,c = map(int, input().split())\n    graph[a].append((b,c))\n\n# ==================================================\n# linear scan: return the unvisited node with the smallest distance value\ndef get_smallest_node():\n    min_val = INF\n    index = 0\n    for i in range(1, n+1):\n        if distance[i] < min_val and not visited[i]:\n            min_val = distance[i]\n            index = i\n    return index\n\n# compute the shortest distances with Dijkstra's algorithm\ndef dijkstra(start):\n    # 1. initialize the start node\n    distance[start] = 0\n    visited[start] = True\n    for i in graph[start]:\n        distance[i[0]] = i[1]\n\n    # 2. repeat for the remaining n-1 nodes\n    for i in range(n-1):\n        # 2-1. pick the smallest-distance node and mark it visited\n        now = get_smallest_node()\n        visited[now] = True \n\n        # 2-2. 
update any adjacent node whose distance can be improved\n        for j in graph[now]:\n            cost = distance[now] + j[1]\n            if cost < distance[j[0]]:\n                distance[j[0]] = cost\n\ndijkstra(start)\n\nfor i in range(1,n+1):\n    if distance[i] == INF:\n        print(\"INF\", end = ' ')\n    else:\n        print(distance[i], end = ' ')\n\n\n# 6 11\n# 1\n# 1 2 2\n# 1 3 5\n# 1 4 1\n# 2 3 3\n# 2 4 2\n# 3 2 3\n# 3 6 5\n# 4 3 3\n# 4 5 1\n# 5 3 1\n# 5 6 2","repo_name":"82KJ/Coding-Test-with-python","sub_path":"====/ShortestPath/Dijkstra_기본.py","file_name":"Dijkstra_기본.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74340472801","text":"import pandas as pd\r\nimport math\r\nfrom pandas import DataFrame as Df\r\nfrom pandas import Series\r\nimport numpy as np\r\nimport random\r\n#—————————————————————————————————————————————— model default parameters and functions ————————————————————————————————————————————-\r\n# default values of the model parameters\r\n\r\nCC0=1.0192\r\nCC1=1.4242\r\nCC2=5.8715\r\nCC3=-17.1579\r\nCC4=-0.2312\r\nCC5=1.7946\r\nCC6=3.5519\r\nCC7=0.5350\r\nCC8=4.0101\r\nCC9=2.6549\r\nVDES=86.0492/3.6\r\nLen_Of_Car=5\r\nEff_veh_len=5\r\n\r\n# real-time threshold variables\r\ncur_d_x=0\r\ncur_d_v=0\r\ncur_SDXc=0 # minimum following distance\r\ncur_slower=0\r\ncur_RND=0\r\ncur_SDV=0 # perception threshold for the speed difference\r\ncur_SDXo=0 # maximum following distance\r\ncur_SDXv=0\r\ncur_CLDV=0\r\ncur_OPDV=0\r\nlast_acc=0\r\n\r\n# compute the real-time thresholds\r\ndef get_Wiedemann99_Threhold(forehead_dis,forehead_acc,forehead_spe,cur_dis,cur_spe,\r\n                             CC0,CC1,CC2,CC3,CC4,CC5,CC6,CC7,CC8,CC9):\r\n\r\n    # thresholds at the current time step\r\n    d_x = forehead_dis - cur_dis - Len_Of_Car\r\n    d_v = forehead_spe - cur_spe\r\n    RND=np.random.uniform(-0.5,0.5,1)[0]\r\n    if d_x>0 or forehead_acc<-1:\r\n        slower=cur_spe\r\n    else:\r\n        slower=forehead_spe-d_v*RND\r\n    SDXc = CC0 + CC1 * slower\r\n    SDV=CC6*math.pow(d_x-Len_Of_Car,2)\r\n    SDXo=SDXc+CC2\r\n    SDXv=SDXo+CC3*(d_v-CC4)\r\n    if forehead_spe>0:\r\n        CLDV=-SDV+CC4\r\n    else:\r\n        CLDV=0\r\n    if cur_spe> CC5:\r\n        OPDV=SDV+CC5\r\n    else:\r\n        OPDV=SDV\r\n\r\n    return d_x,d_v,RND,slower,SDXc,SDV,SDXo,SDXv,CLDV,OPDV\r\n\r\n# Wiedemann: compute the real-time acceleration\r\ndef get_Wiedemann99_acc(last_acc,forehead_acc,forehead_cur_spe,\r\n                        CC0,CC1,CC2,CC3,CC4,CC5,CC6,CC7,CC8,CC9,VDES,Len_Of_Car,\r\n                        d_x,d_v,RND,slower,SDXc,SDV,SDXo,SDXv,CLDV,OPDV):\r\n    acc = 0\r\n    if d_v < OPDV and d_x <= SDXc:\r\n        # emergency braking\r\n        if cur_spe > 0 and d_v < 0:\r\n            if d_x > CC0:\r\n                acc = min(forehead_acc + math.pow(d_v, 2) / (CC0 - d_x), last_acc)\r\n            else:\r\n                acc = min(forehead_acc + 0.5 * (d_v - OPDV), last_acc)\r\n            if acc > -CC7:\r\n                acc = -CC7\r\n            else:\r\n                acc = max(acc, -10 + 0.5 * math.sqrt(cur_spe))\r\n    else:\r\n        # closing-in regime\r\n        if d_v < CLDV and d_x <= SDXv:\r\n            acc = max(math.pow(d_v, 2) / (2 * (SDXc - d_x - 0.1)), -10)\r\n        else:\r\n            if d_v < OPDV and d_x <= SDXo:\r\n                # car-following regime\r\n                if last_acc <= 0:\r\n                    acc = min(last_acc, -CC7)\r\n                else:\r\n                    acc = max(last_acc, CC7)\r\n                acc = min(acc, VDES - cur_spe)\r\n            else:\r\n                # free-flow regime\r\n                if d_x > SDXc:\r\n                    if cur_spe > VDES:\r\n                        acc = CC7\r\n                    else:\r\n                        amax = CC8 + 0.1 * CC9 * min(cur_spe, 22.2) + random.uniform(0, 1)\r\n                        if d_x < SDXo:\r\n                            acc = min(math.pow(d_v, 2) / (SDXo - d_x), amax)\r\n                        else:\r\n                            acc = amax\r\n                        acc = min(acc, VDES - cur_spe)\r\n\r\n    return acc\r\n\r\n\r\n\r\n#—————————————————————————————————————————————— intermediate variables and functions (generic) 
————————————————————————————————————————————-\r\n\r\n# scenario 2 real-time outputs: speed, acceleration, headway and distance travelled\r\nacceleration_list,speed_list,head_space_list,absolute_distance_list,time_list=[],[],[],[],[]\r\n\r\ntime=0.0\r\ntime_interval=0.01\r\n\r\n# scenario start times\r\nini_first_time=100\r\nini_second_time=150\r\n\r\n# parameters of the first (lead) vehicle\r\nforehead_acc_increase=1\r\nforehead_fir_car_speed=15\r\nforehead_sec_car_speed=5\r\nforehead_sec_car_de_speed=25\r\nforehead_total_time=ini_second_time+(forehead_sec_car_de_speed-forehead_sec_car_speed)/forehead_acc_increase\r\n\r\n# cut-in gap of the lead vehicle\r\nforehead_cut_in=20\r\n\r\n# initial gap between the two vehicles\r\nori_head_space=100\r\n\r\n# initial speed of the following vehicle\r\nori_spe=10\r\n\r\npath=\"C:\\\\Users\\\\cc_01\\\\Desktop\\\\基准分析\\\\数值仿真\\\\数值仿真\\\\场景2,跟驰,被抢道,跟驰,两辆车\\\\输出文件\\\\统计分析,version-8\\\\Wiedemann-cut_in=10.xlsx\"\r\n\r\n# compute the lead vehicle's real-time speed and acceleration\r\ndef get_forehead_spe_acc(time,ini_first_time,ini_second_time,acc_increase,\r\n                         fir_car_speed,sec_car_speed,sec_car_de_speed):\r\n    if time>=0 and time<ini_first_time:\r\n        return fir_car_speed,0\r\n    elif time>=ini_first_time and time <ini_second_time:\r\n        return sec_car_speed,0\r\n    elif time>=ini_second_time and \\\r\n            time <=(ini_second_time+(sec_car_de_speed-sec_car_speed)/acc_increase):\r\n        return sec_car_speed+acc_increase*(time-ini_second_time),acc_increase\r\n\r\n#—————————————————————————————————————————————— intermediate variables and functions (generic) ————————————————————————————————————————————-\r\n\r\n\r\n\r\n# ———————————————— temporary parameter values ————————————————————#\r\n\r\n# following vehicle's acceleration, speed, headway and travelled distance at this and the previous time step\r\ncur_acc,cur_spe,cur_head_space,absolute_distance=0,ori_spe,ori_head_space,0\r\n\r\n# distance travelled by the lead vehicle\r\nforehead_dis=0\r\nforehead_dis_stage1=ori_head_space\r\nforehead_dis_stage2=0\r\n\r\n# model-internal parameters\r\ncur_des_spa=0\r\nlast_spe=cur_spe\r\n\r\n# record the values at the initial time step\r\nacceleration_list.append(cur_acc)\r\nspeed_list.append(cur_spe)\r\nhead_space_list.append(cur_head_space)\r\nabsolute_distance_list.append(absolute_distance)\r\ntime_list.append(time)\r\n\r\n# ———————————————— temporary parameter values ————————————————————#\r\n\r\nwhile time <=forehead_total_time:\r\n\r\n    # lead vehicle's acceleration and speed\r\n    forehead_spe,forehead_acc = get_forehead_spe_acc(round(time, 2), ini_first_time, ini_second_time,\r\n                                                     forehead_acc_increase,\r\n                                                     forehead_fir_car_speed, forehead_sec_car_speed,\r\n                                                     forehead_sec_car_de_speed)\r\n    time+=0.01\r\n\r\n    print(forehead_spe)\r\n    # absolute distance of the lead vehicle\r\n    if round(time, 2) < ini_first_time:\r\n        forehead_dis_stage1+=forehead_spe*time_interval\r\n        forehead_dis=forehead_dis_stage1\r\n    elif round(time, 2) > ini_first_time:\r\n        forehead_dis_stage2+=forehead_spe*time_interval\r\n        forehead_dis=forehead_dis_stage2\r\n\r\n    if round(time, 2)> forehead_total_time:\r\n        break\r\n\r\n\r\n    # update the following vehicle's thresholds\r\n    d_x, d_v, RND, slower, SDXc, SDV, SDXo, SDXv, CLDV, OPDV = \\\r\n        get_Wiedemann99_Threhold(forehead_dis, forehead_acc, forehead_spe, absolute_distance, cur_spe,\r\n                                 CC0, CC1, CC2, CC3, CC4, CC5, CC6, CC7, CC8, CC9)\r\n    # get the real-time acceleration\r\n    cur_acc = get_Wiedemann99_acc(last_acc, forehead_acc, forehead_spe,\r\n                                  CC0, CC1, CC2, CC3, CC4, CC5, CC6, CC7, CC8, CC9, VDES, Len_Of_Car,\r\n                                  d_x, d_v, RND, slower, SDXc, SDV, SDXo, SDXv, CLDV, OPDV)\r\n\r\n    cur_spe = cur_spe + cur_acc * time_interval\r\n    absolute_distance += cur_spe * time_interval\r\n    last_spe = cur_spe\r\n    # update the headway\r\n    cur_head_space=forehead_dis-absolute_distance\r\n    acceleration_list.append(cur_acc)\r\n    speed_list.append(cur_spe)\r\n    head_space_list.append(cur_head_space) # headway between the two vehicles\r\n    absolute_distance_list.append(absolute_distance) # absolute distance of the following vehicle\r\n    time_list.append(time)\r\n    print(\"----------------------------------------------------\")\r\n    print(\"acceleration \" + str(cur_acc))\r\n    print(\"speed \" + str(cur_spe))\r\n    print(\"relative distance \" + str(cur_head_space))\r\n    print(\"simulation time \" + 
str(round(time,2)))\r\n\r\ndata=pd.DataFrame({\"simulation time\":time_list,\"Wiedemann acceleration\":acceleration_list,\"Wiedemann speed\":speed_list,\r\n                   \"Wiedemann headway\":head_space_list,\"Wiedemann absolute distance\":absolute_distance_list})\r\n\r\ndata.to_excel(path)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"iyangli/Car-following-Model-Benchmark","sub_path":"Scenario 2/Wiedemann.py","file_name":"Wiedemann.py","file_ext":"py","file_size_in_byte":9129,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"18918650348","text":"import argparse\nfrom pybedtools import BedTool\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"bed12\")\n    parser.add_argument(\"introns\")\n    args = parser.parse_args()\n\n    bed = BedTool(args.bed12)\n    introns = bed.introns()\n    introns.remove_invalid().saveas(args.introns)\n","repo_name":"huddlej/fasta_tools","sub_path":"refGene_bed12_to_introns.py","file_name":"refGene_bed12_to_introns.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"ur","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20378193237","text":"# WRITE YOUR SOLUTION HERE:\nclass ListHelper:\n    \n    @classmethod\n    def greatest_frequency(cls, my_list: list):\n        common_char = \"\"\n        count = 0\n        for char in my_list:\n            if my_list.count(char) > count:\n                count = my_list.count(char)\n                common_char = char\n        \n        return common_char\n    \n    @classmethod\n    def doubles(cls, my_list: list):\n        doubles_list = []\n        for char in my_list:\n            if my_list.count(char) >= 2 and char not in doubles_list:\n                doubles_list.append(char)\n\n        return len(doubles_list)\n\nif __name__ == \"__main__\":\n    numbers = [1, 1, 2, 1, 3, 3, 4, 5, 5, 5, 6, 5, 5, 5]\n    print(ListHelper.greatest_frequency(numbers))\n    print(ListHelper.doubles(numbers))","repo_name":"Athooh/mooc-programming-23","sub_path":"part09-14_list_helper/src/list_helper.py","file_name":"list_helper.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"582822238","text":"import tensorflow as tf\nimport tqdm\nfrom one_shot_learning_relation_network import RelationNetwork\nimport pdb\n\n\nclass ExperimentBuilder:\n\n    def __init__(self, dataTrain, dataTest, batch_size):\n        \"\"\"\n        Initializes an ExperimentBuilder object. 
The ExperimentBuilder object takes care of setting up our experiment\n and provides helper functions such as run_training_epoch and run_validation_epoch to simplify out training\n and evaluation procedures.\n :param data: A data provider class\n \"\"\"\n # self.data = data\n self.dataTrain = dataTrain\n # self.dataVal = dataVal\n self.dataTest = dataTest\n self.batch_size = batch_size\n\n def build_experiment(self, batch_size, classes_per_set, samples_per_class, fce):\n\n \"\"\"\n\n :param batch_size: The experiment batch size\n :param classes_per_set: An integer indicating the number of classes per support set\n :param samples_per_class: An integer indicating the number of samples per class\n :param channels: The image channels\n :param fce: Whether to use full context embeddings or not\n :return: a matching_network object, along with the losses, the training ops and the init op\n \"\"\"\n # height, width, channels = self.data.x.shape[2], self.data.x.shape[3], self.data.x.shape[4]\n height, width, channels = 224, 224, 3\n self.support_set_images = tf.placeholder(tf.float32, [classes_per_set, samples_per_class, height, width,\n channels], 'support_set_images')\n self.support_set_labels = tf.placeholder(tf.int32, [classes_per_set, samples_per_class], 'support_set_labels')\n self.target_image = tf.placeholder(tf.float32, [classes_per_set , height, width, channels], 'target_image')\n self.target_label = tf.placeholder(tf.int32, [classes_per_set], 'target_label')\n self.training_phase = tf.placeholder(tf.bool, name='training-flag')\n self.rotate_flag = tf.placeholder(tf.bool, name='rotate-flag')\n self.keep_prob = tf.placeholder(tf.float32, name='dropout-prob')\n self.current_learning_rate = 5e-05\n self.learning_rate = tf.placeholder(tf.float32, name='learning-rate-set')\n self.one_shot_omniglot = RelationNetwork(batch_size=batch_size, support_set_images=self.support_set_images,\n support_set_labels=self.support_set_labels,\n target_image=self.target_image, target_label=self.target_label,\n keep_prob=self.keep_prob, num_channels=channels,\n is_training=self.training_phase, fce=fce, rotate_flag=self.rotate_flag,\n num_classes_per_set=classes_per_set,\n num_samples_per_class=samples_per_class,\n learning_rate=self.learning_rate)\n\n summary, self.losses, self.c_error_opt_op = self.one_shot_omniglot.init_train()\n init = tf.global_variables_initializer()\n self.total_train_iter = 0\n return self.one_shot_omniglot, self.losses, self.c_error_opt_op, init, summary\n\n def run_training_epoch(self, total_train_batches, sess):\n \"\"\"\n Runs one training epoch\n :param total_train_batches: Number of batches to train on\n :param sess: Session object\n :return: mean_training_categorical_crossentropy_loss and mean_training_accuracy\n \"\"\"\n total_c_loss = 0.\n total_accuracy = 0.\n # pdb.set_trace()\n with tqdm.tqdm(total=total_train_batches) as pbar:\n\n for i in range(total_train_batches): # train epoch\n x_support_set, y_support_set, x_target, y_target = self.dataTrain.get_batch(self.batch_size,\n shuffle=True)\n _, c_loss_value, acc = sess.run(\n [self.c_error_opt_op, self.losses[self.one_shot_omniglot.classify],\n self.losses[self.one_shot_omniglot.dn]],\n feed_dict={self.keep_prob: 0.5, self.support_set_images: x_support_set,\n self.support_set_labels: y_support_set, self.target_image: x_target,\n self.target_label: y_target,\n self.training_phase: True, self.rotate_flag: False,\n self.learning_rate: self.current_learning_rate})\n # print('similarities is',sim[:15])\n\n iter_out = \"train_loss: 
{}, train_accuracy: {}\".format(c_loss_value, acc)\n pbar.set_description(iter_out)\n# \n pbar.update(1)\n total_c_loss += c_loss_value\n total_accuracy += acc\n self.total_train_iter += 1\n # if self.total_train_iter % 2000 == 0:\n # self.current_learning_rate /= 2\n # print(\"change learning rate\", self.current_learning_rate)\n\n # if 0:\n # # pdb.set_trace()\n # loss, pred, sim, tar_label = sess.run(\n # [self.losses[self.one_shot_omniglot.classify],\n # self.one_shot_omniglot.preds, self.one_shot_omniglot.similarities,\n # self.one_shot_omniglot.target_label],\n # feed_dict={self.keep_prob: 0.5, self.support_set_images: x_support_set,\n # self.support_set_labels: y_support_set, self.target_image: x_target,\n # self.target_label: y_target,\n # self.training_phase: True, self.rotate_flag: False,\n # self.learning_rate: self.current_learning_rate})\n # pdb.set_trace()\n total_c_loss = total_c_loss / total_train_batches\n total_accuracy = total_accuracy / total_train_batches\n return total_c_loss, total_accuracy\n\n # def run_testing_epoch(self, total_val_batches, sess):\n \"\"\"\n Runs one validation epoch\n :param total_val_batches: Number of batches to train on\n # :param sess: Session object\n # :return: mean_validation_categorical_crossentropy_loss and mean_validation_accuracy\n # \"\"\"\n # total_val_c_loss = 0.\n # total_val_accuracy = 0.\n # # pdb.set_trace()\n # # with tqdm.tqdm(total=total_val_batches) as pbar:\n # for i in range(total_val_batches): # validation epoch\n # x_support_set, y_support_set, x_target, y_target = self.dataTest.get_batch(self.batch_size, shuffle=True)\n # c_loss_value, acc = sess.run(\n # [self.losses[self.one_shot_omniglot.classify], self.losses[self.one_shot_omniglot.dn]],\n # feed_dict={self.keep_prob: 1.0, self.support_set_images: x_support_set,\n # self.support_set_labels: y_support_set, self.target_image: x_target,\n # self.target_label: y_target,\n # self.training_phase: False, self.rotate_flag: False})\n\n # iter_out = \"val_loss: {}, val_accuracy: {}\".format(c_loss_value, acc)\n # # pbar.set_description(iter_out)\n # # pbar.update(1)\n\n # total_val_c_loss += c_loss_value\n # total_val_accuracy += acc\n\n # total_val_c_loss = total_val_c_loss / total_val_batches\n # total_val_accuracy = total_val_accuracy / total_val_batches\n\n # return total_val_c_loss, total_val_accuracy\n\n def run_testing_epoch(self, total_test_batches, sess):\n \"\"\"\n Runs one testing epoch\n :param total_test_batches: Number of batches to train on\n :param sess: Session object\n :return: mean_testing_categorical_crossentropy_loss and mean_testing_accuracy\n \"\"\"\n total_test_c_loss = 0.\n total_test_accuracy = 0.\n with tqdm.tqdm(total=total_test_batches) as pbar:\n for i in range(total_test_batches):\n x_support_set, y_support_set, x_target, y_target = self.dataTest.get_batch(self.batch_size,\n shuffle=False)\n c_loss_value, acc = sess.run(\n [self.losses[self.one_shot_omniglot.classify], self.losses[self.one_shot_omniglot.dn]],\n feed_dict={self.keep_prob: 1.0, self.support_set_images: x_support_set,\n self.support_set_labels: y_support_set, self.target_image: x_target,\n self.target_label: y_target,\n self.training_phase: False, self.rotate_flag: False})\n\n iter_out = \"test_loss: {}, test_accuracy: {}\".format(c_loss_value, acc)\n pbar.set_description(iter_out)\n pbar.update(1)\n\n total_test_c_loss += c_loss_value\n total_test_accuracy += acc\n total_test_c_loss = total_test_c_loss / total_test_batches\n total_test_accuracy = total_test_accuracy / 
total_test_batches\n return total_test_c_loss, total_test_accuracy\n","repo_name":"minweiqing/Few-Shot-Food-Recognition-via-Multi-View-Representation","sub_path":"MVFSL-TC/experiment_builder_food.py","file_name":"experiment_builder_food.py","file_ext":"py","file_size_in_byte":9402,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"24221619503","text":"\"\"\"\nCreated on 20 juin 2017\n\n@author: francois\n\"\"\"\n\nclass EstimationMethodValue():\n PROPERTY_ESTIMATION_METHOD = [\n {\n \"property_name\": \"Cost\",\n \"estimation_methods\": [\"Cost estimation\", \"Expert estimate float\"]\n },\n {\n \"property_name\": \"Development effort\",\n \"estimation_methods\": [\"Intermediate COCOMO\", \"Basic COCOMO\", \"Expert estimate float\"]\n },\n {\n \"property_name\": \"KLOC\",\n \"estimation_methods\": [\"Expert estimate float\"]\n }, \n {\n \"property_name\": \"Worst case execution time\",\n \"estimation_methods\": [\"Expert estimate integer\"]\n }\n ]\n \n def __init__(self, properties_estimation):\n self.properties_estimation = properties_estimation\n \n @classmethod\n def build_from_web_page(cls, driver):\n table_element = driver.find_element_by_class_name(\"properties_estimation_table\") \n\n tr_elements = table_element.find_elements_by_tag_name(\"tr\")\n th_alternatives = tr_elements[0].find_elements_by_tag_name(\"th\")\n alternatives_name_list = [cell.text for cell in th_alternatives[2:]]\n \n properties_estimation = []\n i = 1\n while i < len(tr_elements):\n (th_property, th_estimation_method) = tr_elements[i].find_elements_by_tag_name(\"th\")\n property_name = th_property.text\n properties_estimation.append({\"property_name\": property_name, \"estimation_methods\": []})\n \n estimation_method_name = th_estimation_method.text\n properties_estimation[-1][\"estimation_methods\"].append({\"estimation_method_name\": estimation_method_name,\n \"estimation_method_values\": []})\n cls._add_value_from_web_page(cls, tr_elements[i], alternatives_name_list, properties_estimation)\n estimation_methods_number = int(th_property.get_attribute(\"rowspan\"))\n j = i + 1\n while j < i + estimation_methods_number:\n estimation_method_name = tr_elements[j].find_element_by_tag_name(\"th\").text\n properties_estimation[-1][\"estimation_methods\"].append({\"estimation_method_name\": estimation_method_name,\n \"estimation_method_values\": []})\n cls._add_value_from_web_page(cls, tr_elements[j], alternatives_name_list, properties_estimation)\n j += 1\n \n i += estimation_methods_number\n \n return cls(properties_estimation)\n \n def _add_value_from_web_page(self, current_tr_element, alternatives_name_list, properties_estimation):\n estimation_method_values = properties_estimation[-1][\"estimation_methods\"][-1][\"estimation_method_values\"]\n td_estimations = current_tr_element.find_elements_by_tag_name(\"td\")\n for i, estimation_cell in enumerate(td_estimations):\n try:\n estimation_value = float(estimation_cell.text.strip())\n except ValueError:\n estimation_value = estimation_cell.text.strip()\n up_to_date = not (\"out_of_date\" in estimation_cell.get_attribute(\"class\"))\n \n estimation_method_values.append({\"alternative_name\": alternatives_name_list[i],\n \"up_to_date\": up_to_date,\n \"value\": estimation_value})\n \n @classmethod\n def build_expected_result(cls, alternatives_name_list, values = []):\n properties_estimation = []\n for property_ in cls.PROPERTY_ESTIMATION_METHOD:\n estimation_methods = []\n for estimation_method_name in 
property_[\"estimation_methods\"]:\n estimation_method_values = []\n for alternative_name in alternatives_name_list:\n estimation_method_values.append({\"alternative_name\": alternative_name, \"value\": \"\", \"up_to_date\": True})\n estimation_methods.append({\"estimation_method_name\": estimation_method_name, \n \"estimation_method_values\": estimation_method_values})\n properties_estimation.append({\"property_name\": property_[\"property_name\"],\n \"estimation_methods\": estimation_methods})\n \n instance = cls(properties_estimation)\n instance.add_values(values)\n return instance\n \n def add_value(self, alternative_name, property_name, estimation_method_name, value, up_to_date = True):\n property_ = self._find_dictionary_in_list(self.properties_estimation, \"property_name\", property_name)\n for estimation_method in property_[\"estimation_methods\"]:\n if estimation_method[\"estimation_method_name\"] == estimation_method_name:\n estimation = self._find_dictionary_in_list(estimation_method[\"estimation_method_values\"], \"alternative_name\", alternative_name)\n estimation[\"value\"] = value\n estimation[\"up_to_date\"] = up_to_date\n else:\n estimation = self._find_dictionary_in_list(estimation_method[\"estimation_method_values\"], \"alternative_name\", alternative_name)\n if estimation[\"value\"] == \"\":\n estimation[\"value\"] = \"---\"\n \n def add_values(self, values):\n for value in values:\n try:\n (alternative_name, property_name, estimation_method_name, value) = value\n up_to_date = True\n except ValueError:\n (alternative_name, property_name, estimation_method_name, value, up_to_date) = value\n \n if value == \"\":\n raise RuntimeError(\"The test can fail if an estimation is an empty string, whereas the application is ok:\\n\" +\n \"If another estimation with the same alternative and property but different estimation method \" +\n \"has been computed after, this value will wrongly be replaced by '---' in the expected result.\")\n self.add_value(alternative_name, property_name, estimation_method_name, value, up_to_date)\n \n \n def __eq__(self, other, self_name=\"self\", other_name=\"other\", verbose=True):\n if not isinstance(other, EstimationMethodValue):\n if verbose:\n print(\"{0} is not an EstimationMethodValue.\".format(other_name))\n return False\n \n if len(self.properties_estimation) != len(other.properties_estimation):\n if verbose:\n print(\"There are {0} properties in {1}, and {2} in {3}\".format(len(self.properties_estimation), self_name,\n len(other.properties_estimation), other_name))\n return False\n \n for i, self_property in enumerate(self.properties_estimation):\n other_property = other.properties_estimation[i]\n if len(self_property) != 2 or len(other_property) != 2:\n raise RuntimeError(\"Malformed object EstimationMethod : a property must have exactly 2 keys\")\n if self_property[\"property_name\"] != other_property[\"property_name\"]:\n if verbose:\n print(\"Properties differ between {0} and {1}. First different property: {2} in {0}, {3} in {1}.\"\n .format(self_name, other_name, self_property[\"property_name\"], other_property[\"property_name\"]) )\n return False\n \n self_estimation_methods_list = self_property[\"estimation_methods\"]\n other_estimation_methods_list = other_property[\"estimation_methods\"]\n if len(self_estimation_methods_list) != len(other_estimation_methods_list):\n if verbose:\n print((\"There is not the same number of estimation method for the property {0} in {1} and {2}. 
In {1}: {3}, \" +\n \"in {2}: {4}.\").format(self_property[\"property_name\"], self_name, other_name, len(self_estimation_methods_list), \n len(other_estimation_methods_list)))\n return False\n \n for self_estimation_method in self_estimation_methods_list:\n try:\n other_estimation_method = self._find_dictionary_in_list(other_estimation_methods_list, \"estimation_method_name\", \n self_estimation_method[\"estimation_method_name\"])\n except KeyError:\n if verbose:\n print(\"The estimation method {0} is not in {1}'s property {2}\".format(self_estimation_method[\"estimation_method_name\"],\n other_name, other_property[\"property_name\"]))\n return False\n if len(self_estimation_method) != 2 or len(other_estimation_method) != 2:\n raise RuntimeError(\"Malformed object EstimationMethod : an estimation method must have exactly 2 keys\")\n \n self_estimation_method_values_list = self_estimation_method[\"estimation_method_values\"]\n other_estimation_method_values_list = other_estimation_method[\"estimation_method_values\"]\n if len(self_estimation_method_values_list) != len(other_estimation_method_values_list):\n if verbose:\n print((\"There is not the same number of values for the property {0} and the estimation method {1} in {2} and {3}. \" +\n \"In {2}: {4}, in {3}: {5}\").format(self_property[\"property_name\"], \n self_estimation_method[\"estimation_method_name\"], self_name, other_name,\n len(self_estimation_method_values_list), \n len(other_estimation_method_values_list)))\n return False\n \n for self_estimation_method_value in self_estimation_method_values_list:\n try:\n other_estimation_method_value = self._find_dictionary_in_list(other_estimation_method_values_list, \n \"alternative_name\", \n self_estimation_method_value[\"alternative_name\"])\n except KeyError:\n if verbose:\n print(\"There is no estimation for ({0}, {1}, {2}) in {3}\"\n .format(self_estimation_method_value[\"alternative_name\"], self_property[\"property_name\"],\n self_estimation_method[\"estimation_method_name\"], other_name))\n return False\n if len(self_estimation_method_value) != 3 or len(other_estimation_method_value) != 3:\n raise RuntimeError(\"Malformed object EstimationMethod : an estimation method value must have exactly 3 keys\")\n \n if self_estimation_method_value[\"value\"] != other_estimation_method_value[\"value\"]:\n if verbose:\n print(\"The value of the estimation ({0}, {1}, {2}) is {3} in {4} and {5} in {6}\"\n .format(self_estimation_method_value[\"alternative_name\"], self_property[\"property_name\"],\n self_estimation_method[\"estimation_method_name\"], self_estimation_method_value[\"value\"],\n self_name, other_estimation_method_value[\"value\"], other_name))\n return False\n if self_estimation_method_value[\"up_to_date\"] != other_estimation_method_value[\"up_to_date\"]:\n if verbose:\n print(\"The estimation ({0}, {1}, {2}) has a up-to-date property {3} in {4} and {5} in {6}\"\n .format(self_estimation_method_value[\"alternative_name\"], self_property[\"property_name\"],\n self_estimation_method[\"estimation_method_name\"], self_estimation_method_value[\"up_to_date\"],\n self_name, other_estimation_method_value[\"up_to_date\"], other_name))\n return False\n \n return True\n \n def __str__(self):\n return str(self.properties_estimation)\n \n @classmethod\n def _find_dictionary_in_list(cls, dictionary_list, key_name, value):\n for dictionary in dictionary_list:\n if dictionary[key_name] == value:\n return dictionary\n raise KeyError(\"Dictionary with the property \" + key_name + \" equals to 
\" + value + \" not found.\")\n \n @classmethod\n def get_expected_properties_name_list(cls):\n return [p[\"property_name\"] for p in cls.PROPERTY_ESTIMATION_METHOD]\n \n @classmethod\n def get_expected_estimation_methods_name_list(cls, property_name):\n property_dictionary = cls._find_dictionary_in_list(cls.PROPERTY_ESTIMATION_METHOD, \"property_name\", property_name)\n return property_dictionary[\"estimation_methods\"]\n \n \n \n \n ","repo_name":"orion-research/coach","sub_path":"COACH/test/test_global/EstimationMethodValue.py","file_name":"EstimationMethodValue.py","file_ext":"py","file_size_in_byte":13753,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"16773827205","text":"import os\n\nfrom twitter_sentence_generator.extractors.file_words_extractor import FileWordsExtractor\nfrom twitter_sentence_generator.transformers.random_hashtag_transformer import RandomHashtagTransformer\nfrom twitter_sentence_generator.utils.markov_sentence_generator import build_mapping, build_sentence\n\nSCRUBBED_FILE = os.environ.get('SCRUBBED_FILE', './resources/twentyonepilots_scrubbed.txt')\nCHAIN_LENGTH = os.environ.get('MARKOV_CHAIN_LENGTH', '3')\nMAX_HASHTAGS = int(os.environ.get('TWITTER_MAX_HASHTAGS', '1'))\n\n\ndef _generate_sentence(*, chain_length=CHAIN_LENGTH):\n return build_sentence(int(chain_length))\n\n\ndef generate_sentence(*, file_name=SCRUBBED_FILE, chain_length=CHAIN_LENGTH, twitter_hashtags=None):\n with open(file_name, 'r') as file_handler:\n lines = [line.rstrip() for line in file_handler]\n\n file_words_extractor = FileWordsExtractor(file_name)\n words = file_words_extractor.extract_words()\n\n build_mapping(words, int(chain_length))\n\n while True:\n sentence = _generate_sentence(chain_length=chain_length)\n if len(sentence) > RandomHashtagTransformer.MAX_TWEET_LENGTH:\n continue\n\n is_not_unique = [line for line in lines if sentence == line]\n if is_not_unique:\n continue\n\n random_hashtag_transformer = RandomHashtagTransformer(sentence, twitter_hashtags)\n random_hashtag_transformer.append_random_hashtag()\n sentence = random_hashtag_transformer.tweet\n break\n\n return sentence\n\n\nif __name__ == \"__main__\":\n file_name = SCRUBBED_FILE.split('/')[-1]\n file_path = f'../../resources/{file_name}'\n for _ in range(10):\n print(generate_sentence(file_name=file_path))\n","repo_name":"DEV3L/python-heroku-twitter-tweet-generator","sub_path":"twitter_sentence_generator/utils/sentence_generator.py","file_name":"sentence_generator.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1339777355","text":"\"\"\"Common preprocessing functions for audio data.\"\"\"\nimport functools\nimport logging\nfrom pathlib import Path\nfrom typing import Callable, List, Optional, Tuple, Union\n\nimport torch\nimport torchaudio\nfrom torchaudio.functional import apply_codec\n\nfrom dfadetect.lfcc import LFCC\nfrom dfadetect.utils import find_wav_files\n\nLOGGER = logging.getLogger(__name__)\n\n\nSOX_SILENCE = [\n # trim all silence that is longer than 0.2s and louder than 1% volume (relative to the file)\n # from beginning and middle/end\n [\"silence\", \"1\", \"0.2\", \"1%\", \"-1\", \"0.2\", \"1%\"],\n]\n\n\nclass AudioDataset(torch.utils.data.Dataset):\n \"\"\"Torch dataset to load data from a provided directory.\n\n Args:\n directory_or_path_list: Path to the directory containing wav files to load. 
Or a list of paths.\n    Raises:\n        IOError: If the directory does not exist or the directory did not contain any wav files.\n    \"\"\"\n\n    def __init__(\n        self,\n        directory_or_path_list: Union[Union[str, Path], List[Union[str, Path]]],\n        sample_rate: int = 16_000,\n        amount: Optional[int] = None,\n        normalize: bool = True,\n        trim: bool = True,\n        phone_call: bool = False,\n    ) -> None:\n        super().__init__()\n\n        self.trim = trim\n        self.sample_rate = sample_rate\n        self.normalize = normalize\n        self.phone_call = phone_call\n\n        if isinstance(directory_or_path_list, list):\n            paths = directory_or_path_list\n        elif isinstance(directory_or_path_list, Path) \\\n                or isinstance(directory_or_path_list, str):\n            directory = Path(directory_or_path_list)\n            if not directory.exists():\n                raise IOError(f\"Directory does not exist: {directory}\")\n\n            paths = find_wav_files(directory)\n            if paths is None:\n                raise IOError(\n                    f\"Directory did not contain wav files: {directory}\")\n        else:\n            raise TypeError(\n                f\"Supplied unsupported type for argument directory_or_path_list {type(directory_or_path_list)}!\")\n\n        if amount is not None:\n            paths = paths[:amount]\n\n        self._paths = paths\n\n    def __getitem__(self, index: int) -> Tuple[torch.Tensor, int]:\n        path = self._paths[index]\n\n        waveform, sample_rate = torchaudio.load(path, normalize=self.normalize)\n\n        if sample_rate != self.sample_rate:\n            waveform, sample_rate = torchaudio.sox_effects.apply_effects_file(\n                path, [[\"rate\", f\"{self.sample_rate}\"]], normalize=self.normalize)\n\n        if self.trim:\n            waveform_trimmed, sample_rate_trimmed = torchaudio.sox_effects.apply_effects_tensor(\n                waveform, sample_rate, SOX_SILENCE)\n\n            if waveform_trimmed.size()[1] > 0:\n                waveform = waveform_trimmed\n                sample_rate = sample_rate_trimmed\n\n        if self.phone_call:\n            waveform, sample_rate = torchaudio.sox_effects.apply_effects_tensor(\n                waveform,\n                sample_rate,\n                effects=[\n                    [\"lowpass\", \"4000\"],\n                    [\"compand\", \"0.02,0.05\",\n                     \"-60,-60,-30,-10,-20,-8,-5,-8,-2,-8\", \"-8\", \"-7\", \"0.05\"],\n                    [\"rate\", \"8000\"],\n                ],\n            )\n            waveform = apply_codec(waveform, sample_rate, format=\"gsm\")\n\n        return waveform, sample_rate\n\n    def __len__(self) -> int:\n        return len(self._paths)\n\n\nclass PadDataset(torch.utils.data.Dataset):\n\n    def __init__(self, dataset: torch.utils.data.Dataset, cut: int = 64600, label=None):\n        self.dataset = dataset\n        self.cut = cut  # max 4 sec (ASVSpoof default)\n        self.label = label\n\n    def __getitem__(self, index):\n        waveform, sample_rate = self.dataset[index]\n        waveform = waveform.squeeze(0)\n        waveform_len = waveform.shape[0]\n        if waveform_len >= self.cut:\n            if self.label is None:\n                return waveform[:self.cut], sample_rate\n            else:\n                return waveform[:self.cut], sample_rate, self.label\n        # need to pad\n        num_repeats = int(self.cut / waveform_len)+1\n        padded_waveform = torch.tile(waveform, (1, num_repeats))[\n            :, :self.cut][0]\n\n        if self.label is None:\n            return padded_waveform, sample_rate\n        else:\n            return padded_waveform, sample_rate, self.label\n\n    def __len__(self):\n        return len(self.dataset)\n\n\nclass TransformDataset(torch.utils.data.Dataset):\n    \"\"\"A generic transformation dataset.\n\n    Takes another dataset as input, which provides the base input.\n    When retrieving an item from the dataset, the provided transformation gets applied.\n\n    Args:\n        dataset: A dataset which returns a (waveform, sample_rate)-pair.\n        transformation: The torchaudio transformation to use.\n        needs_sample_rate: Does the transformation need the sampling rate?\n        transform_kwargs: Kwargs 
for the transformation.\n \"\"\"\n\n def __init__(\n self,\n dataset: torch.utils.data.Dataset,\n transformation: Callable,\n needs_sample_rate: bool = False,\n transform_kwargs: dict = {},\n ) -> None:\n super().__init__()\n self._dataset = dataset\n\n self._transform_constructor = transformation\n self._needs_sample_rate = needs_sample_rate\n self._transform_kwargs = transform_kwargs\n\n self._transform = None\n\n def __len__(self):\n return len(self._dataset)\n\n def __getitem__(self, index: int) -> Tuple[torch.Tensor, int]:\n waveform, sample_rate = self._dataset[index]\n\n if self._transform is None:\n if self._needs_sample_rate:\n self._transform = self._transform_constructor(\n sample_rate, **self._transform_kwargs)\n else:\n self._transform = self._transform_constructor(\n **self._transform_kwargs)\n\n return self._transform(waveform), sample_rate\n\n\nclass DoubleDeltaTransform(torch.nn.Module):\n \"\"\"A transformation to compute delta and double delta features.\n\n Args:\n win_length (int): The window length to use for computing deltas (Default: 5).\n mode (str): Mode parameter passed to padding (Default: replicate).\n \"\"\"\n\n def __init__(\n self,\n win_length: int = 5,\n mode: str = \"replicate\"\n ) -> None:\n super().__init__()\n self.win_length = win_length\n self.mode = mode\n\n self._delta = torchaudio.transforms.ComputeDeltas(\n win_length=self.win_length, mode=self.mode)\n\n def forward(self, X):\n \"\"\"\n Args:\n specgram (Tensor): Tensor of audio of dimension (..., freq, time).\n Returns:\n Tensor: specgram, deltas and double deltas of size (..., 3*freq, time).\n \"\"\"\n delta = self._delta(X)\n double_delta = self._delta(delta)\n\n return torch.hstack((X, delta, double_delta))\n\n\n# =====================================================================\n# Helper functions.\n# =====================================================================\n\n\ndef _build_preprocessing(\n directory_or_audiodataset: Union[Union[str, Path], AudioDataset],\n transform: torch.nn.Module,\n audiokwargs: dict = {},\n transformkwargs: dict = {},\n) -> TransformDataset:\n \"\"\"Generic function template for building preprocessing functions.\n \"\"\"\n if isinstance(directory_or_audiodataset, AudioDataset) or isinstance(directory_or_audiodataset, PadDataset):\n return TransformDataset(dataset=directory_or_audiodataset,\n transformation=transform,\n needs_sample_rate=True,\n transform_kwargs=transformkwargs)\n elif isinstance(directory_or_audiodataset, str) or isinstance(directory_or_audiodataset, Path):\n return TransformDataset(dataset=AudioDataset(directory=directory_or_audiodataset, **audiokwargs),\n transformation=transform,\n needs_sample_rate=True,\n transform_kwargs=transformkwargs)\n else:\n raise TypeError(\"Unsupported type for directory_or_audiodataset!\")\n\n\nmfcc = functools.partial(_build_preprocessing,\n transform=torchaudio.transforms.MFCC)\nlfcc = functools.partial(_build_preprocessing, transform=LFCC)\n\n\ndef double_delta(dataset: torch.utils.data.Dataset, delta_kwargs: dict = {}) -> TransformDataset:\n return TransformDataset(dataset=dataset, transformation=DoubleDeltaTransform, transform_kwargs=delta_kwargs)\n\n\ndef load_directory_split_train_test(\n path: Union[Path, str],\n feature_fn: Callable,\n feature_kwargs: dict,\n test_size: float,\n use_double_delta: bool = True,\n phone_call: bool = False,\n pad: bool = False,\n label: Optional[int] = None,\n amount_to_use: Optional[int] = None,\n) -> Tuple[TransformDataset, TransformDataset]:\n \"\"\"Load all wav 
files from directory, apply the feature transformation\n and split into test/train.\n\n Args:\n path (Union[Path, str]): Path to directory.\n feature_fn (Callable): This is assumed to be mfcc or lfcc function.\n feature_fn (dict): Kwargs for the feature_fn.\n test_size (float): Ratio of train/test split.\n use_double_delta (bool): Additionally calculate delta and double delta features (Default True)?\n amount_to_use (Optional[int]): If supplied, limit data.\n \"\"\"\n paths = find_wav_files(path)\n if paths is None:\n raise IOError(f\"Could not load files from {path}!\")\n\n if amount_to_use is not None:\n paths = paths[:amount_to_use]\n\n test_size = int(test_size * len(paths))\n\n train_paths = paths[:-test_size]\n test_paths = paths[-test_size:]\n\n LOGGER.info(f\"Loading data from {path}...!\")\n\n train_dataset = AudioDataset(\n train_paths, phone_call=phone_call)\n if pad:\n train_dataset = PadDataset(train_dataset, label=label)\n\n test_dataset = AudioDataset(\n test_paths, phone_call=phone_call)\n if pad:\n test_dataset = PadDataset(test_dataset, label=label)\n\n if feature_fn is None:\n return train_dataset, test_dataset\n\n dataset_train = feature_fn(\n directory_or_audiodataset=train_dataset,\n transformkwargs=feature_kwargs\n )\n\n dataset_test = feature_fn(\n directory_or_audiodataset=test_dataset,\n transformkwargs=feature_kwargs\n )\n if use_double_delta:\n dataset_train = double_delta(dataset_train)\n dataset_test = double_delta(dataset_test)\n\n return dataset_train, dataset_test\n","repo_name":"RUB-SysSec/WaveFake","sub_path":"dfadetect/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":10852,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"54"} +{"seq_id":"72065564961","text":"import SimpleITK as sitk\nimport sys\nimport os\nimport numpy as np\nimport json\n\n\n\nroot_path_data = \"C:/Users/pje33/Downloads/4D_CT_lyon_512/\"\nwith open(root_path_data + \"scan_dictionary.json\", 'r') as file:\n ct_path_dict = json.load(file)\n\ndesired_resolution = [0.9766*2, 0.9766*2, 3]\n\n\ny_offset = {\"121\": 85.9375,\n \"122\": 54.6875,\n \"123\": 54.4921875,\n \"124\": 55.46875,\n \"125\": 55.078125,\n \"126\": 55.078125}\n\n\n# for patient_id in ct_path_dict.keys():\nfor patient_id in ct_path_dict.keys():\n for scan_id in ct_path_dict[patient_id].keys():\n indexes_of_points_outside = []\n for phase in ct_path_dict[patient_id][scan_id].keys():\n\n file_path = ct_path_dict[patient_id][scan_id][phase]\n index = file_path[::-1].find(\"/\")\n input_file = root_path_data + file_path\n output_file = root_path_data + file_path[:-index] + \"{}_256.nii\".format(phase)\n print(\"Reading Dicom directory:\", input_file)\n print(\"Writing image:\", output_file)\n\n\n reader = sitk.ImageSeriesReader()\n dicom_names = reader.GetGDCMSeriesFileNames(input_file)\n reader.SetFileNames(dicom_names)\n image = reader.Execute()[:,:,-120:]\n spacing = image.GetSpacing()\n size = image.GetSize()\n # print(image)\n # print(\"Image size:\", size[0], size[1], size[2])\n # print(\"Origin\", image.GetOrigin())\n # print(\"Image spacing image:\", image.GetSpacing())\n\n\n\n new_size =[int(spacing[0] / desired_resolution[0] * 512) + 1,\n int(spacing[1] / desired_resolution[1] * 512) + 1,\n int(size[2] * spacing[2] / desired_resolution[2])]\n # print(\"new size:\", new_size)\n # print(size[2] * spacing[2] / desired_resolution[2])\n # resample the image to the new spacing\n image_256 = sitk.Resample(\n image1=image,\n size= new_size,\n 
transform=sitk.Transform(),\n            interpolator=sitk.sitkLinear,\n            outputOrigin=image.GetOrigin(),\n            outputSpacing=[0.97*2,0.97*2,3],\n            outputDirection=image.GetDirection(),\n            defaultPixelValue=0,\n            outputPixelType=image.GetPixelID(),\n        )\n\n\n\n        if new_size[0] < 256: # if it is too small, pad it\n            # print(\"padding\")\n\n            # Make an image of the correct size and set all values to zero\n            image_256_empty = sitk.Resample(\n                image1=image,\n                size=[256,256,new_size[-1]],\n                transform=sitk.Transform(),\n                interpolator=sitk.sitkLinear,\n                outputOrigin=image.GetOrigin(),\n                outputSpacing=[0.97 * 2, 0.97 * 2, 3],\n                outputDirection=image.GetDirection(),\n                defaultPixelValue=0,\n                outputPixelType=image.GetPixelID(),\n            )\n            image_256_empty[:,:,:] = 0\n\n\n            # fill the center of the matrix with the original image\n            index = 256 - new_size[0]\n            image_256_empty[index // 2 : -index // 2 , index // 2: -index // 2,:] = image_256\n\n            # Update the origin\n            image_256_empty.SetOrigin((image_256.GetOrigin()[0] - index // 2 * desired_resolution[0],\n                                       image_256.GetOrigin()[1] - index // 2 * desired_resolution[1],\n                                       image_256.GetOrigin()[-1]))\n            image_256 = image_256_empty\n\n\n        if new_size[0] > 256: # else crop\n            print(\"cropping\")\n            index = new_size[0] - 256\n            image_256 = image_256[index//2:-index//2,index//2:-index//2,:]\n            # The origin is automatically adjusted\n\n        #\n        # print(\"Image size:\", image_256[:,:,-80:].GetSize())\n        # print(\"new origin\", image_256.GetOrigin())\n        print(output_file)\n        sitk.WriteImage(image_256, output_file)\n\n\n        def convert_points_physical_to_voxel(points, origin, spacing):\n            voxel_point = points - origin\n            voxel_point /= spacing\n            return voxel_point\n\n        # # Read points:\n        # try:\n        #     points_file = root_path_data + \"{}_HM10395/points/{}.pts.txt\".format(patient_id,phase.zfill(2))\n        #     points = np.loadtxt(points_file)\n        # except:\n        #     continue\n        #\n        # points[:,1] += y_offset[patient_id] # add offset in the y-direction\n        # voxel_point = convert_points_physical_to_voxel(points, image_256.GetOrigin(), image_256.GetSpacing())\n        # print(len(voxel_point))\n        #\n        # # Now we need to remove points which are outside of the resampled volume\n        #\n        # for i, point in enumerate(voxel_point):\n        #     if point[0] <= 0 or point[1] <= 0 or point[2] <= 0:\n        #         print(point, points[i])\n        #         indexes_of_points_outside.append(i)\n        #\n        #\n        # np.savetxt(root_path_data + \"{}_HM10395/points/{}_shifted.pts.txt\".format(patient_id, phase.zfill(2)),points, fmt = \"%f\")\n        # np.savetxt(root_path_data + \"{}_HM10395/points/{}_voxel.pts.txt\".format(patient_id, phase.zfill(2)),voxel_point, fmt = \"%f\")\n        #\n        # if len(indexes_of_points_outside) > 0:\n        #     print(set(indexes_of_points_outside))\n        #     for phase in ct_path_dict[patient_id][scan_id].keys():\n        #         try:\n        #             points_file = root_path_data + \"{}_HM10395/points/{}_shifted.pts.txt\".format(patient_id, phase.zfill(2))\n        #             points_voxel_file = root_path_data + \"{}_HM10395/points/{}_voxel.pts.txt\".format(patient_id, phase.zfill(2))\n        #             points = np.loadtxt(points_file)\n        #             voxel_point = np.loadtxt(points_voxel_file)\n        #\n        #         except:\n        #             continue\n        #         # delete the points outside of the volume.\n        #         print(len(voxel_point))\n        #         points = np.delete(points, list(set(indexes_of_points_outside)), axis = 0 )\n        #         voxel_point = np.delete(voxel_point, list(set(indexes_of_points_outside)), axis = 0 )\n        #         print(len(voxel_point))\n        #         # print(voxel_point)\n        #         np.savetxt(root_path_data + \"{}_HM10395/points/{}_shifted.pts.txt\".format(patient_id, phase.zfill(2)), points, fmt = \"%f\")\n        #         np.savetxt(root_path_data + 
\"{}_HM10395/points/{}_voxel.pts.txt\".format(patient_id, phase.zfill(2)), voxel_point, fmt = \"%f\")\n","repo_name":"pje336/master_thesis_project","sub_path":"elastix/dicom_series_to_3d.py","file_name":"dicom_series_to_3d.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73152026402","text":"import pyfiglet as figlet\n\nname = figlet.figlet_format(\"Himasnhu \\n Singh \\n Tomar \\n Programming\")\nprint(name)\ni = 1\nwhile i <= 3:\n customer = int(input(\"Enter product price :\"))\n age = int(input(\"enter the age of customer :\"))\n if customer > 1000:\n if age > 60:\n customer = customer - 200\n else:\n customer = customer - 100\n if customer < 1000:\n if age < 45:\n customer = customer - 50\n print(customer)\n i = i + 1\n\n","repo_name":"Himanshusinghtomar/customer_discount","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13882063751","text":"from flask import Response, request\nfrom flask_restplus import Resource, reqparse\n\nfrom apis.db import api\nfrom mongo import mongo\n\nfrom datetime import datetime\n\nfrom jwt_auth.token import token_required, token_optional\n\nns = api.namespace('users', description=\"users api\")\n\n######################\n# GET USER STATS\n######################\n@ns.route(\"/stats/\")\nclass UserStats(Resource):\n @token_required\n @api.doc(security='apikey')\n def get(self, _, name):\n coll = mongo.db.comments\n result = list(coll.aggregate([\n { \"$match\": { \"user_name\" : name }},\n { \"$group\": { \"_id\": {\"user_name\" : \"$user_name\"}, \n \"count_recomendations\": { \"$sum\": \"$recommendations\" }, \n \"count_comments\": { \"$sum\": 1 },\n \"count_comments_blocked\": { \"$sum\": { \"$cond\": [\"$blocked\", 1, 0] } },\n }}\n ]))\n if result and len(result) > 0:\n obj = list(result)[0]\n del obj['_id']\n return {'data' : obj}\n return {'data' : {\n \"count_recomendations\": 0,\n \"count_comments\": 0\n }}\n\n######################\n# GET COMMENTS BY USER\n######################\n\ndef getComments(name, mode):\n query = {'user_name': name, 'blocked': mode}\n coll = mongo.db.comments\n result_list = list(coll.find(query))\n \n for i in result_list:\n del i['_id'] \n i['timestamp'] = i['timestamp'].isoformat() \n return result_list\n\n@ns.route(\"/comments/blocked/\")\nclass UserComments1(Resource):\n @token_required\n @api.doc(security='apikey')\n def get(self, _, name):\n return {'data': getComments(name, True)} \n\n@ns.route(\"/comments/valid/\")\nclass UserComments2(Resource):\n @token_required\n @api.doc(security='apikey')\n def get(self, _, name):\n return {'data': getComments(name, False)} \n\n@ns.route(\"/comments/undecided/\")\nclass UserComments3(Resource):\n @token_required\n @api.doc(security='apikey')\n def get(self, _, name):\n return {'data': getComments(name, None)}\n\n\n######################\n# GET USER NAMES\n######################\n@ns.route(\"/names/\")\nclass UserNames(Resource):\n @token_required\n @api.doc(security='apikey')\n def get(self, _, search):\n coll = mongo.db.comments\n result = coll.distinct( \"user_name\", { \"user_name\": { \"$regex\": search, '$options' : 'i' } } )\n result.sort()\n return {'data': result 
}\n\n","repo_name":"jsandersen/REM","sub_path":"backend/apis/db/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5144715104","text":"import nltk\nimport string\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nnltk.download('punkt')\nnltk.download('stopwords')\n\ndef preprocess_text(text):\n tokens = nltk.word_tokenize(text.lower())\n tokens = [word for word in tokens if word not in string.punctuation]\n tokens = [word for word in tokens if word not in stopwords.words('english')]\n return ' '.join(tokens)\n\ndef rank_resumes(job_description, resumes):\n job_description = preprocess_text(job_description)\n preprocessed_resumes = [preprocess_text(resume) for resume in resumes]\n tfidf_vectorizer = TfidfVectorizer()\n tfidf_matrix = tfidf_vectorizer.fit_transform([job_description] + preprocessed_resumes)\n n_components = min(len(resumes), 20)\n lsa = TruncatedSVD(n_components=n_components)\n lsa_matrix = lsa.fit_transform(tfidf_matrix)\n similarity_scores = cosine_similarity(lsa_matrix[0:1], lsa_matrix[1:])\n rankings = {}\n for i, resume in enumerate(resumes):\n rankings[resume] = similarity_scores[0][i]\n ranked_resumes = sorted(rankings.items(), key=lambda x: x[1], reverse=True)\n return ranked_resumes\n\nif __name__ == \"__main__\":\n job_description = \"\"\"\nResponsibility:\n- Manage the Maintenance Slots to ensure optimization of the required aircraft maintenance and defect rectification objectives.\n- Prioritize and Co-ordinate Forward Maintenance Requirements.\n- Ensure availability of Aircraft Technical Documentation, (Aircraft Maintenance Manual (AMM) Illustrated Parts Catalogue (IPC) Structure Repair Manual (SRM).\n- Ensure Availability of Material & Tooling together with Hangar Availability.\n- Ensure Sufficient Competent Manpower.\n- Evaluation & Preparation of Work Scopes (Work Package).\n- Managing the Maintenance Activities Timeline to ensure Optimisation of Delivery.\n- Creating And Controlling Work Packages.\n- Managing Information related to Service Bulletins (SB’s) / Service Letters (SL’s).\n- Ensuring appropriate communication throughout the delivery of the Maintenance Activities.\n- Checking Completed Work Cards for completeness and following procedures to support the closeout and arrange for the issue.\n- Post Activity Evaluation of Completed and Closed Maintenance Work packages to Review Opportunities for Improvement & Optimisation.\n\nQUALIFICATIONS\n- Bachelor's Degree in Aeronautical Engineer/ Aircraft Maintenance Technology\n- 3-4 Yrs experience as Production Planner (Aviation)/ Aircraft Mechanic\n- Strong organizational and problem-solving skills\n- Excellent communication abilities\n \"\"\"\n\n resumesFile = '~/Projects/hau/csstudy/resume-screening-and-classification/demo-set/resumes/resumes.xlsx'\n resumesDf = pd.read_excel(resumesFile)\n resumes = resumesDf['Resume'].values\n\n ranked_resumes = rank_resumes(job_description, resumes)\n \n print(\"Ranking of resumes:\")\n for i, (resume, score) in enumerate(ranked_resumes, start=1):\n print(f\"{i}. 
Resume: \")\n print(f\" Similarity Score: {score:.4f}\\n\")\n\n","repo_name":"chelscelis/resume-screening-and-classification","sub_path":"cossim2.py","file_name":"cossim2.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10613977431","text":"class Solution(object):\n def permute(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n if len(nums) == 1:\n return [list(nums)]\n l = []\n nums = set(nums)\n for val in nums:\n perms = self.permute(nums - {val})\n for perm in perms:\n l.append([val] + perm)\n return l\n\nif __name__ == '__main__':\n sln = Solution()\n print(len(sln.permute([1,2,3,4])))","repo_name":"sorengoyal/python-practice","sub_path":"sourcewise/leetcode/30-permutations/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37007079210","text":"__metaclass__ = type\n__all__ = [\n 'TimelineRecordingClient',\n ]\n\nimport logging\n\nfrom lazr.restful.utils import get_current_browser_request\nimport memcache\n\nfrom lp.services import features\nfrom lp.services.timeline.requesttimeline import get_request_timeline\n\n\nclass TimelineRecordingClient(memcache.Client):\n\n def __get_timeline_action(self, suffix, key):\n request = get_current_browser_request()\n timeline = get_request_timeline(request)\n return timeline.start(\"memcache-%s\" % suffix, key)\n\n @property\n def _enabled(self):\n configured_value = features.getFeatureFlag('memcache')\n if configured_value is None:\n return True\n else:\n return configured_value\n\n def get(self, key):\n if not self._enabled:\n return None\n action = self.__get_timeline_action(\"get\", key)\n try:\n return memcache.Client.get(self, key)\n finally:\n action.finish()\n\n def set(self, key, value, time=0, min_compress_len=0):\n if not self._enabled:\n return None\n action = self.__get_timeline_action(\"set\", key)\n try:\n success = memcache.Client.set(self, key, value, time=time,\n min_compress_len=min_compress_len)\n if success:\n logging.debug(\"Memcache set succeeded for %s\", key)\n else:\n logging.warning(\"Memcache set failed for %s\", key)\n return success\n finally:\n action.finish()\n","repo_name":"beluxx/launchpad","sub_path":"lib/lp/services/memcache/timeline.py","file_name":"timeline.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30062723230","text":"\"\"\"\nЗакодируйте любую строку из трех слов по алгоритму Хаффмана.\n\"\"\"\n\nfrom collections import Counter\n\n\nclass Node:\n def __init__(self, value=None, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n def __repr__(self):\n return f'Node[value = {self.value}, left = {self.left}, right = {self.right}]'\n\n\ndef get_tree_of_symbols(dict_of_frequency):\n code_list = [(value, Node(key)) for key, value in dict_of_frequency.items()]\n\n while len(code_list) != 1:\n code_list = sorted(code_list, key=lambda x: (x[0]), reverse=True)\n right_element = code_list.pop()\n left_element = code_list.pop()\n new_node = Node('')\n new_node.left = left_element\n new_node.right = right_element\n code_list.append((left_element[0] + right_element[0], new_node))\n\n return code_list[0]\n\n\ndef get_code_lists_of_tree(node, total_code):\n if node[1].left is None and node[1].right is 
None:\n dict_of_coding[node[1].value] = total_code\n return None\n get_code_lists_of_tree(node[1].left, total_code + '1')\n get_code_lists_of_tree(node[1].right, total_code + '0')\n\n\ndef get_arc(code_string, dict_of_coding):\n return ''.join(dict_of_coding[symbol] for symbol in code_string)\n\n\ndict_of_coding = dict()\n\ncode_string = input('Введите строку для архивирования: ')\ndict_of_frequency = dict(Counter(code_string).most_common())\n\ncode_tree = get_tree_of_symbols(dict_of_frequency)\nget_code_lists_of_tree(code_tree, '')\n\n\narc_string = get_arc(code_string, dict_of_coding)\n\nprint('Словарь для архивирования: {}'.format(dict_of_coding))\nprint('Полученная последовательность для архива: {}'.format(arc_string))","repo_name":"FeuerFrei117/algorithms","sub_path":"hw_8/ex_2.py","file_name":"ex_2.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14584113404","text":"#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import Twist\n\nif __name__ == \"__main__\":\n rospy.init_node('turtlebot_supervisor', anonymous=True)\n cmd_vel_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\n input_str = \"\"\n\n while input_str != \"Q\":\n input_str = raw_input(\"Press anything to send stop. \\\"Q\\\" to quit > \")\n cmd_vel_publisher.publish(Twist())","repo_name":"asharalam11/AA274_Winter-2019_Final_Project","sub_path":"scripts/stop_robot.py","file_name":"stop_robot.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31529066309","text":"import discord\nfrom discord.ext import commands\nimport logging\nimport os\nimport random\nfrom string import ascii_letters\nimport asyncio\nimport websockets\nimport json\nimport chess\nimport aiohttp\nimport time\nfrom datetime import datetime\n\nTIMEOUT = 60\nTIMEOUT_CHECK_INTERVAL = 3\nDRAW_THROTTLE = 1\nBASE_BOARD_STATE = {\"fen\": \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR\", \"uci\": None}\nBOARD_SIZE = 120\nGAME_TIME = 240\nHEARTBEAT_INTERVAL = 5\nHEARTBEAT_FAIL_COUNT = GAME_TIME // HEARTBEAT_INTERVAL\n\n\nclass Chess(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.sessions = {}\n\n @commands.command()\n async def sessions(self, ctx):\n output = [\"Sessions:\"]\n if self.sessions:\n for session in self.sessions:\n output.append(\n f\"{session}: {self.sessions[session][0]} vs. {self.sessions[session][1]}\"\n )\n output = \"\\n\".join(output)\n await ctx.send(f\"```{output}```\")\n else:\n await ctx.send(\"No sessions.\")\n\n @commands.max_concurrency(4)\n @commands.command()\n async def chess(self, ctx, opponent: discord.Member):\n \"\"\"Initiate a new lichess game with an opponent. 
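A hypothetical invocation, assuming the bot is wired to a '!' command prefix: !chess @opponent. 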
The initiator will always be white.\"\"\"\n if ctx.author == opponent:\n return await ctx.send(\"You can't play with yourself.\")\n players = set(\n [user for session in self.sessions for user in self.sessions[session]]\n )\n if ctx.author in players:\n return await ctx.send(\"You're already playing a game!\")\n elif opponent in players:\n return await ctx.send(\"The opponent is already playing a game!\")\n\n game = None\n\n async with aiohttp.ClientSession() as s:\n form = aiohttp.FormData()\n form.add_field(\"clock.limit\", GAME_TIME)\n form.add_field(\"clock.increment\", 0)\n async with s.post(\n \"https://lichess.org/api/challenge/open\", data=form\n ) as res:\n if res.status == 200:\n data = await res.json()\n game = data[\"challenge\"][\"id\"]\n\n author_url = data[\"urlWhite\"]\n opponent_url = data[\"urlBlack\"]\n self.sessions[game] = [ctx.author, opponent]\n if random.random() < 0.5:\n self.sessions[game] = [opponent, ctx.author]\n opponent_url = data[\"urlWhite\"]\n author_url = data[\"urlBlack\"]\n\n await ctx.author.send(\n f\"Your unique URL is in the next message, and will be deleted in {TIMEOUT} seconds.\"\n )\n await ctx.author.send(author_url, delete_after=TIMEOUT)\n await opponent.send(\n f\"{ctx.message.jump_url}\\nYou've been invited to play Chess! Your unique URL is in the next message, and will be deleted in {TIMEOUT} seconds.\"\n )\n await opponent.send(opponent_url, delete_after=TIMEOUT)\n await ctx.send(\n f\"Confirming game with opponents, please wait. This invite will time out in {TIMEOUT} seconds.\"\n )\n else:\n return await ctx.send(\"Failed to create.\")\n\n logging.info(f\"Created a new lichess game with ID: {game}\")\n await self.wait_for_start(ctx, game)\n logging.info(f\"Cleaning up game: {game}\")\n del self.sessions[game]\n\n async def wait_for_start(self, ctx, game: str):\n async with aiohttp.ClientSession() as s:\n ready = False\n for _ in range(TIMEOUT // TIMEOUT_CHECK_INTERVAL):\n async with s.get(f\"https://lichess.org/game/export/{game}\") as res:\n if res.status == 200:\n ready = True\n break\n else:\n await asyncio.sleep(TIMEOUT_CHECK_INTERVAL)\n logging.info(f\"Game status check: {res.status}\")\n if ready:\n await self.game(ctx, game)\n else:\n logging.info(\"Challengers failed to ready up, cancelling.\")\n await ctx.send(\"The challengers failed to join the game.\")\n\n async def game(self, ctx, game):\n sri = \"\".join(random.sample(ascii_letters, 10))\n shard = random.randint(1, 5)\n try:\n async with websockets.connect(\n f\"wss://socket{shard}.lichess.org/watch/{game}/white/v5?sri={sri}v=100\",\n ssl=True,\n ) as ws:\n sem = asyncio.Semaphore(1)\n moves = [BASE_BOARD_STATE]\n signal = 9\n ping = asyncio.create_task(self.ping(ws))\n draws = asyncio.create_task(self.queue_draws(ctx, ws, sem, moves, game))\n msg = None\n try:\n async for ms in ws:\n logging.debug(f\"Data from socket: {ms}\")\n if ms == \"0\":\n signal += 1\n if signal >= HEARTBEAT_FAIL_COUNT:\n await ctx.send(f\"`{game}`: cancelled due to inactivity.\")\n raise RuntimeError(\"Heartbeat didn't receive any actions for too long.\")\n else:\n signal = 0\n payload = json.loads(ms)\n status = None\n if payload.get(\"t\", None) == \"end\":\n status = payload.get(\"d\", None)\n if not status:\n status = \"Draw!\"\n if status == \"white\":\n status = f\"{self.sessions[game][0].mention} won!\"\n elif status == \"black\":\n status = f\"{self.sessions[game][1].mention} won!\"\n elif payload.get(\"t\", None) == \"crowd\":\n if all(\n [not presence for presence in 
payload[\"d\"].values()]\n ):\n status = \"Both players disconnected.\"\n else:\n if \"d\" in payload and \"fen\" in payload[\"d\"]:\n moves.append(payload[\"d\"])\n logging.info(f\"Pushed move: {payload['d']}\")\n logging.info(f\"Moves available: {len(moves)}\")\n sem.release()\n else:\n logging.warn(f\"Data is malformed: {payload}\")\n if status:\n await ctx.send(f\"`{game}`: {status}\")\n raise RuntimeWarning(\"I could probably do better but this is an exit.\")\n except Exception:\n while len(moves):\n await asyncio.sleep(0) # Finish the rest of the turns\n ping.cancel()\n draws.cancel()\n try:\n await ping\n except asyncio.CancelledError:\n logging.info(\"Heartbeat successfully cancelled.\")\n try:\n await draws\n except asyncio.CancelledError:\n logging.info(\"Drawing task successfully cancelled.\")\n await ws.close()\n raise\n except Exception as err:\n logging.warn(err)\n\n async def ping(self, ws):\n logging.info(\"Ping task started.\")\n try:\n while not ws.closed:\n try:\n logging.debug(\"Sending keepalive.\")\n await ws.send(json.dumps({\"t\": \"p\", \"l\": 20}))\n await asyncio.sleep(HEARTBEAT_INTERVAL)\n except websockets.ConnectionClosedOK:\n pass\n except asyncio.CancelledError:\n logging.info(\"Ping task received cancellation.\")\n raise\n\n async def queue_draws(self, ctx, ws, sem, moves, game):\n logging.info(\"Draw task started.\")\n try:\n msg = None\n while await sem.acquire():\n d = moves.pop(0)\n logging.info(f\"Refreshing board: {d}\")\n board = chess.Board(d[\"fen\"])\n em = discord.Embed(\n description=f\"```{str(board)}```\\n[watch on lichess.org](https://lichess.org/{game})\",\n timestamp=datetime.now(),\n )\n url = f\"https://backscattering.de/web-boardimage/board.png?fen={d['fen']}&size={BOARD_SIZE}\"\n if d[\"uci\"]:\n url += f\"&lastMove={d['uci']}\"\n em.set_thumbnail(url=url)\n em.set_footer(text=\"Powered by lichess.org\")\n em.add_field(\n name=\"Players\",\n value=f\":white_large_square: {self.sessions[game][0].mention}\\n:black_large_square: {self.sessions[game][1].mention}\",\n )\n if not msg:\n msg = await ctx.send(content=None, embed=em)\n else:\n await msg.edit(content=None, embed=em)\n await asyncio.sleep(DRAW_THROTTLE * len(self.sessions))\n except asyncio.CancelledError:\n logging.info(\"Draw task received cancellation.\")\n raise\n\n\ndef setup(bot):\n chess = Chess(bot)\n bot.add_cog(chess)\n","repo_name":"funkyhippo/lichess-discord-bot","sub_path":"cogs/chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":9820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25073867993","text":"import os\nfrom spacy.matcher import Matcher\nfrom spacy.tokens import Token\nfrom util import util\n\nCWD = util.get_file_directory(__file__)\nVOCAB_PATH = os.path.join(CWD, 'neutral_causal_vocab/terms.txt')\n\n\nclass NeutralCausalAnnotator:\n name = 'neutral_causal_annotator'\n\n def __init__(self, nlp):\n with open(VOCAB_PATH) as f:\n vocab = [line.strip() for line in f.readlines()]\n\n self.matcher = Matcher(nlp.vocab)\n for term in vocab:\n self.matcher.add('neutral_causal', None, [{'LOWER': term}])\n\n Token.set_extension('is_neutral_causal_term', default=False)\n\n def __call__(self, doc):\n matches = self.matcher(doc)\n for label, start, end in matches:\n token = doc[start]\n token._.set('is_neutral_causal_term', True)\n return 
doc\n","repo_name":"cyclecycle/lipa-db","sub_path":"custom/neutral_causal_annotator.py","file_name":"neutral_causal_annotator.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36447569978","text":"from datetime import date\n\nfrom django.db import models\nfrom django.db.models import Model\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=150)\n description = models.TextField()\n url = models.SlugField(max_length=150, unique=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"Category\"\n verbose_name_plural = \"Categories\"\n\n\nclass Actor(models.Model):\n name = models.CharField(max_length=100)\n age = models.PositiveIntegerField(default=0)\n description = models.TextField()\n image = models.ImageField(upload_to=\"actors/\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"Actor and Director\"\n verbose_name_plural = \"Actors and Directors\"\n\n\nclass Genre(models.Model):\n name = models.CharField(max_length=100)\n description = models.TextField()\n url = models.SlugField(max_length=100, unique=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"Genre\"\n verbose_name_plural = \"Genres\"\n\n\nclass Movie(models.Model):\n title = models.CharField(max_length=100)\n tagline = models.CharField(max_length=200, default='')\n description = models.TextField()\n poster = models.ImageField(upload_to=\"movies/\")\n year = models.PositiveIntegerField(default=2021)\n country = models.CharField(max_length=100)\n directors = models.ManyToManyField(Actor, verbose_name=\"director\", related_name=\"film_director\")\n actors = models.ManyToManyField(Actor, verbose_name=\"actors\", related_name=\"film_actors\")\n genres = models.ManyToManyField(Genre, verbose_name=\"genres\")\n world_premiere = models.DateField(default=date.today)\n budget = models.PositiveIntegerField(default=0, help_text=\"specify the amount in dollars\")\n feels_in_usa = models.PositiveIntegerField(default=0, help_text=\"specify the amount in dollars\")\n fees_in_the_world = models.PositiveIntegerField(default=0, help_text=\"specify the amount in dollars\")\n category = models.ForeignKey(Category, verbose_name=\"Category\", on_delete=models.CASCADE, null=True)\n url = models.SlugField(max_length=150, unique=True)\n draft = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = \"Movie\"\n verbose_name_plural = \"Movies\"\n\n\nclass MovieShots(models.Model):\n title = models.CharField(max_length=100)\n description = models.TextField()\n image = models.ImageField(upload_to=\"movie_shots/\")\n movie = models.ForeignKey(Movie, verbose_name=\"Movie\", on_delete=models.CASCADE)\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = \"Movie shot\"\n verbose_name_plural = \"Movie shots\"\n\n\nclass RatingStar(models.Model):\n value = models.SmallIntegerField(default=0)\n\n def __str__(self):\n return self.value\n\n class Meta:\n verbose_name = \" Rating Star\"\n verbose_name_plural = \"Rating Stars\"\n\n\nclass Rating(models.Model):\n ip = models.CharField(max_length=10)\n star = models.ForeignKey(RatingStar, on_delete=models.CASCADE, verbose_name=\"star\")\n movie = models.ForeignKey(Movie, on_delete=models.CASCADE, verbose_name=\"movie\")\n\n def __str__(self):\n return f'{self.star} - {self.movie}'\n\n class Meta:\n verbose_name = \"Rating\"\n 
verbose_name_plural = \"Ratings\"\n\n\nclass Reviews(models.Model):\n email = models.EmailField()\n name = models.CharField(max_length=100)\n text = models.TextField(max_length=2000)\n parent = models.ForeignKey('self', verbose_name=\"Parent\", on_delete=models.SET_NULL, blank=True, null=True)\n movie = models.ForeignKey(Movie, verbose_name=\"Movie\", on_delete=models.CASCADE)\n\n def __str__(self):\n return f'{self.name} - {self.movie}'\n\n class Meta:\n verbose_name = \"Review\"\n verbose_name_plural = \"Reviews\"\n","repo_name":"Aizdylagady/Skreenz","sub_path":"movie_screenz/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26721681297","text":"# --- Part Two ---\n# On the other hand, it might be wise to try a different strategy: let the giant \n# squid win.\n# \n# You aren't sure how many bingo boards a giant squid could play at once, so \n# rather than waste time counting its arms, the safe thing to do is to figure out \n# which board will win last and choose that one. That way, no matter which boards \n# it picks, it will win for sure.\n# \n# In the above example, the second board is the last to win, which happens after \n# 13 is eventually called and its middle column is completely marked. If you were \n# to keep playing until this point, the second board would have a sum of unmarked \n# numbers equal to 148 for a final score of 148 * 13 = 1924.\n# \n# Figure out which board will win last. Once it wins, what would its final score \n# be?\n\n\nclass BingoBoard:\n def __init__(self):\n self.rows = [set() for _ in range(5)]\n self.columns = [set() for _ in range(5)]\n self.all = set()\n self.called = set()\n self.bingo = False\n \n def add_row(self, num, cols):\n self.all.update(cols)\n self.rows[num].update(cols)\n for i, val in enumerate(cols):\n self.columns[i].add(val)\n\n def call_number(self, num):\n if self.bingo:\n return\n self.all.discard(num)\n self.called.add(num)\n for line in self.rows + self.columns:\n if line.issubset(self.called):\n self.bingo = True\n print(\"BINGO\")\n print( sum(self.all)*num )\n\n\nboards = []\nb = None\nfor line in open(\"input.txt\").readlines():\n if \",\" in line:\n numbers = [int(n) for n in line.split(\",\")]\n elif line == \"\\n\":\n row = 0\n if b:\n boards.append(b)\n b = BingoBoard()\n else:\n cols = [int(n) for n in line.split()]\n b.add_row(row, cols)\n row += 1\n\nfor num in numbers:\n for b in boards:\n b.call_number(num)\n\n\n","repo_name":"timabrmsn/advent_of_code","sub_path":"2021/day4/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"5103455881","text":"#############\n# Templates #\n#############\n\nCONVERT_TEMPLATES_DISPS = {\n \"ctarget\":(lambda x: x.color_name()),\n \"csource\":(lambda x: x.color_name()),\n \"ctarget_army\":(lambda x: x.color_name()),\n \"csource_army\":(lambda x: x.color_name()),\n \"order\":(lambda x: x.color_abbrev()),\n}\n\ndef convert_templates(templates):\n \"\"\"\n templates: anything with a __getitem__ (so dictionaries, Contexts, etc.)\n \"\"\"\n newtemplates = {}\n for key in templates:\n if key in CONVERT_TEMPLATES_DISPS:\n func = CONVERT_TEMPLATES_DISPS[key]\n newtemplates[key] = func(templates[key])\n else:\n # could just be a string\n newtemplates[key] = templates[key]\n return 
newtemplates\n","repo_name":"krzhang/fotd","sub_path":"fotd/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"39367341707","text":"def minAsciiDeleteSumForTwoStrings(s1, s2):\n\n l1, l2 = len(s1) + 1, len(s2) + 1\n\n matrix = [[0 for i in range(l2)] for j in range(l1)]\n \n for i in range(1, l2):\n matrix[0][i] = matrix[0][i - 1] + ord(s2[i - 1])\n \n for i in range(1, l1):\n matrix[i][0] = matrix[i - 1][0] + ord(s1[i - 1])\n\n for i in range(1, l1):\n for j in range(1, l2):\n if s1[i - 1] == s2[j - 1]:\n matrix[i][j] = matrix[i - 1][j - 1]\n else:\n matrix[i][j] = min(matrix[i - 1][j] + ord(s1[i - 1]), matrix[i][j - 1] + ord(s2[j - 1]))\n \n return matrix[-1][-1]\n\nprint(minAsciiDeleteSumForTwoStrings(\"delete\", \"leet\"))","repo_name":"nikhiilll/Algorithms-using-Python","sub_path":"Dynamic Programming/LeetCode/MinAsciiDeleteSumForTwoStrings_712.py","file_name":"MinAsciiDeleteSumForTwoStrings_712.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27751666216","text":"from collections import defaultdict\nimport re\n\nval_to = re.compile(r'value (\\d+) goes to (bot|output) (\\d+)')\ngive_to = re.compile(r'bot (\\d+) gives low to (bot|output) (\\d+) and high to (bot|output) (\\d+)')\n\nstate = {\n 'output': defaultdict(list),\n 'bot': defaultdict(list),\n}\ndownstream = {}\n\nwith open('input.txt') as f:\n for line in f:\n m = val_to.match(line)\n if m:\n val, typ, num = m.groups()\n state[typ][num].append(int(val))\n else:\n m = give_to.match(line)\n bot, typ_l, num_l, typ_h, num_h = m.groups()\n downstream[bot] = [state[typ_l][num_l], state[typ_h][num_h]]\n\nwhile True:\n n = 0\n for bot, chips in state['bot'].items():\n if len(chips) == 2:\n n += 1\n if all(a == b for a, b in zip(sorted(chips), [17, 61])):\n print('Answer part 1:', bot)\n for i, chip in enumerate(sorted([chips.pop(), chips.pop()])):\n downstream[bot][i].append(chip)\n if n == 0:\n break\n\nbins = list(state['output'][str(i)][0] for i in range(3))\n\nprint('Answer part 2:', bins[0]*bins[1]*bins[2])\n","repo_name":"belteshassar/AoC-2016","sub_path":"10/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5224072851","text":"from . 
import parser,models\nfrom django import forms\n\nclass ParserForm(forms.Form):\n MEDIA_CHOISES = (\n ('FILMS_KG', 'FILMS_KG'),\n )\n media_type = forms.ChoiceField(choices=MEDIA_CHOISES)\n class Meta:\n fields = [\n 'media_type',\n ]\n def parser_data(self):\n if self.data['media_type'] == 'FILMS_KG':\n film_parser = parser.parser()\n for i in film_parser:\n models.Film.objects.create(**i)\n","repo_name":"narmuhamedov/month3hw","sub_path":"parser_film/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26596636641","text":"import time\nimport os\nimport pydenticon\n\n\ndef gen_avatar() -> str:\n seed = str(time.time_ns())\n\n foreground = [\"rgb(45,79,255)\",\n \"rgb(254,180,44)\",\n \"rgb(226,121,234)\",\n \"rgb(30,179,253)\",\n \"rgb(232,77,65)\",\n \"rgb(49,203,115)\",\n \"rgb(141,69,170)\"]\n\n # Set-up a background colour.\n background = \"rgb(224,224,224)\"\n\n # Set up the padding (top, bottom, left, right) in pixels.\n padding = (20, 20, 20, 20)\n\n # Generate a PNG image using a generator that will create 10x10 block identicons using SHA1 digest.\n identicon = pydenticon.Generator(10, 10, foreground=foreground,\n background=background).generate(seed, 200, 200,\n padding=padding,\n output_format=\"png\")\n\n filename = \"avatar-%s.png\" % seed\n # get config\n from random_img_api.src.config import config\n _config = config.Config(\"config.json\")\n img_path = _config.get(\"img_path\")\n with open(os.path.join(img_path, filename), \"wb\") as write_avatar:\n write_avatar.write(identicon)\n\n return filename\n","repo_name":"BrandenXia/Random_Img_API","sub_path":"random_img_api/src/get_img/gen_avatar.py","file_name":"gen_avatar.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"27344556173","text":"from time import time\nfrom typing import List\n\n\nclass Solution:\n def minCostII(self, costs: List[List[int]]) -> int:\n\n # time O(n*k)\n # space O(k)\n\n def best_results(result):\n first_best = float('inf')\n idx_best = -1\n second_best = float('inf')\n for j in range(len(costs[0])):\n price = result[j]\n if price < first_best:\n second_best = first_best\n first_best = price\n idx_best = j\n elif price < second_best:\n second_best = price\n return first_best, second_best, idx_best\n\n dp = costs[0]\n first_best, second_best, idx_best = best_results(dp)\n for i in range(1, len(costs)):\n for j in range(len(costs[0])):\n if j == idx_best:\n dp[j] = costs[i][j] + second_best\n else:\n dp[j] = costs[i][j] + first_best\n first_best, second_best, idx_best = best_results(dp)\n return min(dp)\n\n # first solution\n # time O(n*k*k)\n # space O(n*k)\n #\n # dp = [[0 for _ in range(len(costs[0]))] for _ in range(len(costs))]\n # dp[0] = costs[0]\n # for i in range(1, len(costs)):\n # for j in range(len(costs[0])):\n # prev_best = float('inf')\n # for k in range(len(costs[0])):\n # if k == j:\n # continue\n # prev_best = min(prev_best, dp[i-1][k])\n # dp[i][j] = costs[i][j] + prev_best\n # return min(dp[-1])\n\n\nstart_time = time()\n\n_costs = [[1,5,3],[2,9,4]]\n# Example 1:\n# Input: costs = [[1,5,3],[2,9,4]]\n# Output: 5\n# Explanation:\n# Paint house 0 into color 0, paint house 1 into color 2. Minimum cost: 1 + 4 = 5;\n# Or paint house 0 into color 2, paint house 1 into color 0. 
Minimum cost: 3 + 2 = 5.\n#\n# Example 2:\n# Input: costs = [[1,3],[2,4]]\n# Output: 5\n\nprint(Solution().minCostII(_costs))\n\nprint(\"--- %s seconds ---\" % (time() - start_time))\n","repo_name":"Sadomtsevvs/Leetcode","sub_path":"265. Paint House II.py","file_name":"265. Paint House II.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29433534788","text":"from sqlalchemy import Column, String, Integer, ForeignKey\nfrom sqlalchemy.orm import relationship, backref\nfrom Modules.Config.base import Base\nfrom Modules.Config.Data import Message\nfrom Modules.Classes.Classification import Classification\n\n\nclass Category(Base):\n \"\"\"\n A class used to represent a category. A category object has attributes:\n\n :param id: identifier of object in the database. This is the primary key\n :type id: int\n :param name: name of the category\n :type name: str\n :param classification_id: identifier of the classification object which the category belongs to. This is a foreign key\n :type classification_id: int\n :param classification: classification object which the category belongs to\n :type classification: Modules.Classes.Classification.Classification\n \"\"\"\n\n __tablename__ = 'categories'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n classification_id = Column(Integer, ForeignKey('classifications.id'))\n\n classification = relationship(\"Classification\", backref=backref(\"categories\", cascade=\"all, delete-orphan\",\n single_parent=True))\n\n def __init__(self, name, classification):\n \"\"\"\n Constructor of the class\n \"\"\"\n self.name = name\n self.classification = classification\n\n def __str__(self):\n \"\"\"\n Method that represents the object as a string\n \"\"\"\n return '{}¥{}¥{}'.format(self.id, self.name, self.classification_id)\n\n @staticmethod\n def create(parameters, session):\n \"\"\"\n Creates a 'Category' object and stores it in the DB; the data for the object is inside the 'parameters'\n variable.\n\n :param parameters: list of important information that is needed in this function\n :type parameters: list\n :param session: session established with the database\n :type session: Modules.Config.base.Session\n :return msg_rspt: message ready to send to a client (response of requested action)\n :rtype msg_rspt: Modules.Config.Data.Message\n \"\"\"\n # Received 'parameters' --> [name, id_classification]\n classification_aux = session.query(Classification).filter(Classification.id == parameters[1]).first()\n category_aux = Category(parameters[0], classification_aux)\n session.add(category_aux)\n session.commit()\n session.close()\n msg_rspt = Message(action=2, comment='Register created successfully')\n return msg_rspt\n\n @staticmethod\n def read(parameters, session):\n \"\"\"\n Retrieves a list of 'Categories' registered in the DB. The target objects depend on the length of the\n 'parameters'. 
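For instance, read([], session) would return every category stored in the DB, while read([3], session) (with 3 as a purely hypothetical classification id) only returns the categories that belong to that classification. 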
The list contains a string representation of each 'Category' (__str__()).\n\n :param parameters: list of important information that is needed in this function\n :type parameters: list\n :param session: session established with the database\n :type session: Modules.Config.base.Session\n :return msg_rspt: message ready to send to a client (response of requested action)\n :rtype msg_rspt: Modules.Config.Data.Message\n \"\"\"\n if len(parameters) == 0: # Ask for all categories stored in DB\n categories = session.query(Category).all()\n # Received 'parameters' --> [id_classification]\n else: # Ask only for categories associated with a 'Classification' object\n categories = session.query(Category).filter(Category.classification_id == parameters[0]).all()\n msg_rspt = Message(action=2, information=[])\n for item in categories:\n msg_rspt.information.append(item.__str__())\n session.close()\n return msg_rspt\n\n @staticmethod\n def delete(parameters, session):\n \"\"\"\n Removes a 'Category' object from the DB. The 'parameters' contains the id of the 'Classification' object that the\n categories are associated with.\n\n :param parameters: list of important information that is needed in this function\n :type parameters: list\n :param session: session established with the database\n :type session: Modules.Config.base.Session\n :return msg_rspt: message ready to send to a client (response of requested action)\n :rtype msg_rspt: Modules.Config.Data.Message\n \"\"\"\n # Received --> [id_classification]\n categories_aux = session.query(Category).filter(Category.classification_id == parameters[0]).all()\n for item in categories_aux:\n session.delete(item)\n session.commit()\n session.close()\n msg_rspt = Message(action=2, comment='Register deleted successfully')\n return msg_rspt\n\n @staticmethod\n def select(parameters, session):\n \"\"\"\n Retrieves information (attributes) of a 'Category' object from the DB. The 'parameters' contains the id of the\n desired 'Category'. 
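For example, select([5], session) (with 5 as a purely hypothetical category id) answers with the category's name followed by its classification_id. 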
Each attribute occupies a space of the returned list.\n\n :param parameters: list of important information that is needed in this function\n :type parameters: list\n :param session: session established with the database\n :type session: Modules.Config.base.Session\n :return msg_rspt: message ready to send to a client (response of requested action)\n :rtype msg_rspt: Modules.Config.Data.Message\n \"\"\"\n # Received --> [id_category]\n category_aux = session.query(Category).filter(Category.id == parameters[0]).first()\n msg_rspt = Message(action=2, information=[])\n msg_rspt.information.append(category_aux.name)\n msg_rspt.information.append(category_aux.classification_id)\n session.close()\n return msg_rspt\n","repo_name":"toolexp/ET_Server","sub_path":"Modules/Classes/Category.py","file_name":"Category.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1849137049","text":"#########################\n# Utils file for random faff\n#########################\n\ndef five_letter_only(data_file):\n with open(\"data\\\\five_letter_words.txt\", \"w\") as out_f:\n with open(data_file, \"r\") as english_words:\n word_list = english_words.readlines()\n\n for word in word_list:\n if len(word) == 6: # There is a newline character still included apparently \n out_f.write(word)\n\ndef get_five_unique_words():\n letters_so_far = set()\n word_list = []\n\n with open(\"data/five_letter_words.txt\", \"r\") as out_f:\n full_word_list = out_f.readlines()\n\n curr_word_idx = 0 # 10222\n word_indices = {}\n highest_letter = ''\n while (len(word_list) < 5):\n curr_word = full_word_list[curr_word_idx][:-1]\n\n # Check first letter \n if highest_letter > curr_word[0]:\n removed = word_list.pop()\n curr_word_idx = word_indices[removed] + 1# Backtracking \n for letter in removed:\n letters_so_far.remove(letter)\n\n highest_letter = '' if len(letters_so_far) == 0 else max(letters_so_far)\n \n continue\n\n unique = True\n temp_list = set()\n for letter in curr_word:\n if letter in letters_so_far or letter in temp_list:\n unique = False\n break\n temp_list.add(letter)\n\n if unique:\n word_indices[curr_word] = curr_word_idx\n word_list.append(curr_word)\n\n letters_so_far = letters_so_far.union(temp_list)\n\n highest_letter = max(highest_letter, max(temp_list))\n\n print(f\"{word_list}, with current idx: {curr_word_idx}\")\n\n if curr_word_idx == len(full_word_list) - 1:\n while curr_word_idx == len(full_word_list) - 1:\n if len(word_list) == 0:\n print(\"No Combinations\")\n return \n\n removed = word_list.pop()\n curr_word_idx = word_indices[removed] # Backtracking \n for letter in removed:\n letters_so_far.remove(letter)\n\n highest_letter = '' if len(letters_so_far) == 0 else max(letters_so_far)\n\n curr_word_idx += 1\n \n\nif __name__ == \"__main__\":\n # Sort through five letter only words \n get_five_unique_words()","repo_name":"MHokinson38/BotMacklin","sub_path":"WordleWrecker/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"32567481761","text":"import logging\n\nfrom randovania.game_connection.connection_base import ConnectionBase, GameConnectionStatus, Inventory\nfrom randovania.game_connection.connector.corruption_remote_connector import CorruptionRemoteConnector\nfrom randovania.game_connection.connector.echoes_remote_connector import EchoesRemoteConnector\nfrom 
randovania.game_connection.connector.prime1_remote_connector import Prime1RemoteConnector\nfrom randovania.game_connection.connector.prime_remote_connector import PrimeRemoteConnector\nfrom randovania.game_connection.connector.remote_connector import RemoteConnector\nfrom randovania.game_connection.executor.memory_operation import MemoryOperationException, MemoryOperation, \\\n MemoryOperationExecutor\nfrom randovania.game_connection.memory_executor_choice import MemoryExecutorChoice\nfrom randovania.game_description.resources.pickup_entry import PickupEntry\nfrom randovania.game_description.world.world import World\nfrom randovania.games.game import RandovaniaGame\nfrom randovania.games.prime1.patcher import prime1_dol_versions\nfrom randovania.games.prime2.patcher import echoes_dol_versions\nfrom randovania.games.prime3.patcher import corruption_dol_versions\n\nPermanentPickups = tuple[tuple[str, PickupEntry], ...]\n\n\nclass ConnectionBackend(ConnectionBase):\n executor: MemoryOperationExecutor\n connector: RemoteConnector | None = None\n\n _checking_for_collected_index: bool = False\n _inventory: Inventory\n _enabled: bool = True\n\n # Detected Game\n _world: World | None = None\n _last_world: World | None = None\n\n # Messages\n message_cooldown: float = 0.0\n\n # Multiworld\n _expected_game: RandovaniaGame | None\n _permanent_pickups: PermanentPickups\n\n def __init__(self, executor: MemoryOperationExecutor):\n super().__init__()\n self.logger = logging.getLogger(type(self).__name__)\n self.executor = executor\n\n self._inventory = {}\n self._expected_game = None\n self._permanent_pickups = tuple()\n\n @property\n def current_status(self) -> GameConnectionStatus:\n if not self.executor.is_connected():\n return GameConnectionStatus.Disconnected\n\n if self.connector is None:\n return GameConnectionStatus.UnknownGame\n\n if self._expected_game is not None and self._expected_game != self.connector.game_enum:\n return GameConnectionStatus.WrongGame\n\n if self._world is None:\n return GameConnectionStatus.TitleScreen\n\n elif not self.checking_for_collected_index:\n return GameConnectionStatus.TrackerOnly\n\n else:\n return GameConnectionStatus.InGame\n\n @property\n def backend_choice(self) -> MemoryExecutorChoice:\n return self.executor.backend_choice\n\n @property\n def name(self) -> str:\n raise NotImplementedError()\n\n @property\n def lock_identifier(self) -> str | None:\n return self.executor.lock_identifier\n\n @property\n def checking_for_collected_index(self):\n return self._checking_for_collected_index\n\n @checking_for_collected_index.setter\n def checking_for_collected_index(self, value):\n self._checking_for_collected_index = value\n\n def set_connection_enabled(self, value: bool):\n self._enabled = value\n if not value:\n self.connector = None\n\n async def _identify_game(self) -> RemoteConnector | None:\n all_connectors: list[PrimeRemoteConnector] = [\n Prime1RemoteConnector(version)\n for version in prime1_dol_versions.ALL_VERSIONS\n ]\n all_connectors.extend([\n EchoesRemoteConnector(version)\n for version in echoes_dol_versions.ALL_VERSIONS\n ])\n all_connectors.extend([\n CorruptionRemoteConnector(version)\n for version in corruption_dol_versions.ALL_VERSIONS\n ])\n read_first_ops = [\n MemoryOperation(connectors.version.build_string_address,\n read_byte_count=min(len(connectors.version.build_string), 4))\n for connectors in all_connectors\n ]\n try:\n first_ops_result = await self.executor.perform_memory_operations(read_first_ops)\n except (RuntimeError, 
MemoryOperationException) as e:\n self.logger.debug(f\"Unable to probe for game version: {e}\")\n return None\n\n possible_connectors = [\n connectors\n for connectors, read_op in zip(all_connectors, read_first_ops)\n if first_ops_result.get(read_op) == connectors.version.build_string[:4]\n ]\n\n for connector in possible_connectors:\n try:\n is_version = await connector.is_this_version(self.executor)\n except (RuntimeError, MemoryOperationException) as e:\n return None\n\n if is_version:\n self.logger.info(f\"identified game as {connector.game_enum.long_name}: {connector.version.description}\")\n return connector\n\n def get_current_inventory(self) -> Inventory:\n return self._inventory\n\n def set_expected_game(self, game: RandovaniaGame | None):\n self._expected_game = game\n\n def set_permanent_pickups(self, pickups: PermanentPickups):\n self.logger.info(\"num permanent pickups: %d\", len(pickups))\n self._permanent_pickups = pickups\n\n async def update_current_inventory(self):\n self._inventory = await self.connector.get_inventory(self.executor)\n\n async def _multiworld_interaction(self):\n if self._expected_game is None:\n return\n\n locations, patches = await self.connector.known_collected_locations(self.executor)\n for location in locations:\n await self._emit_location_collected(self.connector.game_enum, location)\n\n if patches:\n await self.connector.execute_remote_patches(self.executor, patches)\n else:\n patches, has_message = await self.connector.find_missing_remote_pickups(\n self.executor, self._inventory, self._permanent_pickups, self.message_cooldown > 0.0,\n )\n if patches and (self.message_cooldown <= 0.0 or not has_message):\n await self.connector.execute_remote_patches(self.executor, patches)\n if has_message:\n self.message_cooldown = 4.0\n\n async def _interact_with_game(self, dt):\n has_pending_op, world = await self.connector.current_game_status(self.executor)\n self._world = world\n if world is not None:\n await self.update_current_inventory()\n if not has_pending_op:\n self.message_cooldown = max(self.message_cooldown - dt, 0.0)\n await self._multiworld_interaction()\n\n def _is_unexpected_game(self):\n \"\"\"\n If has an expected game, True if connected game isn't that.\n Otherwise, False.\n :return:\n \"\"\"\n if self._expected_game is not None:\n return self._expected_game != self.connector.game_enum\n return False\n\n async def update(self, dt: float):\n if not self._enabled:\n return\n\n if not await self.executor.connect():\n return\n\n if self.connector is None or self._is_unexpected_game() or self._world is None:\n self.connector = await self._identify_game()\n\n try:\n if self.connector is not None and not self._is_unexpected_game():\n await self._interact_with_game(dt)\n\n except MemoryOperationException as e:\n self.logger.warning(f\"Unable to perform memory operations: {e}\")\n self._world = None\n","repo_name":"vgm5/randovania","sub_path":"randovania/game_connection/connection_backend.py","file_name":"connection_backend.py","file_ext":"py","file_size_in_byte":7677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"70398716643","text":"from airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom datetime import datetime, timedelta\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport os\nfrom time import sleep\n\n\ndefault_args = {\n 'owner': 'vitor',\n 'retries': 1,\n 'retry_delay': timedelta(minutes=1)\n}\n\n\ndef init(ti):\n url = 
'http://pdet.mte.gov.br/novo-caged'\n response = requests.get(url)\n\n soup = BeautifulSoup(response.content, 'html.parser')\n\n # Procurando o texto com a data de lançamento do Novo Caged\n data = soup.find('h2', class_='outstanding-title').text\n\n return data\n\n\ndef formatando_data(ti):\n data = ti.xcom_pull(task_ids='init')\n # Selecionando apenas a data com regex\n padrao = r\"\\w+ - (.*)\"\n data_completa = re.findall(padrao, data)[0]\n\n # Selecionando apenas o mês e o ano separadamente\n padrao = r'(\\w+)' # Separa a string em ['Abril', 'de', '2023']\n mes = re.findall(padrao, data_completa)[0]\n ano = re.findall(padrao, data_completa)[2]\n\n # Dicionário de correspondência entre nomes dos meses e os seus números\n meses_para_numeros = {\n 'Janeiro': '01',\n 'Fevereiro': '02',\n 'Março': '03',\n 'Abril': '04',\n 'Maio': '05',\n 'Junho': '06',\n 'Julho': '07',\n 'Agosto': '08',\n 'Setembro': '09',\n 'Outubro': '10',\n 'Novembro': '11',\n 'Dezembro': '12'\n }\n\n # Obter o número correspondente usando o dicionário\n numero_mes = meses_para_numeros[mes]\n\n return ano, numero_mes\n\n\ndef download_arquivo(ti):\n ano = ti.xcom_pull(task_ids='formatando_data')[0]\n numero_mes = ti.xcom_pull(task_ids='formatando_data')[1]\n\n caminho_arquivo = 'csv_tratados/3-tabelas.xlsx'\n\n url_download = f'http://pdet.mte.gov.br/images/Novo_CAGED/{ano}/{ano}{numero_mes}/3-tabelas.xlsx'\n response = requests.get(url_download)\n\n if response.status_code == 200:\n with open(caminho_arquivo, 'wb') as f:\n f.write(response.content)\n print(\"Arquivo baixado com sucesso.\")\n\n # Novo nome do arquivo\n novo_nome_arquivo = f'{ano}{numero_mes}_caged.xlsx'\n novo_caminho_arquivo = os.path.join('csv_tratados', novo_nome_arquivo)\n\n # Renomeando o arquivo\n os.rename(caminho_arquivo, novo_caminho_arquivo)\n print(\"Arquivo renomeado com sucesso.\")\n\n else:\n print(\"Erro ao baixar o arquivo.\")\n\n\nwith DAG(\n default_args=default_args,\n dag_id='caged_dag',\n start_date=datetime(2023, 8, 1),\n schedule_interval='@daily',\n catchup=False\n) as dag:\n\n init_task = PythonOperator(\n task_id='init',\n python_callable=init\n )\n\n formatando_data_task = PythonOperator(\n task_id='formatando_data',\n python_callable=formatando_data\n )\n\n download_arquivo_task = PythonOperator(\n task_id='download_arquivo',\n python_callable=download_arquivo\n )\n\ninit_task >> formatando_data_task >> download_arquivo_task\n","repo_name":"vsaito10/airflow","sub_path":"dags/caged_dag.py","file_name":"caged_dag.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27442593156","text":"# 创建主窗口\nimport tkinter as tk\n\nwindow = tk.Tk()\nwindow.title('弹窗')\nwindow.geometry('200x200')\n\n\ndef hit_me():\n # 创建弹窗\n # 下面有六种类型的弹窗,可以一一分别尝试\n # tk.messagebox.showinfo(title='Hi', message='hahahaha') # 通知型弹窗\n # tk.messagebox.showwarning(title='Hi', message='nononono') # 警告型弹窗\n # tk.messagebox.showerror(title='Hi', message='No!! 
never') # 报错型弹窗\n # print(tk.messagebox.askquestion(title='Hi', message='hahahaha')) # 询问型弹窗\n # print(tk.messagebox.askyesno(title='Hi', message='hahahaha')) # 询问型弹窗\n print(tk.messagebox.askyesnocancel(title=\"Hi\", message=\"haha\")) # 询问,有三种回答\n\n\n# 创建按钮,点击该按钮会弹出新窗口\ntk.Button(window, text='点击我', command=hit_me).pack()\n\nwindow.mainloop()\n\n\n\n\n\nimport tkinter.messagebox # 弹窗库\n\n# 1、提示消息框\n\ntkinter.messagebox.showinfo('提示','人生苦短')\n\n# 2、消息警告框\n\ntkinter.messagebox.showwarning('警告','明日有大雨')\n\n# 3、错误消息框\n\ntkinter.messagebox.showerror('错误','出错了')\n\n# 4、对话框\n\ntkinter.messagebox.askokcancel('提示', '要执行此操作吗')#确定/取消,返回值true/false\n\ntkinter.messagebox.askquestion('提示', '要执行此操作吗')#是/否,返回值yes/no\n\ntkinter.messagebox.askyesno('提示', '要执行此操作吗')#是/否,返回值true/false\n\ntkinter.messagebox.askretrycancel('提示', '要执行此操作吗')#重试/取消,返回值true/false\n\n# 5、文件对话框\n\nimport tkinter.filedialog\na=tkinter.filedialog.asksaveasfilename()#返回文件名\nprint(a)\na =tkinter.filedialog.asksaveasfile()#会创建文件\nprint(a)\na =tkinter.filedialog.askopenfilename()#返回文件名\nprint(a)\na =tkinter.filedialog.askopenfile()#返回文件流对象\nprint(a)\na =tkinter.filedialog.askdirectory()#返回目录名\nprint(a)\na =tkinter.filedialog.askopenfilenames()#可以返回多个文件名\nprint(a)\na =tkinter.filedialog.askopenfiles()#多个文件流对象\nprint(a)\n\n","repo_name":"flyingtothe/Python","sub_path":"14-tkinter/p20.py","file_name":"p20.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16782857593","text":"ip = int(input())\n\nfor case in range(ip):\n a, b = map(int, input().split())\n while a != b:\n if a > b:\n a = a//2\n else:\n b = b//2\n print(10*a)\n","repo_name":"yyytae0/algorithm-training","sub_path":"baekjoon/13116.py","file_name":"13116.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31005705353","text":"class RadiusVector2D:\n MIN_COORD = 0\n MAX_COORD = 1024\n\n def __init__(self, x=0, y=0):\n self.__x = x if self.__check(x) else 0\n self.__y = y if self.__check(y) else 0\n\n @property\n def x(self):\n return self.__x\n\n @x.setter\n def x(self, x):\n self.__x = x if self.__check(x) else self.__x\n\n @property\n def y(self):\n return self.__y\n\n @y.setter\n def y(self, y):\n self.__y = y if self.__check(y) else self.__y\n\n @staticmethod\n def norm2(vector):\n return vector.x**2 + vector.y**2\n\n @classmethod\n def __check(cls, value):\n if type(value) in (int, float):\n if cls.MIN_COORD <= value <= cls.MAX_COORD:\n return True\n return False\n\n\nv1 = RadiusVector2D()\nv2 = RadiusVector2D(3, 4)\nv3 = RadiusVector2D(-12, 2)\n\nprint(v2.x, v2.y)\nv2.x = 14\nprint(v2.x)\nprint(v3.x)\nprint(RadiusVector2D.norm2(v1), RadiusVector2D.norm2(v2), RadiusVector2D.norm2(v3))\n","repo_name":"koromsergei/Python_tasks_algorithm","sub_path":"STEPIK/OOP/2 Режимы доступа, свойства и дескрипторы/2.2 Свойства property. 
Декоратор @property/2.2.7.py","file_name":"2.2.7.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34496093274","text":"import numpy as np\nimport logging\nimport math\n\nimport config\n\nif config.get(\"use_h5py\"):\n import h5py\nelse:\n import h5pyd as h5py\n\nfrom common import ut, TestCase\n\n\nclass TestDatasetCompound(TestCase):\n def test_create_compound_dset(self):\n filename = self.getFileName(\"create_compound_dset\")\n print(\"filename:\", filename)\n f = h5py.File(filename, \"w\")\n\n #curl -v --header \"Host: create_compound_dset.h5pyd_test.hdfgroup.org\" http://127.0.0.1:5000\n\n\n count = 10\n\n dt = np.dtype([('real', float), ('img', float)])\n dset = f.create_dataset('complex', (count,), dtype=dt)\n\n elem = dset[0]\n for i in range(count):\n theta = (4.0 * math.pi)*(float(i)/float(count))\n elem['real'] = math.cos(theta)\n elem['img'] = math.sin(theta)\n dset[i] = elem\n\n val = dset[0]\n self.assertEqual(val['real'], 1.0)\n f.close()\n\n def test_onefield_compound_dset(self):\n filename = self.getFileName(\"test_onefield_compound_dset\")\n print(\"filename:\", filename)\n f = h5py.File(filename, \"w\")\n \n count = 10\n\n dt = np.dtype([('a_field', int),])\n dset = f.create_dataset('a_field', (count,), dtype=dt)\n\n elem = dset[0]\n for i in range(count):\n elem['a_field'] = i*2\n dset[i] = elem\n\n val = dset[5]\n self.assertEqual(val['a_field'], 10)\n self.assertEqual(len(dset.dtype), 1)\n self.assertEqual(dset.dtype.kind, \"V\")\n f.close()\n\nif __name__ == '__main__':\n loglevel = logging.ERROR\n logging.basicConfig(format='%(asctime)s %(message)s', level=loglevel)\n ut.main()\n","repo_name":"HDFGroup/h5pyd","sub_path":"test/hl/test_dataset_compound.py","file_name":"test_dataset_compound.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"54"} +{"seq_id":"15177853019","text":"import openpyxl\n\n# Load the workbook\nwb = openpyxl.load_workbook(\"testbook1.xlsx\")\n\n# Select the sheet you want to modify\nsheet = wb[\"Sheet1\"]\n\n# Define the row number you want to delete\nrow_to_delete = 17\n\n# Delete the row\nsheet.delete_rows(row_to_delete, 1)\n\n# Save the changes to the workbook\nwb.save(\"testbook1.xlsx\")\n","repo_name":"CrismonicWave-org/pythonProjects","sub_path":"excelProcessing/deleterow.py","file_name":"deleterow.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73529590881","text":"##\n# Plot grafico\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfig, ax = plt.subplots() # Create a figure containing a single axes\nax.plot([1, 2, 3, 4], [1, 4, 3, 3]) # Plot some data on the axes\n\nplt.show()\n\n# Plot array\nx = np.arange(0,10,0.1)\ny = np.sin(x)\n\nplt.plot(x,y)\nplt.show() # Abbiamo plottato l'array xy\n","repo_name":"forlanosimone/course-python","sub_path":"8 - Librerie/mathplotlib_example.py","file_name":"mathplotlib_example.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5553396044","text":"#!/usr/bin/env python3\n\nimport sys\nimport binhack_helper\n\ndef main():\n game = binhack_helper.default_arg_parser(require_game=True).parse_args().game\n thc = binhack_helper.ThcrapGen('ExpHP.bullet-cap.')\n defs = 
binhack_helper.NasmDefs.from_file_rel('common.asm')\n\n # Uncomment this to give yourself a chance to attach CE if you need to debug a crash in life before main.\n # if 'th07' <= game <= 'th08':\n # thc.binhack('loop', {\n # 'addr': {\n # 'th07': 0x47ea7d,\n # 'th08': 0x4a619e,\n # }[game],\n # 'expected': thc.asm(f'push 0x60'),\n # 'code': thc.asm('loop: jmp loop'),\n # })\n\n add_initializing_binhack(game, thc, defs)\n add_other_binhacks(game, thc, defs)\n\n thc.print()\n\ndef add_initializing_binhack(game, thc, defs):\n # Here we add the binhack which does all of the dword search-and-replace stuff.\n\n # There aren't many places that are guaranteed to run exactly once,\n # so to avoid contention with other patches we choose an innocuous place\n # in code that runs while starting a new game, and simply make our\n # implementation idempotent.\n if 'th06' <= game <= 'th08':\n # Early games: GameThread global doesn't exist yet,\n # so do it right before the call to BulletManager::initialize instead.\n start_addr, end_addr, orig_call_addr, expected = {\n 'th06': (0x41c05a, 0x41c05f, 0x4148f0, \"e89188ffff\"),\n 'th07': (0x42f0db, 0x42f0e0, 0x4276a0, \"e8c085ffff\"),\n 'th08': (0x43b414, 0x43b419, 0x4311a0, \"e8875dffff\"),\n }[game]\n thc.binhack('install', {\n 'addr': start_addr,\n 'expected': expected,\n 'codecave': thc.asm(lambda c: f'''\n call {c.rel_auto('initialize')}\n\n # Original code\n mov eax, {orig_call_addr:#x}\n call eax\n {c.jmp(end_addr)}\n '''),\n })\n\n elif game == 'th09':\n # Similar place to above but before the loop over the two sets of per-player globals\n thc.binhack('install', {\n 'addr': 0x41b209,\n 'expected': 'bec47d4a00',\n 'codecave': thc.asm(lambda c: f'''\n call {c.rel_auto('initialize')}\n\n # Original code\n mov esi, 0x4a7dc4\n {c.jmp(0x41b20e)}\n '''),\n })\n\n elif 'th10' <= game:\n # MoF onwards: Do it right before spawning the game thread\n start_addr, end_addr, orig_call_addr, expected = {\n 'th10': (0x420ec8, 0x420ecd, 0x44c150, 'e883b20200'),\n 'th11': (0x420328, 0x42032d, 0x42a500, 'e8d3a10000'),\n 'th12': (0x422758, 0x42275d, 0x430500, 'e8a3dd0000'),\n 'th125': (0x41d9a3, 0x41d9a8, 0x42a1a0, 'e8f8c70000'),\n 'th128': (0x426970, 0x426975, 0x434cb0, 'e83be30000'),\n 'th13': (0x42c4f0, 0x42c4f5, 0x43b280, 'e88bed0000'),\n 'th14': (0x4365c5, 0x4365ca, 0x445b00, 'e836f50000'),\n 'th143': (0x432f6a, 0x432f6f, 0x444ad0, 'e8611b0100'),\n 'th15': (0x43cbef, 0x43cbf4, 0x44d7f0, 'e8fc0b0100'),\n 'th16': (0x42d76e, 0x42d773, 0x43c5b0, 'e83dee0000'),\n 'th165': (0x429719, 0x42971e, 0x4397b0, 'e892000100'),\n 'th17': (0x4312ff, 0x431304, 0x442280, 'e87c0f0100'),\n 'th18': (0x443814, 0x443819, 0x454860, 'e847100100'),\n }[game]\n thc.binhack('install', {\n 'addr': start_addr,\n 'expected': expected,\n 'codecave': thc.asm(lambda c: f'''\n push ecx # save; might be an arg to the original function\n call {c.rel_auto('initialize')}\n pop ecx\n\n # original code\n mov eax, {orig_call_addr:#x}\n call eax\n # (can't use call-codecave and ret because it'd mess with stack args to the above call)\n {c.jmp(end_addr)}\n '''),\n })\n\n else:\n assert False, game\n\ndef add_other_binhacks(game, thc: binhack_helper.ThcrapGen, defs):\n # UFO (and GFW) actually has a bug in some of its loops over items where it misses the\n # last 16 cancel items because ZUN forgot to include UFOs in the iteration count.\n #\n # Fix these with direct binhacks so that they get picked up by our search and replace.\n def fix_ufo_item_bugs(true_item_count, binhack_addrs):\n 
thc.binhack('fix-ufo-item-bugs', {\n 'addr': binhack_addrs,\n 'expected': thc.data(true_item_count - 16),\n 'code': thc.data(true_item_count),\n })\n if game == 'th12':\n fix_ufo_item_bugs(true_item_count=0xa68, binhack_addrs=[\n 0x427243, # ItemManager::on_draw\n 0x427b5d, # involves PLAYER, seems to be dead code though\n ])\n elif game == 'th128':\n fix_ufo_item_bugs(true_item_count=0x2cc, binhack_addrs=[\n 0x429223, # ItemManager::on_draw\n ])\n\n # Normally bullet_cap uses its search-and-replace framework to substitute this sort of thing,\n # but the laser cap in th06 and th07 is so tiny that it sometimes gets optimized to a single byte.\n #\n # To make matters even worse, the 'cmp' intruction ends up being 4 bytes, too small to fit a jump,\n # so we also have to replace the 'jge'!\n if 'th06' <= game <= 'th07':\n old_laser_cap = {'th06': 0x40, 'th07': 0x40}[game]\n laser_cap = thc.binhack_collection('fix-laser-cap', lambda offset, br_not_taken, br_taken: {\n 'expected': [\n thc.asm(f'cmp dword ptr [ebp-{offset:#x}], {old_laser_cap:#x}'),\n '0f8d' # first bytes of the jge; we can't easily get the whole thing because it's a relative address\n ],\n 'codecave': thc.asm(lambda c: f'''\n push {defs.CAPID_LASER:#x}\n call {c.rel_auto('get-new-cap')}\n cmp dword ptr [ebp-{offset:#x}], eax\n jge taken\n {c.jmp(br_not_taken)}\n taken:\n {c.jmp(br_taken)}\n '''),\n })\n def add_laser_cap_binhack(address, offset, br_taken):\n br_not_taken = address + 4 + 6 # after the cmp and jge\n laser_cap.at(address, offset, br_not_taken=br_not_taken, br_taken=br_taken)\n\n if game == 'th06':\n add_laser_cap_binhack(0x41421e, 0x10, br_taken=0x41432b) # BulletManager::sub_414160_cancels\n add_laser_cap_binhack(0x41446d, 0x10, br_taken=0x414593) # BulletManager::sub_414360_cancels\n add_laser_cap_binhack(0x4146a2, 0x04, br_taken=0x4148e2) # BulletManager::sub_414670_does_smn_to_lasers\n add_laser_cap_binhack(0x415e1d, 0x08, br_taken=0x416499) # BulletManager::on_tick_0b\n add_laser_cap_binhack(0x416547, 0x04, br_taken=0x416769) # BulletManager::on_draw\n\n if game == 'th07':\n add_laser_cap_binhack(0x4188b3, 0x24, br_taken=0x418b39) # Enemy::hardcoded_func_07_s4_set\n add_laser_cap_binhack(0x418b73, 0x2c, br_taken=0x418e6e) # Enemy::hardcoded_func_08_s4_set\n add_laser_cap_binhack(0x424830, 0x10, br_taken=0x424984) # BulletManager::sub_424740_cancels_bullets\n add_laser_cap_binhack(0x424ab1, 0x10, br_taken=0x424be2) # BulletManager::sub_4249a0_cancels_bullets\n add_laser_cap_binhack(0x424e56, 0x04, br_taken=0x4250bf) # BulletManager::shoot_laser\n add_laser_cap_binhack(0x4263ec, 0x08, br_taken=0x426a4e) # BulletManager::on_tick_0c\n add_laser_cap_binhack(0x426c72, 0x04, br_taken=0x426f03) # BulletManager::on_draw_0a\n\n # TH09 also has a small laser cap, but the optimized lines are so different\n # from TH06/TH07 that we handle it separately.\n if game == 'th09':\n old_laser_cap = 0x30\n laser_size = 0x59c\n\n thc.binhack('fix-laser-cmp', {\n 'addr': 0x4132d1,\n 'expected': [\n thc.asm(f'cmp eax, {old_laser_cap:#x}'),\n '7cea' # a jl imm8\n ],\n 'codecave': thc.asm(lambda c: f'''\n push eax # save\n push {defs.CAPID_LASER:#x}\n call {c.rel_auto('get-new-cap')}\n mov ecx, eax\n pop eax\n\n cmp eax, ecx\n jl taken\n {c.jmp(0x4132d6)}\n taken:\n {c.jmp(0x4132c0)}\n '''),\n })\n\n thc.binhack('fix-laser-push', {\n 'addr': 0x4150a0,\n 'expected': thc.asm(f'''\n push {old_laser_cap:#x}\n push {laser_size:#x}\n '''),\n 'codecave': thc.asm(lambda c: f'''\n push {defs.CAPID_LASER:#x}\n call {c.rel_auto('get-new-cap')}\n 
push eax\n push {laser_size:#x}\n {c.jmp(0x4150a7)}\n '''),\n })\n\n # Patch for where games without cancel item freelists increment the next index.\n #\n # Due to the compiler optimizing this check into a bitwise operation,\n # we can't use the same value-substituting machinery we use for everything else.\n def cancel_index_hack(binhack_addr, reg, jmp_addr):\n thc.binhack('fix-next-cancel', {\n 'addr': binhack_addr,\n 'expected': thc.asm(f'''\n inc {reg}\n and {reg}, 0x800007ff\n '''),\n 'codecave': thc.asm(lambda c: f'''\n push {reg}\n call {c.rel_auto('next-cancel-index')}\n mov {reg}, eax\n {c.jmp(jmp_addr)}\n '''),\n })\n # jmp_addr should skip past the stuff that deals with negative values\n if game == 'th10': cancel_index_hack(0x41bdf9, 'edx', jmp_addr=0x41be0a)\n if game == 'th11': cancel_index_hack(0x42454d, 'ecx', jmp_addr=0x42455e)\n if game == 'th12': cancel_index_hack(0x427859, 'edx', jmp_addr=0x42786a)\n\n # Fixes the huge lag spikes that cause the game to appear to freeze when\n # canceling >10000 bullets.\n def perf_hack(binhack_addr, jmp_addr, expected, extra_cleanup=''):\n thc.binhack('cancel-perf-fix', {\n 'addr': binhack_addr,\n 'expected': expected,\n 'codecave': thc.asm(lambda c: f'''\n push edx # save\n push ecx # save\n push ecx # argument\n call {c.rel_auto('less-spikey-find-world-vm')}\n pop ecx\n pop edx\n\n test eax, eax\n jz continue\n\n success:\n # exit early from this function\n {extra_cleanup}\n ret 0x4\n\n continue:\n # go to part that checks UI list\n push esi # stack operation in code we're skipping over\n {c.jmp(jmp_addr)}\n '''),\n })\n # jmp_addr should point to the part that checks the UI list\n if game == 'th10': perf_hack(0x4491cd, jmp_addr=0x4491e5, expected='8b82d4da7200')\n if game == 'th11': perf_hack(0x4561ed, jmp_addr=0x456205, expected='8b822c567b00')\n if game == 'th13': perf_hack(0x46fbae, jmp_addr=0x46fbd1, expected='8b820882f400', extra_cleanup='pop ebp')\n\n # Leak and reuse Bullet Manager in TH15-TH165.\n #\n # Fixes crashes on starting a new game after a previous one in these games.\n # The crashes are due to difficulty finding contiguous memory regions for reallocating bullet manager,\n # especially when using the anm_leak patch to fix midgame crashes.\n if 'th15' <= game <= 'th165':\n bullet_mgr, item_mgr, malloc = {\n 'th15': (0x4e9a6c, 0x4e9a9c, 0x49039f),\n 'th16': (0x4a6dac, 0x4a6ddc, 0x4749ac),\n 'th165': (0x4b550c, 0x4b5634, 0x47a78d),\n }[game]\n\n # nops out a 'call free'\n keep_manager = thc.binhack('leak-mgrs', {'code': '90 90909090'}).at\n\n _no_zero_manager = thc.binhack_collection('no-zero-mgr', lambda mgr: {\n 'expected': thc.asm(f'mov dword ptr [{mgr:#x}], 0x0'),\n 'code': '9090 90909090 90909090',\n })\n no_zero_manager = lambda binhack_addr, mgr: _no_zero_manager(mgr=mgr).at(binhack_addr)\n\n _reuse_manager = thc.binhack_collection('reuse-mgr', lambda mgr, expected, jmp_addr: {\n 'expected': expected,\n 'codecave': thc.asm(lambda c: f'''\n mov eax, dword ptr [{mgr:#x}]\n test eax, eax\n jnz noalloc\n mov eax, {malloc:#x}\n call eax\n noalloc:\n {c.jmp(jmp_addr)}\n ''')\n })\n reuse_manager = lambda binhack_addr, mgr, expected: _reuse_manager(mgr=mgr, expected=expected, jmp_addr=binhack_addr+5).at(binhack_addr)\n\n # Apply reuse_manager at the 'call malloc' in BulletManager::operator new.\n # Apply keep_manager at every 'call free' after a call to the BulletManager destructor.\n if game == 'th15':\n reuse_manager(0x4191f9, mgr=bullet_mgr, expected='e8 a1710700')\n reuse_manager(0x43f76a, mgr=item_mgr, 
expected='e8 300c0500')\n no_zero_manager(0x419184, mgr=bullet_mgr)\n no_zero_manager(0x43f6ee, mgr=item_mgr)\n keep_manager([0x41923a, 0x419279, 0x4192a3, 0x421739, 0x43c8dd]) # bullet_mgr\n keep_manager([0x421799, 0x43c8f7, 0x43f7f9, 0x43f823]) # item_mgr\n elif game == 'th16':\n reuse_manager(0x411df9, mgr=bullet_mgr, expected='e8 ae2b0600')\n reuse_manager(0x42f469, mgr=item_mgr, expected='e8 3e550400')\n no_zero_manager(0x411d88, mgr=bullet_mgr)\n no_zero_manager(0x42f3db, mgr=item_mgr)\n keep_manager([0x411e37, 0x42d463]) # bullet_mgr\n keep_manager([0x42d482, 0x42f4a7]) # item_mgr\n elif game == 'th165':\n reuse_manager(0x40ebab, mgr=bullet_mgr, expected='e8 ddbb0600')\n reuse_manager(0x42bb2a, mgr=item_mgr, expected='e8 5eec0400')\n no_zero_manager(0x40ee7f, mgr=bullet_mgr)\n no_zero_manager(0x42bcd1, mgr=item_mgr)\n keep_manager([0x40eeb0]) # bullet_mgr\n keep_manager([0x42bce7]) # item_mgr\n else: assert False, game\n\nif __name__ == '__main__':\n main()\n","repo_name":"ExpHP/thcrap-patches","sub_path":"patches/bullet_cap/binhacks.py","file_name":"binhacks.py","file_ext":"py","file_size_in_byte":14351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42712631151","text":"#!/usr/bin/env python3\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = np.genfromtxt(\"data.txt\", delimiter=\" \")\nxdata = data[::2]\nydata = data[1:][::2]\nplt.scatter(xdata, ydata, marker='+')\n# plt.scatter(xdata, ydata, marker='o', c='', edgecolor='blue')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Random number graph of 16807')\nplt.savefig('rnggraph.png')\nplt.show()\n","repo_name":"regymm/USTCcomputer","sub_path":"2019Autumn/CompPhys/HW1-2/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"11564489002","text":"import sys\nimport os\nimport json\nimport config\nimport speaker\nimport spoken_datetime\n\ndef learn_name(name):\n # build a response\n response = \"Okay {}\".format(name)\n # say it\n speaker.speak(response)\n # save the data\n config.save_config(\"name\",name)\n\ndef decode(speech):\n # set config data\n config_data = config.get_config()\n\n if (speech == \"stop listening\"):\n speaker.speak(\"Goodbye\")\n sys.exit(0)\n else:\n if \"call me\" in speech:\n # get name from text\n name = speech.split(\"all me \")[1]\n learn_name(name)\n if \"my name is\" in speech:\n # get name from text\n name = speech.split(\"name is \")[1]\n learn_name(name)\n elif \"what's my name\" in speech:\n try:\n # using a try/catch here to see if the data exists in the config\n name = config_data[\"name\"]\n response = \"Your name is {}\".format(name)\n speaker.speak(response)\n except KeyError:\n speaker.speak(\"I don't know what to call you yet.\")\n elif \"what time is it\" in speech:\n import time\n timeObj = time.gmtime()\n hour = int(time.strftime(\"%H\"))\n minute = time.strftime(\"%M\")\n spokenTime = spoken_datetime.getSpokenTime(hour,minute)\n response = \"It's {}\".format(spokenTime)\n print(response)\n speaker.speak(response)\n\n return","repo_name":"jsownz/ai-start","sub_path":"decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23818789312","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nfrom functions import *\nfrom 
Club_functions.CDWS_func import CDWS_base\nfrom database import *\nimport altair as alt\nfrom html_temp import *\nimport os\nimport time\n\ndef app():\n create_CDWS()\n st.title('1. CDWS process function')\n st.write('Welcome to metrics')\n username = return_username()\n\n i = (username[0])\n res = str(''.join(map(str, i)))\n\n delite_temp_user(res)\n create_CDWS()\n col1,col2 = st.columns(2)\n with col1:\n \n st.info(\" To restart, you must delete the data and start over !!!\")\n # Process data\n if st.checkbox(\"Process data\"):\n df = pd.read_sql('SELECT * FROM Clubs_datas', conn)\n df_new = df[[\"Order_of_Expend\",\"Club\",\"State\",\"Competition\",\"Expenditures\",\"Arrivals\",\"Income\",\"Departures\",\"Balance\",\"Season\"]]\n st.dataframe(df_new)\n a_leuge_DF = CDWS_base(df_new)\n my_form = st.form(key = \"form123\")\n submit = my_form.form_submit_button(label = \"Submit\")\n if submit:\n st.success(\"Data processed:\")\n my_form_save = st.form(key = \"form1\")\n st.info(\"To process the data you must save it to the database\")\n submit = my_form_save.form_submit_button(label = \"Save data\")\n if submit:\n return_user_idd = return_user_id(res)\n i = (return_user_idd[0])\n res = int(''.join(map(str, i)))\n te = int(res)\n flag = return_id_CDWS_table(te)\n if flag == []:\n df = a_leuge_DF\n size = len(df)\n list1 = [0] * size\n for i in range(0,size):\n list1[i] = te\n df['user_id'] = list1\n create_CDWS()\n df.to_sql('CDWS_table',con=conn,if_exists='append')\n st.success(\"Data successfully saved !\")\n\n else:\n st.warning(\"Please first delete your records from the database !!\")\n # Export data\n form_export_csv = st.form(key = \"export_form\")\n submit = form_export_csv.form_submit_button(label = \"Export data\")\n if submit:\n return_user_idd = return_user_id(res)\n i = (return_user_idd[0])\n res = int(''.join(map(str, i)))\n te = int(res)\n flag = return_id_CDWS_table(te)\n if flag != []:\n if int(te) > 0:\n df = pd.read_sql_query('SELECT * FROM CDWS_table WHERE user_id = \"{}\"'.format(te),conn)\n df_new = df[[\"Order_of_Expend\",\"Club\",\"State\",\"Competition\",\"Expenditures\",\"Arrivals\",\"Income\",\"Departures\",\"Balance\",\"Season\",\"Inflacion_Income\",\"Inflacion_Expenditures\",\"Inflacion_Balance\"]]\n st.markdown(get_table_download_link_csv(df_new), unsafe_allow_html=True)\n st.success(\"Exported data\")\n else:\n st.warning(\"file not found\")\n st.info(\"Please process data again !\")\n # Delete data \n my_form_delite = st.form(key = \"form12\")\n submit = my_form_delite.form_submit_button(label = \"Delete data\")\n if submit:\n return_user_idd = return_user_id(res)\n i = (return_user_idd[0])\n res = int(''.join(map(str, i)))\n te = int(res)\n flag = (return_id_CDWS_table(te)) \n if flag != []:\n if int(te) > 0 :\n delite_CDWS(te)\n st.success(\"Deleted data\")\n st.info(\"Please process data\")\n else:\n st.warning(\"file not found\")\n st.info(\"Please process data again !\")\n try:\n if st.checkbox(\"Visualise data !!!\"):\n # Visualise data\n #st.write(\"Visualise data\",res)\n return_user_idd = return_user_id(res)\n st.write(\"\")\n i = (return_user_idd[0])\n res = int(''.join(map(str, i)))\n te = int(res)\n flag = return_id_CDWS_table(te)\n if flag != []:\n if int(te) > 0:\n st.success(\"Visualization of top 22 club expenditures by year of expenditure\")\n st.success(\"Without inflation rate\")\n df = pd.read_sql_query('SELECT * FROM CDWS_table WHERE user_id = \"{}\"'.format(te),conn)\n df_new = 
df[[\"Order_of_Expend\",\"Club\",\"State\",\"Competition\",\"Expenditures\",\"Arrivals\",\"Income\",\"Departures\",\"Balance\",\"Season\",\"Inflacion_Income\",\"Inflacion_Expenditures\",\"Inflacion_Balance\"]]\n df_new['Season'] = pd.to_datetime(df_new['Season'],format='%Y')\n df = df_new.nlargest(22,'Expenditures')\n\n brush = alt.selection(type='interval')\n\n points = alt.Chart(df).mark_point(size=200,filled=True).encode(\n x='Season',\n y='Expenditures',\n color=alt.condition(brush, 'Club', alt.value('lightgray'))\n ).add_selection(\n brush\n )\n\n bars = alt.Chart(df).mark_bar().encode(\n y='Club',\n color='Club',\n x='sum(Expenditures)'\n ).transform_filter(\n brush\n )\n\n st.write(points & bars)\n st.success(\"Visualization of top 22 club expenditures by year of expenditure\")\n st.success(\"With inflation rate\")\n df2 = df_new.nlargest(22,'Inflacion_Expenditures')\n brush1 = alt.selection(type='interval')\n\n points1 = alt.Chart(df2).mark_point(size=200,filled=True).encode(\n x='Season',\n y='Inflacion_Expenditures',\n color=alt.condition(brush1, 'Club', alt.value('lightgray'))\n ).add_selection(\n brush1\n )\n\n bars1 = alt.Chart(df2).mark_bar().encode(\n y='Club',\n color='Club',\n x='sum(Inflacion_Expenditures)'\n ).transform_filter(\n brush1\n )\n\n st.write(points1 & bars1)\n\n \n st.success(\"Visualised data\")\n else:\n st.warning(\"file not found\")\n st.info(\"Please process data again !!\")\n\n except Exception as e:\n st.write(\"Error, please restart the Visualisation checkbox !! \") ","repo_name":"Kpavicic00/FDR","sub_path":"apps/login_pages/club_apps/CDWS.py","file_name":"CDWS.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22702673347","text":"from pygame.locals import *\nimport pygame\nimport time\n\n\nfrom astar import read_maze, get_path_astart\nfrom config import CONFIG\n\nWINDOWS_WIDTH = CONFIG[\"UI\"][\"WINDOWS_WIDTH\"]\nWINDOWS_HEIGHT = CONFIG[\"UI\"][\"WINDOWS_HEIGHT\"]\n\n\nclass Player:\n def __init__(self):\n self.x = 0\n self.y = 0\n\n def move_to(self, maze_coordonates, maze_size):\n \"\"\"\n Moves the player from the unit grid to the pixel UI grid.\n \"\"\"\n x_unit_size = WINDOWS_WIDTH // maze_size[0]\n y_unit_size = WINDOWS_HEIGHT // maze_size[1]\n\n self.x = maze_coordonates[0] * x_unit_size\n self.y = maze_coordonates[1] * y_unit_size\n\n def display(self, display_surf, maze_size):\n \"\"\"\n Displays the player at the current position.\n \"\"\"\n x_unit_size = WINDOWS_WIDTH // maze_size[0]\n y_unit_size = WINDOWS_HEIGHT // maze_size[1]\n\n player_surf = pygame.image.load(\"player.png\").convert()\n player_surf = pygame.transform.scale(player_surf, (x_unit_size, y_unit_size))\n display_surf.blit(player_surf, (self.x, self.y))\n\n\nclass Maze:\n def __init__(self):\n self.maze = read_maze()\n self.size = (len(self.maze[0]), len(self.maze))\n self.solution = get_path_astart(\n self.maze, CONFIG[\"MAZE\"][\"START\"], CONFIG[\"MAZE\"][\"END\"]\n )\n\n def draw(self, display_surf):\n \"\"\"\n Draws the unit-grid maze scaled to the window boundaries.\n \"\"\"\n x_unit_size = WINDOWS_WIDTH // self.size[0]\n y_unit_size = WINDOWS_HEIGHT // self.size[1]\n\n block_surf = pygame.image.load(\"block.jpg\").convert()\n block_surf = pygame.transform.scale(block_surf, (x_unit_size, y_unit_size))\n\n for x in range(self.size[0]):\n for y in range(self.size[1]):\n\n if self.maze[y][x] == 1:\n display_surf.blit(block_surf, (x * x_unit_size, y * y_unit_size))\n\n def 
get_size(self):\n return self.size\n\n\nclass App:\n player = 0\n\n def __init__(self):\n self.player = Player()\n self.maze = Maze()\n self.maze_size = self.maze.get_size()\n\n def on_init(self):\n pygame.init()\n self._display_surf = pygame.display.set_mode(\n (WINDOWS_WIDTH, WINDOWS_HEIGHT), pygame.HWSURFACE\n )\n pygame.display.set_caption(\"Pygame maze with A*!\")\n\n def on_render(self, poz):\n time.sleep(CONFIG[\"UI\"][\"SLEEP\"])\n\n self._display_surf.fill((0, 0, 0))\n\n self.player.move_to(poz, self.maze_size)\n self.player.display(self._display_surf, self.maze_size)\n\n self.maze.draw(self._display_surf)\n pygame.display.flip()\n\n def on_cleanup(self):\n pygame.quit()\n\n def on_execute(self):\n if self.maze.solution is None:\n exit()\n\n self.on_init()\n print(self.maze.solution)\n\n while 1:\n for poz in self.maze.solution:\n pygame.event.pump()\n keys = pygame.key.get_pressed()\n if keys[K_ESCAPE]:\n pygame.quit()\n\n self.on_render(poz)\n\n self.on_cleanup()\n\n\nif __name__ == \"__main__\":\n theApp = App()\n theApp.on_execute()\n","repo_name":"SaladBreaker/Astar-with-pygame","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"16047277029","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom dataclasses import dataclass\n\nimport sys\nfrom itertools import count\nfrom typing import List, Tuple, Union\n\nINPUT_FILE = \"input.txt\"\n\n\n@dataclass(frozen=True, eq=True)\nclass BusSchedule:\n timestamp: int\n ids: List[int]\n\n\ndef get_closest_time(_id: int, timestamp: int) -> int:\n for value in count(start=0, step=_id):\n if value >= timestamp:\n return value\n return -1\n\n\ndef find_earliest_bus(schedule: BusSchedule) -> Tuple[Union[int, float], int]:\n min_closest_time = float(\"inf\")\n id_for_min_closest_time = -1\n for bus_id in schedule.ids:\n closest_time_for_id = get_closest_time(bus_id, schedule.timestamp)\n if closest_time_for_id >= schedule.timestamp:\n if closest_time_for_id < min_closest_time:\n id_for_min_closest_time = bus_id\n min_closest_time = closest_time_for_id\n return min_closest_time, id_for_min_closest_time\n\n\ndef read_bus_schedules() -> BusSchedule:\n timestamp = 0\n ids = list()\n with open(INPUT_FILE, \"r\") as f_handle:\n for index, line in enumerate(f_handle):\n if index == 0:\n if line:\n timestamp = int(line.rstrip())\n else:\n ids = [int(_id) for _id in line.rstrip().split(\",\") if _id != \"x\"]\n\n return BusSchedule(timestamp=timestamp, ids=ids)\n\n\ndef main():\n schelude = read_bus_schedules()\n min_time, _id = find_earliest_bus(schelude)\n print(f\"Result: {(min_time - schelude.timestamp) * _id}\")\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"mikeleppane/Advent_of_Code","sub_path":"2020/Day_13/solution_part1.py","file_name":"solution_part1.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40206309604","text":"x = 0\ny = 0\n\ndef init (a, b):\n global x ## the global statement assigns to the variable outside the function\n global y\n x = a\n y = b\n\ninit (11, 22)\n\nprint(x)\nprint(y)","repo_name":"GeorgeMig/Lection-4-PY","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72471415201","text":"# -*- coding: utf-8 
-*-\n\nimport argparse\nimport inspect\nimport math\nfrom multiprocessing import Pool\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport numpy as np\nimport os\nfrom pprint import pprint\nimport subprocess\nimport sys\n\nfrom lib.collection_utils import *\nfrom lib.io_utils import *\nfrom lib.math_utils import *\nfrom lib.processing_utils import *\n\n# input\nparser = argparse.ArgumentParser()\nparser.add_argument('-in', dest=\"INPUT_FILE\", default=\"tmp/items.csv\", help=\"Input file\")\nparser.add_argument('-dir', dest=\"SAMPLE_FILE_DIRECTORY\", default=\"tmp/ia_fedflixnara_samples/\", help=\"Directory to where the .csv files with sample data is found\")\nparser.add_argument('-out', dest=\"OUTPUT_DIR\", default=\"output/phrases/\", help=\"Output csv file\")\nparser.add_argument('-params', dest=\"PARAMS\", default=\"\", help=\"Parameters in query string format\")\nparser.add_argument('-probe', dest=\"PROBE\", action=\"store_true\", help=\"Just show details?\")\nparser.add_argument('-overwrite', dest=\"OVERWRITE\", action=\"store_true\", help=\"Overwrite existing data?\")\nparser.add_argument('-threads', dest=\"THREADS\", default=3, type=int, help=\"Number of concurrent threads, -1 for all available\")\nparser.add_argument('-pyv', dest=\"PYTHON_NAME\", default=\"python3\", help=\"Name of python command\")\na = parser.parse_args()\n\n# Read files\nfieldNames, items = readCsv(a.INPUT_FILE)\n\nfor i, item in enumerate(items):\n items[i][\"samplefilename\"] = a.SAMPLE_FILE_DIRECTORY + item[\"filename\"] + \".csv\"\n items[i][\"phrasefilename\"] = a.OUTPUT_DIR + item[\"filename\"] + \".csv\"\n\ndef getItemPhrases(item):\n global a\n\n if not a.OVERWRITE and os.path.isfile(item['phrasefilename']):\n print(\"%s already exists\" % item['phrasefilename'])\n return\n\n command = [a.PYTHON_NAME, 'samples_to_phrases.py',\n '-in', item[\"samplefilename\"],\n '-out', item[\"phrasefilename\"]]\n\n if len(a.PARAMS) > 0:\n params = parseQueryString(a.PARAMS)\n for key in params:\n command += ['-'+key, str(params[key])]\n\n if a.PROBE:\n command += ['-probe']\n\n printCommand(command)\n finished = subprocess.check_call(command)\n\npool = ThreadPool(getThreadCount(a.THREADS))\nresults = pool.map(getItemPhrases, items)\npool.close()\npool.join()\n\nprint(\"Done.\")\n","repo_name":"beefoo/media-tools","sub_path":"items_to_phrases.py","file_name":"items_to_phrases.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"54"} +{"seq_id":"4200834516","text":"def remove_common(str_1, str_2):\r\n # Returns the number of uncommon elements.\r\n common = set(str_1).intersection(set(str_2))\r\n list_1, list_2 = [x for x in str_1], [x for x in str_2]\r\n for x in common:\r\n list_1.remove(x), list_2.remove(x)\r\n return len(list_1) + len(list_2)\r\n\r\n\r\ndef count_flames(count):\r\n # Based on uncommon elements, Returns a relation.\r\n flames = [\"Friends\", \"Love\", \"Affection\", \"Marriage\", \"Enemy\", \"Siblings\"]\r\n while len(flames) > 1:\r\n flames.pop(count % len(flames) - 1)\r\n return flames[0]\r\n\r\n\r\nname_1, name_2 = input('Enter Name 1: ').lower(), input('Enter Name 2: ').lower()\r\nprint(f'Relationship: {count_flames(remove_common(name_1, name_2))}')\r\n","repo_name":"Chiranjeev-Kartik/Pyone","sub_path":"Games/Flames.py","file_name":"Flames.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"30153664057","text":"# Find the Programming Maestro\n# Ponkemon (https://programmers.co.kr/learn/courses/30/lessons/1845)\n\ndef solution(nums):\n answer = 0\n nums.sort()\n prev = -1\n for num in nums:\n if num != prev:\n answer += 1\n prev = num\n if answer == len(nums)//2: break\n \n return answer\n\n# =================================================================","repo_name":"eagerithm/algorithms","sub_path":"bugoverdose/math/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31682738003","text":"try:\n from Akoma.reasoner.BasicReasoner import BasicReasoner\n from Akoma.tokenizer.TokenType import TokenType\nexcept ModuleNotFoundError:\n try:\n from reasoner.BasicReasoner import BasicReasoner\n from tokenizer.TokenType import TokenType\n from utilities import utilities\n except ModuleNotFoundError as e:\n print(e)\n print(\"error\")\n exit(-1)\n\n\nclass OdlukaReasoner(BasicReasoner):\n\n def start(self, meta=None):\n body = False\n preface = []\n while self.current_token is not None:\n self.current_token = self.tokenizer.get_next_token()\n\n if (self.current_token is None):\n break\n if body is False and self.current_token.type <= TokenType.TACKA:\n DOC_TYPE = utilities.get_doc_type(\"\".join([s.value for s in self.preface]))\n if meta is not None:\n meta.change_subtype_url(DOC_TYPE)\n body = True\n self.akomabuilder.build_preface(preface)\n else:\n preface.append(self.current_token)\n if body:\n self.reason()\n","repo_name":"Gorluxor/MasterProject","sub_path":"Akoma/reasoner/OdlukaReasoner.py","file_name":"OdlukaReasoner.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8823833607","text":"#!/usr/bin/python3\n\"\"\"Takes a URL, sends a request, and displays the value of X-Request-Id.\"\"\"\nimport urllib.request\nimport sys\n\nif __name__ == \"__main__\":\n req = urllib.request.Request(sys.argv[1])\n with urllib.request.urlopen(req) as response:\n page = response.getheader(\"X-Request-Id\")\n print(page)\n","repo_name":"NikShiskobcki/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/1-hbtn_header.py","file_name":"1-hbtn_header.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4141772809","text":"\n# f-string formatting\n\nname = '홍길동'\nage = 30\nf'My name is {name}. I am {age} years old.'\n\nage = 30\nf'Next year I will turn {age+1}.'\n\nd = {'name':'홍길동ㅋ','age':50}\n\nf'My name is {d[\"name\"]}. 
My age is {d[\"age\"]}.'\n\n\na = \"hoooobby\"\na.find('b')\na.count(\"o\")\n\n\naa = \"Life is too short\"\naa.index(\"t\")\n\n# Both find and index can report a position.\n\naa.find(\"o\")\naa.index(\"o\")\n\n\n\n# String insertion: join\n\",\".join('abcd')\n\na = \"Hi\"\na.lower()\n\n\naa = \"Life is too short\"\n\naa.split()\n\n\n\n\n","repo_name":"includesorrow/KOSTA","sub_path":"Python/Exam/새 폴더/PythonExam/PythonExam/PythonExam.py","file_name":"PythonExam.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73077562403","text":"user_age = int(input('How old are you?'))\n\ndef user_def(age):\n if age < 7:\n answer = \"kindergarten\"\n elif age < 18:\n answer = \"school\"\n elif age <= 23:\n answer = \"university\"\n else:\n answer = \"time to get a job already, kid\"\n return answer\n\nhey_hey = user_def(user_age)\nprint(hey_hey)","repo_name":"smirnov8181/lesson2","sub_path":"age_elif.py","file_name":"age_elif.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3463847208","text":"synonyms = {\n \"thing\": [\"thing\", \"object\"],\n \"sphere\": [\"sphere\", \"ball\"],\n \"cube\": [\"cube\", \"block\"],\n \"large\": [\"large\", \"big\"],\n \"small\": [\"small\", \"tiny\"],\n \"metal\": [\"metallic\", \"metal\", \"shiny\"],\n \"rubber\": [\"rubber\", \"matte\"],\n \"left\": [\"left of\", \"to the left of\", \"on the left side of\"],\n \"right\": [\"right of\", \"to the right of\", \"on the right side of\"],\n \"behind\": [\"behind\"],\n \"front\": [\"in_front_of\"],\n \"above\": [\"above\"],\n \"below\": [\"below\"],\n}\n\nsyn_attrs = {\n \"thing\": [\"thing\", \"object\"],\n \"sphere\": [\"sphere\", \"ball\"],\n \"cube\": [\"cube\", \"block\"],\n \"large\": [\"large\", \"big\"],\n \"small\": [\"small\", \"tiny\"],\n \"metal\": [\"metallic\", \"metal\", \"shiny\"],\n \"rubber\": [\"rubber\", \"matte\"],\n}\n\ntype_attrs = {\n \"Shape\": [\n \"cube\", \"sphere\", \"cylinder\"\n ],\n \"Color\": [\n \"gray\", \"red\", \"blue\", \"green\", \"brown\", \"purple\", \"cyan\", \"yellow\"\n ],\n \"Relation\": [\n \"left\", \"right\", \"behind\", \"in_front_of\"\n ],\n \"Size\": [\n \"small\", \"large\"\n ],\n \"Material\": [\n \"rubber\", \"metal\"\n ]\n}\nattr_types = {}\nfor k, v in type_attrs.items():\n for vv in v:\n attr_types[vv] = k\n\ntype_attrs_ext = {}\nfor k, v in type_attrs.items():\n v_new = [] + v\n for vv in v:\n syns = syn_attrs.get(vv, None)\n if syns is None:\n continue\n v_new.extend(syns)\n v_new = list(set(v_new))\n type_attrs_ext[k] = v_new\n","repo_name":"zhaoyanpeng/sgi","sub_path":"sgi/data/clevr_constant.py","file_name":"clevr_constant.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36938532697","text":"import gym\nimport ic3net_envs\nfrom experiments.config_args import parse_args\nfrom sarnet_td3.common.env_wrapper import GymWrapper\nimport argparse\nimport sys\n\ndef init_args_for_env(parser):\n env_dict = {\n 'levers': 'Levers-v0',\n 'number_pairs': 'NumberPairs-v0',\n 'predator_prey': 'PredatorPrey-v0',\n 'traffic_junction': 'TrafficJunction-v0',\n 'starcraft': 'StarCraftWrapper-v0'\n }\n\n args = sys.argv\n # args_env = parser.parse_args()\n # env_name = args_env.scenario\n\n env_name = None\n for index, item in enumerate(args):\n if item == '--scenario':\n env_name = args[index + 
1]\n\n if not env_name or env_name not in env_dict:\n return\n import gym\n import ic3net_envs\n if env_name == 'starcraft':\n import gym_starcraft\n env = gym.make(env_dict[env_name])\n env.init_args(parser)\n\ndef init(args, final_init=True):\n if args.scenario == 'levers':\n env = gym.make('Levers-v0')\n env.multi_agent_init(args.total_agents, args.nagents)\n env = GymWrapper(env)\n elif args.scenario == 'number_pairs':\n env = gym.make('NumberPairs-v0')\n m = args.max_message\n env.multi_agent_init(args.nagents, m)\n env = GymWrapper(env)\n elif args.scenario == 'predator_prey':\n env = gym.make('PredatorPrey-v0')\n if args.display:\n env.init_curses()\n env.multi_agent_init(args)\n env = GymWrapper(env)\n elif args.scenario == 'traffic_junction':\n env = gym.make('TrafficJunction-v0')\n if args.display:\n env.init_curses()\n env.multi_agent_init(args)\n env = GymWrapper(env)\n elif args.scenario == 'starcraft':\n env = gym.make('StarCraftWrapper-v0')\n env.multi_agent_init(args, final_init)\n env = GymWrapper(env.env)\n\n else:\n raise RuntimeError(\"wrong env name\")\n\n return env\n\ndef ic3_parser_args(main_args):\n ic3_parser = main_args.add_argument_group('IC3 Env')\n # environment\n ic3_parser.add_argument('--nactions', default='1', type=str,\n help='the number of agent actions (0 for continuous). Use N:M:K for multiple actions')\n\n ic3_parser.add_argument('--random', action='store_true', default=False,\n help=\"enable random model\")\n init_args_for_env(main_args)\n\n return ic3_parser\n\ndef make_ic3_env(main_args):\n env = init(main_args, False)\n\n return env","repo_name":"caslab-vt/SARNet","sub_path":"sarnet_td3/common/ic3_env_setup.py","file_name":"ic3_env_setup.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"54"} +{"seq_id":"1024641161","text":"import json\nimport logging\nfrom datetime import datetime\n\nfrom bs4 import BeautifulSoup\nimport requests\nfrom opengraph_parse import parse_page\n\nfrom flaskog import db\nfrom flaskog.models import OGP\n\n\ndef get_canonical(page_url: str) -> str:\n response = requests.get(page_url)\n if response.status_code != 200:\n return page_url\n\n return parse_canonical(page_url, response.content)\n\n\ndef parse_canonical(page_url: str, content: bytes):\n soup = BeautifulSoup(content, 'html.parser')\n canonical = soup.find(\"link\", rel=\"canonical\")\n if canonical:\n value = canonical[\"href\"]\n return value\n canonical = soup.find(\"meta\", property=\"og:url\")\n if canonical:\n value = canonical[\"content\"]\n return value\n return page_url\n\n\ndef scrape_og_tags(url: str, url_id: int):\n record = OGP.query.filter_by(url_id=url_id).first()\n if not record:\n return\n content = {\"id\": str(url_id), \"url\": url}\n image = {}\n og_tags = parse_page(url)\n if og_tags:\n # convert og tags to our tags\n for key, value in og_tags.items():\n clean_key = key.replace(\"og:\", \"\")\n if clean_key == \"image\":\n image[\"url\"] = value\n elif clean_key.startswith(\"image\"):\n image[clean_key.replace(\"image:\", \"\")] = value\n else:\n content[clean_key] = value\n if image:\n content[\"images\"] = [image]\n content[\"scrape_status\"] = \"done\"\n else:\n content[\"scrape_status\"] = \"error\"\n content[\"updated_time\"] = str(datetime.now())\n record.json = json.dumps(content, default=str)\n 
db.session.commit()\n","repo_name":"rsperer/flask_opengraph","sub_path":"flaskog/og_parse.py","file_name":"og_parse.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30877881996","text":"# 2480\nnumbers = list(map(int, input().split()))\none = numbers[0]\ntwo = numbers[1]\nthree = numbers[2]\nresult = 0\nif one == two and two == three:\n result = 10000+one*1000\nelif one == two and two != three:\n result = 1000+one*100\nelif two == three and three != one:\n result = 1000+two*100\nelif three == one and one != two:\n result = 1000+three*100\nelse:\n max_value = max(numbers)\n result = max_value*100\nprint(result)\n","repo_name":"Yoon2442/python-study","sub_path":"baekjoon/baekjoon-2480.py","file_name":"baekjoon-2480.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73284768161","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom collections import OrderedDict\nfrom prune.pruning import *\n\nclass ResBlock(PruningModule):\n def __init__(self, in_channels, out_channels, stride=(1,1), mask=True):\n super(ResBlock, self).__init__()\n conv2d = MaskedConv2d if mask else nn.Conv2d\n self.conv1 = nn.Sequential(OrderedDict([\n ('conv', conv2d(in_channels, out_channels, kernel_size=(3,3), stride=stride, padding=(1,1), bias=True)),\n ('batchnorm', nn.BatchNorm2d(out_channels)),\n ]))\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = nn.Sequential(OrderedDict([\n ('conv',conv2d(out_channels, out_channels, kernel_size=(3,3), stride=(1,1), padding=(1,1), bias=True)),\n ('batchnorm', nn.BatchNorm2d(out_channels))\n ]))\n self.shortcut = nn.Sequential()\n self.check = False # To place shortcut or not\n if stride != (1,1) or in_channels != out_channels:\n self.check = True\n self.shortcut = nn.Sequential(OrderedDict([\n ('conv', conv2d(in_channels, out_channels, kernel_size=(1,1), stride=stride, padding=(0,0), bias=True)),\n ('batchnorm', nn.BatchNorm2d(out_channels))\n ]))\n self.relu2 = nn.ReLU(inplace=True)\n \n def forward(self, x):\n out = self.conv1(x)\n out = self.relu1(out)\n out = self.conv2(out)\n if self.check == True:\n out = out + self.shortcut(x)\n out = self.relu2(out)\n return out\n\nclass ResNet18(PruningModule):\n # For CIFAR10, the \"num_classes\" should be set to 10.\n # For ImageNet, the \"num_classes\" should be set to 1000.\n def __init__(self, ResBlock, dataset=\"cifar10\", num_classes=10, feature_num=512, mask=True):\n super(ResNet18, self).__init__()\n # linear = MaskedLinear if mask else nn.Linear\n linear = nn.Linear\n conv2d = MaskedConv2d if mask else nn.Conv2d\n \n if dataset == \"cifar10\" or dataset == \"cifar100\":\n self.input_channels = 3\n mulScale = 1\n elif dataset == \"imagenet-tiny\":\n self.input_channels = 3\n mulScale = 49 # 224x224 -> 7x7\n elif dataset == \"mnist\":\n self.input_channels = 1\n mulScale = 1\n \n # Input layer\n self.inputConv = nn.Sequential(OrderedDict([\n ('conv', conv2d(self.input_channels, 64, kernel_size=(3,3), stride=(1,1), padding=(1,1), bias=True)), \n ('batchnorm', nn.BatchNorm2d(64)),\n ]))\n self.inReLU = nn.ReLU(inplace=True)\n \n # ResBlocks : Each one contains 2 convolution layers.\n self.Res1 = ResBlock(64, 64, stride=(1,1))\n self.Res2 = ResBlock(64, 64, stride=(1,1))\n self.Res3 = ResBlock(64, 128, stride=(2,2))\n self.Res4 = 
ResBlock(128, 128, stride=(1,1))\n self.Res5 = ResBlock(128, 256, stride=(2,2))\n self.Res6 = ResBlock(256, 256, stride=(1,1))\n self.Res7 = ResBlock(256, 512, stride=(2,2))\n self.Res8 = ResBlock(512, 512, stride=(1,1))\n \n # Fully-connected and AvgPool2d\n self.fc = linear(feature_num * mulScale, num_classes)\n self.avgpool2d = nn.AvgPool2d(4) # square window of (kernel) size = 4\n\n def forward(self, x):\n out = self.inputConv(x)\n out = self.inReLU(out)\n \n out = self.Res1(out)\n out = self.Res2(out)\n out = self.Res3(out)\n out = self.Res4(out)\n out = self.Res5(out)\n out = self.Res6(out)\n out = self.Res7(out)\n out = self.Res8(out)\n\n out = self.avgpool2d(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n ","repo_name":"ChengShaoFong/Yolov7_pytorch_rebuild","sub_path":"models/resnet18/resnet18_net.py","file_name":"resnet18_net.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71587142881","text":"import dgl\nimport dgl.function as fn\nimport torch.nn as nn\nfrom modules.initializers import GlorotOrthogonal\nfrom modules.residual_layer import ResidualLayer\n\n\nclass InteractionPPBlock(nn.Module):\n def __init__(\n self,\n emb_size,\n int_emb_size,\n basis_emb_size,\n num_radial,\n num_spherical,\n num_before_skip,\n num_after_skip,\n activation=None,\n ):\n super(InteractionPPBlock, self).__init__()\n\n self.activation = activation\n # Transformations of Bessel and spherical basis representations\n self.dense_rbf1 = nn.Linear(num_radial, basis_emb_size, bias=False)\n self.dense_rbf2 = nn.Linear(basis_emb_size, emb_size, bias=False)\n self.dense_sbf1 = nn.Linear(\n num_radial * num_spherical, basis_emb_size, bias=False\n )\n self.dense_sbf2 = nn.Linear(basis_emb_size, int_emb_size, bias=False)\n # Dense transformations of input messages\n self.dense_ji = nn.Linear(emb_size, emb_size)\n self.dense_kj = nn.Linear(emb_size, emb_size)\n # Embedding projections for interaction triplets\n self.down_projection = nn.Linear(emb_size, int_emb_size, bias=False)\n self.up_projection = nn.Linear(int_emb_size, emb_size, bias=False)\n # Residual layers before skip connection\n self.layers_before_skip = nn.ModuleList(\n [\n ResidualLayer(emb_size, activation=activation)\n for _ in range(num_before_skip)\n ]\n )\n self.final_before_skip = nn.Linear(emb_size, emb_size)\n # Residual layers after skip connection\n self.layers_after_skip = nn.ModuleList(\n [\n ResidualLayer(emb_size, activation=activation)\n for _ in range(num_after_skip)\n ]\n )\n\n self.reset_params()\n\n def reset_params(self):\n GlorotOrthogonal(self.dense_rbf1.weight)\n GlorotOrthogonal(self.dense_rbf2.weight)\n GlorotOrthogonal(self.dense_sbf1.weight)\n GlorotOrthogonal(self.dense_sbf2.weight)\n GlorotOrthogonal(self.dense_ji.weight)\n nn.init.zeros_(self.dense_ji.bias)\n GlorotOrthogonal(self.dense_kj.weight)\n nn.init.zeros_(self.dense_kj.bias)\n GlorotOrthogonal(self.down_projection.weight)\n GlorotOrthogonal(self.up_projection.weight)\n\n def edge_transfer(self, edges):\n # Transform from Bessel basis to dense vector\n rbf = self.dense_rbf1(edges.data[\"rbf\"])\n rbf = self.dense_rbf2(rbf)\n # Initial transformation\n x_ji = self.dense_ji(edges.data[\"m\"])\n x_kj = self.dense_kj(edges.data[\"m\"])\n if self.activation is not None:\n x_ji = self.activation(x_ji)\n x_kj = self.activation(x_kj)\n\n x_kj = self.down_projection(x_kj * rbf)\n if self.activation is not None:\n x_kj = 
self.activation(x_kj)\n return {\"x_kj\": x_kj, \"x_ji\": x_ji}\n\n def msg_func(self, edges):\n sbf = self.dense_sbf1(edges.data[\"sbf\"])\n sbf = self.dense_sbf2(sbf)\n x_kj = edges.src[\"x_kj\"] * sbf\n return {\"x_kj\": x_kj}\n\n def forward(self, g, l_g):\n g.apply_edges(self.edge_transfer)\n\n # nodes correspond to edges and edges correspond to nodes in the original graphs\n # node: d, rbf, o, rbf_env, x_kj, x_ji\n for k, v in g.edata.items():\n l_g.ndata[k] = v\n\n l_g_reverse = dgl.reverse(l_g, copy_edata=True)\n l_g_reverse.update_all(self.msg_func, fn.sum(\"x_kj\", \"m_update\"))\n\n g.edata[\"m_update\"] = self.up_projection(l_g_reverse.ndata[\"m_update\"])\n if self.activation is not None:\n g.edata[\"m_update\"] = self.activation(g.edata[\"m_update\"])\n # Transformations before skip connection\n g.edata[\"m_update\"] = g.edata[\"m_update\"] + g.edata[\"x_ji\"]\n for layer in self.layers_before_skip:\n g.edata[\"m_update\"] = layer(g.edata[\"m_update\"])\n g.edata[\"m_update\"] = self.final_before_skip(g.edata[\"m_update\"])\n if self.activation is not None:\n g.edata[\"m_update\"] = self.activation(g.edata[\"m_update\"])\n\n # Skip connection\n g.edata[\"m\"] = g.edata[\"m\"] + g.edata[\"m_update\"]\n\n # Transformations after skip connection\n for layer in self.layers_after_skip:\n g.edata[\"m\"] = layer(g.edata[\"m\"])\n\n return g\n","repo_name":"dmlc/dgl","sub_path":"examples/pytorch/dimenet/modules/interaction_pp_block.py","file_name":"interaction_pp_block.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"71639843361","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 18 20:51:47 2020\n\n@author: omedeiro\n\"\"\"\n\n\nfrom __future__ import division, print_function, absolute_import\nimport numpy as np\nfrom phidl import Device\nimport phidl.geometry as pg\nimport phidl.routing as pr\nfrom phidl import quickplot as qp\n# import colang as mc\nimport string\nfrom datetime import datetime\nimport os\nimport sys\nfrom time import sleep\nfrom phidl.device_layout import _parse_layer, DeviceReference\nimport math\nfrom argparse import Namespace \n\nsys.path.append(r'Q:\\qnngds')\nimport qnngds.utilities as qu\n\n\nfrom phidl import set_quickplot_options\nset_quickplot_options(show_ports=True, show_subports=True)\n\ndef outline(elements, distance = 1, precision = 1e-4, num_divisions = [1, 1],\n join = 'miter', tolerance = 2, join_first = True,\n max_points = 4000, layer = 0, open_ports=-1, rotate_ports=False):\n \"\"\" Creates an outline around all the polygons passed in the `elements`\n argument. `elements` may be a Device, Polygon, or list of Devices.\n Parameters\n ----------\n elements : Device(/Reference), list of Device(/Reference), or Polygon\n Polygons to outline or Device containing polygons to outline.\n distance : int or float\n Distance to offset polygons. Positive values expand, negative shrink.\n precision : float\n Desired precision for rounding vertex coordinates.\n num_divisions : array-like[2] of int\n The number of divisions with which the geometry is divided into \n multiple rectangular regions. 
This allows for each region to be \n processed sequentially, which is more computationally efficient.\n join : {'miter', 'bevel', 'round'}\n Type of join used to create the offset polygon.\n tolerance : int or float\n For miter joints, this number must be at least 2 and it represents the \n maximal distance in multiples of offset between new vertices and their \n original position before beveling to avoid spikes at acute joints. For \n round joints, it indicates the curvature resolution in number of \n points per full circle.\n join_first : bool\n Join all paths before offsetting to avoid unnecessary joins in \n adjacent polygon sides.\n max_points : int\n The maximum number of vertices within the resulting polygon.\n layer : int, array-like[2], or set\n Specific layer(s) to put polygon geometry on.\n open_ports : int or float\n Trims the outline at each port of the element. The value of open_port\n scales the length of the trim gemoetry (must be positive). \n Useful for positive tone layouts. \n Returns\n -------\n D : Device\n A Device containing the outlined polygon(s).\n \"\"\"\n D = Device('outline')\n if type(elements) is not list: elements = [elements]\n for e in elements:\n if isinstance(e, Device): D.add_ref(e)\n else: D.add(e)\n gds_layer, gds_datatype = _parse_layer(layer)\n D_bloated = pg.offset(D, distance = distance, join_first = join_first,\n num_divisions = num_divisions, precision = precision,\n max_points = max_points, join = join,\n tolerance = tolerance, layer = layer)\n Outline = pg.boolean(A = D_bloated, B = D, operation = 'A-B',\n num_divisions = num_divisions, max_points = max_points,\n precision = precision, layer = layer)\n if open_ports>=0:\n for i in e.ports:\n trim = pg.rectangle(size=(distance, e.ports[i].width+open_ports*distance))\n\n trim.rotate(e.ports[i].orientation)\n trim.move(trim.center, destination=e.ports[i].midpoint)\n if rotate_ports:\n trim.movex(-np.cos(e.ports[i].orientation/180*np.pi)*distance/2)\n trim.movey(-np.sin(e.ports[i].orientation/180*np.pi)*distance/2)\n else:\n trim.movex(np.cos(e.ports[i].orientation/180*np.pi)*distance/2)\n trim.movey(np.sin(e.ports[i].orientation/180*np.pi)*distance/2)\n\n Outline = pg.boolean(A = Outline, B = trim, operation = 'A-B',\n num_divisions = num_divisions, max_points = max_points,\n precision = precision, layer = layer)\n for i in e.ports: Outline.add_port(port=e.ports[i])\n return Outline\n\n\ndef nw_same_side(wire_width = 0.2, wire_pitch=0.6,size=(22,11),layer = 1):\n \"\"\"\n Create a two port nanowire meander with 1um ports extended 15um.\n\n Parameters\n ----------\n wire_width : FLOAT, optional\n MEANDER WIDTH. The default is 0.2.\n wire_pitch : FLOAT, optional\n MEANDER PITCH. The default is 0.6.\n size : TUPLE, optional\n (X,Y) MEANDER AREA DIMENSIONS. The default is (22,11).\n layer : INT, optional\n Layer for device to be created on. 
The default is 1.\n\n Returns\n -------\n wire : DEVICE\n PHIDL device object is returned.\n\n Example\n -------\n qp(om.nw_same_side())\n \n \"\"\"\n \n wire = Device('wire')\n nw = pg.snspd(wire_width = wire_width, wire_pitch=wire_pitch,size=size,terminals_same_side=True,layer = layer)\n NW = wire.add_ref(nw)\n \n extend = pg.straight(size=(1,15))\n EXTEND = wire.add_ref(extend)\n EXTEND.rotate(-90).move(EXTEND.ports[1],destination=NW.ports[1]).movex(-5)\n \n EXTEND1 = wire.add_ref(extend)\n EXTEND1.rotate(-90).move(EXTEND1.ports[1],destination=NW.ports[2]).movex(-5)\n \n bump = pr.route_basic(NW.ports[1],EXTEND.ports[1],path_type='sine',width_type='sine')\n wire.add_ref(bump)\n \n bump = pr.route_basic(NW.ports[2],EXTEND1.ports[1],path_type='sine',width_type='sine')\n wire.add_ref(bump)\n wire.move(origin=NW.center,destination=(0,0))\n wire.flatten(single_layer=layer)\n wire.add_port(name=1,midpoint=(wire.bbox[0][0],wire.bbox[1][1]-1/2),orientation=180)\n wire.add_port(name=2,midpoint=(wire.bbox[0][0],-wire.bbox[1][1]+1/2),orientation=180)\n \n\n return wire\n\n\ndef nw_same_side_port(wire_width = 0.2, wire_pitch=0.6,size=(22,11),layer = 1):\n \"\"\"\n Create a nanowire meander section coupled to two macroscopic ports for\n pad connection. \n \n Future: define destination as an input. Make the connection a straight taper.\n\n Parameters\n ----------\n wire_width : FLOAT, optional\n MEANDER WIDTH. The default is 0.2.\n wire_pitch : FLOAT, optional\n MEANDER PITCH. The default is 0.6.\n size : TUPLE, optional\n (X,Y) MEANDER AREA DIMENSIONS. The default is (22,11).\n layer : INT, optional\n Layer for device to be created on. The default is 1.\n\n Returns\n -------\n nwOut : DEVICE\n PHIDL device object is returned.\n\n \"\"\"\n \n device = Device('nw')\n WIRE = nw_same_side(wire_width = wire_width, wire_pitch=wire_pitch,size=size,layer=layer)\n WIRE.rotate(-90).move(origin=(0,0),destination=(52.5, 52.2))\n wire = device.add_ref(WIRE)\n \n d = pads_adam_quad(layer=1)\n d.move(origin=d.center,destination=(0,0))\n \n hTAPER = hyper_taper(length = 50, wide_section=45, narrow_section=5,layer=0)\n htaper = device.add_ref(hTAPER)\n htaper.rotate(90).move(origin=htaper.ports[2],destination=d.ports['21'])\n ROUT = pr.route_basic(wire.ports[1],htaper.ports[1],width_type='straight',path_type='sine')\n rout = device.add_ref(ROUT)\n \n htaper1 = device.add_ref(hTAPER)\n htaper1.rotate(90).move(origin=htaper1.ports[2],destination=d.ports['22'])\n ROUT = pr.route_basic(wire.ports[2],htaper1.ports[1],width_type='straight',path_type='sine')\n rout = device.add_ref(ROUT)\n\n nwOut = pg.outline(device,distance=.1,precision=1e-4,layer=0)\n trim = pg.rectangle(size=(150,.2))\n trim.move(origin=trim.center,destination=(nwOut.center[0],nwOut.bbox[1][1]))\n t = nwOut.add_ref(trim)\n nwOut = pg.boolean(nwOut,t,'A-B',precision=1e-4,layer=layer)\n nwOut.add_port(name = 'wide0', port = htaper.ports[2])\n nwOut.add_port(name = 'wide1', port = htaper1.ports[2])\n\n return nwOut\n\n\n\ndef nw_same_side_port_single(wire_width = 0.2, wire_pitch=0.6,size=(22,11),terminals_same_side=True,layer = 1, portLoc1 = (37.5,131.25), portLoc2 = (-52.5,131.25),nwLoc = (0,0)):\n \"\"\" Broken; do not use.\n \n \"\"\"\n device = Device('nw')\n WIRE = nw_same_side(wire_width = wire_width, wire_pitch=wire_pitch,size=size,terminals_same_side=terminals_same_side,layer=layer)\n WIRE.rotate(-90).move(origin=(0,0),destination=nwLoc)\n wire = device.add_ref(WIRE)\n \n d = pads_adam_quad(layer=1)\n d.move(origin=d.center,destination=(0,0))\n \n 
hTAPER = hyper_taper(length = 50, wide_section=45, narrow_section=5,layer=0)\n htaper = device.add_ref(hTAPER)\n htaper.rotate(90).move(origin=htaper.ports[2],destination=d.ports['23'])\n ROUT = pr.route_basic(wire.ports[1],htaper.ports[1],width_type='straight',path_type='sine')\n rout = device.add_ref(ROUT)\n \n hTAPER1 = hyper_taper(length = 15, wide_section=15, narrow_section=5,layer=0)\n htaper1 = device.add_ref(hTAPER1)\n htaper1.rotate(90).move(origin=htaper1.ports[2],destination=[nwLoc[0]-95,nwLoc[1]+95])\n ROUT = pr.route_basic(wire.ports[2],htaper1.ports[1],width_type='straight',path_type='sine')\n rout = device.add_ref(ROUT)\n\n nwOut = pg.outline(device,distance=.1,precision=1e-4,layer=0)\n trim = pg.rectangle(size=(55,.1))\n trim.move(origin=trim.center,destination=(htaper.center[0],htaper.bbox[1][1]+.05))\n trim1 = pg.rectangle(size=(20,.1))\n trim1.move(origin=trim1.center,destination=(htaper1.center[0],htaper1.bbox[1][1]+.05))\n\n t = nwOut.add_ref(trim)\n t1 = nwOut.add_ref(trim1)\n nwOut = pg.boolean(nwOut,t,'A-B',precision=1e-4,layer=layer)\n nwOut = pg.boolean(nwOut,t1,'A-B',precision=1e-4,layer=layer)\n nwOut.add_port(name = 'wide0', port = htaper.ports[2])\n nwOut.add_port(name = 'wide1', port = htaper1.ports[2])\n return nwOut\n\n\ndef heat_sameSidePort(wire_width = 0.2, wire_pitch=0.6,size=(22,11),layer = 1, portLoc1 = (37.5,131.25), portLoc2 = (-52.5,131.25),nwLoc=(0,0)):\n \"\"\"\n Filled nanowire meander with points on same side. Used as heater for \n hTron devices \n\n Parameters\n ----------\n wire_width : FLOAT, optional\n MEANDER WIDTH. The default is 0.2.\n wire_pitch : FLOAT, optional\n MEANDER PITCH. The default is 0.6.\n size : TUPLE, optional\n (X,Y) MEANDER AREA DIMENSIONS. The default is (22,11).\n layer : INT, optional\n Layer for device to be created on. The default is 1.\n\n portLoc1 : TUPLE, optional\n Location of port 1. The default is (37.5,131.25).\n portLoc2 : TUPLE, optional\n Location of port 2. The default is (-52.5,131.25).\n nwLoc : TUPLE, optional\n Location of center of nanowire. The default is (0,0).\n\n Returns\n -------\n device : DEVICE\n PHIDL device object is returned.\n\n \"\"\"\n device = Device('nw')\n WIRE = nw_same_side(wire_width = wire_width, wire_pitch=wire_pitch,size=size,layer=layer)\n WIRE.rotate(-90).move(origin=(0,0),destination=nwLoc)\n wire = device.add_ref(WIRE)\n \n PADc = pg.straight(size=(5,5),layer=layer)\n PADc.move(origin=PADc.ports[2],destination=portLoc1)\n padc = device.add_ref(PADc)\n \n PADl = pg.straight(size=(5,5),layer=layer)\n PADl.move(origin=PADl.ports[2],destination=portLoc2)\n padl = device.add_ref(PADl)\n \n \n\n \n r1 = pr.route_basic(wire.ports[1],PADc.ports[2],width_type='straight',path_type='sine',layer=layer)\n device.add_ref(r1)\n r2 = pr.route_basic(wire.ports[2],PADl.ports[2],width_type='straight',path_type='sine',layer=layer)\n device.add_ref(r2)\n \n return device\n\n\n\n\ndef alignment_marks(locations = ((-3500, -3500), (3500, 3500), (-3500, 3500), (3500, -3500)), size = (200,5), layer = 1):\n \"\"\"\n Create cross-style alignment marks.\n\n Parameters\n ----------\n locations : TUPLE, optional\n Tuple of (X,Y) locations. The default is ((-3500, -3500), (3500, 3500), (-3500, 3500), (3500, -3500)).\n layer : INT, optional\n Layer for device to be created on. 
The default is 1.\n \n \n Returns\n -------\n marks : DEVICE\n PHIDL device object is returned.\n\n \"\"\"\n marks = Device('Marks')\n alignMARK=pg.cross(size[0], size[1],layer=layer)\n\n for i in np.arange(0,len(locations),1):\n alignMark = marks.add_ref(alignMARK)\n alignMark.move(origin=alignMark.center,destination=locations[i])\n \n marks = pg.union(marks, layer=layer)\n marks.flatten()\n return marks\n\n\ndef etch_square(layers=[1], size=(1500,1500), location=(2500, 1000), outline=None):\n D = Device('etch_square')\n for l in layers: \n rec = pg.rectangle(size=size, layer=l)\n if outline:\n rec = pg.outline(rec, distance=outline, layer=l)\n r = D< 0:\n turn=D< 0:\n turn=D< 0:\n turn=D< 0:\n vert.connect(vert.ports[1], next_port)\n next_port = vert.ports[2]\n \n port_list = D.get_ports()\n D = pg.union(D, precision=1e-10)\n D.add_port(name=1, port=port_list[0])\n D.add_port(name=2, port=port_list[-1])\n return D\n\n\ndef via_square(width=3, inset=2, layers=[0, 1, 2], outline=False):\n D = Device('via') \n via0 = pg.compass(size=(width+2*inset, width+2*inset), layer=layers[0])\n v0 = D< 0.6um !!\n (The via should be smaller than the route)\n \n \n\n Returns\n -------\n VR : Device\n A Device containing the test via structures.\n\n Usage\n -----\n Call via_route_test_structure() by indicating the number of vias you want\n drawn. You can also change the other parameters; however, if you do not\n specify a value for a parameter it will just use the default value\n Ex::\n\n via_route_test_structure(num_vias=54)\n\n - or -::\n\n via_route_test_structure(num_vias=12, pad_size=(100,100),wire_width=8)\n\n ex: via_route(54, min_pad_spacing=300)\n \"\"\"\n VR = Device(\"test_via\")\n \n nub = VR.add_ref(pg.compass(size=(3 * wire_width, wire_width), layer=wiring1_layer))\n #nub_overlay = VR.add_ref(pg.compass(size=(3 * wire_width, wire_width), layer=wiring1_layer))\n \n # Square at the start of the chain\n head = VR.add_ref(pg.compass(size=(wire_width, wire_width), layer=wiring1_layer))\n #head_overlay = VR.add_ref(pg.compass(size=(wire_width, wire_width), layer=wiring1_layer))\n nub.ymax = wire_width/2\n nub.xmin = 0\n #nub_overlay.ymax = wire_width/2\n #nub_overlay.xmin = 0\n head.connect(port=\"W\", destination=nub.ports[\"E\"])\n #head_overlay.connect(port=\"W\", destination=nub_overlay.ports[\"E\"])\n #pad1_overlay.xmin = pad1.xmin\n #pad1_overlay.ymin = pad1.ymin\n\n old_port = head.ports[\"N\"]\n count = 0\n width_via_iter = 2 * via_spacing - 2 * wire_width\n\n current_width = 3 * wire_width + wire_width # width of nub and 1 overlap\n obj_old = head\n obj = head\n via_iterable = _via_iterable(\n via_spacing, wire_width, wiring1_layer, wiring2_layer, via_layer, via_width\n )\n \n while (count + 2) <= num_vias:\n obj = VR.add_ref(via_iterable)\n obj.connect(port=\"W\", destination=old_port, overlap=wire_width)\n old_port = obj.ports[\"E\"]\n # Check if the vias chain reaches the max height\n if obj.ymax > max_y_spread/2:\n obj.connect(port=\"W\", destination=obj_old.ports[\"S\"], overlap=wire_width)\n old_port = obj.ports[\"S\"]\n current_width += width_via_iter\n\n elif obj.ymin < -max_y_spread/2:\n obj.connect(port=\"W\", destination=obj_old.ports[\"N\"], overlap=wire_width)\n old_port = obj.ports[\"N\"]\n current_width += width_via_iter\n count = count + 2\n obj_old = obj\n \n # Square at the end\n tail = VR.add_ref(pg.compass(\n size=(wire_width, wire_width),\n layer=wiring1_layer,\n )\n )\n \n tail.connect(port=\"W\", destination=obj.ports[\"S\"], overlap=wire_width)\n\n 
VR.add_port(name=1, midpoint=(obj.center[0] + 2*via_spacing+wire_width, 0), width=wire_width, orientation = 180)\n VR</edit/', edit_car, name='edit car'),\n path('/delete/', delete_car, name='delete car'),\n path('/details/', details_car, name='details car'),\n ])),\n path('profile/', include([\n path('edit/', edit_profile, name='edit profile'),\n path('delete/', delete_profile, name='delete profile'),\n path('details/', details_profile, name='details profile'),\n path('create/', create_profile, name='create profile'),\n ])),\n)\n","repo_name":"tidorino/PythonWebBasics_SoftUni","sub_path":"CarCollectionApp/CarCollectionApp/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74693016801","text":"import logging\nfrom typing_extensions import deprecated\n\nfrom src.logs.types import LogFile\nfrom src.heuristics.types import Heuristic\nfrom src.heuristics.simple import SimpleHeuristic\nfrom src.heuristics.histogram_time import TimeHeuristic\nfrom src.heuristics.filler import FillerHeuristic\n\nHEURISTICS: list[Heuristic] = [\n SimpleHeuristic(),\n TimeHeuristic(),\n FillerHeuristic(), # This heuristic should be the last one !\n]\n\nlogger = logging.getLogger(\"heuristics_manager\")\n\n\ndef apply_heuristics(grand_truth: LogFile, checked: LogFile):\n grand_truth.clear_heuristics()\n checked.clear_heuristics()\n for heuristic in HEURISTICS:\n heuristic.load_grand_truth(grand_truth)\n heuristic.calculate_heuristic(checked)\n logger.info(f\"Applied heuristic: {heuristic.get_heuristic_name()}\")\n logger.info(\"All heuristics applied !\")\n\n\n@deprecated(\"You should use `.get_heuristic_name()` instead\")\ndef query_heuristic_name(t: type):\n for instance in HEURISTICS:\n if type(instance) is t:\n return instance.get_heuristic_name()\n\n raise TypeError(f\"Unknown heuristic type: {t}\")\n","repo_name":"Mazurel/analyzer","sub_path":"src/heuristics/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29132747063","text":"import turtle\nimport winsound\n\ndef bruhSound():\n winsound.PlaySound(\"bruh.wav\", winsound.SND_ASYNC | winsound.SND_ALIAS)\n\nclass Border:\n # Note: an earlier 4-argument __init__(upper, down, left, right) was shadowed\n # by this definition and therefore dead code; it has been removed.\n def __init__(self, width, height):\n self.upper = height/2 - 10\n self.down = - (height/2 - 10)\n self.left = -(width/2 - 10)\n self.right = width/2 - 10\n\n def setUpper(self, upper):\n self.upper = upper\n\n def setDown(self, down):\n self.down = down\n\n def setLeft(self, left):\n self.left = left\n\n def setRight(self, right):\n self.right = right\n\n def getUpper(self):\n return self.upper\n\n def getDown(self):\n return self.down\n\n def getLeft(self):\n return self.left\n\n def getRight(self):\n return self.right\n\n\n# Functions\ndef paddle_a_up():\n y = paddle_a.ycor()\n y += 20\n paddle_a.sety(y)\n\n\ndef paddle_a_down():\n y = paddle_a.ycor()\n y -= 20\n paddle_a.sety(y)\n\n\ndef paddle_b_up():\n y = paddle_b.ycor()\n y += 20\n paddle_b.sety(y)\n\n\ndef paddle_b_down():\n y = paddle_b.ycor()\n y -= 20\n paddle_b.sety(y)\n\n# border\nborder = Border(800, 600)\n\nwin = turtle.Screen()\nwin.title(\"Bruh Pong\")\nwin.bgcolor(\"black\")\nwin.setup(width=800, height=600)\nwin.tracer(0)\n\n#Score\nscore1 = 0\nscore2 = 0\n\n# Paddle A\npaddle_a 
= turtle.Turtle()\npaddle_a.speed(0)\npaddle_a.shape(\"square\")\npaddle_a.color(\"white\")\npaddle_a.shapesize(stretch_wid=5, stretch_len=1)\npaddle_a.penup()\npaddle_a.goto(border.getLeft()+40, 0)\n\n# Paddle B\npaddle_b = turtle.Turtle()\npaddle_b.speed(0)\npaddle_b.shape(\"square\")\npaddle_b.color(\"white\")\npaddle_b.shapesize(stretch_wid=5, stretch_len=1)\npaddle_b.penup()\npaddle_b.goto(border.getRight()-40, 0)\n\n# Ball\nball = turtle.Turtle()\nball.speed(0)\nball.shape(\"square\")\nball.color(\"white\")\nball.penup()\nball.goto(0, 0)\nball.dx = 1\nball.dy = 1\n\n\n# Line\nline = turtle.Turtle()\nline.speed(0)\nline.shape(\"square\")\nline.color(\"white\")\nline.shapesize(stretch_wid=30, stretch_len=0.1)\nline.penup()\n\n# pen\npen = turtle.Turtle()\npen.speed(0)\npen.color(\"white\")\npen.penup()\npen.hideturtle()\npen.goto(0, 260)\npen.write(f\"->Player 1 : {score1} Player 2: {score2}<-\", align = \"center\", font=(\"Consolas\", 24, \"normal\"))\n\n\n# keyboard binding\nwin.listen()\nwin.onkeypress(paddle_a_up, \"w\")\nwin.onkeypress(paddle_a_down, \"s\")\nwin.onkeypress(paddle_b_up, \"Up\")\nwin.onkeypress(paddle_b_down, \"Down\")\n\n\n\nwhile True:\n win.update()\n\n ball.setx(ball.xcor() + ball.dx)\n ball.sety(ball.ycor() + ball.dy)\n\n if ball.ycor() > border.getUpper():\n ball.sety(border.getUpper())\n ball.dy *= -1\n bruhSound()\n \n\n if ball.ycor() < border.getDown():\n ball.sety(border.getDown())\n ball.dy *= -1\n bruhSound()\n\n if ball.xcor() > border.getRight():\n ball.goto(0, 0)\n ball.dx *= -1\n score1 += 1\n pen.clear()\n pen.write(f\"->Player 1 : {score1} Player 2: {score2}<-\", align = \"center\", font=(\"Cascadia Code PL\", 24, \"normal\"))\n bruhSound()\n\n if ball.xcor() < border.getLeft():\n ball.goto(0, 0)\n ball.dx *= -1\n score2 += 1\n pen.clear()\n pen.write(f\"->Player 1 : {score1} Player 2: {score2}<-\", align = \"center\", font=(\"Cascadia Code PL\", 24, \"normal\"))\n bruhSound()\n\n # paddble b collide\n if ball.xcor() > border.getRight()-50 and (ball.ycor() < paddle_b.ycor() + 50\n and ball.ycor() > paddle_b.ycor() - 50):\n ball.dx *= -1\n ball.setx(border.getRight()-50)\n bruhSound()\n\n # paddle a collide\n if ball.xcor() < border.getLeft()+50 and (ball.ycor() < paddle_a.ycor() + 50\n and ball.ycor() > paddle_a.ycor() - 50):\n ball.dx *= -1\n ball.setx(border.getLeft()+50)\n bruhSound()\n\n ","repo_name":"wkusaa/bruh-pong-a-python-game","sub_path":"bruh_pong.py","file_name":"bruh_pong.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3596308062","text":"import unittest\n\nimport azure.mgmt.powerbidedicated\nfrom devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer\n\nAZURE_LOCATION = 'eastus'\n\nclass MgmtPowerBIDedicatedTest(AzureMgmtTestCase):\n\n def setUp(self):\n super(MgmtPowerBIDedicatedTest, self).setUp()\n self.mgmt_client = self.create_mgmt_client(\n azure.mgmt.powerbidedicated.PowerBIDedicatedManagementClient\n )\n \n @unittest.skip(\"skip test\")\n @ResourceGroupPreparer(location=AZURE_LOCATION)\n def test_powerbidedicated(self, resource_group):\n\n SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID\n TENANT_ID = self.settings.TENANT_ID\n RESOURCE_GROUP = resource_group.name\n DEDICATED_CAPACITY_NAME = \"mydedicatedcapacity\"\n LOCATION = \"myLocation\"\n\n # /Capacities/put/Create capacity[put]i\n BODY = {\n \"sku\": {\n \"name\": \"A1\",\n \"tier\": \"PBIE_Azure\"\n },\n \"tags\": {\n \"test_key\": \"testValue\"\n 
},\n \"administration\": {\n \"members\": [\n \"user1@microsoft.com\",\n \"user2@microsoft.com\"\n ]\n },\n \"location\": \"eastus\"\n }\n result = self.mgmt_client.capacities.create(resource_group_name=RESOURCE_GROUP, dedicated_capacity_name=DEDICATED_CAPACITY_NAME, capacity_parameters=BODY)\n result = result.result()\n\n # /Capacities/get/List eligible SKUs for an existing capacity[get]\n result = self.mgmt_client.capacities.list_skus_for_capacity(resource_group_name=RESOURCE_GROUP, dedicated_capacity_name=DEDICATED_CAPACITY_NAME)\n\n # /Capacities/get/Get details of a capacity[get]\n result = self.mgmt_client.capacities.get_details(resource_group_name=RESOURCE_GROUP, dedicated_capacity_name=DEDICATED_CAPACITY_NAME)\n\n # /Capacities/get/List capacities in resource group[get]\n result = self.mgmt_client.capacities.list_by_resource_group(resource_group_name=RESOURCE_GROUP)\n\n # /Capacities/get/Get details of a capacity[get]\n result = self.mgmt_client.capacities.get_details(resource_group_name=RESOURCE_GROUP, dedicated_capacity_name=DEDICATED_CAPACITY_NAME)\n\n # /Capacities/get/List eligible SKUs for a new capacity[get]\n result = self.mgmt_client.capacities.list_skus()\n\n # /Capacities/post/Suspend capacity[post]\n result = self.mgmt_client.capacities.suspend(resource_group_name=RESOURCE_GROUP, dedicated_capacity_name=DEDICATED_CAPACITY_NAME)\n result = result.result()\n\n # /Capacities/post/Get details of a capacity[post]\n result = self.mgmt_client.capacities.resume(resource_group_name=RESOURCE_GROUP, dedicated_capacity_name=DEDICATED_CAPACITY_NAME)\n result = result.result()\n\n # /Capacities/patch/Update capacity parameters[patch]\n BODY = {\n \"sku\": {\n \"name\": \"A1\",\n \"tier\": \"PBIE_Azure\"\n },\n \"tags\": {\n \"test_key\": \"testValue\"\n },\n \"administration\": {\n \"members\": [\n \"user1@microsoft.com\",\n \"user2@microsoft.com\"\n ]\n }\n }\n result = self.mgmt_client.capacities.update(resource_group_name=RESOURCE_GROUP, dedicated_capacity_name=DEDICATED_CAPACITY_NAME, capacity_update_parameters=BODY)\n result = result.result()\n\n # /Capacities/post/Check name availability of a capacity[post]\n result = self.mgmt_client.capacities.check_name_availability(location=\"eastus\", name=\"azsdktest\", type=\"Microsoft.PowerBIDedicated/capacities\")\n\n # /Capacities/delete/Get details of a capacity[delete]\n result = self.mgmt_client.capacities.delete(resource_group_name=RESOURCE_GROUP, dedicated_capacity_name=DEDICATED_CAPACITY_NAME)\n result = result.result()\n\n\n#------------------------------------------------------------------------------\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/powerbidedicated/azure-mgmt-powerbidedicated/tests/disable_test_cli_mgmt_powerbidedicated.py","file_name":"disable_test_cli_mgmt_powerbidedicated.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"27288894697","text":"#usage: python3.6 pairwise_alignment.py seq1.fa seq2.fa\nimport sys\nsys.setrecursionlimit(10000)\n##compare if there is a match\ndef compare(a,b,match_score,mismatch_score):\n\tif a==b:\n\t\tvalue=match_score\n\telse:\n\t\tvalue=mismatch_score\n\treturn value\n\n\n##assign score based on the above,left and diagonal score \n##while store the position from where the new score inferred \ndef 
assign(mat,i,j,match_score,mismatch_score,gap_score):\n\ty=mat[i-1][j]+gap_score\n\tz=mat[i][j-1]+gap_score\n\tw=mat[i-1][j-1]+compare(mat[i][0],mat[0][j],match_score,mismatch_score)\n\tvalue=y\n\tn=[i-1,j]\n\tif z>value:\n\t\tvalue=z\n\t\tn=[i,j-1]\n\tif w>value:\n\t\tvalue=w\n\t\tn=[i-1,j-1]\n\treturn value,n\n\n##trace back after matix filled\ndef path_back(mat,i,j,x_align,y_align,match_score,mismatch_score,gap_score):\n\tn=[i,j]\n\tif n[0]<2 or n[1]<2:\n\t\treturn x_align,y_align\n\telse:\n\t\tn=assign(mat,i,j,match_score,mismatch_score,gap_score)[1]\n\t\tif n[0]==i-1 and n[1]==j-1:\n\t\t\tx_value=mat[0][j]\n\t\t\ty_value=mat[i][0]\n\t\telif n[0]==i and n[1]==j-1:\n\t\t\tx_value=mat[0][j]\n\t\t\ty_value=\"-\"\n\t\telse:\n\t\t\tx_value=\"-\"\n\t\t\ty_value=mat[i][0]\n\t\tx_align.append(x_value)\n\t\ty_align.append(y_value)\n\t\tpath_back(mat,n[0],n[1],x_align,y_align,match_score,mismatch_score,gap_score)\n\n##print format\ndef insert_newlines(string, every=60):\n lines = []\n for i in range(0, len(string), every):\n lines.append(string[i:i+every])\n return '\\n'.join(lines)\n\n##read in files\nx_file=sys.argv[1]\ny_file=sys.argv[2]\nmatch_score=float(sys.argv[3])\nmismatch_score=float(sys.argv[4])\ngap_score=float((sys.argv[5]))\nwith open(x_file,\"r\") as x:\n\tx_name=x.readline().rstrip().replace(\">\",\"\")\n\tx_seq=x.read().replace(\"\\n\",\"\").upper()\nwith open(y_file,\"r\") as y:\n\ty_name=y.readline().rstrip().replace(\">\",\"\")\n\ty_seq=y.read().replace(\"\\n\",\"\").upper()\nx_len=len(x_seq)\ny_len=len(y_seq)\n\n##build the initial matrix\nmat=[[None for i in range(x_len+2)] for j in range(y_len+2)]\nmat[1][1]=0\nfor i in range(x_len):\n\tmat[0][i+2]=x_seq[i]\n\tmat[1][i+2]=mat[1][i+1]+gap_score\nfor j in range(y_len):\n\tmat[j+2][0]=y_seq[j]\n\tmat[j+2][1]=mat[j+1][1]+gap_score\n\n##fill the matrix\nfor i in range(2,y_len+2):\n\tfor j in range(2,x_len+2):\n\t\tmat[i][j]=assign(mat,i,j,match_score,mismatch_score,gap_score)[0]\n\n##track back\nx_align=[]\ny_align=[]\npath_back(mat,y_len+1,x_len+1,x_align,y_align,match_score,mismatch_score,gap_score)\n\n##polish alignment ends\nif len(\"\".join(x_align).replace(\"-\",\"\")) mysteryNum:\r\n\t\tprint(\"c'mon man, too high\")\r\n\telse:\r\n\t\tprint(\"Really? 
Go higher\")\r\n\r\nprint(\"It took you \" + str(score) + \" guesses\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"CaveSheep/Backup","sub_path":"Guessgame.py","file_name":"Guessgame.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31737920598","text":"#-*- code: utf-8 -*-\nimport sys\nfrom PIL import Image, ImageOps\nfrom lines import FewArgumentsError,ManyArgumentsError\n\ndef main():\n if assert_input():\n apply()\n\ndef assert_input():\n while True:\n try:\n if len(sys.argv) < 3:\n raise FewArgumentsError\n elif len(sys.argv) > 3:\n raise ManyArgumentsError\n elif not open(sys.argv[1]).name.endswith(\".jpg\"):\n raise TypeError\n else:\n return open(sys.argv[1])\n except FewArgumentsError:\n sys.exit(\"Too few command-line arguments.\")\n except ManyArgumentsError:\n sys.exit(\"Too many command-line arguments.\")\n except TypeError:\n sys.exit(\"Invalid input.\")\n except FileNotFoundError:\n sys.exit(\"Input file does not exist\")\n\ndef apply():\n with Image.open(sys.argv[1]) as im:\n im_cropped = ImageOps.fit(im, (600, 600))\n iim = Image.open(sys.argv[2])\n im_cropped.paste(iim, iim) \n im_cropped.save(\"output.png\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ydrazel/Code","sub_path":"Python/CS50P/ProblemSet6/shirt.py","file_name":"shirt.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69894146722","text":"# -*- coding:utf-8 -*-\n'''\nlog api example: log('output is: ' + str(output))\n'''\nimport math\n\nfrom scipy.stats import t\n\nfrom log_api import log\n\n\nclass Solution():\n def solve(self):\n d = t.ppf(0.025, 21)\n mean = 52.1 - 27.1\n sd = math.sqrt(45.1 ** 2 + 26.4 ** 2)\n sv = mean / sd * math.sqrt(22)\n con = sv < d\n return [21, round(sv, 2), con]\n\n\n'''\nA group of researchers are interested in the possible effects of distracting stimuli during eating, such as an increase or decrease in the amount of food consumption. To test this hypothesis, they monitored food intake for a group of 44 patients who were randomised into two equal groups. The treatment group ate lunch while playing solitaire, and the control group ate lunch without any added distractions. Patients in the treatment group ate 52.1 grams of biscuits, with a standard deviation of 45.1 grams, and patients in the control group ate 27.1 grams of biscuits with a standard deviation of 26.4 grams. Do these data provide convincing evidence that the average food intake is different for the patients in the treatment group? 
Assume the conditions for inference are satisfied.\n\nNull hypothesis is H0: u_t - u_c = 0, alpha is 0.05\n\nOutput Description\n\n[degree-of-freedom-of-distribution, statistical values, conclusion],'degree-of-freedom-of-distribution' and 'statistical values' are accurate to the second decimal place, 'conclusion' is True, which means the H0 is accepted, or False\n'''\nlog(Solution().solve())\n","repo_name":"DeepAQ/Python-Statistics","sub_path":"Exercise3/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24880871193","text":"import logging\nimport csv\nfrom functools import partial\nfrom typing import List\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\nfrom utils import *\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass ZeroShotInference:\n def __init__(\n self,\n data_path: str,\n prompt_path: str,\n text_col: str,\n label_col: str,\n model_type: str = \"gpt-3.5-turbo\",\n max_workers: int = 4,\n ) -> None:\n self.prompt = read_txt(prompt_path)\n self.data = load_data(data_path, text_col, label_col)\n self.texts = [row[0] for row in self.data]\n self.labels = [row[1] for row in self.data]\n self.predictions = []\n self.max_workers = max_workers\n if model_type == \"gpt-3.5-turbo\" or model_type == \"gpt-4\":\n self.model_type = model_type\n logger.info(f\"Using model: {self.model_type}\")\n else:\n raise ValueError(\"Model must be either gpt-3.5-turbo or gpt-4\")\n\n def generate_predictions(self) -> List[str]:\n inputs = [self.prompt.format(text) for text in self.texts]\n logger.info(f\"Generating predictions for {len(inputs)} inputs\")\n with Pool(self.max_workers) as pool:\n self.predictions = list(\n tqdm(\n pool.imap(\n partial(openai_service, model_type=self.model_type),\n inputs,\n chunksize=1,\n ),\n total=len(inputs),\n desc=\"Generating predictions\",\n )\n )\n return self.predictions\n\n def save_predictions(self, path: str) -> None:\n with open(path, \"w\") as f:\n writer = csv.writer(f)\n writer.writerow([\"text\", \"label\", \"prediction\"])\n for text, label, prediction in zip(\n self.texts, self.labels, self.predictions\n ):\n writer.writerow([text, label, prediction])\n\n def sample_prediction(self, num_samples: int = 10) -> None:\n for text, label in zip(self.texts[:num_samples], self.labels[:num_samples]):\n # Generate the prediction for the given text\n input = self.prompt.format(text)\n prediction = openai_service(input, model_type=self.model_type)\n logger.info(f\"Prompt: {text}\")\n logger.info(f\"Text: {text}\")\n logger.info(f\"Label: {label}\")\n logger.info(f\"Prediction: {prediction}\")\n","repo_name":"toreleon/icl-text-classification","sub_path":"src/icl/zero_shot.py","file_name":"zero_shot.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30347448844","text":"\"\"\"\nWhen lunchtime comes, the whole class stands in a single line to receive their school meal. But every day the students who sit in the front rows line up first and eat lunch first, while the students who sit in the back rows line up behind them and eat late. While wondering how this situation could be changed, the teacher came up with one method, which is as follows.\n\nAfter the students form a line, each student draws a number in turn, starting from the first student. The first student in line always receives number 0 and stands at the very front. The second student in line draws one of the two numbers 0 or 1. Drawing 0 means staying in place; drawing 1 means moving in front of the student directly ahead. The third student in line draws one of 0, 1 or 2, then moves forward as many places as the drawn number. Up to the last student in line, everyone moves forward by the drawn number in the same way. 
Each drawn number is smaller than the position in which that student originally lined up.\n\nFor example, suppose 5 students line up, and from the first student to the fifth they draw the numbers 0, 1, 1, 3, 2. Labeling the students 1 to 5 in the order they first lined up, the line changes as follows.\n\nAfter the first student draws : 1\n\nAfter the second student draws : 2 1\n\nAfter the third student draws : 2 3 1\n\nAfter the fourth student draws : 4 2 3 1\n\nAfter the fifth student draws : 4 2 5 3 1\n\nSo the final order of the line is 4, 2, 5, 3, 1.\n\nGiven the numbers drawn in turn by the students in line, write a program that prints the final order of the line.\n\n> input\nThe first line contains the number of students, and the second line contains the numbers drawn by the students in the order they lined up.\nThe number of students is at most 100; each drawn number is 0 or a natural number, and the drawn numbers are separated by single spaces.\n\n5\n0 1 1 3 2\n\n> output\nNumbering the students from 1 in the order they first lined up, print on the first line the final order of the line using those numbers.\nPrint one space between student numbers.\n\n4 2 5 3 1\n\n\"\"\"\nimport sys\nsys.stdin = open('2605.txt', 'r')\n\n\nclass Node:\n def __init__(self, item, prev=None):\n self.data = item\n self.prev = prev\n\n\nN = int(input())\nnums = list(map(int, input().split()))\ntail = Node(1)\nfor i in range(1, N):\n if nums[i] == 0:\n tail = Node(i + 1, tail)\n else:\n p = tail\n for j in range(nums[i] - 1):\n # print(p.data)\n p = p.prev\n p.prev = Node(i + 1, p.prev)\n\np = tail\nresult = \"\"\nwhile p.prev:\n # print(p.data, end=\" \")\n result = str(p.data) + \" \" + result\n p = p.prev\n# print(p.data)\nif result:\n result = str(p.data) + \" \" + result\nelse:\n result = str(p.data)\nprint(result)\n\n\n# idea\n# 1. many insertion -> linked list\n# 2. need forward and backward traverse both.","repo_name":"egyeasy/TIL_public","sub_path":"baekjoon/2605_줄세우기.py","file_name":"2605_줄세우기.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39105489102","text":"from typing import Any, List\n\nfrom fastapi import APIRouter, Body, Depends, HTTPException\nfrom fastapi.encoders import jsonable_encoder\nfrom pydantic.networks import EmailStr\nfrom sqlalchemy.orm import Session\n\nfrom app import crud, models, schemas\nfrom app.api import deps\nfrom app.core.config import settings\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", response_model=List[schemas.Label])\ndef read_labels(\n db: Session = Depends(deps.get_db),\n skip: int = 0,\n limit: int = 100,\n current_user: models.User = Depends(deps.get_current_active_superuser),\n) -> Any:\n \"\"\"\n Retrieve labels.\n \"\"\"\n labels = crud.label.get_multi(db, skip=skip, limit=limit)\n return labels\n\n\n@router.post(\"/\", response_model=schemas.Label)\ndef create_label(\n *,\n db: Session = Depends(deps.get_db),\n label_in: schemas.LabelCreate,\n current_user: models.User = Depends(deps.get_current_active_superuser),\n) -> Any:\n \"\"\"\n Create new label.\n \"\"\"\n label = crud.label.get_by_name(db, name=label_in.name)\n if label:\n raise HTTPException(\n status_code=400,\n detail=\"A label with this name already exists in the system.\",\n )\n label = crud.label.create(db, obj_in=label_in)\n return label\n\n\n@router.get(\"/{label_id}\", response_model=schemas.Label)\ndef read_label_by_id(\n label_id: int,\n current_user: models.User = Depends(deps.get_current_active_user),\n db: Session = Depends(deps.get_db),\n) -> Any:\n \"\"\"\n Get a specific label by id.\n \"\"\"\n label = crud.label.get(db, id=label_id)\n if not label:\n raise HTTPException(status_code=404, detail=\"Label not found\")\n return label\n\n\n@router.put(\"/{label_id}\", response_model=schemas.Label)\ndef update_label(\n *,\n db: Session = Depends(deps.get_db),\n label_id: int,\n label_in: schemas.LabelUpdate,\n current_user: 
models.User = Depends(deps.get_current_active_superuser),\n) -> Any:\n \"\"\"\n Update a label.\n \"\"\"\n label = crud.label.get(db, id=label_id)\n if not label:\n raise HTTPException(\n status_code=404,\n detail=\"The label with this id does not exist in the system\",\n )\n label = crud.label.update(db, db_obj=label, obj_in=label_in)\n return label\n","repo_name":"pattersam/ytta-app","sub_path":"backend/app/api/api_v1/endpoints/labels.py","file_name":"labels.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"33989757256","text":"from function import *\r\nfrom standardisation import *\r\nfrom completion import *\r\nfrom determinisation import *\r\nimport pandas as pd\r\nimport string\r\nimport wx\r\nimport wx.grid\r\nimport os\r\n\r\ndeterminiser_etat = 0\r\nfilename = input(\"Entrez le nom du fichier à ouvrir : \")\r\ntry:\r\n file_number = int(filename.split(\".\")[0])\r\nexcept ValueError:\r\n print(\"Le nom de fichier n'est pas valide.\")\r\n exit()\r\nif file_number < 1 or file_number > 46:\r\n print(\"Le nom de fichier n'est pas valide.\")\r\n exit()\r\n\r\nf = open(\"Automate_test/\" + filename + \".txt\")\r\n\r\nfichier = get_next_line(f)\r\n#Enleve les retours a ligne dans la liste \"Fichier\"\r\nremove_n(fichier)\r\nf.close()\r\n\r\n#Va regarder sil ny a que un seul etat terminaux/ initial (car implique different changement dans la lecture du fichier)\r\nmanage_file(fichier)\r\n\r\n#remplis un tableau automate avec les informations du fichiers (sans les transitions)\r\nauto = fill_file(fichier)\r\n\r\n#Ajoute toutes les transitions du fichiers au tableau de lautomate\r\nadd_all_transition(auto, fichier)\r\n\r\n#Affiche toute les informations sur lautomates (standard, complet, deterministe)\r\nprint_info(auto)\r\n\r\nimport wx\r\n\r\n# Définir la classe MyFrame pour l'affichage de la grille\r\nclass MyFrame(wx.Frame):\r\n def __init__(self, auto, fichier):\r\n wx.Frame.__init__(self, None, title=\"Automate : \" + str(len(auto)) + \" états\", size=(500, 300))\r\n panel = wx.Panel(self)\r\n grid = wx.grid.Grid(panel)\r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n\r\n # Importer votre tableau avec Pandas\r\n df = pd.DataFrame(auto)\r\n # Ajouter les données à la grille\r\n grid.CreateGrid(df.shape[0], df.shape[1]-1)\r\n # Ajouter les en-têtes de colonnes\r\n grid.SetColLabelValue(0, \"États\")\r\n for i in range(0, df.shape[1]):\r\n grid.SetColLabelValue(i + 1, string.ascii_lowercase[i])\r\n for i in range(0, df.shape[0]):\r\n grid.SetRowLabelValue(i, str(auto[i][0]))\r\n\r\n\r\n for col in range(1, df.shape[1]):\r\n for row in range(df.shape[0]):\r\n grid.SetCellValue(row, col-1, str(df.iloc[row, col]))\r\n # Verrouiller la cellule pour éviter l'édition\r\n grid.SetReadOnly(row, col-1)\r\n\r\n # Ajouter la grille au sizer\r\n sizer.Add(grid, 1, wx.EXPAND)\r\n panel.SetSizer(sizer)\r\n\r\n\r\n# Créer l'objet wx.App avant la boucle while\r\napp = wx.App()\r\nframe = None\r\ndefault = 0\r\nwhile(1):\r\n # Affiche lautomate proprement dans le terminal grace a pandas\r\n print(\"Voici l'automate : \\n\")\r\n print(\"-----------------------------------------------------------\")\r\n print_pandas(auto)\r\n print(\"-----------------------------------------------------------\")\r\n print(\"\\n\\nQue faire avec l'automate ?\")\r\n if check_standard(auto, \"noprint\") == 0:\r\n print(\"1 : Standardiser\")\r\n if check_complet(auto, \"noprint\") == 0 :\r\n print(\"2 : Completer\")\r\n if 
check_deterministe(auto, \"noprint\") == 0 and determiniser_etat == 0:\r\n print(\"3 : Determiner\")\r\n if default == 0:\r\n print(\"4 : Afficher par defaut\")\r\n print(\"5 : Information sur automate\")\r\n print(\"STOP : Met fin au programme\")\r\n\r\n do = input(\"\\nSaisir votre choix : \")\r\n\r\n if do == '1' or do == '2' or do == '3' or do == '4' or do == '5' or do == \"STOP\" or do == \"S\":\r\n if do == '1' and check_standard(auto, \"noprint\") == 0:\r\n standardiser(auto)\r\n elif do == '2' and check_complet(auto, \"noprint\") == 0:\r\n completer(auto)\r\n elif do == '3' and check_deterministe(auto, \"noprint\") == 0 and determiniser_etat == 0:\r\n auto = determiniser(auto)\r\n determiniser_etat = 1\r\n elif do == \"4\" :\r\n print(\"\\n\")\r\n elif do == \"STOP\" or do == \"S\":\r\n exit()\r\n elif do == \"5\":\r\n print_info(auto)\r\n default = 1\r\n continue\r\n else :\r\n print(\"Le choix n'est pas valide\")\r\n\r\n default = 1\r\n # Afficher la grille dans une fenêtre wx.Frame\r\n if not frame:\r\n frame = MyFrame(auto, fichier)\r\n else:\r\n frame.Refresh()\r\n frame.Raise()\r\n frame.Show()\r\n app.MainLoop()\r\n","repo_name":"ylm78/Projet_automate","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72561726882","text":"\"\"\"\nDecision Tree Regression.\n\"\"\"\nfrom __future__ import print_function\n\nfrom pyspark import SparkContext\n# $example on$\nfrom pyspark.mllib.tree import DecisionTree, DecisionTreeModel\nfrom pyspark.mllib.util import MLUtils\n# $example off$\n\nif __name__ == \"__main__\":\n\n Config = {}\n\n f = open('DecisionTree.cfg', 'r')\n line = f.readline()\n while (line != \"\"):\n line = line.rstrip()\n x = line.split('=')\n print(x[0])\n print(x[1])\n Config[x[0]] = x[1]\n line = f.readline()\n\n file = Config['file']\n trainRatio = float(Config['train.ratio'])\n testRatio = float(Config['test.ratio'])\n\n sc = SparkContext(appName=\"DecisionTreeRegression\")\n\n # $example on$\n # Load and parse the data file into an RDD of LabeledPoint.\n data = MLUtils.loadLibSVMFile(sc, file)\n # Split the data into training and test sets (30% held out for testing)\n (trainingData, testData) = data.randomSplit([trainRatio, testRatio])\n\n # Train a DecisionTree model.\n # Empty categoricalFeaturesInfo indicates all features are continuous.\n model = DecisionTree.trainRegressor(trainingData, categoricalFeaturesInfo={},\n impurity='variance', maxDepth=5, maxBins=32)\n\n # Evaluate model on test instances and compute test error\n predictions = model.predict(testData.map(lambda x: x.features))\n labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)\n testMSE = labelsAndPredictions.map(lambda lp: (lp[0] - lp[1]) * (lp[0] - lp[1])).sum() /\\\n float(testData.count())\n print('Test Mean Squared Error = ' + str(testMSE))\n print('Learned regression tree model:')\n print(model.toDebugString())\n\n # Save and load model\n #model.save(sc, \"target/tmp/myDecisionTreeRegressionModel\")\n #sameModel = DecisionTreeModel.load(sc, \"target/tmp/myDecisionTreeRegressionModel\")\n # $example off$","repo_name":"evoisec/ptp","sub_path":"DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16506769440","text":"from __future__ import absolute_import, division, 
print_function\n\nimport os\nimport sys\nimport numpy as np\nimport logging\nimport logging.handlers\nimport torch\nimport torch.optim as optim\n#from tensorboardX import SummaryWriter\nimport time\n\nfrom models.CAFE.knowledge_graph import *\nfrom models.CAFE.data_utils import OnlinePathLoader, OnlinePathLoaderWithMPSplit, KGMask\nfrom models.CAFE.symbolic_model import EntityEmbeddingModel, SymbolicNetwork, create_symbolic_model\nfrom models.CAFE.cafe_utils import *\nfrom easydict import EasyDict as edict\nimport wandb\nfrom models.utils import MetricsLogger\nlogger = None\n\ndef set_logger(logname):\n global logger\n logger = logging.getLogger(logname)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('[%(levelname)s] %(message)s')\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n fh = logging.handlers.RotatingFileHandler(logname, mode='w')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n\ndef train(args):\n train_dataloader = OnlinePathLoader(args.dataset, args.batch_size, topk=args.topk_candidates)\n valid_dataloader = OnlinePathLoader(args.dataset, args.batch_size, topk=args.topk_candidates)\n metapaths = train_dataloader.kg.metapaths\n\n \n kg_embeds = load_embed(args.dataset) if train else None\n\n model = create_symbolic_model(args, train_dataloader.kg, train=True, pretrain_embeds=kg_embeds)\n params = [name for name, param in model.named_parameters() if param.requires_grad]\n logger.info(f'Trainable parameters: {params}')\n logger.info('==================================')\n\n optimizer = optim.SGD(model.parameters(), lr=args.lr)\n total_steps = args.epochs * train_dataloader.total_steps\n \n\n metrics = MetricsLogger(args.wandb_entity, \n f'{MODEL}_{args.dataset}',\n config=args)\n metrics.register('train_loss')\n metrics.register('train_regloss')\n metrics.register('train_rankloss')\n\n metrics.register('avg_train_loss')\n metrics.register('avg_train_regloss')\n metrics.register('avg_train_rankloss')\n\n metrics.register('valid_loss')\n metrics.register('valid_regloss')\n metrics.register('valid_rankloss') \n\n metrics.register('avg_valid_loss')\n metrics.register('avg_valid_regloss')\n metrics.register('avg_valid_rankloss') \n\n loaders = {'train': train_dataloader,\n 'valid': valid_dataloader\n }\n\n step_counter = {\n 'train': 0,\n 'valid':0\n }\n first_iterate = True\n\n torch.save(model.state_dict(), '{}/symbolic_model_epoch{}.ckpt'.format(args.log_dir, 0))\n start_time = time.time()\n first_iterate = True\n model.train()\n \n for epoch in range(1, args.epochs + 1):\n\n splits_to_compute = list(loaders.items())\n if first_iterate:\n first_iterate = False\n splits_to_compute.insert(0, ('valid', valid_dataloader))\n for split_name, dataloader in splits_to_compute: \n if split_name == 'valid' and epoch%5 == 0:\n model.eval()\n else:\n model.train()\n iter_counter = 0\n ### Start epoch ###\n dataloader.reset()\n while dataloader.has_next():\n # Update learning rate\n if split_name == 'train':\n lr = args.lr * max(1e-4, 1.0 - step_counter[split_name] / total_steps)\n for pg in optimizer.param_groups:\n pg['lr'] = lr\n\n # pos_paths: [bs, path_len], neg_paths: [bs, n, path_len]\n mpid, pos_paths, neg_pids = dataloader.get_batch()\n pos_paths = torch.from_numpy(pos_paths).to(args.device)\n neg_pids = torch.from_numpy(neg_pids).to(args.device)\n\n optimizer.zero_grad()\n reg_loss, rank_loss = model(metapaths[mpid], pos_paths, neg_pids)\n loss = reg_loss + args.rank_weight * rank_loss\n if split_name == 'train':\n 
loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)\n optimizer.step()\n\n\n cur_metrics = {f'{split_name}_loss': loss.item(),\n f'{split_name}_regloss':reg_loss.item(), \n f'{split_name}_rankloss':rank_loss.item(), \n f'{split_name}_iter': step_counter[split_name]}\n\n for k,v in cur_metrics.items():\n metrics.log(k, v)\n #metrics.push(cur_metrics.keys())\n \n step_counter[split_name] += 1\n iter_counter += 1\n\n del pos_paths\n del neg_pids\n\n cur_metrics = [f'{split_name}_epoch']\n cur_metrics.extend([f'{split_name}_loss',\n f'{split_name}_regloss', \n f'{split_name}_rankloss'\n ])\n for k in cur_metrics[1:]:\n metrics.log(f'avg_{k}', sum(metrics.history(k, iter_counter))/max(iter_counter,1) )\n \n metrics.log(f'{split_name}_epoch', epoch)\n #metrics.log(f'std_{split_name}_reward',np.std(metrics.history( f'{split_name}_reward', iter_counter)) )\n info = \"\"\n for k in cur_metrics:\n if isinstance(getattr(metrics,k)[-1],float):\n x = '{:.5f}'.format(getattr(metrics, k)[-1])\n else:\n x = '{:d}'.format(getattr(metrics, k)[-1])\n info = info + f'| {k}={x} ' \n\n\n metrics.push(cur_metrics)\n logger.info(info)\n if epoch % 10 == 0:\n policy_file = '{}/symbolic_model_epoch{}.ckpt'.format(args.log_dir, epoch)\n torch.save(model.state_dict(), policy_file)\n #metrics.push_model(policy_file, f'{MODEL}_{args.dataset}_{epoch}')\n makedirs(args.dataset)\n metrics.write(TEST_METRICS_FILE_PATH[args.dataset])#metrics.write(os.path.join(TMP_DIR[args.dataset], VALID_METRICS_FILE_NAME))\n metrics.close_wandb()\ndef main():\n args = parse_args()\n if not os.path.isdir(args.log_dir):\n os.makedirs(args.log_dir)\n set_logger(args.log_dir + '/train_log.txt')\n logger.info(args)\n os.makedirs(TMP_DIR[args.dataset], exist_ok=True)\n with open(os.path.join(TMP_DIR[args.dataset],HPARAMS_FILE), 'w') as f:\n import json\n import copy\n args_dict = dict()\n for x,y in copy.deepcopy(args._get_kwargs()):\n args_dict[x] = y\n if 'device' in args_dict:\n del args_dict['device']\n json.dump(args_dict,f)\n \n\n train(args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"giacoballoccu/rep-path-reasoning-recsys","sub_path":"models/CAFE/train_neural_symbol.py","file_name":"train_neural_symbol.py","file_ext":"py","file_size_in_byte":6848,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"2274837494","text":"from os import listdir\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport csv\r\n\r\ndf=pd.read_csv('data//1.csv')\r\n\r\nheader=['Seller Platform','Seller SKU','Manufacturer Name','Manufacturer Code','Product Title','Description','Packaging','QTY','Category','Subcategory','Product Page URL','Attachment URL','Images URL']\r\n\r\n\r\ndef saveData(data):\r\n with open('data.csv',mode='w',encoding='UTF-8',newline='') as f:\r\n writer=csv.writer(f)\r\n writer.writerow(header)\r\n writer.writerows(data)\r\n\r\nclass Scrapper():\r\n def __init__(self,url) -> None:\r\n self.productPageURL=url\r\n self.sellerPlatform='Benco Dental'\r\n self.sellerSKU=None\r\n self.manufacturerName=None\r\n self.manufacturerCode=None\r\n self.productTitle=None\r\n self.description=None\r\n self.Packaging=\"-1\"\r\n self.QTY=\"-1\"\r\n self.category=None\r\n self.subCategory=None\r\n self.AttachmentURL=\"-1\"\r\n self.ImagesURL=''\r\n\r\n self.soup=None\r\n \r\n def sendRequest(self):\r\n r=requests.get(self.productPageURL)\r\n self.soup=BeautifulSoup(r.text,'lxml')\r\n\r\n def getSellerSKU(self):\r\n 
self.sellerSKU=self.soup.find(\"span\",itemprop=\"sku\").text\r\n \r\n\r\n def getManufactureName(self):\r\n self.manufacturerName=self.soup.find(\"span\",itemprop=\"brand\").text\r\n \r\n def getManufacturerCode(self):\r\n self.manufacturerCode=self.soup.find(\"meta\",itemprop=\"model\").get('content')\r\n\r\n def getProductTitle(self):\r\n self.productTitle=self.soup.find(class_=\"product-name\",itemprop=\"name\").text.strip()\r\n\r\n def getDescription(self):\r\n try:\r\n self.description=self.soup.find(class_='product-description',itemprop=\"description\").text.strip()\r\n except:\r\n self.description=\"-1\"\r\n\r\n def getCategory(self):\r\n self.category=self.soup.find(class_='breadcrumb-bar').findAll('li')[1].text.replace('/','').strip()\r\n\r\n def getSubCategory(self):\r\n self.subCategory=self.soup.find(class_='breadcrumb-bar').findAll('li')[2].text.replace('/','').strip()\r\n\r\n def getImages(self):\r\n \r\n x=self.soup.find(id='activeImageArea')\r\n y=self.soup.find(id='alternateImageArea')\r\n \r\n if x is not None:\r\n x=x.find('img').get('src').split('?')[0]\r\n self.ImagesURL=str(self.ImagesURL)+x\r\n\r\n if y is not None:\r\n y=y.find('img').get('src').split('?')[0]\r\n self.ImagesURL+=\",\"+y\r\n\r\n\r\nmasterData=[]\r\nfor index,i in df.iterrows():\r\n obj=Scrapper(i['url'])\r\n print(index)\r\n obj.sendRequest()\r\n obj.getSellerSKU()\r\n obj.getCategory()\r\n obj.getSubCategory()\r\n obj.getDescription()\r\n obj.getImages()\r\n obj.getManufactureName()\r\n obj.getManufacturerCode()\r\n obj.getProductTitle()\r\n header=['Seller Platform','Seller SKU','Manufacturer Name',\r\n 'Manufacturer Code','Product Title','Description','Packaging','QTY','Category','Subcategory','Product Page URL','Attachment URL','Images URL']\r\n masterData.append([obj.sellerPlatform,obj.sellerSKU,obj.manufacturerName,obj.manufacturerCode,obj.productTitle,obj.description,obj.Packaging,obj.QTY,obj.category,\r\n obj.subCategory,obj.productPageURL,obj.AttachmentURL,obj.ImagesURL])\r\n if index % 50 == 0:\r\n saveData(masterData)\r\n \r\n\r\nsaveData(masterData)\r\n","repo_name":"muhamdasim/benco","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73952723042","text":"import pytest\nimport torch\nfrom torch.utils.data.dataloader import DataLoader\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.utilities.data import (\n extract_batch_size,\n get_len,\n has_iterable_dataset,\n has_len,\n has_len_all_ranks,\n warning_cache,\n)\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.helpers.boring_model import BoringModel, RandomDataset, RandomIterableDataset\n\n\ndef test_extract_batch_size():\n \"\"\"Tests the behavior of extracting the batch size.\"\"\"\n\n def _check_warning_not_raised(data, expected):\n with pytest.warns(None) as record:\n assert extract_batch_size(data) == expected\n assert len(record) == 0\n\n def _check_warning_raised(data, expected):\n with pytest.warns(UserWarning, match=f\"Trying to infer the `batch_size` .* we found is {expected}.\"):\n assert extract_batch_size(batch) == expected\n warning_cache.clear()\n\n batch = \"test string\"\n _check_warning_not_raised(batch, 11)\n\n batch = torch.zeros(11, 10, 9, 8)\n _check_warning_not_raised(batch, 11)\n\n batch = {\"test\": torch.zeros(11, 10)}\n _check_warning_not_raised(batch, 11)\n\n batch = [torch.zeros(11, 10)]\n 
_check_warning_not_raised(batch, 11)\n\n batch = {\"test\": [{\"test\": [torch.zeros(11, 10)]}]}\n _check_warning_not_raised(batch, 11)\n\n batch = {\"test\": [{\"test\": [torch.zeros(11, 10), torch.zeros(10, 10)]}]}\n _check_warning_raised(batch, 11)\n\n batch = {\"test\": [{\"test\": [torch.zeros(10, 10), torch.zeros(11, 10)]}]}\n _check_warning_raised(batch, 10)\n\n batch = [{\"test\": torch.zeros(10, 10), \"test_1\": torch.zeros(11, 10)}]\n _check_warning_raised(batch, 10)\n\n\ndef test_has_iterable_dataset():\n assert has_iterable_dataset(DataLoader(RandomIterableDataset(1, 1)))\n\n assert not has_iterable_dataset(DataLoader(RandomDataset(1, 1)))\n\n class MockDatasetWithoutIterableDataset(RandomDataset):\n def __iter__(self):\n yield 1\n return self\n\n assert not has_iterable_dataset(DataLoader(MockDatasetWithoutIterableDataset(1, 1)))\n\n\ndef test_has_len():\n assert has_len(DataLoader(RandomDataset(1, 1)))\n\n with pytest.raises(ValueError, match=\"`Dataloader` returned 0 length.\"):\n assert has_len(DataLoader(RandomDataset(0, 0)))\n\n assert not has_len(DataLoader(RandomIterableDataset(1, 1)))\n\n\ndef test_get_len():\n assert get_len(DataLoader(RandomDataset(1, 1))) == 1\n\n value = get_len(DataLoader(RandomIterableDataset(1, 1)))\n\n assert isinstance(value, float)\n assert value == float(\"inf\")\n\n\ndef test_has_len_all_rank():\n trainer = Trainer(fast_dev_run=True)\n model = BoringModel()\n\n with pytest.raises(MisconfigurationException, match=\"Total length of `Dataloader` across ranks is zero.\"):\n assert not has_len_all_ranks(DataLoader(RandomDataset(0, 0)), trainer.training_type_plugin, model)\n\n assert has_len_all_ranks(DataLoader(RandomDataset(1, 1)), trainer.training_type_plugin, model)\n","repo_name":"Eashurox/CPDP_ML","sub_path":"Dataset/ML Projects/Lightning_Versions/lightning-1.5.0/tests/utilities/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2753466502","text":"numerosBuenos=[1,2,3]\nnumerosVenenosos=[4,5,6]\n\nwhile True:\n var=int(input(\"\\033[95mIntroduce el número\\033[0m\\n\"))\n if var in numerosBuenos:\n print(\"\\033[92m¡Felicidades, has ganado!\\033[0m\")\n break\n elif var in numerosVenenosos:\n print(\"\\033[91mIntrodujiste un número venenoso. 
Suerte para la próxima\\033[0m\")\n break\n else:\n print(\"\\033[93mEl número elegido no te puede dar la victoria, vuelve a intentarlo\\033[0m\")\n\n\n\n\n","repo_name":"memoherreraacosta/BeautifulPatternsMIT","sub_path":"day3/if in/ifContains.py","file_name":"ifContains.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13076414192","text":"\nimport requests\nfrom parsel import Selector\nfrom array import*\narr1=[]\narr2=[]\nww=[]\na=[]\narr=[]\nR1=[]\nR2=[]\nr= requests.get('https://zeenews.india.com/india')\ntext=r.text\n#print(text)\n\ns= Selector(r.text)\nhref_links = [s.xpath('//img/@title').getall()]\nl2=len(href_links[0])\n#print(l2)\nk2=0\nt2=0\ng=\"\"\nk3=\"\"\nk4=0\nt1=0\nt=0\nfor y in range(0,l2):\n l1=len(href_links[0][y])\n #print(l1)\n #print(href_links[0][y])\n for i in range(0,l1):\n if href_links[0][y][i]==\" \":\n k4=k4+1\n t=0\n \n for i in range(0,l1):\n r=t\n g=\"\"\n if href_links[0][y][i]==\" \":\n k2=k2+1\n t=i\n k3=\"\"\n for j in range(r,i):\n k3=k3+href_links[0][y][j]\n if( k3==\"Attack\" or k3==\"Collapsed\" or k3==\"collapsed\"or k3==\"pulwama\" or k3==\"collapses\" or k3==\"collase\" or k3==\"Collapsed\" or k3==\"dead\" or k3==\"death\" or k3==\"attacks\" or k3==\"injured\" or k3==\"Pulwama\" or k3==\"attack\"):\n \n g=y\n ww.append(y)\n t1=j+2\n \n t=t+1\n \n k3=\"\"\n if k2==k4:\n for k in range(t1,l1):\n k3=k3+href_links[0][y][k]\n \n\n if(k3==\"Attack\" or k3==\"Collapsed\" or k3==\"collapsed\" or k3==\"pulwama\" or k3==\"collapses\" or k3==\"Collapsed\" or k3==\"collase\" or k3==\"dead\" or k3==\"death\" or k3==\"attacks\" or k3==\"injured\" or k3==\"Pulwama\" or k3==\"attack\"):\n g=y\n ww.append(y)\n\n\nprint(\"REQUIRED OUTPUT\")\nllist=len(ww) \nprint(ww)\nfor i in ww:\n print(href_links[0][i])\nprint(\"\\n\")\nprint(\"NEXT NEWS\")\n\n\nww1=[]\na1=[]\nr1= requests.get('https://timesofindia.indiatimes.com/india')\ntext1=r1.text\n#print(text1)\n\ns= Selector(r1.text)\nhref_links1 = [s.xpath('//a/@title').getall()]\nl21=len(href_links1[0])\n#print(l21)\nk21=0\nt21=0\ng1=\"\"\nk31=\"\"\nk41=0\nt11=0\ntt=0\nfor y in range(0,l21):\n l11=len(href_links1[0][y])\n #print(l1)\n #print(href_links1[0][y])\n for i in range(0,l11):\n if href_links1[0][y][i]==\" \":\n k41=k41+1\n tt=0\n \n for i in range(0,l11):\n r=tt\n g=\"\"\n if href_links1[0][y][i]==\" \":\n k21=k21+1\n tt=i\n k31=\"\"\n for j in range(r,i):\n k31=k31+href_links1[0][y][j]\n if( k31==\"Attack\" or k31==\"Collapsed\" or k31==\"collapsed\" or k31==\"pulwama\" or k31==\"collapses\" or k31==\"collase\" or k31==\"Collapsed\" or k31==\"dead\" or k31==\"death\" or k31==\"attack\" or k31==\"attacks\" or k31==\"injured\" or k31==\"Pulwama\"):\n \n g1=y\n ww1.append(y)\n t11=j+2\n \n tt=tt+1\n \n k31=\"\"\n if k21==k41:\n for k in range(t11,l11):\n k31=k31+href_links1[0][y][k]\n \n if( k31==\"Attack\" or k31==\"Collapsed\" or k31==\"collapsed\" or k31==\"pulwama\" or k31==\"collapses\" or k31==\"collase\" or k31==\"Collapsed\" or k31==\"dead\" or k31==\"death\" or k31==\"attack\" or k31==\"attacks\" or k31==\"injured\" or k31==\"Pulwama\"):\n \n \n g1=y\n ww1.append(y)\n\n\nprint(\"REQUIRED OUTPUT\")\nllist1=len(ww1)\nprint(ww1)\nfor i in ww1:\n print(href_links1[0][i])\n\n\nk2=0\nt2=0\ng=\"\"\nk3=\"\"\nk4=0\nt1=0\nt=0\nb=len(ww)\nq=len(ww1)\nk21=0\nt21=0\ng1=\"\"\nk31=\"\"\nk41=0\nt11=0\ntt=0\nc=0\nr=0\nr1=0\nfor y in ww:\n b1=len(href_links[0][y])\n for i in range(0,b1):\n if href_links[0][y][i]==\" 
\":\n k4=k4+1\n \n for i in range(0,b1):\n \n if href_links[0][y][i]==\" \":\n k2=k2+1\n t=i\n k3=0\n if r==0:\n k3=str(k3)+(href_links[0][y][r])\n for j in range(r+1,i):\n k3=str(k3)+(href_links[0][y][j])\n t1=j+2\n x=k3\n arr1.append(x)\n #print(x)\n r=t\n k3=0\n if k2==k4:\n for k in range(t1,b1):\n k3=str(k3)+(href_links[0][y][k])\n x=k3\n arr1.append(x)\n #print(x)\n r=0\n \n #print(arr1)\n arr=arr1.copy()\n R1.append(arr)\n arr1.clear()\nprint(R1)\n \nfor y1 in ww1:\n q1=len(href_links1[0][y1])\n for i in range(0,q1):\n if href_links1[0][y1][i]==\" \":\n k41=k41+1\n \n for i in range(0,q1):\n if href_links1[0][y1][i]==\" \":\n k21=k21+1\n tt=i\n k31=0\n if r1==0:\n k31=str(k31)+(href_links1[0][y1][r1])\n for j in range(r1+1,i):\n k31=str(k31)+(href_links1[0][y1][j])\n t1=j+2\n y=k31\n arr2.append(y)\n #print(y)\n r1=tt\n k31=0\n if k21==k41:\n for k in range(t1,q1):\n k31=str(k31)+(href_links1[0][y1][k])\n y=k31\n arr2.append(y)\n #print(y)\n r1=0\n #print(arr2)\n ar=arr2.copy()\n R2.append(ar)\n arr2.clear()\nprint(R2) #now we are having two list now compare those two\n\nc=[]\ncount=0\nfor i in range(0,len(R1)):\n \n a=len(R1[i])\n for i1 in range(0,a):\n for j in range(0,len(R2)):\n b=len(R2[j])\n for j1 in range(0,b):\n if R1[i][i1]==R2[j][j1]:\n print(\"comon =\",R1[i][i1])\n count=count+1\n \n print(\" total count =\",count)\n c.append(count)\n count=0\nprint(\"array for all counts =\",c)\nlar=max(c)\nz=(ww[c.index(lar)])\nprint(\"TODAY'S TOP NEWS ::\",href_links[0][z])\n\n\n\n\n\n\n\n","repo_name":"ramyasree19/NewsExtraction","sub_path":"news-extraction-to-find-disaster.py","file_name":"news-extraction-to-find-disaster.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72370412963","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Author : RaXianch\n# CreatDATE : 2020/10/26 \n# CreatTIME : 20:17 \n# Blog : https://blog.raxianch.moe/\n# Github : https://github.com/DeSireFire\n\n__author__ = 'RaXianch'\nimport time\nimport json\nfrom apps.index import router\n\n@router.get(\"/\")\nasync def index():\n context = {\n \"TIME\": time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()),\n \"/ero\": {\n \"/nh\": \"nh模块\",\n \"/exh\": \"exh模块\",\n \"/eh\": \"eh来源,还未完成\",\n },\n \"todo\": \"各种处理器(上传、各类检查和验证器、数据清洗格式化、异步数据库操作、爬虫)、应用分级路由及其视图开发和设计、docs验证、设置文件的完善、docker封装\"\n }\n return context\n\n\n@router.get(\"/reload\")\nasync def iReload():\n from config.settings import BASE_DIR\n import os\n with open(os.path.join(BASE_DIR, \"runServer.py\"), 'a+') as f:\n f.write(\"\\n print(%s)\" % time.time())\n return {}\n\n\n@router.get(\"/demo\")\nasync def exh():\n from handlers.getWeb import base_load_web\n tempStr = \"{}\"\n start_time = time.time()\n req = base_load_web(\"https://nhentai.net/g/249664/\")\n if req != None:\n from handlers.dbFormat import reglux\n tempStr = \"\".join(reglux(req.text, r'window._gallery = JSON.parse\\(\"([\\s\\S]*?)\"\\);', False)).encode(\"utf-8\").decode('unicode-escape')\n end_time = time.time()\n return {\"costTime\": end_time-start_time, \"content\": json.loads(tempStr)}\n","repo_name":"DeSireFire/hentaiFast","sub_path":"apps/index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2193725918","text":"import datetime\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, 
render\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.decorators import login_required\nfrom list.forms import ListaForm\nfrom .models import Lista\n\n@login_required\ndef lista(request):\n\n search = request.GET.get('titulo')\n filter = request.GET.get('filter')\n\n if search:\n\n listas = Lista.objects.filter(titulo__icontains=search, user=request.user)\n \n elif filter:\n\n listas = Lista.objects.filter(status=filter, user=request.user)\n \n else:\n listas_pg = Lista.objects.all().order_by('-created_at').filter(user=request.user)\n paginacao = Paginator(listas_pg, 5)\n\n page = request.GET.get('page')\n\n listas = paginacao.get_page(page) \n\n # def get_queryset(self):\n # titulo = self.objects.filter('12')\n\n\n # return Lista.objects.filter(titulo__icontais= titulo)\n\n return render(request, 'list/lista.html', {'listas' : listas})\n@login_required\ndef listaview(request, id):\n lista = get_object_or_404(Lista, pk=id )\n return render(request, 'list/list.html', {'lista' : lista })\n\n@login_required\ndef adicionar(request):\n form = ListaForm(request.POST or None)\n if form.is_valid():\n lista = form.save(commit=False)\n lista.user = request.user\n lista.status = 'A Assistir'\n lista.save()\n\n return redirect('lista')\n return render(request, 'list/adicionar.html', {'form' : form})\n\n@login_required\ndef editar(request, id):\n lista = Lista.objects.get(pk=id)\n form = ListaForm(request.POST or None, instance=lista)\n\n if form.is_valid():\n form.save()\n return redirect('lista')\n\n return render(request, 'list/adicionar.html', {'form' : form, 'lista' : lista})\n\n@login_required \ndef delete(request, id):\n lista = Lista.objects.get(pk=id)\n lista.delete()\n return redirect('lista')\n\n# @login_required \n# def user(request):\n# listassss = get_object_or_404(Lista)\n# return render(request, 'list/user.html', {'listassss' : listassss })\n\n@login_required \ndef changestatus(request, id):\n lista = get_object_or_404(Lista, pk=id)\n\n if(lista.status == 'A Assistir'):\n lista.status = 'Assistindo'\n\n elif(lista.status == 'Assistindo'):\n lista.status = 'Assistido'\n\n else:\n lista.status = 'A Assistir'\n\n lista.save()\n\n return redirect('/')\n\n\n\n\n\ndef user(request):\n \n search = request.GET.get('titulo')\n filter = request.GET.get('filter')\n assistidorecent = Lista.objects.filter(status = 'Assistido', updated_at__gt=datetime.datetime.now()-datetime.timedelta(days=30), user=request.user).count()\n listassistindo = Lista.objects.filter(status = 'Assistindo', user=request.user).count()\n listassistido = Lista.objects.filter(status = 'Assistido' ,user=request.user).count()\n listaassistir = Lista.objects.filter(status = 'A Assistir' ,user=request.user).count()\n\n if search:\n listas = Lista.objects.filter(titulo__icontains=search, user=request.user)\n \n elif filter:\n\n listas = Lista.objects.filter(status=filter, user=request.user)\n \n else:\n listas_pg = Lista.objects.all().order_by('-created_at').filter(user=request.user)\n paginacao = Paginator(listas_pg, 5)\n\n page = request.GET.get('page')\n\n listas = paginacao.get_page(page) \n\n # def get_queryset(self):\n # titulo = self.objects.filter('12')\n\n\n # return Lista.objects.filter(titulo__icontais= titulo)\n\n return render(request, 'list/user.html', {'listas' : listas, 'assistidorecent': assistidorecent,\n 'listassistindo': listassistindo,\n 'listassistido':listassistido,\n
'listaassistir':listaassistir})\n","repo_name":"VicentShiug/Projetos-de-estudo-de-Django","sub_path":"Assistidos/list/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12548719651","text":"##########\n# q74.py #\n##########\n# レシート明細データ(df_receipt)の売上日(sales_ymd)に対し,\n# 当該週の月曜日からの経過日数を計算し,売上日,直前の月曜日付とともに10件表示せよ.\n# sales_ymdは数値でデータを保持している点に注意.\n\nimport pandas as pd\nfrom dateutil.relativedelta import relativedelta\n\ndf_receipt = pd.read_csv(\"../data/receipt.csv\", dtype={'store_cd':str, 'customer_id':str, 'product_cd':str})\n\ndf_tmp = df_receipt[['sales_ymd']].copy()\n\ndf_tmp['sales_ymd'] = pd.to_datetime(df_tmp['sales_ymd'].astype(str))\ndf_tmp['elapsed_days'] = df_tmp['sales_ymd'].apply(lambda x: x.weekday())\ndf_tmp['monday'] = df_tmp['sales_ymd'].apply(lambda x: x - relativedelta(days=x.weekday()))\n\nans74 = df_tmp.head(10)\n\nans74.to_csv(\"../answer/ans74.csv\")\n#print(ans74)\n","repo_name":"makotoyamaai/statistics","sub_path":"100knocks/program/q74.py","file_name":"q74.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34707040506","text":"import pygame.transform\nimport os\n\nfrom classes.gameobject import GameObject\n\nX, Y = 1200, 720\n\n\nclass Spaceship(pygame.sprite.Sprite):\n def __init__(self, image, spaceshipX, spaceshipY, speed, fuel, gravity_x, gravity_y):\n self.image = image\n self.spaceshipX = spaceshipX\n self.spaceshipY = spaceshipY\n self.speed = speed\n self.fuel = fuel\n self.gravity_x = gravity_x\n self.gravity_y = gravity_y\n self.rect = self.image.get_rect()\n\n def update(self, screen):\n if self.image != None:\n self.rect = self.image.get_rect()\n self.rect.move_ip(int(self.spaceshipX), int(self.spaceshipY))\n screen.blit(self.amountoffuel(), (40, 40))\n screen.blit(self.image, (self.spaceshipX, self.spaceshipY))\n self.gravity()\n self.amountoffuel()\n if self.fuel != 0:\n self.changePosition()\n\n def changePosition(self):\n keys = pygame.key.get_pressed()\n\n # if left arrow key is pressed\n if keys[pygame.K_LEFT] and self.spaceshipX > 0:\n self.spaceshipX -= self.speed\n\n # if left down key is pressed\n if keys[pygame.K_RIGHT] and self.spaceshipX < X - 60:\n # increment in y co-ordinate\n self.spaceshipX += self.speed\n\n # if up arrow key is pressed\n if keys[pygame.K_UP] and self.spaceshipY > 0:\n # decrement in y co-ordinate\n self.spaceshipY -= self.speed\n\n # if left down key is pressed\n if keys[pygame.K_DOWN] and self.spaceshipY < Y - 100:\n # increment in y co-ordinate\n self.spaceshipY += self.speed\n\n def amountoffuel(self):\n keys = pygame.key.get_pressed()\n\n # Kijkt of er nog brandstof\n if self.fuel > 0:\n if keys[pygame.K_DOWN] or keys[pygame.K_RIGHT] or keys[pygame.K_LEFT] or keys[pygame.K_UP]:\n self.fuel -= .5\n\n amount = str(self.fuel)\n amountfuel = pygame.font.Font(\"assets/font.ttf\", 23).render('Hoeveelheid brandstof :' + amount, True, \"White\")\n return amountfuel\n\n def gravity(self):\n if self.spaceshipY < Y - 100 and self.spaceshipX < X - 60 and self.spaceshipX > 0:\n self.spaceshipY += self.gravity_y\n self.spaceshipX += self.gravity_x\n\n def collided_with(self, other_object):\n return 
self.rect.colliderect(other_object.rect)\n","repo_name":"Ruben-1058461/LunarLander-pygame-","sub_path":"classes/spaceship.py","file_name":"spaceship.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27618790912","text":"__author__ = 'ClarkWong, Justsavor'\n\nfrom app import db, app\nfrom flask.ext.restful import reqparse, abort, Api, Resource, fields, marshal_with, marshal\nfrom models import *\napi = Api(app)\n\ntype_fields = {\n 'id': fields.Integer,\n 'name': fields.String,\n 'type_id': fields.Integer\n}\n\nclass TypeApi(Resource):\n\n def __init__(self):\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('name', type=unicode, required=True, location='json')\n self.parser.add_argument('type_id', type=int, required=True, location='json')\n super(TypeApi, self).__init__()\n\n @marshal_with(type_fields)\n def get(self, type_id):\n type_self = Type.query.filter_by(id=type_id).first()\n if type_self:\n return type_self, 201\n else:\n abort(404, message='Type {} not found'.format(type_id))\n\n def delete(self, type_id):\n type_self = Type.query.filter_by(id=type_id).first()\n if type_self:\n db.session.delete(type_self)\n db.session.commit()\n return { 'message' : 'Delete Type {} succeed'.format(type_id)}, 201\n else:\n abort(404, message='Type {} not found'.format(type_id))\n\n @marshal_with(type_fields)\n def put(self, type_id):\n type_self = Type.query.filter_by(id=type_id).first()\n if type_self:\n args = self.parser.parse_args()\n for k,v in args.iteritems():\n if v!= None:\n setattr(type_self, k, v)\n db.session.commit()\n return type_self, 201\n else:\n abort(404, message='Type {} not found'.format(type_id))\n\nclass TypeListApi(Resource):\n\n def __init__(self):\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('name', type=unicode, required=True, location='json')\n self.parser.add_argument('type_id', type=int, required=True, location='json')\n super(TypeListApi, self).__init__()\n\n def get(self):\n typeList = Type.query.all()\n if typeList:\n return [marshal(type_self, type_fields) for type_self in typeList]\n else:\n abort(404, message='No Type at all')\n\n @marshal_with(type_fields)\n def post(self):\n args = self.parser.parse_args()\n name = args['name']\n type_id = args['type_id']\n type_self = Type(name, type_id)\n db.session.add(type_self)\n db.session.commit()\n return type_self, 201\n\nclass TypeQueryApi(Resource):\n def __init__(self):\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('name', type=unicode, required=True, location='json')\n self.parser.add_argument('type_id', type=int, required=True, location='json')\n super(TypeQueryApi, self).__init__()\n\n def post(self):\n args = self.parser.parse_args()\n q = Type.query\n for attr, value in args.items():\n if value:\n q = q.filter(getattr(Type, attr).like(\"%%%s%%\" % value))\n if q:\n return [marshal(type_self, type_fields) for type_self in q]\n else:\n abort(404, message='No such type at all')\n\n # def post(self):\n # args = self.parser.parse_args()\n # name = args['name']\n # type_id = args['type_id']\n # typeList = Type.query.filter_by(name=name, type_id=type_id)\n # if typeList:\n # return [marshal(type_self, type_fields) for type_self in typeList]\n # else:\n # abort(404, message='No such type at all')\n\n\napi.add_resource(TypeApi, '/api/v1/types/', endpoint='type')\napi.add_resource(TypeListApi, '/api/v1/types', endpoint='typeList')\napi.add_resource(TypeQueryApi, 
'/api/v1/types/query', endpoint='typeQuery')","repo_name":"njuwangchen/TRMS","sub_path":"app/type_views.py","file_name":"type_views.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"40470378889","text":"# Questao 25\ndef ppa(a,b):\n if (a==3 and b==1) or (a==3 and b==2) or (a==1 and b==2):\n return 'Jogador 1 venceu'\n elif (a==1 and b==3) or (a==2 and b==1) or (a==2 and b==3):\n return 'Jogador 2 venceu'\n elif a==2 and b==2:\n return 'Ambos venceram'\n elif a==1 and b==1:\n return 'Sem ganhador'\n else:\n return 'Aniquilacao mutua'\n \n \n# Testes # Respostas\nprint(ppa(1, 1)) # Sem ganhador\nprint(ppa(1, 2)) # Jogador 1 venceu\nprint(ppa(1, 3)) # Jogador 2 venceu\nprint(ppa(2, 2)) # Ambos venceram","repo_name":"l-ricardo/APCLista3","sub_path":"Questao25.py","file_name":"Questao25.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25856797815","text":"import uvicorn\nfrom fastapi import FastAPI,File,UploadFile\nfrom io import BytesIO\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\nimport cv2\n\napp = FastAPI()\n\nMODEL = tf.keras.models.load_model('models/elon_musk_amar_rahe.h5')\nCLASS_NAMES = ['neutral','happy','sad']\n\n@app.get('/ping')\nasync def index():\n return {'message':'Hello, Harsh'}\n\ndef read_file_as_image(data) -> np.ndarray:\n img = np.array(Image.open(BytesIO(data)))\n return img\n\n@app.post('/predict')\nasync def predict(file: UploadFile = File(...)):\n image = read_file_as_image(await file.read())\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(gray,1.1,4)\n face_roi = None\n for x, y, w, h in faces:\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = image[y:y+h, x:x+w]\n cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)\n facess = faceCascade.detectMultiScale(roi_gray)\n if len(facess) == 0:\n print(\"Face not detected\")\n else:\n for (ex, ey, ew, eh) in facess:\n face_roi = roi_color[ey:(ey+eh), ex:(ex+eh)]\n \n if face_roi is None:\n return {\"Emotion\" : \"no face detected\"}\n\n img_size = 224\n final_image = cv2.resize(face_roi,(img_size,img_size))\n final_image = np.expand_dims(final_image,axis= 0)\n final_image = final_image/255.0\n\n Predictions = MODEL.predict(final_image)\n p = CLASS_NAMES[np.argmax(Predictions[0])]\n\n return {\"Emotion\" : p } \n\n\nif __name__ == '__main__':\n uvicorn.run(app, host = '127.0.0.1',port=8000)","repo_name":"Harususan/Music4Mood-backend-api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36359539790","text":"import re\nimport numpy\n#from distutils.core import setup, Extension\nfrom setuptools import setup, Extension\n\nVERSIONFILE='src/_version.py'\nverstrline = open(VERSIONFILE, \"rt\").read()\nVSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\nmo = re.search(VSRE, verstrline, re.M)\nif mo:\n verstr = mo.group(1)\nelse:\n raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,))\n\nmarsh_module = Extension('Marsh', \n sources = ['src/c-code/OptimalExtraction/Marsh.c'], \n libraries=['gsl', 'gslcblas', 'm'], \n include_dirs=[numpy.get_include(),'/usr/local/include'])\n\nccf_module = Extension('CCF',\n sources = ['src/c-code/Utilities/CCF.c'],\n 
libraries=['m'],\n include_dirs=[numpy.get_include(),'/usr/local/include'])\n\nsetup(name='transitspectroscopy',\n version=verstr,\n description='transitspectroscopy: a library for all your transit spectroscopy needs',\n url='http://github.com/nespinoza/transitspectroscopy',\n author='Nestor Espinoza',\n author_email='nespinoza@stsci.edu',\n license='MIT',\n packages=['transitspectroscopy'],\n package_dir={'transitspectroscopy': 'src'},\n install_requires=['numpy','scipy', 'jwst', 'astropy', 'jdcal'],\n python_requires='>=3.0',\n ext_modules = [marsh_module, ccf_module],\n zip_safe=False)\n","repo_name":"nespinoza/transitspectroscopy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"72088718562","text":"import warnings\nwarnings.filterwarnings('ignore', category=UserWarning)\ntry:\n # ignore ShapelyDeprecationWarning from fvcore\n from shapely.errors import ShapelyDeprecationWarning\n warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)\nexcept:\n pass\n\nimport copy\nimport logging\nimport os\nimport sys\nimport torch\nimport detectron2.utils.comm as comm\nimport wandb\n\nsys.path.append('Detic/third_party/CenterNet2')\nsys.path.append('Detic/third_party/Deformable-DETR')\n\nfrom collections import OrderedDict\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.config import get_cfg\nfrom detectron2.data import (MetadataCatalog,\n build_detection_test_loader)\n\nfrom detectron2.engine import (DefaultTrainer,\n default_argument_parser,\n default_setup,\n launch)\n\nfrom detectron2.projects.deeplab import add_deeplab_config\nfrom detectron2.utils.logger import setup_logger\nfrom detectron2.utils.comm import is_main_process, synchronize\nfrom detectron2.evaluation import verify_results, inference_on_dataset, print_csv_format\n\nfrom part_distillation import (add_maskformer2_config,\n add_wandb_config,\n add_pixel_grouping_confing,\n add_custom_datasets_config)\n\nfrom part_distillation.data.datasets.register_imagenet import register_imagenet\nfrom part_distillation.data.datasets.register_part_imagenet import register_part_imagenet\nfrom part_distillation.data.dataset_mappers.part_imagenet_mapper import PartImageNetMapper\nfrom part_distillation.evaluation.proposal_evaluator import ProposalEvaluator\n\nclass Trainer(DefaultTrainer):\n @classmethod\n def build_evaluator(self, *args, **kwargs):\n return ProposalEvaluator()\n\n @classmethod\n def build_test_loader(self, cfg, dataset_name):\n mapper = PartImageNetMapper(cfg, is_train=False)\n\n return build_detection_test_loader(cfg, dataset_name, mapper=mapper)\n\n\n @classmethod\n def test(cls, cfg, model):\n logger = logging.getLogger(\"part_distillation\")\n results = OrderedDict()\n for idx, dataset_name in enumerate(cfg.DATASETS.TEST):\n data_loader = cls.build_test_loader(cfg, dataset_name)\n evaluator = cls.build_evaluator(cfg, dataset_name)\n results_i = inference_on_dataset(model, data_loader, evaluator)\n\n results.update(results_i)\n if comm.is_main_process():\n assert isinstance(results_i, dict), \\\n \"Evaluator must return a dict on the main process. 
Got {} instead.\".format(results_i)\n logger.info(\"Evaluation results for {} in csv format:\".format(dataset_name))\n print_csv_format(results_i)\n comm.synchronize()\n\n if len(results) == 1:\n results = list(results.values())[0]\n\n comm.synchronize()\n if comm.is_main_process() and not cfg.WANDB.DISABLE_WANDB:\n wandb.log(results)\n\n return results\n\n\ndef setup(args):\n \"\"\"\n Create configs and perform basic setups.\n \"\"\"\n cfg = get_cfg()\n # for poly lr schedule\n add_deeplab_config(cfg)\n add_maskformer2_config(cfg)\n add_pixel_grouping_confing(cfg)\n add_custom_datasets_config(cfg)\n add_wandb_config(cfg)\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n\n # Setup logger\n setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name=\"part_distillation\")\n # To use the metadata\n register_imagenet(\"imagenet_1k_meta_train\", \"train\",\n partitioned_imagenet=False)\n for dataset_name in cfg.DATASETS.TEST:\n if \"part_imagenet\" in dataset_name:\n register_part_imagenet(name=dataset_name,\n images_dirname=cfg.CUSTOM_DATASETS.PART_IMAGENET.IMAGES_DIRNAME,\n annotations_dirname=cfg.CUSTOM_DATASETS.PART_IMAGENET.ANNOTATIONS_DIRNAME,\n split=dataset_name.split('_')[-1],\n debug=cfg.CUSTOM_DATASETS.PART_IMAGENET.DEBUG,\n )\n else:\n raise ValueError(\"{} not supported for pixel grouping evaluation.\".format(dataset_name))\n\n return cfg\n\n\ndef main(args):\n cfg = setup(args)\n if comm.is_main_process() and not cfg.WANDB.DISABLE_WANDB:\n run_name = cfg.WANDB.RUN_NAME\n wandb.init(project=cfg.WANDB.PROJECT, sync_tensorboard=True, name=run_name,\n group=cfg.WANDB.GROUP, config=cfg.PIXEL_GROUPING, dir=cfg.VIS_OUTPUT_DIR)\n\n assert args.eval_only, \"pixel grouping is eval-only.\"\n model = Trainer.build_model(cfg)\n DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=args.resume\n )\n res = Trainer.test(cfg, model)\n if comm.is_main_process():\n verify_results(cfg, res)\n if comm.is_main_process() and not cfg.WANDB.DISABLE_WANDB:\n wandb.finish()\n return res\n\n\n\n\nif __name__ == \"__main__\":\n args = default_argument_parser().parse_args()\n print(\"Command Line Args:\", args)\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args,),\n )\n","repo_name":"facebookresearch/PartDistillation","sub_path":"pixel_grouping_test_net.py","file_name":"pixel_grouping_test_net.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"54"} +{"seq_id":"15782503150","text":"from collections import defaultdict\nclass Solution:\n def maximumDetonation(self, bombs: List[List[int]]) -> int:\n res = 0\n #visited = set()\n d = defaultdict(list)\n for i in range (len(bombs)):\n for j in range(len(bombs)):\n if i!= j and bombs[i][2] >= math.sqrt((bombs[i][0]- bombs[j][0])**2 + (bombs[i][1]- bombs[j][1])**2):\n d[i].append(j)\n\n def dfs(i, visited):\n for l in d[i]:\n if l not in visited:\n visited.add(l)\n dfs(l, visited)\n for k in range(len(bombs)):\n visited = set([k])\n dfs(k,visited)\n res= max(res, len(visited))\n return res","repo_name":"debiB/competitive-programming-","sub_path":"detonate-the-maximum-bombs.py","file_name":"detonate-the-maximum-bombs.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"26255708797","text":"# coding: utf-8\n\n__author__ = '代码会说话'\n\n\"\"\"\n给定一个二叉搜索树,编写一个函数 kthSmallest 来查找其中第 k 个最小的元素。\n\n说明:\n你可以假设 k 总是有效的,1 ≤ k ≤ 二叉搜索树元素个数。\n\n示例 1:\n\n输入: root = [3,1,4,null,2], k = 1\n 3\n / \\\n 1 4\n \\\n 2\n输出: 1\n示例 2:\n\n输入: root = [5,3,6,2,4,null,null,1], k = 3\n 5\n / \\\n 3 6\n / \\\n 2 4\n /\n 1\n输出: 3\n进阶:\n如果二叉搜索树经常被修改(插入/删除操作)并且你需要频繁地查找第 k 小的值,你将如何优化 kthSmallest 函数?\n\n\n\"\"\"\n\nfrom tree_node import *\n\nclass Solution:\n def kthSmallest(self, root:TreeNode, k:int) -> int:\n generator = bst_generator(root)\n i = 1\n while i < k:\n next(generator)\n i+=1\n return next(generator).val\n\n\n\n\ndef test():\n s = Solution()\n t1 = make_simple_tree(3,make_simple_tree(1,None,2), 4)\n assert s.kthSmallest(t1, 1) == 1\n\n t2 = make_simple_tree(3, make_simple_tree(3,make_simple_tree(2,1,None),4), 6)\n assert s.kthSmallest(t2, 3) == 3\n\n","repo_name":"codetalks-new/leetcode-qa","sub_path":"answers/python3/test_p230_kth_smallest_element_in_a_bst.py","file_name":"test_p230_kth_smallest_element_in_a_bst.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28182474967","text":"from collections import deque\n\nn,k = map(int,input().strip().split())\n\nboard = [[False,False] for _ in range(500001)]\n\ndef solv():\n global board, k\n\n q = deque()\n q.appendleft((n,0))\n\n board[n][0] = True\n t = 0\n while q:\n k += t\n\n if k > 500000:\n return -1\n\n q_len = len(q)\n\n for _ in range(q_len):\n now,cnt = q.pop()\n\n if now == k or board[k][cnt%2]:\n return t\n if now+1 <= 500000 and not board[now+1][(cnt+1)%2]:\n q.appendleft((now+1,cnt+1))\n board[now+1][(cnt+1)%2] = True\n\n if now-1 >= 0 and not board[now-1][(cnt+1)%2]:\n q.appendleft((now-1,cnt+1))\n board[now-1][(cnt+1)%2] = True\n\n if now*2 <= 500000 and not board[now*2][(cnt+1)%2]:\n q.appendleft((now*2,cnt+1))\n board[now*2][(cnt+1)%2] = True\n\n t += 1\n\n return -1\nprint(solv())","repo_name":"alsgh9948/Problem-Solving","sub_path":"baekjoon/17071.py","file_name":"17071.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36807747520","text":"import curses\n\nREADY = 0\nRUNNING = 1\n\nfrom gui.controllers.main import MainMenu\nfrom gui.controllers.quick_actions import QuickActionsMenu\nfrom gui.window import Window, Renderer\nfrom storage import Storage\n\nimport gui.colors as Colors\n\nimport time\n\nTASK_FPS = 1\nINPUT_FPS = 60\n\nclass Gui():\n\n def __init__(self, stdscr):\n self.height, self.width = stdscr.getmaxyx()\n self.begin_y = 0\n self.begin_x = 0\n self.last_render = None\n\n self.height -= 1\n\n self.s = Storage()\n\n self.win = stdscr\n\n curses.start_color()\n curses.use_default_colors()\n for c in Colors.ALL:\n curses.init_pair(c[0], c[1], c[2])\n\n stdscr.clear()\n\n def main_window(title):\n return Window(title, -2, 0, 2, 0, self)\n\n def quick_actions(title):\n return Window(title, 3, 0, 0, 0, self)\n\n main = MainMenu(main_window)\n self.quick_actions = QuickActionsMenu(quick_actions, main.window)\n\n self.windows = [self.quick_actions, main]\n\n self.running = True\n self.had_task = False\n self.fps = None\n self.update_fps(INPUT_FPS)\n\n while self.running:\n self.render()\n self.tick()\n\n def is_active(self):\n return False\n\n def update_fps(self, fps):\n if fps == self.fps:\n return\n\n self.fps = fps\n self.update_frequency = 1/self.fps\n self.force_next_render = 
True\n\n def quit(self):\n self.running = False\n\n def tick(self):\n current_task = self.s.get('task')\n if current_task is not None:\n self.had_task = True\n self.update_fps(TASK_FPS)\n current_task()\n return\n\n if self.had_task is True:\n self.had_task = False\n self.update_fps(INPUT_FPS)\n return\n\n c = self.win.getkey()\n\n r = self.windows[-1].input(c)\n if not r:\n self.windows[0].input(c)\n\n new_window = self.s.get('new_window')\n if new_window is not None:\n self.windows.append(new_window)\n self.s.set('new_window', None)\n\n remove_window = self.s.get('remove_window')\n if remove_window is not None:\n self.windows.remove(remove_window)\n self.s.set('remove_window', None)\n if remove_window == self.quick_actions:\n self.running = False\n\n def render(self):\n cur_time = time.time()\n if not self.force_next_render and (self.last_render is not None and (cur_time - self.last_render) < self.update_frequency):\n return\n\n self.force_next_render = False\n Renderer.reset_cursor()\n self.win.clear()\n\n for w in self.windows:\n w.set_active(w == self.windows[-1])\n w.render()\n\n self.refresh()\n\n self.last_render = time.time()\n\n def refresh(self):\n self.win.refresh()\n","repo_name":"eduardoHoefel/dear-tool","sub_path":"gui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25059917590","text":"#########################################################\n# classes.py\n# define some classes used throughout the package\n#########################################################\nclass Param:\n type = 'continuous'\n\n######################################################### \ndef validate_params(params):\n import numpy as np\n params = np.atleast_1d(params)\n n_dim = len(params)\n for i in range(n_dim):\n if params[i].type == 'continuous':\n if not hasattr(params[i], 'min_val'):\n raise Exception('min_val not specified for param '+str(i))\n if not hasattr(params[i], 'max_val'):\n raise Exception('max_val not specified for param '+str(i))\n if params[i].max_val <= params[i].min_val:\n raise Exception('max_val <= min_val for param '+str(i))\n elif params[i].type == 'ordered':\n if not hasattr(params[i], 'min_val'):\n raise Exception('min_val not specified for param '+str(i))\n if not hasattr(params[i], 'max_val'):\n raise Exception('max_val not specified for param '+str(i))\n if params[i].max_val <= params[i].min_val:\n raise Exception('max_val <= min_val for param '+str(i))\n elif params[i].type == 'categorical':\n if hasattr(params[i], 'min_val'):\n raise Exception('min_val should not be specified for categorical params (param '+str(i)+')')\n if hasattr(params[i], 'max_val'):\n raise Exception('max_val should not be specified for categorical params (param '+str(i)+')')\n if not hasattr(params[i], 'categories'):\n raise Exception('Categories not specified for param['+str(i)+'].')\n if len(params[i].categories) != len(set(params[i].categories)):\n raise Exception('Duplicates found in param['+str(i)+'].categories.')\n for c in params[i].categories:\n if not isinstance(c, str):\n raise Exception('All categories of param['+str(i)+'] must be strings.')\n else:\n raise Exception('Unrecognized type for parameter '+str(i))\n # Check that the first n_cont_vars are all continuous and the last n_cont_vars are either ordered or categorical\n for i in range(n_dim):\n if params[i].type == 'ordered' or params[i].type == 'categorical':\n break\n for ii in range(i+1,n_dim):\n 
if params[ii].type != 'ordered' and params[ii].type != 'categorical':\n raise Exception('Reorder the Param objects in the list of Params so that the Params of continuous data types'+\n ' appear in the list before the other data types.')\n return True\n\n#########################################################\nclass ModelOptions:\n # set the default options\n deterministic = True # random seeds are set deterministically\n perform_lower_sims = True # if a simulation is conducted at a fidelity level, it is also run at all lower fidelity levels\n mask_nans = True # Not-a-Number values are replaced with estimates from the surrogate model for the purpose of Bayesian Optimization. Otherwise, these values are excluded from the surrogate model. \n mask_oob_values = True # out of bounds values are replaced with estimates from the surrogate model for the purpose of Bayesian Optimization. Otherwise, these values are excluded from the surrogate model.\n\n#########################################################\nclass VizOptions:\n # set the default options are below. Set boolean to true to create this vizualization\n animation_1d = False\n animation_2d = False\n animation_nd = False\n plot_1d = False\n plot_2d = False\n plot_nd = False\n output_dir = './plots'\n\n#########################################################\nclass BoOptions:\n # set the default options\n acq_func = 'EI'\n minimization_method = 'SLSQP'\n n_opt_pts = 20 # number of initial guesses used to probe the acquisition function\n\n#########################################################\n","repo_name":"kevingriffin1/pytest-github-actions","sub_path":"ac_common/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32712106850","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\ndef eliminate_duplicate(head):\n base_value = None\n starter = None\n while head:\n if base_value is None:\n base_value = head\n if starter is None:\n starter = head\n else:\n currNode = head\n if currNode.data == base_value.data:\n if currNode is None:\n base_value.next = None\n else:\n base_value.next = currNode.next\n else:\n base_value = currNode\n head = head.next\n\n return starter\n\ndef ll(arr):\n if len(arr)==0:\n return None\n head = Node(arr[0])\n last = head\n for data in arr[1:]:\n last.next = Node(data)\n last = last.next\n return head\n\ndef printll(head):\n while head:\n print(head.data, end=' ')\n head = head.next\n print()\n\n# Main\n# Read the link list elements including -1\narr=list(int(i) for i in input().strip().split(' '))\n# Create a Linked list after removing -1 from list\nl = ll(arr[:-1])\nl = eliminate_duplicate(l)\nprintll(l)\n","repo_name":"ashisharora24/learning_tutorials_practice","sub_path":"Data Structures and Algorithms in Python/10_linked_list/11_Eliminate_duplicates_from_LL.py","file_name":"11_Eliminate_duplicates_from_LL.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30522804690","text":"import logging\n\nfrom route import app, set_session\nfrom session.session import Session\nfrom tool import load_config\nfrom quart_cors import cors\nfrom os import environ\n\n\ndef main():\n environ.setdefault(\"CHATGPT_BASE_URL\", \"https://ai.fakeopen.com/api/\")\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s %(message)s\",\n 
datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n app.logger.setLevel(logging.INFO)\n config = load_config()\n session = Session(config=config)\n port = config[\"engine\"][\"port\"]\n debug = config[\"engine\"].get(\"debug\", False)\n set_session(session)\n\n cors_app = cors(app, allow_origin=\"*\")\n cors_app.run(host=\"0.0.0.0\", port=port, debug=debug)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"peanut996/chatgpt-engine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"40685124220","text":"import os\nfrom pprint import pprint\n\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\nimport gdrive_data.auth as auth\nfrom constants import SCOPES\n\ndef search_file(service, categories):\n drive_pics = {}\n try:\n page_token = None\n pg_token = None\n for category in categories:\n while True:\n response = service.files().list(q=f\"name='{category}' and mimeType='application/vnd.google-apps.folder' and trashed=false\",\n fields='nextPageToken,'\n 'files(id, name, webViewLink)', pageSize=1000,\n pageToken=page_token).execute()\n for folder in response.get('files', []):\n while True:\n drive_pics[folder.get('name')] = {}\n response = service.files().list(q=f\"'{folder.get('id')}' in parents\",\n fields='nextPageToken,'\n 'files(id, name, webViewLink)', pageSize=1000,\n pageToken=pg_token).execute()\n for file in response.get('files', []):\n drive_pics[folder.get('name')][os.path.splitext(file.get('name'))[0]] = file.get(\"webViewLink\")\n pg_token = response.get('nextPageToken', None)\n if pg_token is None:\n break\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break\n\n\n except HttpError as error:\n print(F'An error occurred: {error}')\n drive_pics = None\n\n return drive_pics\n","repo_name":"dimemajor/Custom-Odoo-Web-Report","sub_path":"gdrive_data/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29320958294","text":"from numpy import (\n sqrt, cos, sin, deg2rad, tan,\n arctan, pi, arange, fromiter, array,\n unique, linspace, floor, ceil, arctan2,\n rad2deg,\n)\nfrom matplotlib import ticker\n\nclass OrbitalSolution:\n def __init__(self):\n self.NAME = ''\n self.RA = 0\n self.DEC = 0\n self.P = 0\n self.err_P = 0\n self.T0 = 0\n self.err_T0 = 0\n self.a = 0\n self.a_err = 0\n self.e = 0\n self.e_err = 0\n self.w = 0\n self.w_err = 0\n self.W = 0\n self.W_err = 0\n self.i = 0\n self.i_err = 0\n\n def set_parameters_with_errors(self, P, P_e, T0, T0_e, a, a_e, e, e_e, W, W_e, w, w_e, i, i_e):\n self.P = P\n self.T0 = T0\n self.a = a\n self.e = e\n self.W = W\n self.W_rad = deg2rad(W)\n self.w = w\n self.w_rad = deg2rad(w)\n self.i = i\n self.i_rad = deg2rad(i)\n self.P_err = P_e\n self.T0_err = T0_e\n self.a_err = a_e\n self.e_err = e_e\n self.W_err = W_e\n self.w_err = w_e\n self.i_err = i_e\n\n def set_parameters(self, P, T0, a, e, W, w, i):\n self.P = P\n self.T0 = T0\n self.a = a\n self.e = e\n self.W = W\n self.W_rad = deg2rad(W)\n self.w = w\n self.w_rad = deg2rad(w)\n self.i = i\n self.i_rad = deg2rad(i)\n \n def set_errors(self, P_e, T0_e, a_e, e_e, W_e, w_e, i_e):\n self.P_err = P_e\n self.T0_err = T0_e\n self.a_err = a_e\n self.e_err = e_e\n self.W_err = W_e\n self.w_err = w_e\n self.i_err = i_e\n\n def set_info(self, parameter, value):\n 
if parameter == 'name':\n self.NAME = value\n elif parameter == 'RA':\n self.RA = value\n elif parameter == 'DEC':\n self.DEC = value\n\nclass Point:\n def __init__(self, epoch, theta, rho, weight, koeff):\n self.epoch = epoch\n self.theta = theta\n self.rho = rho\n self.weight = weight\n self.koeff = koeff\n\n\ndef ephemeris(orb_sol, epoch_list, rho=False, rv=False):\n output_ephemeris = []\n A = orb_sol.a * (\n +cos(orb_sol.w_rad) * cos(orb_sol.W_rad) - sin(orb_sol.w_rad) * sin(orb_sol.W_rad) * cos(orb_sol.i_rad))\n B = orb_sol.a * (\n +cos(orb_sol.w_rad) * sin(orb_sol.W_rad) + sin(orb_sol.w_rad) * cos(orb_sol.W_rad) * cos(orb_sol.i_rad))\n F = orb_sol.a * (\n -sin(orb_sol.w_rad) * cos(orb_sol.W_rad) - cos(orb_sol.w_rad) * sin(orb_sol.W_rad) * cos(orb_sol.i_rad))\n G = orb_sol.a * (\n -sin(orb_sol.w_rad) * sin(orb_sol.W_rad) + cos(orb_sol.w_rad) * cos(orb_sol.W_rad) * cos(orb_sol.i_rad))\n for epoch in epoch_list:\n time_delta = epoch - orb_sol.T0\n phase = (time_delta / orb_sol.P) % 1\n if phase < 0:\n phase += 1\n anomaly = phase * 2 * pi\n E = float(anomaly)\n E1 = E + (anomaly + orb_sol.e * sin(E) - E) / (1 - orb_sol.e * cos(E))\n while abs(E1 - E) > 1e-5:\n E = float(E1)\n E1 = E + (anomaly + orb_sol.e * sin(E) - E) / (1 - orb_sol.e * cos(E))\n V = 2 * arctan(sqrt((1 + orb_sol.e) / (1 - orb_sol.e)) * tan(E1 / 2))\n R = (1 - orb_sol.e ** 2) / (1. + orb_sol.e * cos(V))\n X = R * cos(V)\n Y = R * sin(V)\n output_ephemeris.append((A * X + F * Y, B * X + G * Y))\n return array(output_ephemeris)\n\n\ndef correct(points, T0):\n for number, point in enumerate(points):\n if point.epoch < 3e3 and T0 > 3e3:\n points[number].epoch = 365.242198781 * (point.epoch - 1900.) + 15020.31352\n if point.epoch > 3e3 and T0 < 3e3:\n points[number].epoch = 1900.0 + (point.epoch - 15020.31352) / 365.242198781\n return points\n\n\ndef get_points(fName, orbital_solution):\n points = []\n\n with open(fName) as f:\n data = f.read().split('\\n')\n bdata = data[13:]\n\n for line in bdata:\n if not line or line.startswith('#'):\n continue\n splitted_line = line.split()\n if splitted_line[-1].startswith('I1'):\n points.append(Point(*map(float, splitted_line[:-1])))\n\n points = correct(points.copy(), orbital_solution.T0)\n PR = 0.0057 * sin(deg2rad(orbital_solution.RA)) / cos(deg2rad(orbital_solution.DEC))\n for number, point in enumerate(points):\n points[number].theta += (2000 - point.epoch) * PR\n return points\n\n\ndef find_ext(data, t):\n if t == 'min':\n if min(data[0]) < min(data[1]):\n return min(data[0])\n else:\n return min(data[1])\n elif t == 'max':\n if max(data[0]) > max(data[1]):\n return max(data[0])\n else:\n return max(data[1])\n\n\ndef get_orbit(points, orb_sol):\n num_of_points = 500\n\n epochs = fromiter((point.epoch for point in points), float)\n rhos = fromiter((point.rho for point in points), float)\n thetas = fromiter((deg2rad(point.theta) for point in points), float)\n\n mod_epochs = arange(num_of_points) / (num_of_points - 1) * orb_sol.P + orb_sol.T0\n xye = ephemeris(orb_sol, mod_epochs)\n xobs = -rhos * sin(thetas)\n yobs = rhos * cos(thetas)\n xy0 = ephemeris(orb_sol, epochs)\n return xye, xy0, xobs, yobs\n\ndef calculate_residuals(orbit_params):\n return fromiter(\n map(\n lambda X, Y, X_bind, Y_bind: sqrt((X-X_bind)**2 + (Y - Y_bind)**2),\n orbit_params['x'],\n orbit_params['y'],\n -orbit_params['bind'][:, 1],\n orbit_params['bind'][:, 0]\n ),\n float\n )\n\ndef calculate_drho_and_dtheta(orbit_params):\n drho = []\n dtheta = []\n for X, Y, X_bind, Y_bind in zip(orbit_params['x'], 
orbit_params['y'], -orbit_params['bind'][:, 1], orbit_params['bind'][:, 0]):\n rho = get_rho(X, Y)\n theta = get_theta(X, Y)\n rho_bind = get_rho(X_bind, Y_bind)\n theta_bind = get_theta(X_bind, Y_bind)\n drho.append(rho - rho_bind)\n dtheta.append(rad2deg(theta - theta_bind))\n return array(drho), array(dtheta)\n\ndef get_tick_positions_for_epochs(data):\n if data.max() - data.min() > 2:\n epochs_ticks = unique(linspace(floor(data.min()), ceil(data.max()), 6, dtype='uint16'))\n else:\n step = 0\n while True:\n step += 0.1\n if step > data.max() - data.min():\n exit()\n epochs_ticks = arange(floor((data.min()-0.05)*10)/10, ceil((data.max()+0.05)*10)/10+step/2, step)\n if epochs_ticks.min() < data.min() and epochs_ticks.max() > data.max() and epochs_ticks.size <= 7:\n break\n return epochs_ticks\n\ndef draw_brake_diagnoal_lines(axis, trans_axis, bottom, top, axis_type, trans_axis_side, p):\n d = 0.025\n k = 3 * 0.9**(abs(5-p))\n sh = 1.75/7\n if trans_axis:\n kwargs = dict(transform=trans_axis.transAxes, color='k', clip_on=False)\n if axis_type == 'box':\n if trans_axis_side == 'bottom':\n if not top:\n axis.plot((1-d, 1+d), (1-d, 1+d), **kwargs)\n axis.plot((1+sh-d, 1+sh+d), (1-d, 1+d), **kwargs)\n if not bottom:\n axis.plot((1-d, 1+d), (1-d+k*d, 1+d+k*d), **kwargs)\n axis.plot((1+sh-d, 1+sh+d), (1-d+k*d, 1+d+k*d), **kwargs)\n else:\n if not top:\n axis.plot((1-d, 1+d), (-d-k*d, +d-k*d), **kwargs)\n axis.plot((1+sh-d, 1+sh+d), (-d-k*d, +d-k*d), **kwargs)\n if not bottom:\n axis.plot((1-d, 1+d), (-d, +d), **kwargs)\n axis.plot((1+sh-d, 1+sh+d), (-d, +d), **kwargs)\n else:\n if not top:\n axis.plot((-d, +d), (-d-k*d, d-k*d), **kwargs)\n axis.plot((1-d, 1+d), (-d-k*d, d-k*d), **kwargs)\n pass\n if not bottom:\n axis.plot((-d, +d), (1-d+k*d, 1+d+k*d), **kwargs)\n axis.plot((1-d, 1+d), (1-d+k*d, 1+d+k*d), **kwargs)\n\n else:\n kwargs = dict(transform=axis.transAxes, color='k', clip_on=False)\n if not top:\n axis.plot((-d, +d), (1-d, 1+d), **kwargs)\n axis.plot((1-d, 1+d), (1-d, 1+d), **kwargs)\n if not bottom:\n axis.plot((-d, +d), (-d, +d), **kwargs)\n axis.plot((1-d, 1+d), (-d, +d), **kwargs)\n \n return axis\n\ndef create_axis(axis, plot_params, axis_type='orbit', bottom=True, top=True, trans_axis=None, trans_axis_side='bottom'):\n axis.tick_params(direction='in')\n axis.xaxis.set_ticks_position('both')\n if axis_type == 'orbit':\n axis.axis('equal')\n elif axis_type in ['residuals', 'errors']:\n axis.yaxis.set_ticks_position('both')\n if not bottom:\n axis.tick_params(labelbottom='off')\n axis.spines['bottom'].set_visible(False)\n axis.xaxis.set_ticks_position('top')\n elif not top:\n axis.tick_params(labeltop='off')\n axis.spines['top'].set_visible(False)\n axis.xaxis.set_ticks_position('bottom')\n elif axis_type == 'box':\n axis.get_xaxis().set_visible(False)\n axis.yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: ''))\n axis.yaxis.set_ticks_position('both')\n\n if not bottom:\n axis.tick_params(labelbottom='off')\n axis.spines['bottom'].set_visible(False)\n axis.xaxis.set_ticks_position('top')\n elif not top:\n axis.tick_params(labeltop='off')\n axis.spines['top'].set_visible(False)\n axis.xaxis.set_ticks_position('bottom')\n else:\n raise ValueError(f'Unknown type of axis: {axis_type}')\n axis = draw_brake_diagnoal_lines(axis, trans_axis, bottom, top, axis_type, trans_axis_side, plot_params['sub_1_brake_rate'])\n return axis\n\ndef get_rho(x, y):\n return sqrt(x**2 + y**2)\n\ndef get_theta(x, y):\n theta = arctan2(y, x) % (2 * pi)\n if theta < 0:\n theta += 2 * pi\n return 
theta","repo_name":"beskakotov/mavr-ums","sub_path":"ums/orvi/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":10009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13190690778","text":"import datetime\n\nfrom django.forms import forms, ModelForm, fields, ModelChoiceField\nfrom django.core.exceptions import ValidationError\nfrom django.forms.widgets import Textarea\n\nfrom reports.models import *\n\n\n__all__ = [\"AlterLayoutForm\", \"LayoutSettingsForm\", \"SelectPageLayoutForm\", \"MovePageForm\"]\n\n\nclass AlterLayoutForm(ModelForm):\n contents = fields.CharField(widget=Textarea(attrs={'style': \"width: 100%;\"}))\n\n class Meta:\n model = PageLayout\n fields = ['margins', 'contents']\n\n def __init__(self, *args, **kwargs):\n super(AlterLayoutForm, self).__init__(*args, **kwargs)\n\n # self.instance = PageLayout.objects.get()\n self.fields['contents'].initial = self.instance.template_content\n\n def save(self, commit=True):\n super(AlterLayoutForm, self).save()\n if commit:\n file = self.instance.template\n file.open(mode='tw')\n file.write(self.cleaned_data['contents'])\n file.close()\n\n\nclass LayoutSettingsForm(ModelForm):\n class Meta:\n model = PageLayout\n fields = ['name', 'description']\n\n\nclass SelectPageLayoutForm(ModelForm):\n\n class Meta:\n model = ReportPage\n fields = ['layout']\n\n def __init__(self, *args, page=None, **kwargs):\n super(SelectPageLayoutForm, self).__init__(*args, instance=page, **kwargs)\n\n\nclass MovePageForm(forms.Form):\n report_page = ModelChoiceField(queryset=ReportPage.objects.all(), required=True)\n move_up = fields.BooleanField(required=False)\n\n def __init__(self, *args, report=None, **kwargs):\n self.report = report\n super(MovePageForm, self).__init__(*args, **kwargs)\n\n def clean_report_page(self):\n report_page = self.cleaned_data['report_page']\n if report_page not in self.report.get_pages():\n raise ValidationError(\"Page is not part of this report\", code='report_invalid')\n return report_page\n\n def clean(self):\n if 'report_page' not in self.cleaned_data:\n # There is no report page, so likely an error occured in clean_report_page\n return\n cur_link = ReportPageLink.objects.get(page=self.cleaned_data['report_page'])\n\n if self.cleaned_data['move_up']:\n if ReportPageLink.objects.filter(report=self.report, page_number__lt=cur_link.page_number).count() == 0:\n raise ValidationError(\n \"This page was already the first page\", code='is_first_page'\n )\n else:\n if ReportPageLink.objects.filter(report=self.report, page_number__gt=cur_link.page_number).count() == 0:\n raise ValidationError(\n \"This page was already the last page\", code='is_last_page'\n )\n\n return self.cleaned_data\n\n def save(self):\n \"\"\" Save the move by switching the page numbers of the two pages \"\"\"\n this_page_link = self.cleaned_data['report_page'].reportpagelink\n current_page_number = this_page_link.page_number\n\n switch_with_link = ReportPageLink.objects. \\\n filter(report=self.report). \\\n order_by('page_number')\n\n if self.cleaned_data['move_up']:\n switch_with_link = switch_with_link.\\\n filter(page_number__lt=current_page_number).\\\n last()\n else:\n switch_with_link = switch_with_link. \\\n filter(page_number__gt=current_page_number). 
\\\n first()\n\n this_page_link.page_number = switch_with_link.page_number\n this_page_link.save()\n switch_with_link.page_number = current_page_number\n switch_with_link.save()\n","repo_name":"DutcherNL/Shakespear","sub_path":"reports/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12241639946","text":"if __name__ == \"__main__\" :\n N, K = map(int, input().split())\n array = list()\n for _ in range(2) :\n tmp = list(map(int, input().split()))\n tmp = sorted(tmp)\n array.append(tmp)\n\n print(array)\n array[1] = sorted(array[1], reverse=True)\n\n for i in range(K):\n if array[0][i] < array[1][i] :\n array[0][i] = array[1][i]\n\n print(array)\n print(sum(array[0]))","repo_name":"sjly3k/Coding_Test","sub_path":"Sorting/book/두_배열의_원소_교체.py","file_name":"두_배열의_원소_교체.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6196271134","text":"from psycopg2.extras import RealDictCursor\nimport data_handler\nimport util\nfrom service_user import add_user_answer_activity, add_user_question_activity\n\n\n@data_handler.connection_handler\ndef add_new_entry(cursor: RealDictCursor, table_name: str, form_data=None, request_files=None, question_id=None, user_id=None):\n\n complete_dict_data = util.init_complete_dict_entry(table_name, form_data, request_files, question_id)\n\n columns_sql_str = \", \".join([str(key) for key in complete_dict_data.keys()])\n values_sql_str = \", \".join(f'%({key})s' for key in complete_dict_data.keys())\n\n comment = f\"\"\"\n INSERT INTO \n {table_name} ({columns_sql_str})\n VALUES ({values_sql_str})\n RETURNING id\n \"\"\"\n\n cursor.execute(comment, complete_dict_data)\n entry_id = str(cursor.fetchone()['id'])\n\n if table_name == 'answer':\n add_user_answer_activity(user_id, entry_id)\n else:\n add_user_question_activity(user_id, entry_id)\n\n if request_files['image'].filename:\n\n if table_name == 'question':\n data_handler.save_image(request_files['image'], 'questions', entry_id)\n elif table_name == 'answer':\n data_handler.save_image(request_files['image'], 'answers', entry_id)\n\n # if table_name == 'question':\n return entry_id\n\n\n@data_handler.connection_handler\ndef vote_on_post(cursor: RealDictCursor, entry_id, vote_value, entry_type):\n params = {'entry_id': entry_id}\n\n if vote_value == 'vote_up':\n params['vote'] = + 1\n else:\n params['vote'] = - 1\n\n comment = f\"\"\"\n UPDATE {entry_type}\n SET vote_number = vote_number + %(vote)s\n WHERE id=%(entry_id)s\"\"\"\n\n cursor.execute(comment, params)\n\n@data_handler.connection_handler\ndef add_vote(cursor: RealDictCursor, user_id, vote_value, question_id=None, answer_id=None):\n column = \"question_id\" if question_id else \"answer_id\"\n entry_id = question_id if column == 'question_id' else answer_id\n command = f\"\"\"\n INSERT INTO users_votes(user_id, {column}, vote_value)\n VALUES (%(user_id)s, %(entry_id)s, %(vote_value)s)\n \"\"\"\n cursor.execute(command, {'user_id': user_id, 'entry_id': entry_id, 'vote_value': vote_value})\n\n\n@data_handler.connection_handler\ndef clear_vote(cursor: RealDictCursor, user_vote):\n vote_value = -user_vote.get('vote_value')\n vote_value = \"vote_up\" if vote_value == 1 else \"vote_down\"\n\n if user_vote.get('answer_id', None) is not None:\n entry_type = 'answer'\n elif user_vote.get('question_id', None) is not None:\n entry_type = 
'question'\n\n entry_id = user_vote.get(f\"{entry_type}_id\")\n vote_on_post(entry_id, vote_value, entry_type)\n\n command = f\"\"\"\n DELETE FROM users_votes\n WHERE {entry_type}_id = %(entry_id)s\n \"\"\"\n cursor.execute(command, {'entry_id': entry_id})\n\n\n\n\n\n\n#\n# ------>> DELETIONS <<------\n#\n","repo_name":"DariuszOkonski/ask-mate","sub_path":"data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8283713886","text":"# The Ultimate Choose Your Own Adventure Game\nimport PySimpleGUI as sg\n\n\ndef main ():\n sg.theme(\"default1\") \n # All the stuff inside your window.\n char_info = character_creation()\n # character_creation returns the following\n # (char_name, char_gender, window.close())\n # The window.close closes the character_creation window.\n char_name = char_info[0]\n char_gender = char_info[1]\n\n # TODO #87 Get current_scene to update based on player choice\n # Build each scene into a self sustaining window that \n # will work similar to how the character creation window works\n\n\n # Maps the player choices to the variable current_scene\n cabin_scene(char_name, char_gender)\n # Returns (story_text, player_choices, next_scenes)\n\n # # Pulls display text from current scene to display to user\n # display_text = current_scene [0]\n \n # # Pulls returned options from current scene to be mapped to display\n # current_options = current_scene [1]\n # option_1 = current_options [\"option_1\"]\n # option_2 = current_options [\"option_2\"]\n\n # # Format of dictionary values\n # # {\"option_1\":first_scene,\"option_2\":second_scene}\n # next_scenes = current_scene [2]\n # first_scene = next_scenes [\"option_1\"]\n # second_scene = next_scenes [\"option_2\"]\n\n # layout = [ [sg.Text(display_text) ],\n # [sg.Button(option_1),sg.Button(option_2)], \n # [sg.Button(\"Cancel\")] ]\n\n \n # # TODO: #77 BUG Options 1 and 2 not working. \n # # Create the Window\n # window = sg.Window(\"The Ultimate Choose Your Own Adventure Story\", layout)\n # # Event Loop to process \"events\" and get the \"values\" of the inputs\n # while True:\n # event, values = window.read()\n # if event == sg.WIN_CLOSED or event == \"Cancel\": # if user closes window or clicks cancel\n # break\n # # TODO: #89 Possibly put another call to current_scene here\n # # Another possibility is to build a while loop in a function that will be responsible for calling other functions. \n # window.close()\n\n\n\ndef personalized_dialog (char_gender, key):\n \"\"\"\n The dialog in this adventure commonly uses \n gender references that are meant to be tailored \n directly to the player. 
The purpose of this function\n is to allow those references to be dynamic to the character\n which the player created.\n \"\"\"\n if char_gender == \"Male\":\n male = {\n \"he_she\" :\"he\", \n \"him_her\" : \"him\",\n \"his_her\" : \"his\",\n \"boy_girl\" : \"boy\",\n \"brother_sister\" : \"brother\",\n \"son_daughter\" : \"son\",\n \"male_female\" : \"male\",\n \"men_women\" : \"men\",\n \"man_woman\" : \"man\",\n \"love_interest\" : \"girl\",\n \"opposite_male_female\" : \"female\",\n \"opposite_men_women\" : \"women\",\n \"opposite_he_she\" : \"she\",\n \"opposite_his_her\" : \"her\",\n \"opposite_him_her\" : \"her\",\n \"enemy_leader\" : \"Lady\"\n }\n return male.get(key)\n elif char_gender == \"Female\":\n female = {\n \"he_she\" : \"she\",\n \"him_her\" : \"her\",\n \"his_her\" : \"her\",\n \"boy_girl\" : \"girl\",\n \"brother_sister\" : \"sister\",\n \"son_daughter\" : \"daughter\",\n \"male_female\" : \"female\",\n \"men_women\" : \"women\",\n \"man_woman\" : \"woman\",\n \"love_interest\" : \"guy\",\n \"opposite_male_female\" : \"male\",\n \"opposite_men_women\" : \"men\",\n \"opposite_he_she\" : \"he\",\n \"opposite_his_her\" : \"his\",\n \"opposite_him_her\" : \"him\",\n \"enemy_leader\" : \"Lord\"\n }\n return female.get(key)\n\ndef character_creation ():\n sg.theme(\"default1\") # Add a touch of color\n # All the stuff inside your window.\n layout = [ [sg.Text('Welcome to our story. This is where you create your character.')],\n [sg.Text(\"Please enter your character name, then select your character's gender.\")], \n [sg.Text('Name'), sg.InputText(key=\"-NAME-\")],\n [sg.Radio('Male', \"RADIO\", key= \"-MALE-\", default= True),\n sg.Radio('Female', \"RADIO\", key= \"-FEMALE-\")],\n [sg.Submit() ,sg.Cancel()] ]\n\n window = sg.Window('The Ultimate Choose Your Own Adventure Story', layout)\n # Event Loop to process \"events\" and get the \"values\" of the inputs\n while True:\n event, values = window.read()\n char_name = values [\"-NAME-\"]\n if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel\n break\n window.close()\n elif len(char_name)<1:\n sg.popup (\"Please enter a name and select a gender for your character to continue.\")\n elif len (char_name) >=1:\n if event == \"Submit\":\n char_gender = \"\"\n if values [\"-MALE-\"]:\n char_gender = \"Male\"\n elif values [\"-FEMALE-\"]:\n char_gender = \"Female\"\n \n # Next line was used for testing purposes\n #sg.popup(f\"You entered {character_name} and {character_gender}\")\n return char_name, char_gender, window.close()\n \n window.close()\n\n\ndef cabin_scene(char_name, char_gender):\n \"\"\"\n This function is used to stage user interaction \n for activities that happen in the cabin scene of the story.\n \"\"\"\n dialog_used = {\n \"love_interest\" : personalized_dialog(char_gender,\"love_interest\"),\n \"opposite_him_her\" : personalized_dialog(char_gender,\"opposite_him_her\"),\n \"char_name\" : char_name.capitalize (),\n \"him_her\" : personalized_dialog(char_gender,\"him_her\"),\n \"enemy_leader\" : personalized_dialog(char_gender,\"enemy_leader\"),\n \"him_her\" : personalized_dialog(char_gender,\"him_her\"),\n \"men_women\" : personalized_dialog(char_gender,\"men_women\")}\n\n story_text = f\"\"\"\"Today is going to be great!\" You think to yourself.\nI got the day off.\nMy friends and I have an amazing weekend planned.\nI might even see that really cute {dialog_used[\"love_interest\"]} again.\nWho knows! 
Maybe I'll even ask {dialog_used[\"opposite_him_her\"]} on a date.\nNothing could possibly ruin this day!\nIn the middle of your preparations to get ready for your weekend, \nyou hear an unfamiliar voice shouting just outside your house.\n\"{dialog_used [\"char_name\"]} should be inside, get {dialog_used[\"him_her\"]} now. \nThe High {dialog_used[\"enemy_leader\"]} wants {dialog_used[\"him_her\"]} alive and in one piece.\"\nAs you look out the window, you see a dozen strangely dressed \n{dialog_used[\"men_women\"]} carrying large swords angrily moving towards your home.\nYou're at your dad's old cabin, miles out of town. \nEven if they went 80, it would take the police an hour to get out here, \nand you're not sure if you can get cell signal anyway.\n\"Police can't help me.\" You think to yourself. \"I need to think of other options.\"\nI could try to HIDE and hope they don't find me, \nor I could try to ESCAPE out the window and make a break for it. \nThe forest is not far, I could run out there forever.\"\"\"\n \n    player_choices = {\n \"option_1\":\"HIDE\",\n \"option_2\":\"ESCAPE\"}\n \n\n \n layout = [ [sg.Text(story_text) ],\n [sg.Radio(player_choices [\"option_1\"], \"RADIO\", key= \"-option_1-\"),\n sg.Radio(player_choices [\"option_2\"], \"RADIO\", key= \"-option_2-\")],\n [sg.Submit() ,sg.Cancel()] ]\n\n window = sg.Window('The Ultimate Choose Your Own Adventure Story', layout)\n # Event Loop to process \"events\" and get the \"values\" of the inputs\n while True:\n event, values = window.read()\n if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel\n break\n window.close()\n elif event == \"Submit\":\n # values is a dict keyed by the Radio keys; look the choice up instead of comparing to a list\n if values [\"-option_1-\"]:\n closet_scene(char_name, char_gender)\n elif values [\"-option_2-\"]:\n forest_scene(char_name, char_gender)\n\n return dialog_used, player_choices\n\n\n\n#TODO: #83 Build out closet scene\ndef closet_scene(char_name, char_gender):\n\n dialog_used = {\"men_women\": personalized_dialog(char_gender, \"men_women\")\n }\n\n story_text = f\"\"\"You quickly duck in the closet and hide, \nbut as they continue to search the house determined to find you, \nyour nerves start to get the better of you.\nYou hear several people moving closer to the closet door.\nWhen the door opens, you see six large {dialog_used [\"men_women\"]}, \ndressed in blood red robes with masks covering their faces, \nand giant swords pointing towards you. \nYour father's old shotgun is right behind you. \nIf it even works ... or is loaded, you might be able to get a shot off ...\nbut these robed lunatics might give professional bodybuilders a run for their money.\nOr instead of FIGHTing, you could just GIVE UP and hope for the best. \nThey do want you in one piece after all ... 
which is hopefully a good thing.\"\"\"\n\n\n player_choices = {\n \"option_1\":\"FIGHT\",\n \"option_2\":\"GIVE UP\"}\n\n next_scenes = {\n \"option_1\":\"filler_0\",\n \"option_2\":\"filler_1\"}\n \n layout = [ [sg.Text(story_text) ],\n [sg.Radio(player_choices [\"option_1\"], \"RADIO\", key= \"-option_1-\", default= True),\n sg.Radio(player_choices [\"option_2\"], \"RADIO\", key= \"-option_2-\")],\n [sg.Submit() ,sg.Cancel()] ]\n\n window = sg.Window('The Ultimate Choose Your Own Adventure Story', layout)\n # Event Loop to process \"events\" and get the \"values\" of the inputs\n while True:\n event, values = window.read()\n if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel\n break\n window.close()\n elif event == \"Submit\":\n # values is a dict keyed by the Radio keys; look the choice up instead of comparing to a list\n if values [\"-option_1-\"]:\n next_scenes [\"option_1\"]\n elif values [\"-option_2-\"]:\n next_scenes [\"option_2\"]\n\n return dialog_used, player_choices, next_scenes\n\n# TODO: Build out Forest Scene\ndef forest_scene(char_name, char_gender):\n story_text = f\"\"\"You carefully look out the back window, \nmaking sure not to be seen, \nwaiting until all of the cultists start making their way into the front door. \nYou quickly open the window and run as fast as you can into the forest.\nJust as you get to the tree line you hear someone yell behind you \n{personalized_dialog(char_gender, \"he_she\")}'s running into the forest. The yelling and loud footsteps behind you, \ntells you that the whole group is chasing after you and drives you to run faster. \nYou duck behind a bush, then notice a cave in the side of the nearby mountain \nand quickly run for it.\nAs you sprint to the cave, a giant sword flies so close to your head that it nicks your ear. \nYou almost feel as though the force of the wind from the sword flying by you might have knocked you over, \nbut the force of trees exploding into little pieces of shrapnel in front of you after the sword tore through it \nand the tree in front of it certainly did knock you over, and seeing the sword fly back over your face, \nbarely missing your nose as you tumble to the ground makes you grateful you only hit your head on a rock.\nAs you quickly struggle to your feet, feeling a little disoriented from the explosion, \nyou don't stop to listen to the argument between two of the cultists not far behind you. \nYou just run. You run for your life. \nYou run with all the energy of two trees exploding into little pieces of shrapnel that are still covering much of your body.\nAs you duck into the cave, you stop in horror to find it barely goes back 6 feet. \nWith the sun behind the mountain that 6 feet is certainly dark, \nbut the pain from hundreds of pieces of wood embedded into your flesh testifies that what you thought you knew cannot be true. \nHundreds of points of terrible pain and the memory of a giant sword flying back over your face, \nreturning as though on command to its wielder, leaves you with a perfect conviction, \nthat there is a lot you do not know about the world. There is a lot even science does not know about the world.\nYou stand there, back pressed firmly against the rock, \nin perfect silence for what feels like an eternity, wrapped in a gem, and gifted back to you. \nWaiting. Listening. Feeling. But there's more. So much more. \nPart of you wants to just melt into the rock and hope these freaks never find you. \nPart of you wants to grab the nearby branch, left over from one of the exploding trees, \nand see how many of them you can take out. 
Part of you wants to fall to your knees and cry, \nbut you know there is no time for that right now.\nWhat do you want to do:\nEmbrace the weird and try to MERGE into the rock,\nor pick up the BRANCH and try to fight?\"\"\"\n\n player_choices = {\n \"option_1\":\"MERGE\",\n \"option_2\":\"BRANCH\"}\n\n next_scenes = {\n \"option_1\":\"filler_0\",\n \"option_2\":\"filler_1\"}\n\n\n return story_text, player_choices, next_scenes\n\n\nif __name__ == \"__main__\":\n main()\n \n\n\n\n\n\n","repo_name":"JosephKemper/CSE-111-Programming-With-Functions","sub_path":"text_adventure.py","file_name":"text_adventure.py","file_ext":"py","file_size_in_byte":13035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30308989911","text":"import os\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport copy\nfrom scipy.optimize import linprog\nfrom scipy.spatial import HalfspaceIntersection, ConvexHull, Delaunay\nimport itertools\n\nfrom config import cfg\nimport data.scannet.scannet_utils as utils\nfrom Box3dGenerator.visualizer import *\nfrom Box3dGenerator.numba_utils import *\nfrom Box3dGenerator.bounding_box import *\n\n\ndef _calc_boundary_points(bbox2d_dimension, camera_intrinsic, z_near, z_far):\n '''\n calculate the six (right, left, bottom, top, near, far) boundary values of the truncated frustum\n along x, y, z axis based on the 2D bbox corner points of color image\n '''\n # the camera at the origin is looking along -Z axis in eye space, we need to negate the input positive values\n # refer to http://www.songho.ca/opengl/gl_projectionmatrix.html\n n = -z_near\n f = -z_far\n\n xmin, ymin, xmax, ymax = bbox2d_dimension\n\n #left bottom point of the bbox on color image\n pts_lb_image = np.ones((1, 3))\n pts_lb_image[0, 0] = xmin\n pts_lb_image[0, 1] = ymin\n pts_lb_image[0, 2] = -n\n\n # right top point of the bbox on color image\n pts_rt_image = np.ones((1, 3))\n pts_rt_image[0, 0] = xmax\n pts_rt_image[0, 1] = ymax\n pts_rt_image[0, 2] = -n\n\n # transform from the color image coord to color camera coord\n pts_lb_camera = utils.project_image_to_camera(pts_lb_image, camera_intrinsic) #(1,3)\n pts_rt_camera = utils.project_image_to_camera(pts_rt_image, camera_intrinsic)\n\n l = pts_lb_camera[0, 0]\n b = pts_lb_camera[0, 1]\n r = pts_rt_camera[0, 0]\n t = pts_rt_camera[0, 1]\n\n return r, l, b, t, n, f\n\n\ndef _construct_projection_matrix(r, l, b, t, n, f):\n '''\n projection matrix\n P =\n | 2n/(r-l) 0 (r+l)/(r-l) 0 |\n | 0 2n/(t-b) (t+b)/(t-b) 0 |\n | 0 0 -(f+n)/(f-n) -2fn/(f-n) |\n | 0 0 -1 0 |\n :return:\n '''\n P = np.zeros((4,4))\n P[0,0] = 2*n / (r-l)\n P[0,2] = (r+l) / (r-l)\n P[1,1] = 2*n / (t-b)\n P[1,2] = (t+b) / (t-b)\n P[2,2] = -(f+n) / (f-n)\n P[2,3] = -2*f*n / (f-n)\n P[3,2] = -1\n\n return P\n\n\ndef _normalize_plane(p_planes):\n n = p_planes.shape[0]\n for i in range(n):\n mag = np.sqrt(p_planes[i,0]**2 + p_planes[i,1]**2 + p_planes[i,2]**2)\n p_planes[i, :] /= mag\n\n return p_planes\n\n\ndef _calc_inequalities_coefficients(M):\n '''\n calculate inequalities coefficients for clipping plane of one frustum\n :param M: np.ndarray, shape (4,4), projection matrix\n :return: p_planes: np.ndarray, shape (6,4), coefficients for six clipping plane equations\n The order of the rows is left-right-bottom-top-near-far.\n Each row contains four elements representing coefficients (a,b,c,d)\n for inequality (ax+by+cz+d>0)\n '''\n normals = np.zeros((6,4))\n normals[:, 3] = 1\n for 
i in range(normals.shape[0]):\n normals[i, i//2] = 1 - (i%2)*2\n p_planes = normals @ M\n\n # p_planes = np.zeros((6,4))\n #\n # # left clipping plane\n # p_planes[0, 0] = M[3, 0] + M[0, 0]\n # p_planes[0, 1] = M[3, 1] + M[0, 1]\n # p_planes[0, 2] = M[3, 2] + M[0, 2]\n # p_planes[0, 3] = M[3, 3] + M[0, 3]\n #\n # # right clipping plane\n # p_planes[1, 0] = M[3, 0] - M[0, 0]\n # p_planes[1, 1] = M[3, 1] - M[0, 1]\n # p_planes[1, 2] = M[2, 3] - M[0, 2]\n # p_planes[1, 3] = M[3, 3] - M[0, 3]\n #\n # # bottom clipping plane\n # p_planes[2, 0] = M[3, 0] + M[1, 0]\n # p_planes[2, 1] = M[3, 1] + M[1, 1]\n # p_planes[2, 2] = M[3, 2] + M[1, 2]\n # p_planes[2, 3] = M[3, 3] + M[1, 3]\n #\n # # top clipping plane\n # p_planes[3, 0] = M[3, 0] - M[1, 0]\n # p_planes[3, 1] = M[3, 1] - M[1, 1]\n # p_planes[3, 2] = M[3, 2] - M[1, 2]\n # p_planes[3, 3] = M[3, 3] - M[1, 3]\n #\n # # near clipping plane\n # p_planes[4, 0] = M[3, 0] + M[2, 0]\n # p_planes[4, 1] = M[3, 1] + M[2, 1]\n # p_planes[4, 2] = M[3, 2] + M[2, 2]\n # p_planes[4, 3] = M[3, 3] + M[2, 3]\n #\n # # far clipping plane\n # p_planes[5, 0] = M[3, 0] - M[2, 0]\n # p_planes[5, 1] = M[3, 1] - M[2, 1]\n # p_planes[5, 2] = M[3, 2] - M[2, 2]\n # p_planes[5, 3] = M[3, 3] - M[2, 3]\n\n p_planes = _normalize_plane(p_planes)\n\n return p_planes\n\n\ndef extract_frustum_plane(bbox2d_dimension, camera_intrinsic, camera2world_extrinsic, z_near, z_far):\n\n r, l, b, t, n, f = _calc_boundary_points(bbox2d_dimension, camera_intrinsic, z_near, z_far)\n P = _construct_projection_matrix(r, l, b, t, n, f)\n M = P @ np.linalg.inv(camera2world_extrinsic)\n p_planes = _calc_inequalities_coefficients(M)\n\n return p_planes\n\n\ndef _calc_interior_point(halfspaces):\n c = np.zeros((halfspaces.shape[1]-1,))\n A = halfspaces[:, :-1]\n b = -halfspaces[:, -1]\n res = linprog(c, A_ub=A, b_ub=b, bounds=(0,None))\n # if the problem can not be solved, return None\n if res.status != 0:\n print('\\t Warning! 
The optimization is unsolved, the status of optimization result is {0}'.format(res.status))\n return None\n interior_point = res.x\n return interior_point\n\n\ndef frustum_planes_intersect(p_planes_list, visu_interior_point=False, visu_intersection_points=False):\n halfspaces = np.vstack(p_planes_list)\n ## change halfspaces from stacked Inequalities of the form Ax+b>0 in format [A; b] to -Ax-b<0 in format [-A;-b]\n #halfspaces = - halfspaces\n interior_point = _calc_interior_point(halfspaces)\n if interior_point is None:\n return None\n if visu_interior_point:\n visualize_frustums_plus_interior_point(p_planes_list, interior_point)\n hs = HalfspaceIntersection(halfspaces, interior_point)\n if visu_intersection_points:\n visualize_frustums_intersection(p_planes_list, hs.intersections)\n return hs\n\n\ndef remove_noisy_frustums(frustum_planes, min_volume, thres_volume_ratio=2.):\n '''compute the intersection of (n-i) frustums, if the intersection volume is larger than thres_ratio*min_volume,\n consider those i frustums as noisy frustums...\n '''\n n = len(frustum_planes) # number of frustums\n iterable = list(range(n))\n to_remove_frustums = []\n for i in range(1, (n//2)+1):\n exclude_pool = itertools.combinations(iterable, i)\n for to_exclude in exclude_pool:\n cur_frustum_planes = [frustum_plane for idx, frustum_plane in enumerate(frustum_planes)\n if idx not in to_exclude]\n cur_hs = frustum_planes_intersect(cur_frustum_planes, visu_intersection_points=False)\n cur_volume = ConvexHull(cur_hs.intersections).volume\n # TODO: if increase the threhold i times when considering remove i (i>1) frustums\n if cur_volume >= thres_volume_ratio*min_volume*i:\n to_remove_frustums.extend(to_exclude)\n break\n else:\n break\n # remove the noisy frustums\n if len(to_remove_frustums) != 0:\n to_remove_frustums = list(set(to_remove_frustums))\n print('\\t Removed {0} noisy frustums [volume_ratio-{1}]'.format(len(to_remove_frustums), thres_volume_ratio))\n frustum_planes = [frustum_plane for idx, frustum_plane in enumerate(frustum_planes)\n if idx not in to_remove_frustums]\n return frustum_planes\n\n\ndef in_hull(p, hull):\n \"\"\"\n Test if points in `p` are in `hull`\n\n `p` should be a `NxK` coordinates of `N` points in `K` dimensions\n `hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the\n coordinates of `M` points in `K`dimensions for which Delaunay triangulation\n will be computed\n \"\"\"\n if not isinstance(hull,Delaunay):\n hull = Delaunay(hull)\n\n return hull.find_simplex(p)>=0\n\n\ndef frustum_ptcloud_with_cam_in_world_frame(depth_img, bbox2d_dimension, CAM, depth_intrinsic, color2depth_extrinsic,\n camera2world_extrinsic):\n pts_cam_depth_camera = utils.cropped_depth_to_point_cloud_with_cam(depth_img, depth_intrinsic, bbox2d_dimension, CAM)\n pts_depth_camera = pts_cam_depth_camera[:,:3]\n cam_score = pts_cam_depth_camera[:, -1].reshape(-1, 1)\n\n pts_color_camera = utils.calibrate_camera_depth_to_color(pts_depth_camera, color2depth_extrinsic)\n z_near = np.amin(pts_color_camera, axis=0)[2]\n z_far = np.amax(pts_color_camera, axis=0)[2]\n\n pts_world = pts_color_camera @ camera2world_extrinsic.transpose()\n\n pts_cam_world = np.hstack((pts_world[:, :3], cam_score))\n\n return pts_cam_world, z_near, z_far\n\n\n\ndef pcmerge(ptclouds, gridSize=.02):\n '''\n merge a set of 3D point clouds using a box grid filter\n :param: ptclouds, a stack of several frustum point cloud with shape (n, 4) [x,y,z,score]\n where score indicate the class activated score\n :param: gridSize, size 
of the voxel for grid filter, specified as a numeric value.\n Increase the size of gridSize when requiring a higher-resolution grid.\n :return: a merged point cloud\n '''\n\n def voxelize_pointcloud(n_xyz):\n segments = []\n for i in range(3):\n s = np.linspace(xyz_min[i], xyz_max[i], num=(n_xyz[i]))\n segments.append(s)\n\n ## find where each point lies in corresponding segmented axis\n voxel_x = np.clip(np.searchsorted(segments[0], ptclouds[:, 0]), 0, n_xyz[0]-1)\n voxel_y = np.clip(np.searchsorted(segments[1], ptclouds[:, 1]), 0, n_xyz[1]-1)\n voxel_z = np.clip(np.searchsorted(segments[2], ptclouds[:, 2]), 0, n_xyz[2]-1)\n voxel_n = np.ravel_multi_index([voxel_x, voxel_y, voxel_z], n_xyz)\n\n return voxel_n\n\n xyz_min = np.amin(ptclouds, axis=0)[0:3]\n xyz_max = np.amax(ptclouds, axis=0)[0:3]\n n_xyz = np.ceil((xyz_max-xyz_min)/gridSize).astype(np.int) #n_xyz = [nx, ny, nz]\n\n voxel_n = voxelize_pointcloud(n_xyz)\n n_voxels = n_xyz[0] * n_xyz[1] * n_xyz[2]\n\n voxel_sum = groupby_sum(ptclouds, voxel_n, np.zeros((n_voxels,4)))\n voxel_count = groupby_count(ptclouds, voxel_n, np.zeros(n_voxels))\n voxel_grid = np.nan_to_num(voxel_sum / voxel_count.reshape(-1,1))\n ptclouds_merged = voxel_grid[np.all(voxel_grid, axis=1)] #filter out empty voxels\n\n return ptclouds_merged\n\n\ndef compute_min_max_bounds_in_one_track(scan_dir, scan_name, objects, trajectory, cam_thres_ratio=.5, is_OBB=False):\n\n meta_file_path = os.path.join(scan_dir, '{0}.txt'.format(scan_name))\n axis_align_matrix, color2depth_extrinsic, camera_intrinsic, depth_intrinsic = utils.read_meta_file(meta_file_path)\n\n frustum_ptclouds = []\n frustum_planes = []\n instance_ids = []\n\n for frame_idx, bbox_idx in trajectory:\n obj = objects[frame_idx][bbox_idx]\n dimension = obj['dimension']\n classname = obj['classname']\n frame_name = obj['frame_name']\n instance_ids.append(obj['instance_id'])\n #visualize_bbox(scan_dir, obj, draw_text=False)\n\n depth_img_path = os.path.join(scan_dir, 'depth', '{0}.png'.format(frame_name))\n depth_img = np.array(Image.open(depth_img_path))\n\n #depth_img[depth_img == 0] = np.max(depth_img)\n #plt.imshow(1.0 / depth_img)\n #plt.show()\n\n camera2world_extrinsic_path = os.path.join(scan_dir, 'pose', '{0}.txt'.format(frame_name))\n camera2world_extrinsic = np.loadtxt(camera2world_extrinsic_path) # 4*4\n\n ## generate frustum point cloud\n cam_path = os.path.join(scan_dir, 'cam', '{0}.npy'.format(frame_name))\n CAMs = np.load(cam_path)\n CAM = CAMs[:, :, cfg.SCANNET.CLASS2INDEX[classname]]\n frustum_ptcloud, z_near, z_far = frustum_ptcloud_with_cam_in_world_frame(depth_img, dimension, CAM,\n depth_intrinsic, color2depth_extrinsic, camera2world_extrinsic)\n frustum_ptclouds.append(frustum_ptcloud)\n\n ## generate frustum clipping planes\n frustum_plane = extract_frustum_plane(dimension, camera_intrinsic, camera2world_extrinsic, z_near, z_far)\n frustum_planes.append(frustum_plane)\n ## visualize single frustum\n # visualize_one_frustum(frustum_plane)\n # visualize_one_frustum_plus_points(frustum_plane, frustum_ptcloud)\n\n ## visualize the whole point cloud and the ground truth 3D bounding box\n instance_ids = np.unique(np.array(instance_ids))\n if len(instance_ids) == 1:\n visualize_bbox3d_in_whole_scene(scan_dir, scan_name, axis_align_matrix, instance_ids[0])\n else:\n ## debug to check if one track contains more than one instance..\n print('Warning! 
The track contains more than one object!')\n for instance_id in instance_ids:\n visualize_bbox3d_in_whole_scene(scan_dir, scan_name, axis_align_matrix, instance_id)\n\n ptclouds_multiview = np.vstack(frustum_ptclouds)\n ## merge the frustum point clouds from multiple views\n ptclouds_merged = pcmerge(ptclouds_multiview)\n visualize_frustum_ptcloud_with_cam(ptclouds_merged)\n visualize_n_frustums_plus_ptclouds(frustum_planes, ptclouds_multiview)\n\n ## compute the intersection of n frustums, and remove noisy frustums\n hs = frustum_planes_intersect(frustum_planes, visu_intersection_points=False)\n\n if hs is not None:\n min_volume = ConvexHull(hs.intersections).volume\n frustum_planes_clean = remove_noisy_frustums(frustum_planes, min_volume, thres_volume_ratio=1.5)\n # visualize_n_frustums_plus_ptclouds(frustum_planes_clean, ptclouds)\n hs_clean = frustum_planes_intersect(frustum_planes_clean, visu_intersection_points=False)\n intersection_points = hs_clean.intersections\n inside_mask = in_hull(ptclouds_merged[:,:3],intersection_points)\n inside_ptcloud = ptclouds_merged[inside_mask]\n\n visualize_convex_hull_plus_ptcloud_interactive(intersection_points, ptclouds_merged[:,:3], inside_mask)\n\n visualize_frustum_ptcloud_with_cam(inside_ptcloud) #debug\n\n ## select relatively high class activated points\n avg_cam_score = np.mean(inside_ptcloud, axis=0)[3]\n activate_mask = np.where(inside_ptcloud[:,3]>cam_thres_ratio*avg_cam_score)[0]\n select_ptcloud = inside_ptcloud[activate_mask]\n visualize_frustum_ptcloud_with_cam(select_ptcloud) # debug\n\n candidate_pts = utils.align_world_with_axis(select_ptcloud[:,:3], axis_align_matrix)\n\n if is_OBB:\n #TODO: oriented bounding box\n pass\n else:\n ## AABB (axis-aligned bounding box) is generated\n bbox3d = create_AABB(candidate_pts, cfg.SCANNET.CLASS2INDEX[classname])\n visualize_bbox3d_in_whole_scene(scan_dir, scan_name, axis_align_matrix, instance_ids[0], bbox3d)\n return bbox3d\n\n else:\n return None\n\n\n\n\n\n\n\n\n\n","repo_name":"Na-Z/3DMVGOD","sub_path":"Box3dGenerator/frustums.py","file_name":"frustums.py","file_ext":"py","file_size_in_byte":14904,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"41077062665","text":"# -*- coding: utf-8 -*-\n# (C) 2021, Mark Mercado \n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\nfrom os import stat\nfrom re import I\n\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n author: Mark Mercado (@mamercad)\n name: mamercad.cloudmason.slack\n type: notification\n requirements:\n - Allow in configuration C(callbacks_enabled = mamercad.cloudmason.slack) in C([default]).\n - The C(requests) Python library.\n short_description: Sends play events to a Slack channel.\n description:\n - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.\n options:\n slack_bot_token:\n description: Slack token; has the form C(xoxb-37809492...).\n required: true\n env:\n - name: SLACK_BOT_TOKEN\n ini:\n - section: callback_slack\n key: slack_bot_token\n slack_channel:\n description: Slack channel; has the form C(#bots).\n required: true\n env:\n - name: SLACK_CHANNEL\n ini:\n - section: callback_slack\n key: slack_channel\n slack_format:\n description: Textual display style.\n choices: [\"plain\", \"fixed\", \"visual\"]\n default: plain\n env:\n - name: SLACK_FORMAT\n ini:\n - section: callback_slack\n key: 
slack_format\n slack_cadence:\n description: Realtime or buffered.\n choice: [\"realtime\", \"buffered\"]\n default: realtime\n env:\n - name: SLACK_CADENCE\n ini:\n - section: callback_slack\n key: slack_cadence\n slack_threading:\n description: Use Slack threads (or not).\n default: false\n env:\n - name: SLACK_THREADING\n ini:\n - section: callback_slack\n key: slack_threading\n ansible_events:\n description: Ansible events for which to notify on.\n default: v2_playbook_on_start,v2_playbook_on_play_start,v2_playbook_on_task_start,v2_runner_on_ok,v2_runner_on_skipped,v2_runner_on_unreachable,v2_runner_on_failed,v2_playbook_on_stats\n env:\n - name: ANSIBLE_EVENTS\n ini:\n - section: callback_slack\n key: ansible_events\n\"\"\"\n\nimport json\nimport yaml\nimport requests\nfrom pprint import pprint\nimport datetime\n\nfrom ansible.plugins.callback import CallbackBase\nfrom ansible import constants as C\n\n\nclass Slack(object):\n def __init__(self, print, token, channel, threading):\n self.print = print\n self.token = token\n self.channel = channel\n self.threading = threading\n self.thread_ts = None\n\n def send(self, *args, **kwargs):\n headers = {\n \"Authorization\": f\"Bearer {self.token}\",\n \"Content-type\": \"application/json; charset=utf-8\",\n }\n\n payload = {\n \"channel\": self.channel,\n **kwargs,\n }\n\n if self.threading:\n payload.update({\"thread_ts\": self.thread_ts})\n\n slack = requests.post(\n \"https://slack.com/api/chat.postMessage\", headers=headers, json=payload\n )\n\n if slack.status_code != requests.codes.ok:\n self.print(slack.text, color=C.COLOR_ERROR)\n else:\n response = slack.json()\n if response.get(\"ok\", False) is not True:\n self.print(slack.text, color=C.COLOR_ERROR)\n else:\n if self.thread_ts is None:\n self.thread_ts = response.get(\"ts\", None)\n self.print(f\"Slack message sent ts={self.thread_ts}\", C.COLOR_DEBUG)\n\n\nclass SlackMessages(object):\n def __init__(self, print, slack):\n self.print = print\n self.slack = slack\n self.messages = []\n\n def __str__(self):\n return str(self.messages)\n\n def push(self, message):\n self.messages.append(message)\n\n def __iter__(self):\n self.i = 0\n return self\n\n def __next__(self):\n if self.i < len(self.messages):\n self.i += 1\n return self.messages[self.i - 1]\n raise StopIteration\n\n def send(self):\n blocks = []\n for message in self.messages:\n blocks.append(message.get_blocks())\n flatten_blocks = [item for sublist in blocks for item in sublist]\n # self.slack.send(text=\"hello world\", blocks=flatten_blocks)\n # not sure what to use as text here\n self.slack.send(blocks=flatten_blocks)\n\n\nclass SlackMessage(object):\n def __init__(self, print, slack, text, context, divider=False, *args, **kwargs):\n self.print = print\n self.slack = slack\n self.text = text\n self.context = context\n self.divider = divider\n self.buffered = []\n\n self.blocks = []\n\n if divider:\n self.blocks.append(self._slack_divider())\n\n if text:\n self.blocks.append(self._slack_block_section())\n\n if context:\n self.blocks.append(self._slack_block_context())\n\n def _alphabet(self, string):\n words = string.split(\" \")\n return \"\".join(\n [\n f\":alphabet-white-{letter.lower()}:\"\n for word in words\n for letter in list(word)\n ]\n )\n\n def _slack_text(self):\n pieces = []\n\n if \"indent\" in self.text:\n pieces.append(f\"{self.text['indent']}\")\n\n if \"ts\" in self.text:\n pieces.append(f\"{self.text['ts']}\")\n\n if \"pre\" in self.text:\n pieces.append(f\"*{self.text['pre']}*\")\n\n if \"text\" in 
self.text:\n pieces.append(f\"[ {self.text['text']} ]\")\n\n if \"post\" in self.text:\n pieces.append(f\"⮕ {self.text['post']}\")\n\n return \" \".join(pieces)\n\n def _slack_divider(self):\n return {\n \"type\": \"divider\",\n }\n\n def _slack_block_section(self):\n return {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": str(self._slack_text()),\n },\n }\n\n def _slack_block_context(self):\n return {\n \"type\": \"context\",\n \"elements\": [\n {\n \"type\": \"mrkdwn\",\n \"text\": f\"{self.context['text']}\",\n }\n ],\n }\n\n def send(self):\n self.slack.send(text=self._slack_text(), blocks=self.blocks)\n\n def get_blocks(self):\n return self.blocks\n\n\nclass CallbackModule(CallbackBase):\n\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = \"notification\"\n CALLBACK_NAME = \"mamercad.cloudmason.slack\"\n CALLBACK_NEEDS_WHITELIST = True\n\n def __init__(self, display=None):\n super(CallbackModule, self).__init__(display=display)\n self.ansible = {}\n self.ansible[\"playbook\"] = {}\n self.ansible[\"tasks\"] = []\n self.ansible[\"results\"] = {}\n self.ansible[\"summary\"] = {}\n\n self.current_play_uuid = None\n self.current_play_name = None\n self.current_task_uuid = None\n self.current_task_name = None\n\n def set_options(self, task_keys=None, var_options=None, direct=None):\n super(CallbackModule, self).set_options(\n task_keys=task_keys, var_options=var_options, direct=direct\n )\n\n self.verbosity = self._display.verbosity\n\n self.slack_bot_token = self.get_option(\"slack_bot_token\")\n self.slack_channel = self.get_option(\"slack_channel\")\n self.slack_format = self.get_option(\"slack_format\")\n self.slack_cadence = self.get_option(\"slack_cadence\")\n self.slack_threading = self.get_option(\"slack_threading\")\n self.ansible_events = self.get_option(\"ansible_events\").split(\",\")\n\n self.slack = Slack(\n print=self._display.display,\n token=self.slack_bot_token,\n channel=self.slack_channel,\n threading=self.slack_threading,\n )\n\n self.slack_messages = SlackMessages(\n print=self._display.display, slack=self.slack\n )\n\n if self.slack_bot_token is None:\n self._display.warning(\n \"Slack Bot Token was not provided; it \"\n \"can be provided using SLACK_BOT_TOKEN \"\n \"environment variable.\"\n )\n self.disabled = True\n\n if self.slack_channel is None:\n self._display.display(\n \"Slack Channel was not provided; it \"\n \"can be provided using SLACK_CHANNEL \"\n \"environment variable.\"\n )\n self.disabled = True\n\n def _get_ts(self):\n now = datetime.datetime.now()\n return f\"{now.hour:02d}:{now.minute:02d}\"\n\n def v2_playbook_on_start(self, playbook, **kwargs):\n if self.verbosity >= 3:\n self._display.display(\"v2_playbook_on_start\", color=C.COLOR_DEBUG)\n\n self.ansible[\"playbook\"][\"basedir\"] = playbook._basedir\n self.ansible[\"playbook\"][\"filename\"] = playbook._file_name\n self.ansible[\"playbook\"][\"plays\"] = playbook.get_plays()\n\n if \"v2_playbook_on_start\" in self.ansible_events:\n plays = str(playbook.get_plays())[1:-1]\n basedir = playbook._basedir\n filename = playbook._file_name\n\n text = {\n \"ts\": self._get_ts(),\n \"pre\": \"PLAYBOOK\",\n \"text\": \"Starting playbook\",\n \"post\": \":rocket:\",\n }\n context = {\n \"text\": (\n f\"Directory: {basedir}\\n\"\n f\"Filename: {filename}\\n\"\n f\"Plays: {plays}\\n\"\n )\n }\n\n message = SlackMessage(\n print=self._display.display,\n slack=self.slack,\n text=text,\n context=context,\n divider=True,\n )\n\n if self.slack_cadence == \"realtime\":\n message.send()\n 
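            # Illustrative sketch (not part of the plugin) of the JSON payload
            # that Slack.send() above ultimately POSTs to
            # https://slack.com/api/chat.postMessage with a Bearer token; all
            # values are placeholders, and "thread_ts" is only attached when
            # the slack_threading option is on and a parent message exists:
            #
            #   {
            #     "channel": "#bots",
            #     "text": "12:30 *PLAYBOOK* [ Starting playbook ]",
            #     "blocks": [
            #       {"type": "divider"},
            #       {"type": "section",
            #        "text": {"type": "mrkdwn", "text": "12:30 *PLAYBOOK* [ Starting playbook ]"}},
            #       {"type": "context",
            #        "elements": [{"type": "mrkdwn", "text": "Directory: /path/to/playbooks"}]}
            #     ],
            #     "thread_ts": "1612345678.000100"
            #   }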
else:\n self.slack_messages.push(message)\n\n def v2_playbook_on_play_start(self, play):\n if self.verbosity >= 3:\n self._display.display(\"v2_playbook_on_play_start\", color=C.COLOR_DEBUG)\n\n self.play_uuid = str(play._uuid)\n self.play_name = str(play.name)\n\n self.current_play_uuid = self.play_uuid\n self.current_play_name = self.play_name\n\n if \"v2_playbook_on_play_start\" in self.ansible_events:\n text = {\n \"indent\": \":arrow_right:\",\n \"ts\": self._get_ts(),\n \"pre\": \"PLAY\",\n \"text\": self.play_name,\n \"post\": \"\",\n }\n\n message = SlackMessage(\n print=self._display.display, slack=self.slack, text=text, context={}\n )\n\n if self.slack_cadence == \"realtime\":\n message.send()\n else:\n self.slack_messages.push(message)\n\n def v2_playbook_on_task_start(self, task, **kwargs):\n if self.verbosity:\n self._display.display(\"v2_playbook_on_task_start\", color=C.COLOR_DEBUG)\n\n self.ansible[\"tasks\"].append(\n {\n \"uuid\": task._uuid,\n \"path\": task.get_path(),\n \"role\": task._role,\n \"task\": task.get_name(),\n }\n )\n\n self.current_task_uuid = task._uuid\n self.current_task_name = task.get_name()\n\n if \"v2_playbook_on_task_start\" in self.ansible_events:\n task_name = str(task.get_name())\n text = {\n \"indent\": \":arrow_right: :arrow_right:\",\n \"ts\": self._get_ts(),\n \"pre\": \"TASK\",\n \"text\": task_name,\n \"post\": \"\",\n }\n\n message = SlackMessage(\n print=self._display.display, slack=self.slack, text=text, context={}\n )\n\n if self.slack_cadence == \"realtime\":\n message.send()\n else:\n self.slack_messages.push(message)\n\n def _runner_on(self, status, result):\n task_uuid = self.current_task_uuid\n self.ansible[\"results\"][task_uuid] = {\n \"uuid\": task_uuid,\n \"status\": status,\n \"host\": result._host,\n \"result\": result._result,\n \"task\": result._task,\n }\n\n host = result._host\n\n key = None\n\n if \"msg\" in result._result.keys():\n key = \"msg\"\n else:\n # hrm the \"first key\", definitely not sure about this\n key = list(result._result.keys())[0]\n\n msg = str(result._result[key]).strip()\n\n changed = result._result.get(\"changed\")\n if changed is None:\n post = \"\"\n else:\n post = f\"changed={str(changed).lower()}\"\n\n if status == \"ok\":\n post += \" ⮕ :white_check_mark:\"\n if status == \"failed\":\n post += \" ⮕ :x:\"\n if status == \"unreachable\":\n post += \" ⮕ :skull:\"\n\n text = {\n \"indent\": \":arrow_right: :arrow_right: :arrow_right:\",\n \"ts\": self._get_ts(),\n \"pre\": status,\n \"text\": host,\n \"post\": post,\n }\n context = {\n \"text\": f\"```{key}: {msg}```\",\n }\n\n message = SlackMessage(\n print=self._display.display, slack=self.slack, text=text, context=context\n )\n\n if self.slack_cadence == \"realtime\":\n message.send()\n else:\n self.slack_messages.push(message)\n\n def v2_runner_on_ok(self, result, *args, **kwargs):\n if self.verbosity >= 3:\n self._display.display(\"v2_runner_on_ok\", color=C.COLOR_DEBUG)\n if \"v2_runner_on_ok\" in self.ansible_events:\n self._runner_on(\"ok\", result)\n\n def v2_runner_on_skipped(self, result, *args, **kwargs):\n if self.verbosity >= 3:\n self._display.display(\"v2_runner_on_skipped\", color=C.COLOR_DEBUG)\n if \"v2_runner_on_skipped\" in self.ansible_events:\n self._runner_on(\"skipped\", result)\n\n def v2_runner_on_unreachable(self, result, *args, **kwargs):\n if self.verbosity >= 3:\n self._display.display(\"v2_runner_on_unreachable\", color=C.COLOR_DEBUG)\n if \"v2_runner_on_unreachable\" in self.ansible_events:\n 
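        # For reference, a minimal sketch of the ansible.cfg entries that
        # enable and configure this callback, mirroring the DOCUMENTATION
        # block above; the token and channel values are placeholders:
        #
        #   [defaults]
        #   callbacks_enabled = mamercad.cloudmason.slack
        #
        #   [callback_slack]
        #   slack_bot_token = xoxb-37809492...
        #   slack_channel = #bots
        #   slack_format = plain
        #   slack_cadence = realtime
        #
        # The same options can instead come from the environment via
        # SLACK_BOT_TOKEN, SLACK_CHANNEL, SLACK_FORMAT and SLACK_CADENCE.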
self._runner_on(\"unreachable\", result)\n\n def v2_runner_on_failed(self, result, *args, **kwargs):\n if self.verbosity >= 3:\n self._display.display(\"v2_runner_on_failed\", color=C.COLOR_DEBUG)\n if \"v2_runner_on_failed\" in self.ansible_events:\n self._runner_on(\"failed\", result)\n\n def v2_playbook_on_stats(self, stats, *args, **kwargs):\n if self.verbosity >= 3:\n self._display.display(\"v2_runner_on_stats\", color=C.COLOR_DEBUG)\n if \"v2_playbook_on_stats\" in self.ansible_events:\n\n summaries = []\n _hosts = sorted(stats.processed.keys())\n for _host in _hosts:\n summary = stats.summarize(_host)\n host = str(_host)\n statsline = \" \".join(\n \"{!s}={!r}\".format(key, val) for (key, val) in summary.items()\n )\n summaries.append({\"host\": host, \"stats\": statsline})\n\n summary_lines = \"\"\n for summary in summaries:\n summary_lines += summary[\"host\"] + \": \" + summary[\"stats\"] + \"\\n\"\n\n text = {\n \"ts\": self._get_ts(),\n \"pre\": f\"PLAY RECAP\",\n }\n context = {\n \"text\": f\"```{summary_lines}```\",\n }\n\n message = SlackMessage(\n print=self._display.display,\n slack=self.slack,\n text=text,\n context=context,\n )\n\n if self.slack_cadence == \"realtime\":\n message.send()\n else:\n self.slack_messages.push(message)\n\n self.slack_messages.send()\n","repo_name":"mamercad/mamercad.cloudmason","sub_path":"plugins/callback/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":16082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8407077228","text":"import math, time\n\nt = time.time()\n\nps = {}\n\nfor a in range(1,300):\n\tfor b in range(1000,300,-1):\n\t\tc = math.sqrt(a**2 + b**2)\n\t\tif a + b + c <= 1000 and int(c) == c:\n\t\t\tkey = str(a+b+int(c))\n\t\t\tif key in ps:\n\t\t\t\tps[key] += 1\n\t\t\telse:\n\t\t\t\tps[key] = 1\t\n\nresult = ['0',0]\nfor tr in ps:\n\tif ps[tr] > result[1]:\n\t\tresult = [tr, ps[tr]]\n\t\t\nprint(result, time.time()-t, 's')\n\t\n","repo_name":"Derexas/Euler","sub_path":"39.py","file_name":"39.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21907283320","text":"from tkinter import *\nimport pygame\nfrom tkinter import filedialog\nimport time\nfrom mutagen.mp3 import MP3 #finding total length\nimport tkinter.ttk as ttk\n\n\n#creating a window\nwindow = Tk()\nwindow.title(\"MyMp3\")\nwindow.geometry(\"620x410\")\nwindow.configure(bg='chocolate1')\nwindow.iconphoto(False, PhotoImage(file='icons/icon.png'))\n\npygame.mixer.init()\n\nglobal files\nfiles = \"\"\n#methods used for controls\n\ndef saving_playlist(file):\n list_data = list_box.get(0,END)\n try:\n with open(file, \"w\", encoding=\"utf-8\") as file:\n for d in list_data:\n file.write(d + \"\\n\")\n except:\n file = filedialog.asksaveasfile(defaultextension = \".txt\",mode=\"w\")\n for d in list_data:\n file.write(d + \"\\n\")\n \n \ndef load_playlist():\n file = filedialog.askopenfilename(initialdir='playlists/',title=\"Choose A Playlist\", filetypes=((\"Text Files\",\"*.txt\"),))\n global files\n files = file\n with open(file, \"r\", encoding=\"utf-8\") as file:\n for f in file:\n list_box.insert(END, f.strip())\n\n \ndef add_song(): # add a single song in playlist\n song = filedialog.askopenfilename(initialdir='songs/',title=\"Choose A Song\", filetypes=((\"mp3 Files\",\"*.mp3\"),))\n list_box.insert(END, song)\n\ndef add_many_song(): # add multiple songs in playlist\n songs = 
filedialog.askopenfilenames(initialdir='songs/',title=\"Choose A Song\", filetypes=((\"mp3 Files\",\"*.mp3\"),))\n for song in songs:\n list_box.insert(END, song)\n\ndef delete_song():\n stop()\n list_box.delete(ANCHOR)# remove the selected song from play list\n \n\ndef delete_all_songs():\n stop()\n list_box.delete(0, END)# remove all songs from playlist\n \n\ndef stop():\n pygame.mixer.music.stop()\n statusbar.config(text='')\n slider.config(value=0)\n \ndef play():\n stop()\n song = list_box.get(ACTIVE)# method that loads the song and plays it\n pygame.mixer.music.load(song)\n pygame.mixer.music.play(loops=0)\n global paused\n paused=False\n \n song_dur()\n\n\nglobal paused # global variable to store if song is paused or not\npaused = False\n\ndef pause(is_paused): # method to pause and unpause\n global paused\n paused = is_paused\n\n if paused:\n pygame.mixer.music.unpause()\n paused = False\n else:\n pygame.mixer.music.pause()\n paused = True\n\ndef next_song():\n nex = list_box.curselection()# this return a number(tuple)\n nex = nex[0]+1 # add 1 to move to next song in list_box\n\n list_box.selection_clear(0, END)\n list_box.activate(nex)\n list_box.selection_set(nex, last=None)\n play()\n\ndef prev_song():\n prev = list_box.curselection()# this return a number(tuple)\n prev = prev[0]-1 # subtract 1 to move to the previous songs \n\n list_box.selection_clear(0, END)\n list_box.activate(prev)\n list_box.selection_set(prev, last=None)\n play()\n\ndef song_dur():#method that updates the status bar\n current_time = int(pygame.mixer.music.get_pos()/1000)\n\n \n formated_current_time = time.strftime('%M:%S', time.gmtime(current_time))\n\n current_song = list_box.curselection()# this return a number(tuple)\n try:\n song = list_box.get(current_song)# get the song to find the duration\n \n \n song = MP3(song)\n global duration #making global so that it can be accessed in slider \n duration = song.info.length\n formated_duration = time.strftime('%M:%S', time.gmtime(duration))\n\n \n current_time+=1# updating current time to cover up time diff between slider updation and actual time\n k=slider.get()\n if int(k)==int(duration):\n statusbar.config(text=f'{formated_duration} / {formated_duration}')\n elif paused:\n pass\n elif int(slider.get())==current_time:\n slider_length = int(duration)\n slider.config(to=slider_length, value=current_time)\n else:\n slider_length = int(duration)\n slider.config(to=slider_length, value=slider.get())\n\n formated_current_time = time.strftime('%M:%S', time.gmtime(int(slider.get())))\n statusbar.config(text=f'{formated_current_time} / {formated_duration}')\n next_time = int(slider.get())\n next_time+=1\n slider.config(value=next_time)\n\n except:\n pass\n \n \n statusbar.after(1000,song_dur)#updating timer after every second\n\ndef slide(x):\n\n song = list_box.get(ACTIVE)\n pygame.mixer.music.load(song)\n pygame.mixer.music.play(loops=0, start=int(slider.get()))\n\n\ndef volume(x):\n pygame.mixer.music.set_volume(vol_slider.get())\n\n\n current_vol = pygame.mixer.music.get_volume()\n current_vol = current_vol * 100\n\n if int(current_vol<10):\n vol_meter.config(image=vol0)\n elif int(current_vol)<25:\n vol_meter.config(image=vol1)\n elif int(current_vol)<40:\n vol_meter.config(image=vol2)\n elif int(current_vol)<55:\n vol_meter.config(image=vol3)\n elif int(current_vol)<65:\n vol_meter.config(image=vol4)\n elif int(current_vol)<75:\n vol_meter.config(image=vol5)\n elif int(current_vol)<85:\n vol_meter.config(image=vol6)\n elif int(current_vol)==100:\n 
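        # note: this chain has a gap - levels from 85 to 99 match no branch,
        # so the previous icon is kept until the volume reaches exactly 100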
vol_meter.config(image=vol7)\n \n\nglobal vol0\nglobal vol1\nglobal vol2\nglobal vol3\nglobal vol4\nglobal vol5\nglobal vol6\nglobal vol7\n\nvol0 = PhotoImage(file='icons/VOL0.png',)\nvol1 = PhotoImage(file='icons/VOL1.png')\nvol2 = PhotoImage(file='icons/VOL2.png')\nvol3 = PhotoImage(file='icons/VOL3.png')\nvol4 = PhotoImage(file='icons/VOL4.png')\nvol5 = PhotoImage(file='icons/VOL5.png')\nvol6 = PhotoImage(file='icons/VOL6.png')\nvol7 = PhotoImage(file='icons/VOL7.png')\n\nmaster_frame = Frame(window, bg=\"DarkOrange3\")\nmaster_frame.pack(pady=20)\n\n#list to conatin the song names\nlist_box = Listbox(master_frame, bg=\"black\", fg=\"green2\", selectforeground=\"cyan\", selectbackground=\"gray\", width=50)\nlist_box.grid(row=0,column=0)\n\nvolume_frame = LabelFrame(master_frame,text=\"Volume\")\nvolume_frame.grid(row=0,column=1,padx=30)\n \n#loading the icons for control buttons\nprev_icon = PhotoImage(file='icons/backB.png')\nnext_icon = PhotoImage(file='icons/nextB.png')\nplay_icon = PhotoImage(file='icons/playB.png')\npause_icon = PhotoImage(file='icons/pauseB.png')\nstop_icon = PhotoImage(file='icons/stopB.png')\n\nslider = ttk.Scale(master_frame, from_=0, to=100, orient=HORIZONTAL, value=0, command=slide, length=450)\nslider.grid(row=1,column=0,pady=10)\n\nvol_slider = ttk.Scale(volume_frame, from_=1, to=0, orient=VERTICAL, value=1, command=volume, length=135)\nvol_slider.pack(pady=10)\n\n\n# creating a frame to contain buttons\ncontrol_frame = Frame(master_frame)\ncontrol_frame.grid(row=2,column=0)\n\nvol_meter = Label(master_frame, image=vol7)\nvol_meter.grid(row=1,rowspan=2, column=1,padx=30,pady=10)\n\n#creating play,pause,next and previous buttons\nplay_button = Button(control_frame, image=play_icon , borderwidth=0, command= play, height = 50, width = 50)\npause_button = Button(control_frame, image=pause_icon , borderwidth=0, command=lambda:pause(paused), height = 50, width = 50)\nnext_button = Button(control_frame, image=next_icon , borderwidth=0, command=next_song, height = 50, width = 50)\nprev_button = Button(control_frame, image=prev_icon , borderwidth=0, command=prev_song, height = 50, width = 50)\nstop_button = Button(control_frame, image=stop_icon , borderwidth=0, command=stop, height = 50, width = 50)\n\nplay_button.grid(row=0, column=0,padx=10,pady=20)\npause_button.grid(row=0, column=1, padx=10,pady=20)\nnext_button.grid(row=0, column=2, padx=10,pady=20)\nprev_button.grid(row=0, column=3, padx=10,pady=20)\nstop_button.grid(row=0, column=4, padx=10,pady=20)\n\n#creating a menu\nmy_menu = Menu(window)\nwindow.config(menu=my_menu)\n\nfile_menu = Menu(my_menu)\nmy_menu.add_cascade(label=\"File\", menu=file_menu)\nfile_menu.add_command(label=\"Load Playlist\",command=load_playlist)\nfile_menu.add_command(label=\"Save PlayList\",command=lambda:saving_playlist(files))\n\nadd_song_menu = Menu(my_menu)\nmy_menu.add_cascade(label=\"Add Songs\", menu=add_song_menu)\nadd_song_menu.add_command(label=\"Add a song\",command=add_song)\nadd_song_menu.add_command(label=\"Add multiple songs\",command=add_many_song)\n\ndelete_song_menu = Menu(my_menu)\nmy_menu.add_cascade(label=\"Delete Songs\", menu=delete_song_menu)\ndelete_song_menu.add_command(label=\"Delete this song\",command=delete_song)\ndelete_song_menu.add_command(label=\"Delete all songs\",command=delete_all_songs)\n\n\n\nstatusbar = Label(window, text='', bd=1, relief=GROOVE, anchor=E)\nstatusbar.pack(fill=X, side=BOTTOM, 
ipady=2)\n\n\n\nwindow.mainloop()\n","repo_name":"Himanshu9617/MusicPlayer","sub_path":"MyMp3.py","file_name":"MyMp3.py","file_ext":"py","file_size_in_byte":8609,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"35236561499","text":"global i \n\ni = 0\nnumbers = []\n\ncond = (i < 6)\n\ndef iter(i, list):\n\tprint(f'At the top i is {i}.')\n\tlist.append(i)\n\tprint(\"Numbers now : {}\".format(list))\n\nwhile cond :\n\titer(i, numbers)\n\ti += 1\n\tcond = (i < 6)\n\tprint(f\"Now i is {i}.\")\n\nprint(\"The numbers : {}\".format(numbers))","repo_name":"thibault-djaballah/lp3thw","sub_path":"ex33.py","file_name":"ex33.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26400019226","text":"with open(\"test.txt\") as f:\n numbers = f.read().splitlines()\n\nnumbers = [int(x) for x in numbers]\n\nprint(numbers)\n\ntarget = 41682220\ntarget_not_found = True\ni = 0\n\nwhile i < len(numbers) and target_not_found:\n added_nums = []\n\n n = numbers[i:]\n\n for j in range(len(n)):\n if sum(added_nums) >= target:\n break\n else:\n added_nums.append(n[j])\n \n if sum(added_nums) == target:\n target_not_found = False\n\n \n i+=1\n \n \nprint(added_nums)\nfinal = min(added_nums) + max(added_nums)\nprint(final)\n","repo_name":"conormorgan/AdventOfCode","sub_path":"day9/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24854684036","text":"### These are all random ZOHO Interview Questions taken randomly\n\n\"\"\"\nQuestion 1: Print all distinct permutations of a given string with duplicate characters. \n\nQuestion 2: Write a program to give the following output for the given input\nEg 1: Input: a1b10\n Output: abbbbbbbbbb\nEg: 2: Input: b3c6d15\n Output: bbbccccccddddddddddddddd\nThe number varies from 1 to 99.\n\"\"\"\n\nclass ZOHO_COMPETETIVE_CODING:\n def toString(self,List): \n return ''.join(List) \n # Function to print permutations of string \n # This function takes three parameters: \n # 1. String \n # 2. Starting index of the string \n # 3. Ending index of the string. 
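    # A compact alternative for Question 1 above (distinct permutations):
    # deduplicate with a set instead of recursing. This is an illustrative
    # sketch, not the author's approach; the recursive permute() below prints
    # every arrangement, so inputs with repeated characters appear more than once.
    def distinct_permutations(self, s):
        from itertools import permutations
        # the set collapses arrangements made identical by repeated characters
        return sorted({"".join(p) for p in permutations(s)})
        # e.g. distinct_permutations("ABA") -> ['AAB', 'ABA', 'BAA'] (3, not 3! = 6)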
\n    def permute(self, a, l, r): \n        if l==r: \n            print (self.toString(a)) \n        else: \n            for i in range(l,r+1): \n                a[l], a[i] = a[i], a[l] \n                self.permute(a, l+1, r) \n                a[l], a[i] = a[i], a[l] # backtrack \n    def expand_the_string(self, string):\n        i = 0\n        # digits 0-9, used to check whether a character is part of a repeat count\n        numbers = [str(d) for d in range(0, 10)]\n        # stack to store the letters seen since the last count\n        stack = []\n        # holds the letters popped from the stack\n        elements = \"\"\n        ans = \"\"\n        while i < len(string):\n            if string[i] in numbers:\n                # consume a one- or two-digit repeat count (1 to 99)\n                if i + 1 < len(string) and string[i+1] in numbers:\n                    num = int(string[i] + string[i+1])\n                    i += 2\n                else:\n                    num = int(string[i])\n                    i += 1\n                # flush the letters gathered so far, repeated num times\n                while stack:\n                    elements += stack.pop()\n                ans += (elements * num)\n                elements = \"\"  # backtrack for the next group\n                continue\n            stack.append(string[i])\n            i += 1\n        return ans\n    \nif __name__ == \"__main__\":\n    z = ZOHO_COMPETETIVE_CODING()\n    string = \"ABC\"\n    n = len(string) \n    a = list(string) \n    z.permute(a, 0, n-1)\n    print(z.expand_the_string(\"b3c6d15\"))\n\n","repo_name":"sukilsiva/competitve_coding","sub_path":"ZOHO interview Questions Set-3.py","file_name":"ZOHO interview Questions Set-3.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"28460828229","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n\n    # Add, Update, Delete, Edit Data Urls\n    path('addcategoryimage/', views.addci, name='addci'),\n    path('editcategoryimage//', views.edtci, name='edtci'),\n    path('updatecategoryimage//', views.updci, name='updci'),\n    path('deletecategoryimage//', views.delci, name='delci'),\n\n    # Fetch Or Other Data Urls\n    path('managecategoryimage/', views.manci, name='manci'),\n    \n]","repo_name":"deegosai/autoimagemaker","sub_path":"addpoint/catimg/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"3594591907","text":"# Score categories.\n# Change the values as you see fit.\nYACHT = 0\nONES = 1\nTWOS = 2\nTHREES = 3\nFOURS = 4\nFIVES = 5\nSIXES = 6\nFULL_HOUSE = 7\nFOUR_OF_A_KIND = 8\nLITTLE_STRAIGHT = 9\nBIG_STRAIGHT = 10\nCHOICE = 11\n\nNUMBERS = (ONES, TWOS, THREES, FOURS, FIVES, SIXES)\n\nSTRAIGHTS = (LITTLE_STRAIGHT, BIG_STRAIGHT)\n\n\ndef is_full_house(dice: list): return sorted(\n    {value: dice.count(value) for value in dice}.values()) == [2, 3]\n\n\ndef four_of_a_kind(dice: list):\n    values = {value: dice.count(value) for value in dice}\n    try:\n        num = [key for key, val in values.items() if val >= 4][0]\n        return 4 * num\n    except IndexError:\n        return 0\n\n\ndef is_straight(dice: list, type):\n    if type == BIG_STRAIGHT:\n        return len(set(dice)) == 5 and 1 not in dice\n    else:\n        return len(set(dice)) == 5 and 6 not in dice\n\n\ndef score(dice: list, category: int):\n    if category == CHOICE:\n        return sum(dice)\n\n    if category == YACHT:\n        return 50 if len(set(dice)) == 1 else 0\n\n    if category in NUMBERS:\n        return sum(category for i in dice if i == category)\n\n    if category == FULL_HOUSE and is_full_house(dice):\n        return sum(dice)\n\n    if category == FOUR_OF_A_KIND:\n        return four_of_a_kind(dice)\n\n    if category in STRAIGHTS and is_straight(dice, category):\n        return 30\n\n    return 
0\n","repo_name":"xfredeq/Exercism-Python","sub_path":"easy_exercises/yacht/yacht.py","file_name":"yacht.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31067050740","text":"# Runtime: 64 ms, faster than 12.63% of Python3 online submissions for Kids With the Greatest Number of Candies.\n# Memory Usage: 13.8 MB, less than 62.67% of Python3 online submissions for Kids With the Greatest Number of Candies.\n\n#Time: O(N)\n#Space: O(N)\n\n\nclass Solution:\n def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:\n # Base Case\n if not candies:\n return []\n if len(candies) == 1:\n return [True]\n # Finding a kid with the greatest number of candies\n max_cand = 0\n result = []\n length = len(candies)\n for i in range(length):\n max_cand = max(max_cand, candies[i])\n # Checking which kid can have the greatest #of candies\n for i in range(length):\n if candies[i]+extraCandies >= max_cand:\n result.append(True)\n else:\n result.append(False)\n return result\n","repo_name":"sayuree/leetcode-problems","sub_path":"arrays/1431.kids_with_the_greatest_number_of_candies.py","file_name":"1431.kids_with_the_greatest_number_of_candies.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36388422186","text":"from math import pi\nimport numpy as np\nimport uwds3_msgs\nfrom .shape import Shape, ShapeType\nfrom ..vector.vector6d import Vector6D\n\n\nclass Mesh(Shape):\n \"\"\"Represents a 3D Mesh\"\"\"\n def __init__(self, mesh_resource=\"\", name=\"\",\n x=.0, y=.0, z=.0,\n rx=.0, ry=.0, rz=.0,\n scale_x=1., scale_y=1., scale_z=1.,\n r=0, g=0., b=0., a=1.):\n \"\"\"Mesh constructor\n \"\"\"\n super(Mesh, self).__init__(ShapeType.MESH,\n name=name,\n x=x, y=y, z=z,\n rx=rx, ry=ry, rz=rz,\n scale_x=scale_x,\n scale_y=scale_y,\n scale_z=scale_z,\n r=r, g=g, b=b, a=a)\n self.mesh_resource = mesh_resource\n\n def from_msg(self, msg):\n \"\"\" Convert from ROS message\n \"\"\"\n self.mesh_resource = msg.mesh_resource\n self.name = msg.name\n a = msg.color.a\n r = msg.color.r\n g = msg.color.g\n b = msg.color.b\n self.color = np.array([r, g, b, a])\n self.pose.from_msg(msg.pose)\n return self\n\n def to_msg(self):\n \"\"\" Convert to ROS message\n \"\"\"\n shape = uwds3_msgs.msg.PrimitiveShape()\n shape.type = self.type\n shape.name = self.name\n shape.mesh_resource = self.mesh_resource\n shape.scale.x = self.scale.x\n shape.scale.y = self.scale.y\n shape.scale.z = self.scale.z\n shape.color.r = self.color[0]\n shape.color.g = self.color[1]\n shape.color.b = self.color[2]\n shape.color.a = self.color[3]\n shape.pose = self.pose.to_msg()\n return shape\n","repo_name":"AndrewJSchoen/uwds3","sub_path":"src/pyuwds3/types/shape/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"71880757600","text":"#Ananiya Deneke\r\n#Main file\r\n#5/16/2023\r\n#some code taken from lab tutorial\r\n\r\nimport pygame\r\nfrom ball import Ball\r\nfrom paddle import Paddle\r\nfrom text import Text\r\n\r\ndef pause_game(surface, message):\r\n paused = True\r\n pauseText = Text(message, 50, 300)\r\n while paused:\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE:\r\n paused = False\r\n elif event.key == pygame.K_ESCAPE:\r\n 
pygame.quit()\r\n quit()\r\n elif event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n pauseText.setMessage(message)\r\n pauseText.draw(surface)\r\n pygame.display.flip()\r\n\r\ndef restart_game():\r\n game_code()\r\n\r\ndef game_code():\r\n pygame.init()\r\n surface = pygame.display.set_mode((800, 600))\r\n DREXEL_BLUE = (7, 41, 77)\r\n\r\n myBall = Ball(400, 300, 25, DREXEL_BLUE)\r\n myPaddle = Paddle(200, 25, DREXEL_BLUE)\r\n myScoreBoard = Text(\"Score: 0\", 10, 10)\r\n myLevel = Text(\"Level: 1\", 700, 10)\r\n\r\n fpsClock = pygame.time.Clock()\r\n\r\n clockSpeed = 60\r\n clockCounter = 1\r\n numHits = 0\r\n\r\n green = (0, 255, 0)\r\n yellow = (255, 255, 0)\r\n red = (255, 0, 0)\r\n white = (255, 255, 255)\r\n lightBlue = (0, 255, 255)\r\n\r\n running = True\r\n\r\n while running:\r\n # Different clock speeds for different levels\r\n if clockSpeed == 60:\r\n surface.fill(lightBlue)\r\n myScoreBoard.setMessage(\"Score: \" + str(numHits))\r\n myLevel.setMessage(\"Level: 1\")\r\n\r\n if clockSpeed == 120:\r\n surface.fill(yellow)\r\n myLevel.setMessage(\"Level: 2\")\r\n\r\n if clockSpeed == 240:\r\n surface.fill(red)\r\n myLevel.setMessage(\"Level: 3\")\r\n\r\n if clockSpeed > 240:\r\n surface.fill(green)\r\n pause_game(surface, 'Game Won!!! - press Space to play again')\r\n restart_game()\r\n \r\n myBall.draw(surface)\r\n myPaddle.draw(surface)\r\n myScoreBoard.draw(surface)\r\n myLevel.draw(surface)\r\n\r\n if myBall.intersects(myPaddle):\r\n myBall.setYSpeed(myBall.getSpeed()[0] * - 2)\r\n numHits += 1\r\n myScoreBoard.setMessage(\"Score: \" + str(numHits))\r\n\r\n if numHits >= clockCounter * 1:\r\n clockSpeed *= 2\r\n clockCounter += 1\r\n # Player loses when paddle fails to catch ball at the top surface\r\n if myBall.getLoc()[1] > (600 - 20 - 25): # account for ball radius LHS and paddle height and 20 pixel offset for RHS\r\n pause_game(surface, 'Level Failed - press Space to try again')\r\n restart_game()\r\n myBall.move()\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pause_game(surface, \"GAME PAUSED - press Space to Continue or Esc again to Quit\")\r\n\r\n pygame.display.update()\r\n fpsClock.tick(clockSpeed)\r\n pygame.quit()\r\n exit()\r\n\r\n\r\ngame_code()\r\n","repo_name":"AnanDeneke/Ananiya_Coding","sub_path":"Python/Customized pygame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31258861250","text":"import tensorflow.compat.v1 as tf\ntf.disable_eager_execution()\nimport tensorflow as tf2\ndef rename_var(ckpt_path, new_ckpt_path):\n with tf.Session() as sess:\n for var_name, _ in tf2.train.list_variables(ckpt_path):\n print(var_name)\n var = tf2.train.load_variable(ckpt_path, var_name)\n new_var_name = var_name.replace('/', '_')\n var = tf.Variable(var, name=new_var_name)\n\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n saver.save(sess, new_ckpt_path)\nif __name__ == '__main__':\n ckpt_path=[]\n new_ckpt_path=[]\n ckpt_path.append ('/home/zjj/WTUDF/weight_tf/wgts_epochs_10000.ckpt')\n new_ckpt_path.append('/home/zjj/WTUDF/weight_tf/wgts_epochs_10000_fix.ckpt')\n for ckpt_1,ckpt_fix in zip(ckpt_path,new_ckpt_path):\n rename_var(ckpt_1, 
ckpt_fix)\n\n","repo_name":"Kira-Z-China/weight-transfer","sub_path":"utils/change_ckpt_dict_name.py","file_name":"change_ckpt_dict_name.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"73028642080","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\nlenhash = {}\n\nspace = 1\n\nmins = []\nmaxs = []\n\nfor i in range(1, 50):\n minx = 48*(2**i - 1) + sum([(48**2)**p for p in range(i)])\n maxx = 122*(2**i - 1)+ sum([(122**2)**p for p in range(i)])\n\n mins.append(math.log(minx))\n maxs.append(math.log(maxx))\n\n lenhash[i] = [minx, maxx]\n\n# print(lenhash)\n\nfor i, m in enumerate(zip(mins, maxs)):\n plt.plot([m[0], m[1]], [(i+1)*space, (i+1)*space])\n\n# plt.grid()\nplt.show()\n\n","repo_name":"brainspoof/newtons-method","sub_path":"str_length_graph.py","file_name":"str_length_graph.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27292128071","text":"from datetime import datetime\n\nfrom freezegun import freeze_time\n\nfrom odoo.exceptions import UserError\nfrom odoo.tests import tagged\nfrom odoo.tests.common import Form\n\nfrom odoo.addons.budget_control.tests.common import BudgetControlCommon\n\n\n@tagged(\"post_install\", \"-at_install\")\nclass TestBudgetControlPurchase(BudgetControlCommon):\n @classmethod\n @freeze_time(\"2001-02-01\")\n def setUpClass(cls):\n super().setUpClass()\n # Create sample ready to use Budget Control\n cls.budget_control = cls.BudgetControl.create(\n {\n \"name\": \"CostCenter1/%s\" % cls.year,\n \"template_id\": cls.budget_period.template_id.id,\n \"budget_period_id\": cls.budget_period.id,\n \"analytic_account_id\": cls.costcenter1.id,\n \"plan_date_range_type_id\": cls.date_range_type.id,\n \"template_line_ids\": [\n cls.template_line1.id,\n cls.template_line2.id,\n cls.template_line3.id,\n ],\n }\n )\n # Test item created for 3 kpi x 4 quarters = 12 budget items\n cls.budget_control.prepare_budget_control_matrix()\n assert len(cls.budget_control.line_ids) == 12\n # Assign budget.control amount: KPI1 = 100, KPI2=200, Total=300\n cls.budget_control.line_ids.filtered(lambda x: x.kpi_id == cls.kpi1)[:1].write(\n {\"amount\": 100}\n )\n cls.budget_control.line_ids.filtered(lambda x: x.kpi_id == cls.kpi2)[:1].write(\n {\"amount\": 200}\n )\n cls.budget_control.flush() # Need to flush data into table, so it can be sql\n cls.budget_control.allocated_amount = 300\n cls.budget_control.action_done()\n # Purchase method\n cls.product1.product_tmpl_id.purchase_method = \"purchase\"\n cls.product2.product_tmpl_id.purchase_method = \"purchase\"\n\n @freeze_time(\"2001-02-01\")\n def _create_purchase(self, po_lines):\n Purchase = self.env[\"purchase.order\"]\n view_id = \"purchase.purchase_order_form\"\n with Form(Purchase, view=view_id) as po:\n po.partner_id = self.vendor\n po.date_order = datetime.today()\n for po_line in po_lines:\n with po.order_line.new() as line:\n line.product_id = po_line[\"product_id\"]\n line.product_qty = po_line[\"product_qty\"]\n line.price_unit = po_line[\"price_unit\"]\n line.account_analytic_id = po_line[\"analytic_id\"]\n purchase = po.save()\n return purchase\n\n @freeze_time(\"2001-02-01\")\n def test_01_budget_purchase(self):\n \"\"\"\n On Purchase Order\n (1) Test case, no budget check -> OK\n (2) Check Budget with analytic_kpi -> Error amount exceed on kpi1\n (3) Check Budget with analytic -> OK\n 
(2) Check Budget with analytic -> Error amount exceed\n \"\"\"\n # KPI1 = 100, KPI2 = 200, Total = 300\n self.assertEqual(300, self.budget_control.amount_budget)\n # Prepare PO\n purchase = self._create_purchase(\n [\n {\n \"product_id\": self.product1, # KPI1 = 101 -> error\n \"product_qty\": 1,\n \"price_unit\": 101,\n \"analytic_id\": self.costcenter1,\n },\n {\n \"product_id\": self.product2, # KPI2 = 198\n \"product_qty\": 2,\n \"price_unit\": 99,\n \"analytic_id\": self.costcenter1,\n },\n ]\n )\n\n # (1) No budget check first\n self.budget_period.control_budget = False\n self.budget_period.control_level = \"analytic_kpi\"\n # force date commit, as freeze_time not work for write_date\n purchase = purchase.with_context(force_date_commit=purchase.date_order)\n purchase.button_confirm() # No budget check no error\n # (2) Check Budget with analytic_kpi -> Error\n purchase.button_cancel()\n purchase.button_draft()\n self.budget_period.control_budget = True # Set to check budget\n # kpi 1 (kpi1) & CostCenter1, will result in $ -1.00\n with self.assertRaises(UserError):\n purchase.button_confirm()\n # (3) Check Budget with analytic -> OK\n purchase.button_cancel()\n purchase.button_draft()\n self.budget_period.control_level = \"analytic\"\n purchase.button_confirm()\n self.assertEqual(self.budget_control.amount_balance, 1)\n purchase.button_cancel()\n self.assertEqual(self.budget_control.amount_balance, 300)\n # (4) Amount exceed -> Error\n purchase.order_line[1].price_unit = 100\n purchase.button_draft()\n # CostCenter1, will result in $ -1.00\n with self.assertRaises(UserError):\n purchase.button_confirm()\n\n @freeze_time(\"2001-02-01\")\n def test_02_budget_purchase_to_invoice(self):\n \"\"\"Purchase to Invoice, commit and uncommit\"\"\"\n # KPI1 = 100, KPI2 = 200, Total = 300\n self.assertEqual(300, self.budget_control.amount_budget)\n # Prepare PO on kpi1 with qty 3 and unit_price 10\n purchase = self._create_purchase(\n [\n {\n \"product_id\": self.product1, # KPI1 = 30\n \"product_qty\": 3,\n \"price_unit\": 10,\n \"analytic_id\": self.costcenter1,\n },\n ]\n )\n self.budget_period.control_budget = True\n self.budget_period.control_level = \"analytic\"\n purchase = purchase.with_context(force_date_commit=purchase.date_order)\n purchase.button_confirm()\n # PO Commit = 30, INV Actual = 0, Balance = 270\n self.assertEqual(self.budget_control.amount_commit, 30)\n self.assertEqual(self.budget_control.amount_actual, 0)\n self.assertEqual(self.budget_control.amount_balance, 270)\n # Create and post invoice\n purchase.action_create_invoice()\n self.assertEqual(purchase.invoice_status, \"invoiced\")\n invoice = purchase.invoice_ids[:1]\n # Change qty to 1\n invoice.with_context(check_move_validity=False).invoice_line_ids[0].quantity = 1\n invoice.with_context(check_move_validity=False)._onchange_invoice_line_ids()\n invoice.invoice_date = invoice.date\n invoice.action_post()\n # PO Commit = 20, INV Actual = 10, Balance = 270\n self.budget_control.invalidate_cache()\n self.assertEqual(self.budget_control.amount_commit, 20)\n self.assertEqual(self.budget_control.amount_actual, 10)\n self.assertEqual(self.budget_control.amount_balance, 270)\n # # Cancel invoice\n invoice.button_cancel()\n self.budget_control.invalidate_cache()\n self.assertEqual(self.budget_control.amount_commit, 30)\n self.assertEqual(self.budget_control.amount_actual, 0)\n self.assertEqual(self.budget_control.amount_balance, 270)\n\n @freeze_time(\"2001-02-01\")\n def test_03_budget_recompute_and_close_budget_move(self):\n 
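        # Pattern note for these tests: amount_commit tracks the open PO
        # commitment and amount_actual the posted vendor bill, so each state
        # change is asserted on both; flush() / invalidate_cache() are called
        # first so the SQL-computed budget fields are re-read before asserting.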
\"\"\"Purchase to Invoice (partial)\n - Test recompute on both Purchase and Invoice\n - Test close on both Purchase and Invoice\"\"\"\n # Prepare PO on kpi1 with qty 3 and unit_price 10\n purchase = self._create_purchase(\n [\n {\n \"product_id\": self.product1, # KPI1 = 30\n \"product_qty\": 2,\n \"price_unit\": 15,\n \"analytic_id\": self.costcenter1,\n },\n {\n \"product_id\": self.product2, # KPI2 = 40\n \"product_qty\": 4,\n \"price_unit\": 10,\n \"analytic_id\": self.costcenter1,\n },\n ]\n )\n self.budget_period.control_budget = True\n self.budget_period.control_level = \"analytic\"\n purchase = purchase.with_context(force_date_commit=purchase.date_order)\n purchase.button_confirm()\n # PO Commit = 70, INV Actual = 0\n self.assertEqual(self.budget_control.amount_purchase, 70)\n self.assertEqual(self.budget_control.amount_actual, 0)\n # Create and post invoice\n purchase.action_create_invoice()\n self.assertEqual(purchase.invoice_status, \"invoiced\")\n invoice = purchase.invoice_ids[:1]\n # Change qty to 1 and 3\n invoice = invoice.with_context(check_move_validity=False)\n invoice.invoice_line_ids[0].quantity = 1\n invoice.invoice_line_ids[1].quantity = 3\n invoice._onchange_invoice_line_ids()\n invoice.invoice_date = invoice.date\n invoice.action_post()\n # PO Commit = 25, INV Actual = 45\n self.budget_control.flush()\n self.assertEqual(self.budget_control.amount_purchase, 25)\n self.assertEqual(self.budget_control.amount_actual, 45)\n # Test recompute, must be same\n purchase.recompute_budget_move()\n self.budget_control.flush()\n self.assertEqual(self.budget_control.amount_purchase, 25)\n self.assertEqual(self.budget_control.amount_actual, 45)\n invoice.recompute_budget_move()\n self.budget_control.flush()\n self.assertEqual(self.budget_control.amount_actual, 45)\n self.assertEqual(self.budget_control.amount_purchase, 25)\n # Test close budget move\n purchase.close_budget_move()\n self.budget_control.flush()\n self.budget_control.invalidate_cache()\n self.assertEqual(self.budget_control.amount_purchase, 0)\n self.assertEqual(self.budget_control.amount_actual, 45)\n # Test close budget move\n invoice.close_budget_move()\n self.budget_control.flush()\n self.budget_control.invalidate_cache()\n self.assertEqual(self.budget_control.amount_purchase, 0)\n self.assertEqual(self.budget_control.amount_actual, 0)\n","repo_name":"intrepidux/oca-account-budgeting","sub_path":"budget_control_purchase/tests/test_budget_purchase.py","file_name":"test_budget_purchase.py","file_ext":"py","file_size_in_byte":10002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"26104616817","text":"from lzy.api.v2 import op, Lzy\n\n\n@op\ndef a(s: int) -> int:\n print(s)\n return s\n\n\n@op\ndef b(s1: int, s2: int) -> str:\n print(s1, s2)\n return str(s1 + s2)\n\n\ndef run():\n lzy = Lzy()\n lzy.auth(user=\"lzy-internal-user\", key_path=\"/tmp/key\", endpoint=\"localhost:13579\")\n with lzy.workflow(\"test\", interactive=False):\n s1 = a(21)\n s2 = a(21)\n ret = b(s1, s2)\n print(ret)\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"ottergottaott/lzy","sub_path":"pylzy/tests/scenarios/v2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"23409672591","text":"\"\"\"\"Database Connection\"\"\"\nfrom prettyconf import config\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom sqlalchemy.ext.asyncio import create_async_engine, 
async_sessionmaker, AsyncSession\nfrom sqlalchemy.ext.declarative import declarative_base\n\npostgres_url = config(\"BANCO_AUTH\", default=None)\n\nBase = declarative_base()\n\nasync_engine = create_async_engine(postgres_url, future=True)\n\nsession_local = async_sessionmaker(\n async_engine,\n class_=AsyncSession,\n expire_on_commit=False\n)\n\n\nasync def get_connection():\n async with session_local() as session:\n try:\n yield session\n finally:\n await session.close()\n\n\nasync def database_commit(session, model) -> None:\n \"\"\"Generalized commit for used in the system.\"\"\"\n try:\n session.add(model)\n await session.commit()\n await session.refresh(model)\n except SQLAlchemyError as err:\n print(err)\n await session.rollback()\n","repo_name":"m4n1nh0/FastApi-Intro","sub_path":"database/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"22556791954","text":"import json\nimport os\n\nfrom json_friends import check_exists_file_and_deleted\nfrom secret import MY_ID\n\n\ndef forming_nodes_into_file():\n path_file = \"csv_files/nodes.csv\"\n check_exists_file_and_deleted(path_file)\n\n users_files = os.listdir(\"users\")\n with open(path_file, \"w\") as nodes_file:\n nodes_file.write(\"id, label, sex\\n\")\n nodes_file.write(f\"{MY_ID}, ME, 2\\n\")\n for user_file in users_files:\n with open(f\"users/{user_file}\", \"r\") as data_file:\n user = json.load(data_file)\n try:\n line = f\"{user['id']}, {user['first_name']} {user['last_name']}, {user['sex']}\\n\"\n nodes_file.write(line)\n except ValueError:\n print(line)\n continue\n\n\nif __name__ == \"__main__\":\n forming_nodes_into_file()\n","repo_name":"zaaleksey/vk-friends","sub_path":"nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"24904583607","text":"def month(monthly_factors, AADT_Exit, AADT_Waldron, AADT_I24):\n user_input = input('Enter the date (in form of MM/DD/YYYY): ')\n month = int(user_input[0:2])\n factor = monthly_factors[month-1]\n exit_monthly_volume = int(factor * AADT_Exit)\n i24_monthly_volume = int(factor * AADT_I24)\n waldron_monthly_volume = int(factor * AADT_Waldron)\n volumes = [exit_monthly_volume, waldron_monthly_volume, i24_monthly_volume]\n return volumes\n \ndef time(monthly_volume):\n average_day = [1.04,1.03,1.02,1.01,1.00,1.00,1.15,1.32,1.24,1.14,1.12,1.14,1.17,1.17,1.20,1.27,1.27,1.25,1.23,1.21,1.10,1.08,1.08,1.05]\n sunday = [1.07, 1.05, 1.04,1.02,1.00,1.00,1.00,1.00,1.00,1.03,1.07,1.10,1.12,1.12,1.11,1.11,1.10,1.09,1.08,1.08,1.07,1.05,1.05,1.03]\n monday = [1.03,1.02,1.02,1.01,1.00,1.00,1.19,1.41,1.30,1.14,1.11,1.13,1.15,1.14,1.18,1.28,1.28,1.26,1.24,1.22,1.08,1.06,1.06,1.04]\n tuesday = [1.02,1.02,1.01,1.01,1.00,1.01,1.24,1.50,1.39,1.21,1.14,1.15,1.17,1.17,1.21,1.34,1.34,1.32,1.29,1.26,1.10,1.08,1.08,1.04]\n wednesday = [1.03,1.02,1.02,1.01,1.00,1.01,1.24,1.49,1.39,1.21,1.14,1.15,1.18,1.17,1.22,1.34,1.33,1.31,1.29,1.26,1.10,1.08,1.06,1.04]\n thursday = [1.03,1.03,1.02,1.01,1.00,1.00,1.23,1.47,1.36,1.19,1.14,1.16,1.19,1.18,1.22,1.35,1.34,1.32,1.29,1.27,1.10,1.09,1.08,1.05]\n friday = [1.04,1.03,1.02,1.01,1.00,1.00,1.18,1.35,1.24,1.16,1.16,1.19,1.22,1.23,1.30,1.35,1.34,1.32,1.29,1.27,1.13,1.12,1.11,1.09]\n saturday = [1.06,1.05,1.03,1.02,1.00,1.00,1.00,1.01,1.03,1.07,1.11,1.13,1.16,1.17,1.17,1.16,1.17,1.16,1.15,1.13,1.11,1.11,1.12,1.09] \n 
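    # The ratios computed just below collapse each day-of-week table into a
    # single factor: that day's hourly-factor total divided by the average
    # day's total, so values above 1.0 mark busier-than-average days.
    # Toy check (illustrative numbers only):
    #   sum([1.1, 1.2, 1.1, 1.2]) / sum([1.0, 1.0, 1.0, 1.0])  ->  1.15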
days = [sunday, monday, tuesday, wednesday, thursday, friday, saturday]\n sums = [sum(sunday), sum(monday), sum(tuesday), sum(wednesday), sum(thursday), sum(friday), sum(saturday)]\n daily_factors = [(sum(sunday)/sum(average_day)), (sum(monday)/sum(average_day)), (sum(tuesday)/sum(average_day)), (sum(wednesday)/sum(average_day)), (sum(thursday)/sum(average_day)), (sum(friday)/sum(average_day)), (sum(saturday)/sum(average_day))]\n day = int(input('Input day of the week:\\nWhere 1 = Sunday, 2 = Monday, 3 = Tuesday, 4 = Wednesday, 5 = Thursday, 6 = Friday, 7 = Saturday')) - 1\n day_input = days[day]\n time = int(input('What is the current hour (military time): \\nFormatted such that 2AM = \"2\", 10PM = \"22\"'))\n time_factor = float(day_input[time])\n \n exit_hourly_volume = int(float(monthly_volume[0]) * float(daily_factors[day])/float(sums[day]) * time_factor)\n waldron_hourly_volume = int(float(monthly_volume[1]) * float(daily_factors[day])/float(sums[day]) * time_factor)\n i24_hourly_volume = int(float(monthly_volume[2]) * float(daily_factors[day])/float(sums[day]) * time_factor)\n \n weekly_vol = (int(monthly_volume[0]) * int(average_day[int(time)])) / (sum(average_day))\n hourly_volumes = [exit_hourly_volume, waldron_hourly_volume, i24_hourly_volume]\n return hourly_volumes\n\ndef calculate_feet_clear(hourly_volume):\n i24_volume = int(hourly_volume[2])\n length_veh = 20\n length_semi = 72\n average_length = (length_veh * 0.95) + (length_semi *0.05)\n average_gap = 3\n num_lanes = 4\n feet_hour_queue = int((i24_volume * average_length) + (average_gap * (i24_volume - 1)))/num_lanes\n feet_within_scope = 10040.33\n feet_to_clear = 0\n if feet_hour_queue <= feet_within_scope:\n feet_to_clear += feet_hour_queue\n else:\n feet_to_clear += feet_within_scope\n return feet_to_clear\n \ndef flush_timing(feet_to_clear, hourly_volume):\n ''' Let's assume a few things. \n \n First, the collision has resulted in all four lanes of I-24 WB being\n blocked west of Waldron Road. \n \n Second, the collision results in all vehicles attempting to exit I-24 within the region of interest.\n Specifically, the vehicles stopped before Waldron Rd but after the next exit will attempt to exit. \n \n Since this exit is on the far right, I would assume a large part of the cars in the far left lane do not \n try to get over. We will estimate this percentage as 10%. As for the second-left lane, I estimate this to be 45%.\n For the lane adjacent to the far right lane, I estimate this to be 60%. For the far right lane, I estimate this \n to be 100% (to allow for access to the exit). This comes out to an average of 53.75%.\n \n 1. 
Determine how many feet of stopped vehicles must be cleared; this was\n    computed above (capped at 10040.33 ft) and is passed in as feet_to_clear.\n    \n    '''\n    \n    #data collected on Friday @ 11am hour in November yields the following\n    cycle = 113\n    red = 2\n    yellow = 2.5\n    green_exit_base = 20\n    green_waldron_base = 84\n    hourly_volumes_base = [255, 1073]\n    cycle_volume_exit = 8 #veh/cycle, under normal operating circumstances\n    \n    \n    veh_queue = 4 * feet_to_clear/25.6 #4 accounts for each lane\n    queue_to_clear = int(veh_queue * 0.5375) #veh/hr \n    \n    exit_volume = int(hourly_volume[0]) + queue_to_clear\n    two_approaches = [exit_volume, hourly_volume[1]] #veh/hr\n    \n    #time lost\n    l1 = 2.0 #s/phase\n    e = 2.0 #s/phase\n    l2 = yellow + red - e\n    tli = l1 + l2\n    loss_cycle = tli + tli\n    \n    #Comparing the AADT's for North and South Waldron Road, there is (total=34,065)(SB thru = 0.705)(NB thru = 0.295)\n    south_bound = 0.705 * hourly_volume[1]\n    north_bound = 0.295 * hourly_volume[1]\n    \n    #determine ELT for Waldron Rd, where ELT only applies to north-bound\n    if south_bound <= 1200:\n        ELT = (0.000008 *south_bound * south_bound) + (0.0024 * south_bound) + 1.0048\n        if ELT > 15:\n            ELT = 15\n    elif south_bound > 1200:\n        ELT = 15\n    \n    ERT = 1.18 #no pedestrians, applies only to south-bound\n    \n    #approximate VLT and VRT, where VLT applies to North-Bound Waldron Rd, VRT applies to South-Bound Waldron Rd.\n    #let's approximate that 20% of vehicles passing through this intersection are turning to enter I-24 West-Bound\n    \n    VLTE = (0.20 * north_bound) * ELT\n    VRTE = (0.20 * south_bound) * ERT\n    \n    north_VEQ = VLTE + (0.8 * north_bound)\n    south_VEQ = VRTE + (0.8 * south_bound)\n    \n    north_VEQL = north_VEQ / 2\n    south_VEQL = south_VEQ / 2\n    \n    #Since the exit approach has no opposing traffic, for simplicity, all movements are counted as through\n    exit_VEQ = exit_volume #one lane for most of the approach\n    \n    #critical volume\n    if south_VEQL > north_VEQL:\n        vca = south_VEQL\n    else:\n        vca = north_VEQL\n    vcb = exit_VEQ\n    \n    vc = vca + vcb\n\n    cdes = loss_cycle / (1 - (vc / (1700*0.85)))\n    \n    if cdes < 60: #setting a reasonable minimum cycle length\n        cdes = 60\n    \n    gtot = cdes - loss_cycle\n\n    ga = int(gtot * (vca/vc))\n    gb = int(gtot) - ga\n    \n    plan_list = [ga, gb, gtot, cdes]\n    \n    return plan_list\n    \n\ndef output_plan(plan_list):\n    ga = plan_list[0]\n    gb = plan_list[1]\n    gtot = plan_list[2]\n    cdes = plan_list[3]\n    \n    print('')\n    print('For the given date and time, the following Flush Signal Timing Plan will be implemented until queueing disperses.')\n    print('')\n    print('Total cycle length: {}s'.format(int(cdes)))\n    print('')\n    print('---------- Movement A ----------')\n    print('Waldron Road approach will be given green balls in each direction')\n    print('with permitted left turns north-bound and permitted right turns south-bound')\n    print('to allow for vehicles entering I-24 west-bound.')\n    print('Movement green time: {}s'.format(ga))\n    print('Movement yellow time: 2.5s')\n    print('Movement all-red time: 2.0s')\n    print('')\n    print('---------- Movement B ----------')\n    print('Interstate exit approach will be given the signal to allow for left and right turns onto Waldron Road.')\n    print('Movement green time: {}s'.format(gb))\n    print('Movement yellow time: 2.5s')\n    print('Movement all-red time: 2.0s')\n    print('')\n    print('The total allocated green time for both movements is {}s out of a {}s cycle length.'.format(int(gtot), int(cdes)))\n    \ndef main():\n    # determining variation factors from the inputted time and date\n    monthly_factors = [0.84282, 
0.87252, 0.98391, 1.01361, 1.03218, 1.09901, 1.12129, 1.07673, 1.02475, 1.02847, 0.97649, 0.92822]\n    AADT_Exit = (5716/2) #veh/day in one direction\n    AADT_Waldron = (24006) #veh/day per direction\n    AADT_I24 = (149161/2) #veh/day per direction\n    \n    monthly_volume = month(monthly_factors, AADT_Exit, AADT_Waldron, AADT_I24)\n    hourly_volume = time(monthly_volume)\n    feet_to_clear = calculate_feet_clear(hourly_volume)\n    plan_list = flush_timing(feet_to_clear, hourly_volume)\n    output_plan(plan_list)\n    \n\nif __name__ == '__main__':\n    main()\n","repo_name":"graciegumm/CE4501_Project","sub_path":"flush_plan.py","file_name":"flush_plan.py","file_ext":"py","file_size_in_byte":8691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"12603810721","text":"from operator import itemgetter\nfrom itertools import groupby\nimport string\n\n\ndef clean(text):\n    translator = str.maketrans('', '', string.punctuation)\n    return text.lower().translate(translator)\n\n\ndef mapper(lines):\n    pairs = [(word, 1) for line in lines for word in line.split()]\n    return sorted(pairs, key=itemgetter(0))\n\n\ndef reducer(pairs):\n    result = []\n    current_word, current_count = None, 0\n    for word, count in sorted(pairs, key=itemgetter(0)):\n        if word == current_word:\n            current_count += count\n        else:\n            if current_word is not None:\n                result.append([current_word, current_count])\n            current_word = word\n            current_count = count\n    if current_word is not None:  # flush the final group; safe on empty input\n        result.append([current_word, current_count])\n    return result \n\n\nif __name__ == \"__main__\":\n    lines = \\\n        \"\"\"All that is gold does not glitter,\nNot all those who wander are lost;\nThe old that is strong does not wither,\nDeep roots are not reached by the frost.\nFrom the ashes, a fire shall be woken,\nA light from the shadows shall spring;\nRenewed shall be blade that was broken,\nThe crownless again shall be king.\"\"\".splitlines()\n\n    mapper_output = mapper(lines)\n    print(mapper_output)\n\n    reducer_input = sorted(mapper_output, key=itemgetter(0))\n    reducer_output = reducer(reducer_input)\n    print(reducer_output)\n","repo_name":"saurabhmathur96/map-reduce","sub_path":"worker_node/implementations/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"276030302","text":"import os\r\nprint(os.getcwdb())\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n'''\r\n#don't think i need this anymore but ill keep it just in case\r\n@app.route('/test', methods=['GET','POST'])\r\ndef test():\r\n    if request.method == 'POST':\r\n        if 'file' not in request.files:\r\n            error='no file selected'\r\n            return redirect('index')\r\n        file = request.files['file']\r\n        if file.filename =='':\r\n            error = 'no selected file'\r\n            return redirect('index')\r\n        if file:\r\n            filename = secure_filename(file.filename)\r\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'],filename))\r\n    return render_template('test.html')\r\n    '''    \r\n\r\n\r\n\r\n\r\nx = np.array(['A', 'B', 'V','D'])\r\ny = np.array([3,8,1,10])\r\nplt.barh(x,y)\r\nplt.show()\r\n\r\n\r\n\r\n@app.route('/update_review/<int:review_id>', methods=['GET','POST'])\r\n@login_required\r\ndef update_review(review_id):\r\n    db = get_db()\r\n    review = db.execute(''' SELECT * FROM reviews WHERE review_id=?; ''',(review_id,)).fetchone()['review']\r\n    edit_review = review\r\n    form = EditReview()\r\n    if form.validate_on_submit():\r\n        review = form.edit.data\r\n        return 
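# --- Illustrative aside (not part of the original files) ---
# word_count.py above imports itertools.groupby but never uses it; because the
# reducer input is already sorted by key, the same reduction can be expressed
# with groupby directly:
from itertools import groupby
from operator import itemgetter

def reducer_with_groupby(pairs):
    pairs = sorted(pairs, key=itemgetter(0))
    return [[word, sum(count for _, count in group)]
            for word, group in groupby(pairs, key=itemgetter(0))]

# reducer_with_groupby([("a", 1), ("b", 1), ("a", 1)]) == [['a', 2], ['b', 1]]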
redirect('/')\r\n    return render_template('edit_review.html', form=form, review=review)\r\n\r\n\r\n@app.route('/delete_review/<int:review_id>')\r\n@login_required\r\ndef delete_review(review_id):\r\n    db = get_db()\r\n    db.execute('''DELETE FROM reviews WHERE review_id=? ''',(review_id,))\r\n    db.commit()\r\n    return redirect(url_for('user'))","repo_name":"emmar266/BookShop","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"22241472846","text":"from django.template.loader import get_template\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nimport datetime\nfrom mysite.forms import ContactForm\nfrom django.core.mail import send_mail, get_connection\n\ndef current_datetime(request):\n    now = datetime.datetime.now()\n    # t = get_template('current_datetime.html')\n    # html = t.render({'current_date' : now})\n    # return HttpResponse(html)\n    return render(request, 'current_datetime.html', {'current_date' : now}) #render consolidates the above commented steps\n\ndef hours_ahead(request, offset):\n    try:\n        offset = int(offset)\n    except ValueError:\n        raise Http404()\n    dt = datetime.datetime.now() + datetime.timedelta(hours=offset)\n    #html = \"In %s hour(s), it will be %s.\" % (offset, dt)\n    return render(request, 'hours_ahead.html', {'offset' : offset, 'datetime' : dt})\n\ndef display_meta(request):\n    \"\"\"displays metadata about the user (IP address, browser, etc.)\"\"\"\n    values = request.META\n    html = []\n    for k in sorted(values):\n        html.append('<tr><td>%s</td><td>%s</td></tr>' % (k, values[k]))\n    return HttpResponse('<table>%s</table>
' % '\\n'.join(html))\n\ndef contact(request):\n if request.method == 'POST':\n form = ContactForm(request.POST) # Check to see if request has been submitted; otherwise display blank contact form\n if form.is_valid(): # check to see if form contains valid data\n cd = form.cleaned_data\n con = get_connection('django.core.mail.backends.console.EmailBackend') # used in development - doesn't require an email server\n send_mail(\n cd['subject'],\n cd['message'],\n cd.get('email', 'noreply@example.com'),\n ['siteowner@example.com'],\n connection=con\n )\n return HttpResponseRedirect('/contact/thanks/')\n else:\n form = ContactForm(\n initial={'subject': 'I love your site!'}\n )\n\n return render(request, 'contact_form.html', {'form': form}) # if form doesn't have valid data, reloads the contact form\n\ndef thanks(request):\n return render(request, 'thanks.html')","repo_name":"msmorr/mysite","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3578037582","text":"from typing import Dict, List, Optional, Union\n\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import AutoMLJob as RestAutoMLJob\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import Forecasting as RestForecasting\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import ForecastingPrimaryMetrics, JobBase, TaskType\nfrom azure.ai.ml._utils.utils import camel_to_snake, is_data_binding_expression\nfrom azure.ai.ml.constants import TabularTrainingMode\nfrom azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY\nfrom azure.ai.ml.constants._job.automl import AutoMLConstants\nfrom azure.ai.ml.entities._credentials import _BaseJobIdentityConfiguration\nfrom azure.ai.ml.entities._job._input_output_helpers import from_rest_data_outputs, to_rest_data_outputs\nfrom azure.ai.ml.entities._job.automl.stack_ensemble_settings import StackEnsembleSettings\nfrom azure.ai.ml.entities._job.automl.tabular.automl_tabular import AutoMLTabular\nfrom azure.ai.ml.entities._job.automl.tabular.featurization_settings import TabularFeaturizationSettings\nfrom azure.ai.ml.entities._job.automl.tabular.forecasting_settings import ForecastingSettings\nfrom azure.ai.ml.entities._job.automl.tabular.limit_settings import TabularLimitSettings\nfrom azure.ai.ml.entities._job.automl.training_settings import ForecastingTrainingSettings\nfrom azure.ai.ml.entities._util import load_from_dict\n\n\nclass ForecastingJob(AutoMLTabular):\n \"\"\"\n Configuration for AutoML Forecasting Task.\n\n :param primary_metric: The primary metric to use for model selection.\n :type primary_metric: Optional[str]\n :param forecasting_settings: The settings for the forecasting task.\n :type forecasting_settings:\n Optional[~azure.ai.ml.automl.ForecastingSettings]\n :param kwargs: Job-specific arguments\n :type kwargs: Dict[str, Any]\n \"\"\"\n\n _DEFAULT_PRIMARY_METRIC = ForecastingPrimaryMetrics.NORMALIZED_ROOT_MEAN_SQUARED_ERROR\n\n def __init__(\n self,\n *,\n primary_metric: Optional[str] = None,\n forecasting_settings: Optional[ForecastingSettings] = None,\n **kwargs,\n ) -> None:\n \"\"\"Initialize a new AutoML Forecasting task.\"\"\"\n # Extract any task specific settings\n featurization = kwargs.pop(\"featurization\", None)\n limits = kwargs.pop(\"limits\", None)\n training = kwargs.pop(\"training\", None)\n\n super().__init__(\n task_type=TaskType.FORECASTING,\n featurization=featurization,\n limits=limits,\n 
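# --- Illustrative aside (not part of the original files) ---
# The Django contact view above imports ContactForm from mysite.forms, which
# is not shown in this record; a plausible minimal definition consistent with
# the fields the view reads (subject, message, optional email) could be:
from django import forms

class ContactForm(forms.Form):
    subject = forms.CharField(max_length=100)
    email = forms.EmailField(required=False)  # the view substitutes a default sender
    message = forms.CharField(widget=forms.Textarea)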
training=training,\n **kwargs,\n )\n\n self.primary_metric = primary_metric or ForecastingJob._DEFAULT_PRIMARY_METRIC\n self._forecasting_settings = forecasting_settings\n\n @property\n def primary_metric(self) -> Optional[str]:\n \"\"\"\n Return the primary metric to use for model selection.\n\n :return: The primary metric for model selection.\n :rtype: Optional[str]\n \"\"\"\n return self._primary_metric\n\n @primary_metric.setter\n def primary_metric(self, value: Union[str, ForecastingPrimaryMetrics]) -> None:\n \"\"\"\n Set the primary metric to use for model selection.\n\n :param value: The primary metric for model selection.\n :type: Union[str, ~azure.ai.ml.automl.ForecastingPrimaryMetrics]\n \"\"\"\n if is_data_binding_expression(str(value), [\"parent\"]):\n self._primary_metric = value\n return\n self._primary_metric = (\n ForecastingJob._DEFAULT_PRIMARY_METRIC\n if value is None\n else ForecastingPrimaryMetrics[camel_to_snake(value).upper()]\n )\n\n @property\n def training(self) -> ForecastingTrainingSettings:\n \"\"\"\n Return the forecast training settings.\n\n :return: training settings.\n :rtype: ~azure.ai.ml.automl.ForecastingTrainingSettings\n \"\"\"\n return self._training or ForecastingTrainingSettings()\n\n @property\n def forecasting_settings(self) -> ForecastingSettings:\n \"\"\"\n Return the forecast settings.\n\n :return: forecast settings.\n :rtype: ~azure.ai.ml.automl.ForecastingSettings\n \"\"\"\n return self._forecasting_settings\n\n def set_forecast_settings(\n self,\n *,\n time_column_name: Optional[str] = None,\n forecast_horizon: Optional[Union[str, int]] = None,\n time_series_id_column_names: Optional[Union[str, List[str]]] = None,\n target_lags: Optional[Union[str, int, List[int]]] = None,\n feature_lags: Optional[str] = None,\n target_rolling_window_size: Optional[Union[str, int]] = None,\n country_or_region_for_holidays: Optional[str] = None,\n use_stl: Optional[str] = None,\n seasonality: Optional[Union[str, int]] = None,\n short_series_handling_config: Optional[str] = None,\n frequency: Optional[str] = None,\n target_aggregate_function: Optional[str] = None,\n cv_step_size: Optional[int] = None,\n features_unknown_at_forecast_time: Optional[Union[str, List[str]]] = None,\n ) -> None:\n \"\"\"Manage parameters used by forecasting tasks.\n\n :keyword time_column_name:\n The name of the time column. This parameter is required when forecasting to specify the datetime\n column in the input data used for building the time series and inferring its frequency.\n :paramtype time_column_name: Optional[str]\n :keyword forecast_horizon:\n The desired maximum forecast horizon in units of time-series frequency. The default value is 1.\n\n Units are based on the time interval of your training data, e.g., monthly, weekly that the forecaster\n should predict out. When task type is forecasting, this parameter is required. For more information on\n setting forecasting parameters, see `Auto-train a time-series forecast model `_.\n :type forecast_horizon: Optional[Union[int, str]]\n :keyword time_series_id_column_names:\n The names of columns used to group a time series.\n It can be used to create multiple series. If time series id column names is not defined or\n the identifier columns specified do not identify all the series in the dataset, the time series identifiers\n will be automatically created for your data set.\n :paramtype time_series_id_column_names: Optional[Union[str, List[str]]]\n :keyword target_lags:\n The number of past periods to lag from the target column. 
By default the lags are turned off.\n\n            When forecasting, this parameter represents the number of rows to lag the target values based\n            on the frequency of the data. This is represented as a list or single integer. Lag should be used\n            when the relationship between the independent variables and the dependent variable does not match up or\n            correlate by default. For example, when trying to forecast demand for a product, the demand in any\n            month may depend on the price of specific commodities 3 months prior. In this example, you may want\n            to lag the target (demand) negatively by 3 months so that the model is training on the correct\n            relationship. For more information, see `Auto-train a time-series forecast model\n            `_.\n\n            **Note on auto detection of target lags and rolling window size.\n            Please see the corresponding comments in the rolling window section.**\n            We use the following algorithm to detect the optimal target lag and rolling window size.\n\n            #. Estimate the maximum lag order for the look back feature selection. In our case it is the number of\n               periods until the next date frequency granularity, i.e. if frequency is daily, it will be a week (7),\n               if it is a week, it will be a month (4). Those values multiplied by two are the largest\n               possible values of lags/rolling windows. In our examples, we will consider maximum lag\n               orders of 14 and 8, respectively.\n            #. Create a de-seasonalized series by adding trend and residual components. This will be used\n               in the next step.\n            #. Estimate the PACF - Partial Auto Correlation Function - on the data from (2)\n               and search for points where the auto correlation is significant, i.e. its absolute\n               value is more than 1.96/square_root(maximal lag value), which corresponds to a significance of 95%.\n            #. If all points are significant, we consider it to be strong seasonality\n               and do not create look back features.\n            #. We scan the PACF values from the beginning, and the value before the first insignificant\n               auto correlation will designate the lag. If the first significant element (the value correlated with\n               itself) is followed by an insignificant one, the lag will be 0 and we will not use look back features.\n\n        :type target_lags: Optional[Union[str, int, List[int]]]\n        :keyword feature_lags: Flag for generating lags for the numeric features with 'auto' or None.\n        :paramtype feature_lags: Optional[str]\n        :keyword target_rolling_window_size:\n            The number of past periods used to create a rolling window average of the target column.\n\n            When forecasting, this parameter represents `n` historical periods to use to generate forecasted values,\n            <= training set size. If omitted, `n` is the full training set size. Specify this parameter\n            when you only want to consider a certain amount of history when training the model.\n            If set to 'auto', the rolling window will be estimated as the last\n            value where the PACF is more than the significance threshold. 
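            As an illustration (not the SDK's internal code), the PACF-based
            selection sketched above can be reproduced with statsmodels::

                import numpy as np
                from statsmodels.tsa.stattools import pacf

                series = np.random.default_rng(0).normal(size=400)  # placeholder data
                max_lag = 14                         # e.g. two weeks of daily data
                threshold = 1.96 / np.sqrt(max_lag)  # 95% significance cut-off
                p = pacf(series, nlags=max_lag)

                lag = 0
                for k in range(1, max_lag + 1):      # p[0] is the series with itself
                    if abs(p[k]) <= threshold:
                        break
                    lag = k
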
Please see target_lags section for details.\n :paramtype target_rolling_window_size: Optional[Union[str, int]]\n :keyword country_or_region_for_holidays: The country/region used to generate holiday features.\n These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.\n :paramtype country_or_region_for_holidays: Optional[str]\n :keyword use_stl: Configure STL Decomposition of the time-series target column.\n use_stl can take three values: None (default) - no stl decomposition, 'season' - only generate\n season component and season_trend - generate both season and trend components.\n :type use_stl: Optional[str]\n :keyword seasonality: Set time series seasonality as an integer multiple of the series frequency.\n If seasonality is set to 'auto', it will be inferred.\n If set to None, the time series is assumed non-seasonal which is equivalent to seasonality=1.\n :paramtype seasonality: Optional[Union[int, str]\n :keyword short_series_handling_config:\n The parameter defining how if AutoML should handle short time series.\n\n Possible values: 'auto' (default), 'pad', 'drop' and None.\n\n * **auto** short series will be padded if there are no long series,\n otherwise short series will be dropped.\n * **pad** all the short series will be padded.\n * **drop** all the short series will be dropped\".\n * **None** the short series will not be modified.\n\n If set to 'pad', the table will be padded with the zeroes and\n empty values for the regressors and random values for target with the mean\n equal to target value median for given time series id. If median is more or equal\n to zero, the minimal padded value will be clipped by zero:\n Input:\n\n +------------+---------------+----------+--------+\n | Date | numeric_value | string | target |\n +============+===============+==========+========+\n | 2020-01-01 | 23 | green | 55 |\n +------------+---------------+----------+--------+\n\n Output assuming minimal number of values is four:\n\n +------------+---------------+----------+--------+\n | Date | numeric_value | string | target |\n +============+===============+==========+========+\n | 2019-12-29 | 0 | NA | 55.1 |\n +------------+---------------+----------+--------+\n | 2019-12-30 | 0 | NA | 55.6 |\n +------------+---------------+----------+--------+\n | 2019-12-31 | 0 | NA | 54.5 |\n +------------+---------------+----------+--------+\n | 2020-01-01 | 23 | green | 55 |\n +------------+---------------+----------+--------+\n\n **Note:** We have two parameters short_series_handling_configuration and\n legacy short_series_handling. 
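            An illustrative padding sketch in the spirit of the 'pad' option
            above (pandas, not the AutoML implementation)::

                import numpy as np
                import pandas as pd

                df = pd.DataFrame({"Date": [pd.Timestamp("2020-01-01")],
                                   "numeric_value": [23], "target": [55]})
                n_pad = 4 - len(df)                      # pad to four rows
                pad = pd.DataFrame({
                    "Date": df["Date"].min() - pd.to_timedelta(range(n_pad, 0, -1), unit="D"),
                    "numeric_value": 0,                  # zeros for the regressors
                    "target": np.maximum(                # random around the median,
                        np.random.normal(df["target"].median(), 0.5, n_pad), 0),
                })                                       # clipped at zero
                padded = pd.concat([pad, df], ignore_index=True)
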
When both parameters are set we are\n synchronize them as shown in the table below (short_series_handling_configuration and\n short_series_handling for brevity are marked as handling_configuration and handling\n respectively).\n\n +------------+--------------------------+----------------------+-----------------------------+\n | | handling | | handling | | resulting | | resulting |\n | | | configuration | | handling | | handling |\n | | | | | configuration |\n +============+==========================+======================+=============================+\n | True | auto | True | auto |\n +------------+--------------------------+----------------------+-----------------------------+\n | True | pad | True | auto |\n +------------+--------------------------+----------------------+-----------------------------+\n | True | drop | True | auto |\n +------------+--------------------------+----------------------+-----------------------------+\n | True | None | False | None |\n +------------+--------------------------+----------------------+-----------------------------+\n | False | auto | False | None |\n +------------+--------------------------+----------------------+-----------------------------+\n | False | pad | False | None |\n +------------+--------------------------+----------------------+-----------------------------+\n | False | drop | False | None |\n +------------+--------------------------+----------------------+-----------------------------+\n | False | None | False | None |\n +------------+--------------------------+----------------------+-----------------------------+\n\n :type short_series_handling_config: Optional[str]\n :keyword frequency: Forecast frequency.\n\n When forecasting, this parameter represents the period with which the forecast is desired,\n for example daily, weekly, yearly, etc. The forecast frequency is dataset frequency by default.\n You can optionally set it to greater (but not lesser) than dataset frequency.\n We'll aggregate the data and generate the results at forecast frequency. For example,\n for daily data, you can set the frequency to be daily, weekly or monthly, but not hourly.\n The frequency needs to be a pandas offset alias.\n Please refer to pandas documentation for more information:\n https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects\n :type frequency: Optional[str]\n :keyword target_aggregate_function: The function to be used to aggregate the time series target\n column to conform to a user specified frequency. If the\n target_aggregation_function is set, but the freq parameter\n is not set, the error is raised. The possible target\n aggregation functions are: \"sum\", \"max\", \"min\" and \"mean\".\n * The target column values are aggregated based on the specified operation.\n Typically, sum is appropriate for most scenarios.\n * Numerical predictor columns in your data are aggregated by sum, mean, minimum value,\n and maximum value. 
As a result, automated ML generates new columns suffixed with the\n aggregation function name and applies the selected aggregate operation.\n * For categorical predictor columns, the data is aggregated by mode,\n the most prominent category in the window.\n * Date predictor columns are aggregated by minimum value, maximum value and mode.\n\n +----------------+-------------------------------+--------------------------------------+\n | | freq | | target_aggregation_function | | Data regularity |\n | | | | fixing mechanism |\n +================+===============================+======================================+\n | None (Default) | None (Default) | | The aggregation |\n | | | | is not applied. |\n | | | | If the valid |\n | | | | frequency can |\n | | | | not be |\n | | | | determined |\n | | | | the error |\n | | | | will be raised. |\n +----------------+-------------------------------+--------------------------------------+\n | Some Value | None (Default) | | The aggregation |\n | | | | is not applied. |\n | | | | If the number |\n | | | | of data points |\n | | | | compliant to |\n | | | | given frequency |\n | | | | grid is |\n | | | | less then 90% |\n | | | | these points |\n | | | | will be |\n | | | | removed, |\n | | | | otherwise |\n | | | | the error will |\n | | | | be raised. |\n +----------------+-------------------------------+--------------------------------------+\n | None (Default) | Aggregation function | | The error about |\n | | | | missing |\n | | | | frequency |\n | | | | parameter is |\n | | | | raised. |\n +----------------+-------------------------------+--------------------------------------+\n | Some Value | Aggregation function | | Aggregate to |\n | | | | frequency using |\n | | | | provided |\n | | | | aggregation |\n | | | | function. |\n +----------------+-------------------------------+--------------------------------------+\n\n :type target_aggregate_function: Optional[str]\n :keyword cv_step_size:\n Number of periods between the origin_time of one CV fold and the next fold. For\n example, if `n_step` = 3 for daily data, the origin time for each fold will be\n three days apart.\n :paramtype cv_step_size: Optional[int]\n :keyword features_unknown_at_forecast_time:\n The feature columns that are available for training but unknown at the time of forecast/inference.\n If features_unknown_at_forecast_time is set to an empty list, it is assumed that\n all the feature columns in the dataset are known at inference time. 
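            As an illustration of the frequency aggregation described above
            (a pandas sketch, not the AutoML implementation)::

                import numpy as np
                import pandas as pd

                idx = pd.date_range("2020-01-01", periods=14, freq="D")
                df = pd.DataFrame({"target": np.arange(14),
                                   "price": np.linspace(1, 2, 14)}, index=idx)
                weekly = df.resample("W").agg(
                    {"target": "sum",                   # the chosen aggregate function
                     "price": ["mean", "min", "max"]})  # numeric predictor columns
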
If this parameter is not set\n the support for future features is not enabled.\n :paramtype features_unknown_at_forecast_time: Optional[Union[str, List[str]]]\n \"\"\"\n self._forecasting_settings = self._forecasting_settings or ForecastingSettings()\n\n self._forecasting_settings.country_or_region_for_holidays = (\n country_or_region_for_holidays\n if country_or_region_for_holidays is not None\n else self._forecasting_settings.country_or_region_for_holidays\n )\n self._forecasting_settings.cv_step_size = (\n cv_step_size if cv_step_size is not None else self._forecasting_settings.cv_step_size\n )\n self._forecasting_settings.forecast_horizon = (\n forecast_horizon if forecast_horizon is not None else self._forecasting_settings.forecast_horizon\n )\n self._forecasting_settings.target_lags = (\n target_lags if target_lags is not None else self._forecasting_settings.target_lags\n )\n self._forecasting_settings.target_rolling_window_size = (\n target_rolling_window_size\n if target_rolling_window_size is not None\n else self._forecasting_settings.target_rolling_window_size\n )\n self._forecasting_settings.frequency = (\n frequency if frequency is not None else self._forecasting_settings.frequency\n )\n self._forecasting_settings.feature_lags = (\n feature_lags if feature_lags is not None else self._forecasting_settings.feature_lags\n )\n self._forecasting_settings.seasonality = (\n seasonality if seasonality is not None else self._forecasting_settings.seasonality\n )\n self._forecasting_settings.use_stl = use_stl if use_stl is not None else self._forecasting_settings.use_stl\n self._forecasting_settings.short_series_handling_config = (\n short_series_handling_config\n if short_series_handling_config is not None\n else self._forecasting_settings.short_series_handling_config\n )\n self._forecasting_settings.target_aggregate_function = (\n target_aggregate_function\n if target_aggregate_function is not None\n else self._forecasting_settings.target_aggregate_function\n )\n self._forecasting_settings.time_column_name = (\n time_column_name if time_column_name is not None else self._forecasting_settings.time_column_name\n )\n self._forecasting_settings.time_series_id_column_names = (\n time_series_id_column_names\n if time_series_id_column_names is not None\n else self._forecasting_settings.time_series_id_column_names\n )\n self._forecasting_settings.features_unknown_at_forecast_time = (\n features_unknown_at_forecast_time\n if features_unknown_at_forecast_time is not None\n else self._forecasting_settings.features_unknown_at_forecast_time\n )\n\n # override\n def set_training(\n self,\n *,\n enable_onnx_compatible_models: Optional[bool] = None,\n enable_dnn_training: Optional[bool] = None,\n enable_model_explainability: Optional[bool] = None,\n enable_stack_ensemble: Optional[bool] = None,\n enable_vote_ensemble: Optional[bool] = None,\n stack_ensemble_settings: Optional[StackEnsembleSettings] = None,\n ensemble_model_download_timeout: Optional[int] = None,\n allowed_training_algorithms: Optional[List[str]] = None,\n blocked_training_algorithms: Optional[List[str]] = None,\n training_mode: Optional[Union[str, TabularTrainingMode]] = None,\n ) -> None:\n \"\"\"\n The method to configure forecast training related settings.\n\n :keyword enable_onnx_compatible_models:\n Whether to enable or disable enforcing the ONNX-compatible models.\n The default is False. 
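            For orientation, typical client code constructs the job with the
            ``automl.forecasting`` factory and then calls these setters; the
            path and column names below are placeholders::

                from azure.ai.ml import automl, Input

                job = automl.forecasting(
                    training_data=Input(type="mltable", path="./train_data"),
                    target_column_name="sales",
                    primary_metric="normalized_root_mean_squared_error",
                    n_cross_validations=5,
                )
                job.set_forecast_settings(time_column_name="date",
                                          forecast_horizon=14, frequency="D")
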
For more information about Open Neural Network Exchange (ONNX) and Azure Machine\n Learning, see this `article `__.\n :type enable_onnx_compatible: Optional[bool]\n :keyword enable_dnn_training:\n Whether to include DNN based models during model selection.\n However, the default is True for DNN NLP tasks, and it's False for all other AutoML tasks.\n :paramtype enable_dnn_training: Optional[bool]\n :keyword enable_model_explainability:\n Whether to enable explaining the best AutoML model at the end of all AutoML training iterations.\n For more information, see `Interpretability: model explanations in automated machine learning\n `__.\n , defaults to None\n :type enable_model_explainability: Optional[bool]\n :keyword enable_stack_ensemble:\n Whether to enable/disable StackEnsemble iteration.\n If `enable_onnx_compatible_models` flag is being set, then StackEnsemble iteration will be disabled.\n Similarly, for Timeseries tasks, StackEnsemble iteration will be disabled by default, to avoid risks of\n overfitting due to small training set used in fitting the meta learner.\n For more information about ensembles, see `Ensemble configuration\n `__\n , defaults to None\n :type enable_stack_ensemble: Optional[bool]\n :keyword enable_vote_ensemble:\n Whether to enable/disable VotingEnsemble iteration.\n For more information about ensembles, see `Ensemble configuration\n `__\n , defaults to None\n :type enable_vote_ensemble: Optional[bool]\n :keyword stack_ensemble_settings:\n Settings for StackEnsemble iteration, defaults to None\n :paramtype stack_ensemble_settings: Optional[StackEnsembleSettings]\n :keyword ensemble_model_download_timeout:\n During VotingEnsemble and StackEnsemble model generation,\n multiple fitted models from the previous child runs are downloaded. Configure this parameter with a\n higher value than 300 secs, if more time is needed, defaults to None\n :paramtype ensemble_model_download_timeout: Optional[int]\n :keyword allowed_training_algorithms:\n A list of model names to search for an experiment. If not specified,\n then all models supported for the task are used minus any specified in ``blocked_training_algorithms``\n or deprecated TensorFlow models, defaults to None\n :paramtype allowed_training_algorithms: Optional[List[str]]\n :keyword blocked_training_algorithms:\n A list of algorithms to ignore for an experiment, defaults to None\n :paramtype blocked_training_algorithms: Optional[List[str]]\n :keyword training_mode:\n [Experimental] The training mode to use.\n The possible values are-\n\n * distributed- enables distributed training for supported algorithms.\n\n * non_distributed- disables distributed training.\n\n * auto- Currently, it is same as non_distributed. 
In future, this might change.\n\n Note: This parameter is in public preview and may change in future.\n :type training_mode: Optional[Union[~azure.ai.ml.constants.TabularTrainingMode, str]]\n \"\"\"\n super().set_training(\n enable_onnx_compatible_models=enable_onnx_compatible_models,\n enable_dnn_training=enable_dnn_training,\n enable_model_explainability=enable_model_explainability,\n enable_stack_ensemble=enable_stack_ensemble,\n enable_vote_ensemble=enable_vote_ensemble,\n stack_ensemble_settings=stack_ensemble_settings,\n ensemble_model_download_timeout=ensemble_model_download_timeout,\n allowed_training_algorithms=allowed_training_algorithms,\n blocked_training_algorithms=blocked_training_algorithms,\n training_mode=training_mode,\n )\n\n # Disable stack ensemble by default, since it is currently not supported for forecasting tasks\n if enable_stack_ensemble is None:\n self._training.enable_stack_ensemble = False\n\n def _to_rest_object(self) -> JobBase:\n forecasting_task = RestForecasting(\n target_column_name=self.target_column_name,\n training_data=self.training_data,\n validation_data=self.validation_data,\n validation_data_size=self.validation_data_size,\n weight_column_name=self.weight_column_name,\n cv_split_column_names=self.cv_split_column_names,\n n_cross_validations=self.n_cross_validations,\n test_data=self.test_data,\n test_data_size=self.test_data_size,\n featurization_settings=self._featurization._to_rest_object() if self._featurization else None,\n limit_settings=self._limits._to_rest_object() if self._limits else None,\n training_settings=self._training._to_rest_object() if self._training else None,\n primary_metric=self.primary_metric,\n log_verbosity=self.log_verbosity,\n forecasting_settings=self._forecasting_settings._to_rest_object(),\n )\n\n self._resolve_data_inputs(forecasting_task)\n self._validation_data_to_rest(forecasting_task)\n\n properties = RestAutoMLJob(\n display_name=self.display_name,\n description=self.description,\n experiment_name=self.experiment_name,\n tags=self.tags,\n compute_id=self.compute,\n properties=self.properties,\n environment_id=self.environment_id,\n environment_variables=self.environment_variables,\n services=self.services,\n outputs=to_rest_data_outputs(self.outputs),\n resources=self.resources,\n task_details=forecasting_task,\n identity=self.identity._to_job_rest_object() if self.identity else None,\n queue_settings=self.queue_settings,\n )\n\n result = JobBase(properties=properties)\n result.name = self.name\n return result\n\n @classmethod\n def _from_rest_object(cls, obj: JobBase) -> \"ForecastingJob\":\n properties: RestAutoMLJob = obj.properties\n task_details: RestForecasting = properties.task_details\n\n job_args_dict = {\n \"id\": obj.id,\n \"name\": obj.name,\n \"description\": properties.description,\n \"tags\": properties.tags,\n \"properties\": properties.properties,\n \"experiment_name\": properties.experiment_name,\n \"services\": properties.services,\n \"status\": properties.status,\n \"creation_context\": obj.system_data,\n \"display_name\": properties.display_name,\n \"compute\": properties.compute_id,\n \"outputs\": from_rest_data_outputs(properties.outputs),\n \"resources\": properties.resources,\n \"identity\": _BaseJobIdentityConfiguration._from_rest_object(properties.identity)\n if properties.identity\n else None,\n \"queue_settings\": properties.queue_settings,\n }\n\n forecasting_job = cls(\n target_column_name=task_details.target_column_name,\n training_data=task_details.training_data,\n 
validation_data=task_details.validation_data,\n validation_data_size=task_details.validation_data_size,\n weight_column_name=task_details.weight_column_name,\n cv_split_column_names=task_details.cv_split_column_names,\n n_cross_validations=task_details.n_cross_validations,\n test_data=task_details.test_data,\n test_data_size=task_details.test_data_size,\n featurization=TabularFeaturizationSettings._from_rest_object(task_details.featurization_settings)\n if task_details.featurization_settings\n else None,\n limits=TabularLimitSettings._from_rest_object(task_details.limit_settings)\n if task_details.limit_settings\n else None,\n training=ForecastingTrainingSettings._from_rest_object(task_details.training_settings)\n if task_details.training_settings\n else None,\n primary_metric=task_details.primary_metric,\n forecasting_settings=ForecastingSettings._from_rest_object(task_details.forecasting_settings)\n if task_details.forecasting_settings\n else None,\n log_verbosity=task_details.log_verbosity,\n **job_args_dict,\n )\n\n forecasting_job._restore_data_inputs()\n forecasting_job._validation_data_from_rest()\n\n return forecasting_job\n\n @classmethod\n def _load_from_dict(\n cls,\n data: Dict,\n context: Dict,\n additional_message: str,\n **kwargs,\n ) -> \"ForecastingJob\":\n from azure.ai.ml._schema.automl.table_vertical.forecasting import AutoMLForecastingSchema\n from azure.ai.ml._schema.pipeline.automl_node import AutoMLForecastingNodeSchema\n\n if kwargs.pop(\"inside_pipeline\", False):\n loaded_data = load_from_dict(AutoMLForecastingNodeSchema, data, context, additional_message, **kwargs)\n else:\n loaded_data = load_from_dict(AutoMLForecastingSchema, data, context, additional_message, **kwargs)\n job_instance = cls._create_instance_from_schema_dict(loaded_data)\n return job_instance\n\n @classmethod\n def _create_instance_from_schema_dict(cls, loaded_data: Dict) -> \"ForecastingJob\":\n loaded_data.pop(AutoMLConstants.TASK_TYPE_YAML, None)\n data_settings = {\n \"training_data\": loaded_data.pop(\"training_data\"),\n \"target_column_name\": loaded_data.pop(\"target_column_name\"),\n \"weight_column_name\": loaded_data.pop(\"weight_column_name\", None),\n \"validation_data\": loaded_data.pop(\"validation_data\", None),\n \"validation_data_size\": loaded_data.pop(\"validation_data_size\", None),\n \"cv_split_column_names\": loaded_data.pop(\"cv_split_column_names\", None),\n \"n_cross_validations\": loaded_data.pop(\"n_cross_validations\", None),\n \"test_data\": loaded_data.pop(\"test_data\", None),\n \"test_data_size\": loaded_data.pop(\"test_data_size\", None),\n }\n job = ForecastingJob(**loaded_data)\n job.set_data(**data_settings)\n return job\n\n def _to_dict(self, inside_pipeline=False) -> Dict: # pylint: disable=arguments-differ\n from azure.ai.ml._schema.automl.table_vertical.forecasting import AutoMLForecastingSchema\n from azure.ai.ml._schema.pipeline.automl_node import AutoMLForecastingNodeSchema\n\n if inside_pipeline:\n schema_dict = AutoMLForecastingNodeSchema(context={BASE_PATH_CONTEXT_KEY: \"./\"}).dump(self)\n else:\n schema_dict = AutoMLForecastingSchema(context={BASE_PATH_CONTEXT_KEY: \"./\"}).dump(self)\n return schema_dict\n\n def __eq__(self, other):\n if not isinstance(other, ForecastingJob):\n return NotImplemented\n\n if not super(ForecastingJob, self).__eq__(other):\n return False\n\n return self.primary_metric == other.primary_metric and self._forecasting_settings == other._forecasting_settings\n\n def __ne__(self, other):\n return not 
self.__eq__(other)\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/tabular/forecasting_job.py","file_name":"forecasting_job.py","file_ext":"py","file_size_in_byte":38375,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"25050083079","text":"\"\"\"\nname = input(\"Enter your name: \")\nprint(name)\nprint(type(name))\n\nnum = input (\"Enter any number: \")\nprint(num)\nprint(type(num))\n\n#explicitly to int\nnum1 = int(input(\"Enter any number: \"))\nprint(num1)\nprint(type(num1))\n\n\"\"\"\n# Python program to take integer input in Python\n \n# input size of the list\nn = int(input(\"Enter the size of list : \"))\n# store integers in a list using map, split and strip functions\nlst = list(map(int, input(\n \"Enter the integer elements of list(Space-Separated): \").strip().split()))[:n]\nprint('The list is:', lst) # printing the list\n","repo_name":"MrCodeX007/PythonPracticing","sub_path":"PythonPractices/InputFunctions.py","file_name":"InputFunctions.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26376567682","text":"import math\nfrom omegaconf import II\n\nimport torch.nn.functional as F\nfrom fairseq import metrics\nfrom fairseq.criterions import FairseqCriterion, register_criterion\nfrom fairseq.dataclass import FairseqDataclass\nfrom sklearn.metrics import accuracy_score\n#from sklearn.metrics import classification_report\nclass CrossEntropyCriterionConfig(FairseqDataclass):\n sentence_avg: bool = II(\"params.optimization.sentence_avg\")\n\n\n@register_criterion(\"seq_cls_cross_entropy\", dataclass=CrossEntropyCriterionConfig)\nclass SeqClsCrossEntropyCriterion(FairseqCriterion):\n @classmethod\n def add_args(cls, parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n FairseqCriterion.add_args(parser)\n parser.add_argument('--sentence_avg', default = True,action='store_true',\n help='image feature dimension')\n\n def __init__(self, task,sentence_avg):\n super().__init__(task)\n self.sentence_avg = sentence_avg\n def forward(self, model, sample, reduce=True):\n\n net_output = model(**sample[\"net_input\"])\n acc = self.compute_acc(model,net_output,sample)\n loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)\n sample_size = sample[\"nsentences\"] #batch_size\n logging_output = {\n \"loss\": loss.data,\n \"sample_size\": sample_size,\n \"acc\":acc\n }\n #print(classification_report(model.get_targets(sample, net_output).view(-1).to(\"cpu\"), net_output.argmax(-1).to(\"cpu\")))\n return loss, sample_size, logging_output\n\n def compute_loss(self, model, net_output, sample, reduce=True):\n ground_truth_list= model.get_targets(sample, net_output)\n lprobs = net_output.transpose(0,1)\n loss = 0\n #print(lprobs.size())\n for index, target in enumerate(ground_truth_list):\n #print(len(target))\n loss += F.nll_loss(\n lprobs[index][:len(target)],\n target.long(),\n reduction=\"sum\" if reduce else \"none\",\n ignore_index=100\n )\n return loss, loss\n def compute_acc(self,model,net_output,sample):\n ground_truth_list = model.get_targets(sample, net_output)\n lprobs = net_output.transpose(0, 1).argmax(-1)\n labels,prediction = [],[]\n for index,each in enumerate(ground_truth_list):\n labels+=each.tolist()\n prediction+=lprobs[index][:len(each)].tolist()\n acc = accuracy_score(labels, prediction)\n print(acc)\n return acc\n\n\n 
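    # --- Illustrative aside (not part of the original criterion) ---
    # compute_loss above hands `net_output` to F.nll_loss, which expects
    # log-probabilities; raw logits must pass through log_softmax first.
    # A minimal standalone sketch of that contract:
    #
    #     import torch
    #     import torch.nn.functional as F
    #
    #     logits = torch.randn(4, 3)             # (batch, classes) raw scores
    #     targets = torch.tensor([0, 2, 1, 2])
    #     lprobs = F.log_softmax(logits, dim=-1)
    #     loss = F.nll_loss(lprobs, targets, reduction="sum")
    #     # identical to F.cross_entropy(logits, targets, reduction="sum")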
@staticmethod\n    def reduce_metrics(logging_outputs) -> None:\n        \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n        loss_sum = sum(log.get(\"loss\", 0) for log in logging_outputs)\n        sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n        acc = sum(log.get(\"acc\", 0) for log in logging_outputs)\n        #print(\"acc\",acc)\n        #print(\"sample_size\",sample_size)\n        metrics.log_scalar(\n            \"loss\", loss_sum / sample_size / math.log(2), sample_size, round=3\n        )\n        metrics.log_scalar(\n            \"acc\", acc, sample_size, round=3\n        )\n        metrics.log_scalar(\n            \"batch_size\", sample_size, sample_size, round=3\n        )\n\n    @staticmethod\n    def logging_outputs_can_be_summed() -> bool:\n        \"\"\"\n        Whether the logging outputs returned by `forward` can be summed\n        across workers prior to calling `reduce_metrics`. Setting this\n        to True will improve distributed training speed.\n        \"\"\"\n        return True\n","repo_name":"araloak/MM-Pre-train","sub_path":"criterions/seq_cls_cross_entropy.py","file_name":"seq_cls_cross_entropy.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"342578342","text":"import random\nimport sys\n\nsys.path.append('../AStarAlgorithm')\n\nfrom AStarSimplePath import AStarSimplePath as assp\n\nclass Collision:\n\n    @staticmethod\n    def detect_collision(pos_players, players_path, players_step):\n        \"\"\"Detect collision between the players\n\n        Parameters\n        ----------\n        pos_players : *args\n            coordinates of the players\n        players_path : *args\n            paths of the players\n        players_step : *args\n            steps of the players\n\n        Returns\n        -------\n        int or bool\n            index of the first colliding player, or False if there is no\n            collision (player index 0 is falsy, so compare against False)\n        \"\"\"\n        for i in range(len(players_path)):\n\n            # information of player i\n            path_i = players_path[i]\n            step_player_i = players_step[i]\n            pos_player_i = pos_players[i]\n            next_pos_player_i = path_i[step_player_i]\n\n            for j in range(len(players_path)):\n\n                # same player\n                if i == j: continue\n\n                # information of player j\n                path_j = players_path[j]\n                step_player_j = players_step[j]\n                pos_player_j = pos_players[j]\n                next_pos_player_j = path_j[step_player_j]\n\n                # next position is the same for both players\n                if next_pos_player_i == next_pos_player_j:\n                    return i\n\n                # face to face collision\n                elif pos_player_i == next_pos_player_j and\\\n                    pos_player_j == next_pos_player_i:\n                    return i\n        return False\n\n    @classmethod\n    def is_valid_coordinates(cls, coord, map_size, obstacles):\n        \"\"\"Check if some coordinates are valid\n\n        Parameters\n        ----------\n        coord : tuple of int\n            coordinates to verify\n        map_size : int\n            size of the map\n        obstacles : *args\n            coordinates of the obstacles\n\n        Returns\n        -------\n        bool\n            True if the coordinates are valid False otherwise\n        \"\"\"\n        x, y = coord\n        # inside the map\n        if x < 0 or x >= map_size or y < 0 or y >= map_size:\n            return False\n        # not a wall\n        if coord in obstacles:\n            return False\n        return True\n\n    @classmethod\n    def random_move(cls, obstacles, player, players_path, players_step, pos_players):\n        \"\"\"Get a valid random move for a player\n\n        Parameters\n        ----------\n        obstacles : *args\n            coordinates of the obstacles\n        player : int\n            number of the player\n        players_path : *args\n            paths of the players\n        players_step : *args\n            steps of the players\n        pos_players : *args\n            coordinates of the players\n\n        Returns\n        -------\n        tuple of int\n            coordinates of the random move\n        \"\"\"\n        row, col = pos_players[player]\n        random_move = (row, col)\n        for r, c in 
random.sample([(1,0),(-1,0),(0,1),(0,-1)], 4):\n            next_coord = row+r, col+c\n            if Collision.is_valid_coordinates(next_coord, 20, obstacles):\n                random_move = next_coord\n                break\n        return random_move\n\n\n    @staticmethod\n    def manage_collision(player, players_path, players_step,\n                pos_players, wallStates, again=False):\n        \"\"\"Modify the path of the player to avoid a collision\n\n        Parameters\n        ----------\n        player : int\n            number of the player\n        players_path : *args\n            paths of the players\n        players_step : *args\n            steps of the players\n        pos_players : *args\n            coordinates of the players\n        wallStates : *args\n            coordinates of the walls\n        again : bool\n            force a random move instead of re-planning a path\n        \"\"\"\n        # information of the player\n        current_pos = pos_players[player]\n        current_step = players_step[player]\n        next_pos = players_path[player][current_step]\n\n        # avoid collision on the next pos\n        # consider it as an obstacle\n        obstacles = wallStates + [next_pos]\n\n        # next position is goal\n        if current_step == len(players_path[player])-1:\n\n            # remove the goal coordinates\n            del players_path[player][-1]\n\n            # make a random move, next iteration it'll calculate a path to the goal\n            players_path[player].append(Collision.random_move(obstacles, player,\n                    players_path, players_step, pos_players))\n\n        else:\n\n            new_path = []\n\n            # start is current pos\n            start = current_pos\n            # goal is the coordinates two steps later in the path\n            goal = players_path[player][current_step+1]\n\n            # calculate path from start to goal\n            new_slice = assp.calcul_path(start, goal, obstacles, 20)\n\n            # if there is no path to the wanted coordinates\n            if again or new_slice is False:\n                # add a random move\n                new_path.append(Collision.random_move(obstacles, player,\n                    players_path, players_step, pos_players))\n                # remove the end of the path to add the random coordinates\n                del players_path[player][current_step:]\n\n                players_path[player] += new_path\n\n            else:\n                new_path += new_slice\n                # remove the coordinates of the collision\n                del players_path[player][current_step]\n                # insert the new alternative slice into the path\n                for i in range(len(new_path)-1):\n                    players_path[player].insert(players_step[player]+i, new_path[i])\n","repo_name":"su-3i025-projet/coop-pathfinding-lemajoran","sub_path":"pySpriteWorld-forStudents/MethodPlayer/method1.py","file_name":"method1.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"42785226176","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nimport torch\nimport torch.nn as nn\nclass LinearRegression:\n    \n    def __init__(self,learn_rate = 0.001,n_iters=1000):\n        self.learn_rate = learn_rate\n        self.n_iters = n_iters\n        self.weight = None\n        self.bias = None\n    \n    def fit(self,x,y):\n        print(x.shape)\n        n_sample,n_features = x.shape\n        self.weight = np.zeros(n_features)\n        self.bias = 0\n        for i in range(self.n_iters):\n            y_predicted = np.dot(x,self.weight)+self.bias # y = wx+b\n            dw = (1/n_sample) * np.dot(x.T,(y_predicted-y))\n            db = (1/n_sample) * np.sum(y_predicted-y)\n            self.weight-=self.learn_rate*dw\n            self.bias-=self.learn_rate*db\n    \n    def predict(self,x):\n        y_predicted = np.dot(x,self.weight)+self.bias # y = wx+b\n        \n        return y_predicted\n\n\nx_np,y_np = datasets.make_regression(n_samples=200,n_features=1,n_targets=1,noise=20)\n\n\n# print(y_np.shape)\nx = torch.from_numpy(x_np.astype(np.float32))\ny = torch.from_numpy(y_np.astype(np.float32))\ny = y.view(y.shape[0],1)\n\n# print(x)\n# 
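# --- Illustrative aside (not part of the original method1.py) ---
# Assuming method1.py is importable, a toy head-on scenario shows what
# detect_collision reports; note it returns a *player index*, so index 0 is
# itself falsy and callers should compare against False explicitly.
#
#     from method1 import Collision
#
#     pos_players = [(0, 0), (0, 1)]        # players facing each other
#     players_path = [[(0, 1)], [(0, 0)]]   # each steps into the other's cell
#     players_step = [0, 0]
#     who = Collision.detect_collision(pos_players, players_path, players_step)
#     assert who is not False               # player 0 collides, yet bool(0) is False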
print(y)\n# create model\nn_sample,n_feature = x.shape\nmodel = nn.Linear(n_feature, 1)  # one output per sample, matching y's shape\nw,b = model.parameters()\n# loss and optimizer\nlearning_rate = 0.01\ncriterion = nn.MSELoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)  # optimize weight and bias\n# training loop\nn_iters = 100\n\nfor i in range(n_iters):\n    # forward pass and loss\n    y_predicted = model(x)\n    loss = criterion(y_predicted,y)\n    # backward\n    loss.backward()\n    # update\n    optimizer.step()\n    optimizer.zero_grad()\n    if (i+1)%10 ==0:\n        w,b = model.parameters()\n\n        print(f\"epoch {i+1}: w = {w[0][0].item():.3f}, loss = {loss.item():.8f}\")\n    \n\n# model = LinearRegression(learn_rate=0.01)\n# model.fit(x,y)\npredict = model(x).detach().numpy()\n\nplt.plot(x_np,predict,\"r\")\nplt.plot(x_np,y_np,\"bo\")\nplt.show()\nplt.close()","repo_name":"fit1999123/Deep_learning","sub_path":"pytroch_practice/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"37672488313","text":"import os, ui\nimport editor\n\nclass App(object):\n    def __init__(self, bview, fn):\n        self.bview = bview\n        self.fn = fn\n        \n        self.view = ui.View()\n        self.view.name = os.path.split(fn)[-1]\n        self.view.right_button_items = [\n            ui.ButtonItem(\"Open in editor\", None, self.openineditor)\n        ]\n        \n        self.text = ui.TextView()\n        self.text.flex = \"WH\"\n        self.text.font = (\"Courier\", 18)\n        self.text.text = open(self.fn).read()\n        \n        self.view.add_subview(self.text)\n        self.view.present(\"fullscreen\")\n    \n    def openineditor(self, sender):\n        self.view.close()\n        editor.open_file(self.fn)\n\n","repo_name":"JadedTuna/browsepy","sub_path":"apps/text_viewer.py","file_name":"text_viewer.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"}
+{"seq_id":"19185385366","text":"#!/usr/bin/env python\n\n\"\"\"\nA script to build document and word representations for the model, saving\nthem to disk\n\"\"\"\n\nfrom pickle import dump, load\nimport sys\nimport os\nfrom os.path import join\nfrom collections import defaultdict\n\nimport numpy as n\n\nimport config as c\nfrom vectorize import vectorize\nimport vectorize as v\nfrom annotation import readEvents, readDocs, createInstances, readEntities\nfrom ml.util import mkdir\n\nfrom ner import NERIndex\n\ndef setupDataSet(dataPath, eventsFile, windowConv, contextConvs):\n\t\"\"\"\n\tPreps the data for learning\n\t\"\"\"\n\t#read the event annotations\n\tevents = readEvents(eventsFile)\n\n\t#read the data\n\trawData, labels = createInstances(readDocs(dataPath, events), events)\n\n\tleft = n.array([windowConv.convert(i) for i in rawData])\n\n\t#vectorize it\n\tright = vectorize(rawData, contextConvs)\n\n\treturn (left, right), labels, [i.event for i in rawData]\n\ndef writeWindow(dataPath, labelFile, converters, wordConv, outPrefix):\n\t\"\"\"\n\tCreates and saves event data\n\t\"\"\"\n\t(dataLeft, dataRight), labels, events = setupDataSet(dataPath, labelFile, wordConv, converters)\n\n\t#print out shape info\n\tprint(\"left shape {}\".format(dataLeft.shape))\n\tprint(\"right shape {}\".format(dataRight.shape))\n\n\t#numpy arrays must be written to binary-mode file handles\n\twith open(outPrefix.format(\"left\"), \"wb\") as leftOut, open(outPrefix.format(\"right\"), \"wb\") as rightOut, open(outPrefix.format(\"labels\"), \"wb\") as labelsOut:\n\t\t\n\t\tn.save(leftOut, dataLeft)\n\t\tn.save(rightOut, dataRight)\n\t\tn.save(labelsOut, labels)\n\n\treturn events\n\ndef 
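# --- Illustrative aside (not part of the original linear_regression.py) ---
# A quick sanity check for the hand-rolled gradient-descent class above: its
# fitted weight and bias should approach the closed-form least-squares line.
#
#     import numpy as np
#     from sklearn import datasets
#
#     x, y = datasets.make_regression(n_samples=200, n_features=1,
#                                     noise=20, random_state=0)
#     reg = LinearRegression(learn_rate=0.05, n_iters=2000)
#     reg.fit(x, y)
#     slope, intercept = np.polyfit(x[:, 0], y, 1)   # closed-form reference
#     # expect reg.weight[0] close to slope and reg.bias close to intercept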
main(outDir):\n\t\"\"\"\n\tPrepares the data according to the config settings and saves it to disk as a\n\tpickled map\n\t\"\"\"\n\tprint(\"Building Converters\")\n\tglovePath = \"data/vectors/glove/glove.6B.50d.txt\"\n\tw2vPath = \"data/vectors/word2vec/GoogleNews-vectors-negative300.bin.gz\"\n\td2vPath = \"data/vectors/doc2vec/ace/doc_embeddings.txt\"\n\ts2vPath = \"data/vectors/doc2vec/ace/sent_embeddings.txt\"\n\n\t#w2vModel = v.loadW2V(w2vPath)\n\t#gloveModel = v.loadGlove(glovePath)\n\n\tentTrain = \"data/entities_training.csv\"\n\tentDev = \"data/entities_dev.csv\"\n\tentTest = \"data/entities_testing.csv\"\n\n\tentities = readEntities(entTrain) + readEntities(entDev) + readEntities(entTest)\n\tentFeats = v.EntityFeats(entities)\n\t#pickled indexes must be read in binary mode\n\tposFeats = v.SparsePOSFeats(load(open(\"data/pos_tags.p\", \"rb\")))\n\tdepFeats = v.SparseDependencyFeats(load(open(\"data/dep_tags.p\", \"rb\")))\n\tdocFeats = v.SparseDocTypeFeats(\"data/doc_types.txt\")\n\n\twordIndex = load(open(\"data/word_index.p\", \"rb\"))\n\tentityIndex = load(open(\"data/entity_map.p\", \"rb\"))\n\n\t#TODO remove\n\t#w2v = v.Word2VecFeats(defaultdict(lambda: [0.0]))\n\t#dataPath = \"/home/walker/Data/ace/tmp/\"\n\n\t#leftConverter = v.Word2VecFeats(v.loadW2V(\"data/vectors/word2vec/GoogleNews-vectors-negative300.bin.gz\"), 5)\n\t#leftConverter = v.Word2VecFeats(v.loadGlove(\"data/vectors/glove/glove.6B.50d.txt\"), 20)\n\t#leftConverter = v.WindowFeats([v.Word2VecFeats(v.loadGlove(\"data/vectors/glove/glove.6B.50d.txt\")), v.NERFeats(\"data/vectors/ner/nerIndex.p\")], 20)\n\t#leftConverter = v.WindowFeats([v.Word2VecFeats(v.loadGlove(glovePath)), v.PositionFeats()], 20)\n\t#leftConverter = v.WindowFeats([v.Word2VecFeats(v.loadGlove(glovePath))], 20)\n\t#leftConverter = v.WindowFeats([v.Word2VecFeats(w2vModel)], 10)\n\t#leftConverter = v.WindowFeats([v.Word2VecFeats(gloveModel), entFeats, posFeats, depFeats], 20)\n\t#leftConverter = v.WindowFeats([v.Word2VecFeats(w2vModel), entFeats, v.PositionFeats()], 15)\n\tleftConverter = v.WindowFeats([v.WordEmbeddingFeats(wordIndex), v.EntityEmbeddingFeats(entityIndex, entities), \nv.DistanceEmbeddingFeats()], 15)\n\n\t#rightConverters = [v.Word2VecFeats(v.loadW2V(w2vPath), 1),\n\t#v.Doc2VecFeats(d2vPath),\n\t#v.Sentence2VecFeats(s2vPath)]\n\t\n\t#rightConverters = [v.Word2VecFeats(w2vModel, 1), v.Doc2VecFeats(d2vPath), v.Sentence2VecFeats(s2vPath)]\n\t#rightConverters = [v.Word2VecFeats(gloveModel, 1), v.Word2VecFeats(w2vModel, 1), \n\t#v.Doc2VecFeats(d2vPath), v.Sentence2VecFeats(s2vPath), entFeats, depFeats, posFeats, docFeats]\n\n\trightConverters = [v.Doc2VecFeats(d2vPath), v.Sentence2VecFeats(s2vPath)]\n\tmkdir(outDir)\n\n\t#vectorize the data\n\tprint(\"Read training\")\n\ttrainingEvents = writeWindow(c.dataPath, c.trainingFile, rightConverters, leftConverter, join(outDir, \"training_{}.p\"))\n\n\tprint(\"Read dev\")\n\tdevEvents = writeWindow(c.dataPath, c.devFile, rightConverters, leftConverter, join(outDir, \"dev_{}.p\"))\n\n\tprint(\"Read testing\")\n\ttestEvents = writeWindow(c.dataPath, c.testFile, rightConverters, leftConverter, join(outDir, \"test_{}.p\"))\n\n\tdata = {\"train_events\":trainingEvents, \"dev_events\":devEvents, \n\t\"test_events\":testEvents,\n\t\"info\": \"\\n\".join(map(str,[leftConverter] + rightConverters))}\n\t\n\twith open(join(outDir, \"info.p\"),\"wb\") as out:\n\t\tdump(data, out)\n\nif __name__ == 
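# --- Illustrative aside (not part of the original buildWindows.py) ---
# Downstream code would reload the arrays persisted by writeWindow with
# np.load, using the same prefix convention (the output directory name here
# is a placeholder):
#
#     import numpy as np
#
#     left = np.load("out/training_left.p")
#     right = np.load("out/training_right.p")
#     labels = np.load("out/training_labels.p")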
\"__main__\":\n\tmain(sys.argv[1])\n","repo_name":"meghasin/Event-extraction-via-deep-semantic-LSTM","sub_path":"buildWindows.py","file_name":"buildWindows.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3716930415","text":"import sys\nimport logging\n\nimport FreeCAD\n\n# Logging Handler\n# Retain a reference to the logging handler so it may be removed on requeset.\n# Also to prevent 2 handlers being added\n_logging_handler = None\n\n# FreeCAD Logging Handler\nclass FreeCADConsoleHandler(logging.Handler):\n \"\"\"logging.Handler class to output to FreeCAD's console\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(FreeCADConsoleHandler, self).__init__(*args, **kwargs)\n\n # Test for expected print functions\n # (just check they exist, if they don't an exception will be raised)\n FreeCAD.Console.PrintMessage\n FreeCAD.Console.PrintWarning\n FreeCAD.Console.PrintError\n\n def emit(self, record):\n log_text = self.format(record) + \"\\n\"\n if record.levelno >= logging.ERROR:\n FreeCAD.Console.PrintError(log_text)\n elif record.levelno >= logging.WARNING:\n FreeCAD.Console.PrintWarning(log_text)\n else:\n FreeCAD.Console.PrintMessage(log_text)\n\n\ndef enable(level=None, format=\"%(message)s\"):\n \"\"\"\n Enable python builtin logging, and output it somewhere you can see.\n - FreeCAD Console, or\n - STDOUT (if output to console fails, for whatever reason)\n\n Any script can log to FreeCAD console with:\n\n >>> import cadquery\n >>> cadquery.freecad_impl.console_logging.enable()\n >>> import logging\n >>> log = logging.getLogger(__name__)\n >>> log.debug(\"detailed info, not normally displayed\")\n >>> log.info(\"some information\")\n some information\n >>> log.warning(\"some warning text\") # orange text\n some warning text\n >>> log.error(\"an error message\") # red text\n an error message\n\n logging only needs to be enabled once, somewhere in your codebase.\n debug logging level can be set with:\n\n >>> import cadquery\n >>> import logging\n >>> cadquery.freecad_impl.console_logging.enable(logging.DEBUG)\n >>> log = logging.getLogger(__name__)\n >>> log.debug(\"debug logs will now be displayed\")\n debug logs will now be displayed\n\n :param level: logging level to display, one of logging.(DEBUG|INFO|WARNING|ERROR)\n :param format: logging format to display (search for \"python logging format\" for details)\n :return: the logging Handler instance in effect\n \"\"\"\n global _logging_handler\n\n # Set overall logging level (done even if handler has already been assigned)\n root_logger = logging.getLogger()\n if level is not None:\n root_logger.setLevel(level)\n elif _logging_handler is None:\n # level is not specified, and ho handler has been added yet.\n # assumption: user is enabling logging for the first time with no parameters.\n # let's make it simple for them and default the level to logging.INFO\n # (logging default level is logging.WARNING)\n root_logger.setLevel(logging.INFO)\n\n if _logging_handler is None:\n # Determine which Handler class to use\n try:\n _logging_handler = FreeCADConsoleHandler()\n except Exception as e:\n raise\n # Fall back to STDOUT output (better than nothing)\n _logging_handler = logging.StreamHandler(sys.stdout)\n\n # Configure and assign handler to root logger\n _logging_handler.setLevel(logging.DEBUG)\n root_logger.addHandler(_logging_handler)\n\n # Set formatting (can be used to re-define logging format)\n formatter = 
logging.Formatter(format)\n    _logging_handler.setFormatter(formatter)\n\n    return _logging_handler\n\n\ndef disable():\n    \"\"\"\n    Disables logging to FreeCAD console (or STDOUT).\n    Note, logging may be enabled by another imported module, so this isn't a\n    guarantee; this function undoes logging_enable(), nothing more.\n    \"\"\"\n    global _logging_handler\n    if _logging_handler:\n        root_logger = logging.getLogger()\n        root_logger.handlers.remove(_logging_handler)\n        _logging_handler = None\n","repo_name":"dcowden/cadquery","sub_path":"cadquery/freecad_impl/console_logging.py","file_name":"console_logging.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","stars":431,"dataset":"github-code","pt":"54"} +{"seq_id":"28726247706","text":"# -*- coding: utf-8 -*-\n\nfrom django.views.generic.base import TemplateView\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Alignment, Border, Font, PatternFill, Side\n\n#from django.http import HttpResponse\n#from django.contrib.auth.models import User\n\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User, Group\nfrom django.contrib.auth.forms import UserCreationForm\n#from django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse \nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom empleados.models import Empleado\nfrom empleados.metodos import set_values_emp\nfrom candidatos.models import *\nfrom candidatos.forms import *\nfrom .models import *\nfrom empleados.forms import *\nfrom candidatos.metodos import *\n\nfrom .forms import *\nfrom datetime import datetime, timedelta\nfrom django.db import IntegrityError\nfrom django.views.defaults import page_not_found\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\n###############\n\n###############\n\n############### Export to PDF\n\ndef mi_error_404(request):\n    nombre_template = '404.html'\n    return page_not_found(request, template_name=nombre_template)\n\ndef mod_fecha(fec):\n    if fec not in [None,'']:\n        return (datetime.strptime(fec, '%d/%m/%Y').strftime('%Y-%m-%d'))\n    else:\n        return fec\n\ndef enviar_email(request):\n    name=request.POST.get('nombre')\n    email=request.POST.get('correo')\n    destino=request.POST.get('correo_cand')\n    message=request.POST.get('mensaje')\n    print(destino)\n    body=render_to_string(\n        'email_content.html',{\n            'name':name,\n            'email':email,\n            'message':message,\n        },\n    )\n    email_message = EmailMessage(\n        subject='Mensaje de usuario',\n        body=body,\n        from_email=email,\n        #to=['juvenciolugo@hotmail.com'],\n        to=[destino],\n    )\n    email_message.content_subtype = 'html'\n    email_message.send()\n    return redirect('candLst')\n\ndef enviar2_email(request):\n    name=request.POST.get('nombre')\n    email=request.POST.get('correo')\n    destino=request.POST.get('correo_emp')\n    print(destino)\n    message=request.POST.get('mensaje')\n    body=render_to_string(\n        'email_content.html',{\n            'name':name,\n            'email':email,\n            'message':message,\n        },\n    )\n    email_message = EmailMessage(\n        subject='Mensaje de usuario',\n        body=body,\n        from_email=email,\n        #to=['juvenciolugo@gmail.com.com'],\n        to=[destino],\n    )\n    email_message.content_subtype = 'html'\n    email_message.send()\n    # TODO: notify the user that the email was sent\n    return redirect('empLst')\n\nclass ReportePersonalizadoPDF(TemplateView):\n    
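# (Added note, not original code) This view renders one Candidato as a printable PDF;\n    # only kwargs['id'] is consumed below, so the URLconf presumably passes the candidate id --\n    # the exact route is an assumption, e.g. path('reporte/<int:id>/', ReportePersonalizadoPDF.as_view()).\n    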
def get(self, request,*args,**kwargs):\n        import io \n        from reportlab.platypus import SimpleDocTemplate, Paragraph, TableStyle \n        from reportlab.lib.styles import getSampleStyleSheet \n        from reportlab.lib import colors \n        from reportlab.lib.pagesizes import letter ,A4\n        from reportlab.lib.units import inch, cm\n        from reportlab.platypus import Table \n        from reportlab.lib.enums import TA_JUSTIFY, TA_LEFT, TA_CENTER #, TA_RIGHT\n        from reportlab.pdfgen import canvas\n        from reportlab.pdfbase.pdfmetrics import stringWidth\n        \n        \n        styles = getSampleStyleSheet() \n\n        #styleR = styles[\"Normal\"]\n        #styleR.alignment = TA_RIGHT\n\n        # note: getSampleStyleSheet() hands back shared style objects, so styleN and styleC\n        # below alias the same \"Normal\" style; the last alignment assignment wins for both names\n        styleN = styles[\"Normal\"]\n        styleN.alignment = TA_LEFT\n        \n        \n        styleC = styles[\"Normal\"]\n        styleC.alignment = TA_CENTER\n\n        pk=kwargs['id']\n        cand = Candidato.objects.get(id=pk)\n\n        response = HttpResponse(content_type='application/pdf') \n        buff = io.BytesIO() \n        c = canvas.Canvas(buff, pagesize=A4)\n        doc = SimpleDocTemplate(buff, \n        pagesize=letter, \n        rightMargin=40, \n        leftMargin=40, \n        topMargin=60, \n        bottomMargin=18, \n        ) \n        candidatos = [] \n        \n        #ejemplo = Paragraph(\"Mitexto\", styleN) \n        #candidatos.append(ejemplo) \n        c.drawImage(\"static/img/logoNuevo.png\", 200, 770,190,60)\n        c.setLineWidth(.3)\n        c.setFont('Helvetica', 22)\n        c.drawString(200, 750, 'Solicitud de empleo')\n        c.setFont('Helvetica', 12)\n        #c.drawString(30, 735, 'Report')\n        \n        \n\n        c.setFont('Helvetica', 6)\n        c.drawString(400, 730,' Fecha Fuente de reclutamiento')\n        c.drawString(400, 720,cand.fecha_solicitud.strftime('%d/%m/%Y'))\n        c.drawString(450, 720,cand.fuente_recluta)\n        c.drawString(350, 710,'Puesto Solicitado Sueldo deseado')\n        c.drawString(350, 700,cand.puesto_solicitado)\n        c.drawString(450, 700,cand.sueldo_deseado)\n        c.drawString(50, 690,'Apellido Paterno Apellido Materno Nombre(s) Sexo Estado Civil')\n        c.drawString(50, 680,cand.apellido_paterno+' '+cand.apellido_materno+' '+cand.nombre)\n        c.drawString(260, 680,cand.sexo)\n        c.drawString(295, 680,cand.estado_civil)\n        c.drawString(50, 670,'Edad Fecha de nacimiento Lugar de Nacimiento Teléfono')\n        c.drawString(50, 660,cand.edad)\n        c.drawString(80, 660,str(cand.fecha_nac.strftime('%d/%m/%Y')))\n        c.drawString(130, 660,cand.lugar_nac)\n        c.drawString(213, 660,str(cand.tel))\n        c.drawString(50, 650,'Calle No Colonia Estado CP Tiempo de trayectoria de su casa')\n        c.drawString(50, 640,cand.calle)\n        c.drawString(173, 640,cand.colonia)\n        if cand.esdo not in[None,'']:\n            c.drawString(250, 640,cand.esdo)\n        else:\n            c.drawString(250, 640,'')\n\n        c.drawString(300, 640,str(cand.cp))\n        c.drawString(350, 640,str(cand.trayectoria_de_casa))\n        c.drawString(50, 630,'R.F.C. Afiliación al IMSS No. 
de cartilla Militar Tipo y No de licencia de manejo')\n        c.drawString(50, 620,str(cand.rfc))\n        c.drawString(100, 620,str(cand.imss))\n        c.drawString(150, 620,str(cand.cartilla))\n        c.drawString(230, 620,str(cand.tipo_licencia))\n        c.drawString(270, 620,str(cand.licencia))\n        \n        \n        styles = getSampleStyleSheet()\n        styleBH = styles[\"BodyText\"]\n        styleBH.alignment = TA_CENTER\n        styleBH.fontSize = 6\n\n        nivel = Paragraph('''NIVEL ESCOLAR''',styleBH)\n        institucion = Paragraph('''INSTITUCIÓN''',styleBH)\n        annios = Paragraph('''AÑOS''',styleBH)\n        inicio = Paragraph('''INICIO''',styleBH)\n        termino = Paragraph('''TERMINO''',styleBH)\n        documento = Paragraph('''DOCUMENTO''',styleBH)\n        \n        data = []\n        data.append([nivel,institucion,annios,inicio,termino,documento])\n        \n        nivel =Paragraph('Primaria',styleBH)\n        institucion =Paragraph(str(cand.primaria),styleBH)\n        annios =Paragraph(str(cand.primaria_annios),styleBH)\n        inicio =Paragraph(str(cand.primaria_inicio),styleBH)\n        termino =Paragraph(str(cand.primaria_termino),styleBH)\n        documento=Paragraph(str(cand.primaria_documento),styleBH)\n        data.append([nivel,institucion,annios,inicio,termino,documento])\n\n        nivel =Paragraph('Secundaria',styleBH)\n        institucion =Paragraph(str(cand.secundaria),styleBH)\n        annios =Paragraph(str(cand.secundaria_annios),styleBH)\n        inicio =Paragraph(str(cand.secundaria_inicio),styleBH)\n        termino =Paragraph(str(cand.secundaria_termino),styleBH)\n        documento=Paragraph(str(cand.secundaria_documento),styleBH)\n        data.append([nivel,institucion,annios,inicio,termino,documento])\n        \n        nivel =Paragraph('Preparatoria',styleBH)\n        institucion =Paragraph(str(cand.preparatoria),styleBH)\n        annios =Paragraph(str(cand.preparatoria_annios),styleBH)\n        inicio =Paragraph(str(cand.preparatoria_inicio),styleBH)\n        termino =Paragraph(str(cand.preparatoria_termino),styleBH)\n        documento=Paragraph(str(cand.preparatoria_documento),styleBH)\n        data.append([nivel,institucion,annios,inicio,termino,documento])\n\n        nivel =Paragraph('Tecnica',styleBH)\n        institucion =Paragraph(str(cand.tecnica),styleBH)\n        annios =Paragraph(str(cand.tecnica_annios),styleBH)\n        inicio =Paragraph(str(cand.tecnica_inicio),styleBH)\n        termino =Paragraph(str(cand.tecnica_termino),styleBH)\n        documento=Paragraph(str(cand.tecnica_documento),styleBH)\n        data.append([nivel,institucion,annios,inicio,termino,documento])\n\n\n        \n        #datos=(cand.fecha_solicitud.strftime('%d-%m-%Y'), cand.puesto_solicitado)\n        #t= Table([headings]+[datos])\n        #t.setStyle([('GRID', (0, 0), (1, -1), 1, colors.dodgerblue)])\n        #candidatos.append(t) \n        \n        #table size\n        width, height = A4\n        high = 500\n        table = Table(data, colWidths=[3.5 * cm, 6 * cm, 1.3 * cm,1.4*cm,2*cm,3*cm])\n        table.setStyle(TableStyle([\n        ('INNERGRID',(0,0),(-1,-1), 0.25,colors.black),\n        ('BOX',(0,0),(-1,-1), 0.25,colors.black),\n        ]))\n        table.wrapOn(c, width, height)\n        table.drawOn(c, 30, high)\n        \n        stylePro = styles[\"BodyText\"]\n        stylePro.alignment = TA_CENTER\n        stylePro.fontSize = 6\n\n        \n\n        # contents of the professional-studies table\n        estudios=Estudios_pro.objects.filter(candidato=cand)\n        high = 446\n        \n        for est in estudios:\n            data = []\n            tipo = Paragraph(str(est.estudios_tipo),stylePro)\n            institucion = Paragraph(str(est.estudios_escuela),stylePro)\n            annios = Paragraph(str(est.estudios_annios),stylePro)\n            inicio = Paragraph(str(est.estudios_inicio),stylePro)\n            termino = Paragraph(str(est.estudios_termino),stylePro)\n            documento = Paragraph(str(est.estudios_documento),stylePro)\n            data.append([tipo,institucion,annios,inicio,termino,documento])\n\n            carrera = Paragraph('Estudio',stylePro)\n            tesis=Paragraph('Tesis',stylePro)\n            cedula=Paragraph('Cédula',stylePro)\n            data.append([carrera,'',tesis,'','',cedula])\n\n            carrera = Paragraph(str(est.estudios_nombre),stylePro)\n            tesis=Paragraph(str(est.estudios_tesis),stylePro)\n            cedula=Paragraph(str(est.estudios_cedula),stylePro)\n            \n            data.append([carrera,'',tesis,'','',cedula])\n            \n            \n            table = Table(data, colWidths=[3.5 * cm, 6 * cm, 1.3 * cm,1.4*cm,2*cm,3*cm])\n            table.setStyle(TableStyle([\n            ('INNERGRID',(0,0),(-1,-1), 0.25,colors.black),\n            ('BOX',(0,0),(-1,-1), 0.25,colors.black),\n            ('SPAN',(0,1),(1,1)),\n            ('SPAN',(0,2),(1,2)),\n            ('SPAN',(2,1),(4,1)),\n            ('SPAN',(2,2),(4,2)),\n            \n            ]))\n            table.wrapOn(c, width, height)\n            table.drawOn(c, 30, high)\n            high-=54\n        high-=10\n        c.drawString(50, high,'¿Estudia actualmente? '+str(cand.estudia_actualmente)+' ¿Que? '+str(cand.estudia_que)+' ¿Donde? '+str(cand.estudia_donde))\n        high-=10\n        c.drawString(50, high,'Horario : '+str(cand.estudia_horario)+' Fecha que finaliza : '+str(cand.estudia_termino))\n        c.showPage()\n        ## Page 2\n        stylePro.alignment = TA_LEFT\n        data = []\n        high=800\n        idiomas=Idioma_candidato.objects.filter(candidato=cand)\n        idiomas_lst=''\n        for idi in idiomas:\n            # accumulate every language; a plain assignment would keep only the last one\n            idiomas_lst +='IDIOMA(S): '+str(idi.idioma)+' %'+str(idi.idioma_porcentaje)+' '\n        if not idiomas_lst in [None,'']:\n            data.append([Paragraph(idiomas_lst,stylePro)])\n            table = Table(data, colWidths=[19 * cm])\n            table.setStyle(TableStyle([\n            ('INNERGRID',(0,0),(-1,-1), 0.25,colors.black),\n            ('BOX',(0,0),(-1,-1), 0.25,colors.black),\n            ]))\n            table.wrapOn(c, width, height)\n            table.drawOn(c, 30, high)\n        high-=18\n        data = []\n        data.append([Paragraph('Máquinas, equipos y herramientas que puede manejar : '+str(cand.maquinas_equipos),stylePro)])\n        table = Table(data, colWidths=[19 * cm])\n        table.setStyle(TableStyle([\n        ('INNERGRID',(0,0),(-1,-1), 0.25,colors.black),\n        ('BOX',(0,0),(-1,-1), 0.25,colors.black),\n        ]))\n        table.wrapOn(c, width, height)\n        table.drawOn(c, 30, high)\n\n        # Personal data\n        \n        high-=90\n        c.setFont('Helvetica', 6)\n        c.drawString(30, high+75,'DATOS PERSONALES')\n        data = []\n        des=''\n        nom = Paragraph('''Nombre''',stylePro)\n        edad = Paragraph('''Edad''',stylePro)\n        ocu = Paragraph('''Ocupación''',stylePro)\n        vive = Paragraph('''Vive''',stylePro)\n        data.append(['',nom,edad,ocu,vive])\n        \n        des= Paragraph('Padre',stylePro)\n        nom = Paragraph(str(cand.padre_nombre),stylePro)\n        edad = Paragraph(str(cand.padre_edad),stylePro)\n        ocu = Paragraph(str(cand.padre_ocupacion),stylePro)\n        vive = Paragraph(str(cand.padre_vive),stylePro)\n        data.append([des,nom,edad,ocu,vive])\n        \n        des= Paragraph('Madre',stylePro)\n        nom = Paragraph(str(cand.madre_nombre),stylePro)\n        edad = Paragraph(str(cand.madre_edad),stylePro)\n        ocu = Paragraph(str(cand.madre_ocupacion),stylePro)\n        vive = Paragraph(str(cand.madre_vive),stylePro)\n        data.append([des,nom,edad,ocu,vive])\n\n        des= Paragraph('Cónyuge',stylePro)\n        nom = Paragraph(str(cand.conyuge_nombre),stylePro)\n        edad = Paragraph(str(cand.conyuge_edad),stylePro)\n        ocu = Paragraph(str(cand.conyuge_ocupacion),stylePro)\n        vive = Paragraph(str(cand.conyuge_vive),stylePro)\n        data.append([des,nom,edad,ocu,vive])\n        table = Table(data, colWidths=[1.6*cm,6 * cm, 1 * cm, 6 * cm,1.4*cm])\n        table.setStyle(TableStyle([\n        ('INNERGRID',(0,0),(-1,-1), 0.25,colors.black),\n        ('BOX',(0,0),(-1,-1), 0.25,colors.black),\n        ]))\n        table.wrapOn(c, width, height)\n        table.drawOn(c, 30, high)\n        ### SIBLINGS\n        hermanos=Hermano_candidato.objects.filter(candidato=cand)\n        \n        nher=0\n        for herm in hermanos:\n            
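# (added note) one table row per family member; 'high' is the page's descending y-cursor, stepped 18pt per row\n            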
nher+=1\n            high -= 18\n            data = []\n            des = Paragraph('Hermano',stylePro)\n            nom = Paragraph(str(herm.hermano_nombre),stylePro)\n            edad = Paragraph(str(herm.hermano_edad),stylePro)\n            ocu = Paragraph(str(herm.hermano_ocupacion),stylePro)\n            data.append([des,nom,edad,ocu,''])\n            table = Table(data, colWidths=[1.6*cm,6 * cm, 1 * cm, 6 * cm,1.4*cm])\n            table.setStyle(TableStyle([\n            ('INNERGRID',(0,0),(-1,-1), 0.25,colors.black),\n            ('BOX',(0,0),(-1,-1), 0.25,colors.black),\n            ]))\n            table.wrapOn(c, width, height)\n            table.drawOn(c, 30, high)\n            #high-=54\n        ### CHILDREN\n        \n        hijos=Hijo_candidato.objects.filter(candidato=cand)\n        #if (nher==1):\n        #    high += 36\n        \n        for hijo in hijos:\n            high -= 18\n            data = []\n            des = Paragraph('Hijo',stylePro)\n            nom = Paragraph(str(hijo.hijo_nombre),stylePro)\n            edad = Paragraph(str(hijo.hijo_edad),stylePro)\n            ocu = Paragraph(str(hijo.hijo_ocupacion),stylePro)\n            data.append([des,nom,edad,ocu,''])\n            table = Table(data, colWidths=[1.6*cm,6 * cm, 1 * cm, 6 * cm,1.4*cm])\n            table.setStyle(TableStyle([\n            ('INNERGRID',(0,0),(-1,-1), 0.25,colors.black),\n            ('BOX',(0,0),(-1,-1), 0.25,colors.black),\n            ]))\n            table.wrapOn(c, width, height)\n            table.drawOn(c, 30, high)\n        \n        high-=18\n        ## WORK EXPERIENCE\n        c.drawString(30, high,'EXPERIENCIA LABORAL')\n        high-=15\n        experiencias=Experiencia.objects.filter(candidato=cand)\n        for exp in experiencias:\n            c.drawString(30, high,'Empresa Dirección Teléfono')\n            c.drawString(30, high-10,str(exp.empresa_nombre))\n            c.drawString(130, high-10,str(exp.empresa_direccion))\n            c.drawString(280, high-10,str(exp.empresa_tel))\n            high-=20\n            c.drawString(30, high,'Giro Nombre del jefe inmediato Puesto del jefe inmediato')\n            c.drawString(30, high-10,str(exp.empresa_giro))\n            c.drawString(130, high-10,str(exp.empresa_nombre_jefe))\n            c.drawString(280, high-10,str(exp.empresa_jefe_puesto))\n            high-=20\n            c.drawString(30, high,'Fecha de ingreso Salario inicial Fecha de separación Salario final')\n            c.drawString(30, high-10,exp.empresa_fecha_ingreso.strftime('%d/%m/%Y'))\n            c.drawString(120, high-10,str(exp.empresa_salario_inicio))\n            fs=exp.empresa_fecha_separacion\n            if fs not in [None,'']:\n                c.drawString(180, high-10,exp.empresa_fecha_separacion.strftime('%d/%m/%Y'))\n            else:\n                c.drawString(180, high-10,'')\n\n            c.drawString(230, high-10,str(exp.empresa_salario_final))\n            high-=20\n            c.drawString(30, high,'Último puesto desempeñado Tiempo Departamento Puesto anterior Tiempo')\n            c.drawString(30, high-10,str(exp.empresa_puesto_ultimo))\n            c.drawString(120, high-10,str(exp.empresa_puesto_ultimo_tiempo))\n            c.drawString(180, high-10,str(exp.empresa_puesto_ultimo_depto))\n            high-=20\n            c.drawString(30, high,'Puesto anterior Tiempo Departamento Puesto anterior Tiempo')\n            c.drawString(30, high-10,str(exp.empresa_puesto_anterior))\n            c.drawString(120, high-10,str(exp.empresa_puesto_anterior_tiempo))\n            c.drawString(180, high-10,str(exp.empresa_puesto_anterior_depto))\n\n            high-=20\n            c.drawString(30, high,'Experiencia en supervision : '+str(exp.experiencia_supervision)+' No. Personas que superviso : '+str(exp.experiencia_supervision_num))\n            c.drawString(30, high-10,'Motivo de la separación')\n            c.drawString(30, high-20,'Mejorar el ingreso')\n            high-=100\n\n        ## GENERAL DATA\n        #' ¿Cuál? : '+str(cand.sindicato_nombre)+\n        # ' Cargo: '+str(cand.sindicato_cargo))\n        high+=50\n        c.drawString(30, high,'DATOS GENERALES')\n        c.drawString(30, high-10,'Vive en casa: '+str(cand.vivienda_propia)+' ¿Tiene crédito infonavit? 
: '+str(cand.credito_infonavit)+' Pago mensual: '+str(cand.pago_infonavit))\n        c.drawString(30, high-20,'¿Tiene auto propio?: '+str(cand.auto_propio)+' Marca : '+str(cand.auto_marca)+' Modelo: '+str(cand.auto_modelo)+' Seguro de vida: '+str(cand.seguro_vida)+' Monto: '+ str(cand.seguro_monto))\n        c.drawString(30, high-30,'¿Afiliado a un sindicato?: '+str(cand.afiliado_sindicato)+' ¿Cuál?: '+str(cand.sindicato_nombre)+' Cargo: '+str(cand.sindicato_cargo))\n        c.drawString(30, high-40,'¿A qué dedica su tiempo libre?: '+str(cand.tiempo_libre)+' ¿Embarazo? : '+str(cand.embarazo)+' Religión: '+str(cand.religion)+' Dispuesto a rolar turno: '+ str(cand.disposicion_rolar))\n        c.drawString(30, high-50,'¿Estado de salud?: '+str(cand.estado_salud)+' ¿Fuma? : '+str(cand.fuma)+' ¿Bebe?: '+str(cand.bebe)+' ¿Tatuajes?: '+str(cand.tatuajes)+' ¿Perforaciones?: '+ str(cand.perforaciones))\n        c.drawString(30, high-60,'¿Dispuesto a viajar?: '+str(cand.disposicion_viajar)+' ¿Tiene ingresos extras? : '+str(cand.ingreso_extra)+' Monto?: '+str(cand.ingreso_monto)+' ¿Fuente?: '+ str(cand.ingreso_fuente))\n        c.drawString(30, high-70,'¿Trabaja con nosotros algún pariente o amigo?: '+str(cand.labora_conocido)+' ¿Nombre? : '+str(cand.conocido_nombre)+' ¿Departamento?: '+ str(cand.conocido_depto))\n        c.drawString(30, high-80,'En caso de ser aceptado ¿En qué fecha estaría disponible para trabajar?: '+str(cand.fecha_disponible))\n\n        ## REFERENCES\n        referencias=Referencia.objects.filter(candidato=cand)\n        high -= 100\n        stylePro.alignment = TA_CENTER\n        c.drawString(30, high,'REFERENCIAS')\n        high -= 20\n        data = []\n        nom = Paragraph('Nombre',stylePro)\n        ocu = Paragraph('Ocupación',stylePro)\n        tel = Paragraph('Teléfono',stylePro)\n        annios = Paragraph('Años de conocerlo',stylePro)\n        data.append([nom,ocu,tel,annios])\n        table = Table(data, colWidths=[6 * cm, 6 * cm, 2.5 * cm,2.5*cm])\n        table.setStyle(TableStyle([\n        ('INNERGRID',(0,0),(-1,-1), 0.25,colors.black),\n        ('BOX',(0,0),(-1,-1), 0.25,colors.black),\n        ]))\n        table.wrapOn(c, width, height)\n        table.drawOn(c, 30, high)\n        \n        for ref in referencias:\n            high -= 18\n            data = []\n            nom = Paragraph(str(ref.referencia_nombre),stylePro)\n            ocu = Paragraph(str(ref.referencia_ocupacion),stylePro)\n            tel = Paragraph(str(ref.referencia_tel),stylePro)\n            annios = Paragraph(str(ref.referencia_annios_conocer),stylePro)\n            \n            data.append([nom,ocu,tel,annios])\n            table = Table(data, colWidths=[6 * cm, 6 * cm, 2.5 * cm,2.5*cm])\n            table.setStyle(TableStyle([\n            ('INNERGRID',(0,0),(-1,-1), 0.25,colors.black),\n            ('BOX',(0,0),(-1,-1), 0.25,colors.black),\n            ]))\n            table.wrapOn(c, width, height)\n            table.drawOn(c, 30, high)\n            #high-=54\n        \n        \n        # Signature\n        high-=80\n        data = []\n        texto=Paragraph(\"CORPORATIVO AXXUM, S.A. DE C.V. LE AGRADECE LA INFORMACIÓN PROPORCIONADA, SI ALGUNO DE LOS DATOS NO FUERA VERÍDICO, TODO CONVENIO CELEBRADO CON LA EMPRESA SERIA NULO.\",stylePro)\n        firma=Paragraph('Firma',styleC)\n        data.append([texto,firma])\n        table = Table(data, colWidths=[6 * cm, 6*cm])\n        table.setStyle(TableStyle([\n        ('INNERGRID',(0,0),(-1,-1), 0.25,colors.black),\n        ('BOX',(0,0),(-1,-1), 0.25,colors.black),\n        \n        ]))\n        table.wrapOn(c, width, height)\n        table.drawOn(c, 30, high)\n        c.line(235, high+17, 335, high+17)\n        \n        c.showPage()\n        high=450\n        \n        \n\n        \n        styleAviT = styles[\"Normal\"]\n        styleAviT.alignment = TA_JUSTIFY\n        styleAviT.fontSize = 6\n        data = []\n        tit_aviso=Paragraph(\"AVISO DE PRIVACIDAD\",styleC)\n        data.append([tit_aviso])\n        aviso=Paragraph(\" CORPORATIVO AXXUM, S.A. 
DE C.V., con domicilio en la calle Rio Manzanares 308 Oficina 3, Colonia: Del Valle, San Pedro Garza García, Nuevo León, Código Postal 6622, \"+\n        \"es responsable del uso que se le dé a sus datos personales y de su protección, los cuales serán tratados y resguardados con base en los principios de licitud, calidad,\"+\n        \"consentimiento, información, finalidad, lealtad, proporcionalidad y responsabilidad, consagrados en la Ley Federal de Protección de Datos Personales en Posesión de los Particulares.\"+\n        \"Su información personal será utilizada con el fin de encontrar al candidato ideal que pueda cubrir la vacante publicada en los distintos medios electrónicos y/o escritos. Para las finalidades antes mencionadas requerimos obtener los siguientes datos:\"+\n        \"Datos personales : * Nombre Completo * Lugar y Fecha de Nacimiento * Sexo * Domicilio, Correo Electronico * RFC, CURP, No. IMSS, Tipo y No. De Lic. De Manejo * Estado Civil Antecedentes Academicos: * Nivel Escolar \"+\n        \" * Institución y Domicilio * Años Cursados, Fecha de Ingreso y Egreso * Documento Recibido Conocimientos Generales: * Idiomas * Maquinas de Oficina o Equipo de Trabajo que Maneje * Funciones de Oficina u otros que domine.\"+\n        \"Datos Familiares: * Nombre de: Padre, Madre, Esposo, Hijos y Hermanos * Edad, Ocupación, Empresa, Dirección y Telefono de cada uno de los familiares antes mencionados Experiencia Laboral: * Empresa Actual o Ultima, Dirección y Teléfono \"+\n        \" * Giro de la Empresa, Nombre y Puesto del Jefe Inmediato * Fecha de Ingreso y Sueldo, Fecha de Salida y Ultimo Sueldo * Ultimo puesto desempeñado y duración, Departamento, Puesto Anterior y duración \"+\n        \" * Motivo de Separación Estado de Salud y Hábitos Personales: * Tipo de casa en donde vive, Cuenta o no con Credito INFONAVIT, Pagos Mensuales * Automóvil propio, Tipo, Modelo y Marca * Si cuenta con seguro de vida \"+\n        \" * Si ha sido afiliado a algun sindicato, si ha sido afianzado * A que dedica su tiempo libre * Embarazo, Religion, Rolar turnos, como es su estado de Salud, Fuma, Bebe, Enfermedades , Toma medicamentos, le han practicado alguna cirugía, etc. \"+\n        \" * En caso de ser contratado en que fecha puede presentarse. Referencias Personales: * Nombre, Dirección, Teléfono, Ocupación y Tiempo de conocer a las personas que lo refieren.\"+\n        \"Usted tiene derecho de acceder, rectificar y cancelar sus datos personales, así como de oponerse al tratamiento de los mismos o revocar el consentimiento que para tal fin nos haya otorgado, en CORPORATIVO AXXUM, S.A. DE C.V. \"+\n        \"En este sentido hacemos de su conocimiento que para el caso en el que CORPORATIVO AXXUM, S.A. 
DE C.V., decida no contratarlo, desechará su información en un periodo de treinta días contados a partir de la fecha de firma del presente documento.\"+\n \"En caso de querer conocer cualquier modificación a este aviso de privacidad se puede poner en contacto con nuestro departamento de datos personales con Victoria Hidalgo García, en \"+\n \"Rio Manzanares 308 Oficina 3, Colonia: Del Valle, San Pedro Garza García, Nuevo León, Código Postal 6622, victoria.hidalgo@springlabs.net\",styleAviT)\n data.append([aviso])\n table = Table(data, colWidths=[18 * cm])\n table.setStyle(TableStyle([\n ('INNERGRID',(0,0),(-1,-1), 0.25,colors.white),\n ('BOX',(0,0),(-1,-1), 0.25,colors.white),\n ]))\n table.wrapOn(c, width, height)\n table.drawOn(c, 30, high)\n\n #text.setFont(\"Helvetica\", 12)\n #text.textLine(\"¡Hola, mundo!\")\n #text.textLine(\"¡Desde ReportLab y Python!\")\n \n high-=300\n #text = c.beginText(50, h - 50)\n #text.setFont(\"Helvetica\", 12)\n text =\"Autorizo\"\n text_width = stringWidth(text,\"Helvetica\",12)\n text_o = c.beginText((width-text_width)/2, high)\n text_o.textLine(text)\n c.drawText(text_o)\n text =\"Nombre y firma\"\n text_width = stringWidth(text,\"Helvetica\",12)\n text_o = c.beginText((width-text_width)/2, high-10)\n text_o.textLine(text)\n c.drawText(text_o)\n c.line(((width-text_width)/2)-10, high+15, ((width-text_width)/2) + text_width+10, high+15)\n\n \n c.save() \n\n response.write(buff.getvalue()) \n buff.close() \n return response\n\n\n###############\n\n\nclass ReportePersonalizadoExcel(TemplateView):\n def get(self, request,*args,**kwargs):\n id=kwargs['id']\n \n\n cand = Candidato.objects.get(id=id)\n wb = Workbook()\n bandera = True\n cont = 1\n #controlador=4\n if bandera:\n ws = wb.active\n ws.title ='Hoja'+str(cont)\n bandera = False\n else:\n ws = wb.create_sheet('Hoja'+str(cont))\n\n #ws.title ='Hoja'+str(cont)\n #ws = wb.create_sheet('Hoja'+str(cont))\n contador=1\n for letra in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n ws[letra+str(contador)].alignment = Alignment(horizontal = \"center\", vertical = \"center\")\n ws[letra+str(contador)].border = Border(left = Side(border_style = \"thin\"), right = Side(border_style = \"thin\"),\n top = Side(border_style = \"thin\"), bottom= Side(border_style = \"thin\"))\n ws[letra+str(contador)].fill = PatternFill(start_color ='66FFCC',end_color='66FFCC', fill_type='solid')\n ws[letra+str(contador)].font = Font(name ='Calibri', size=12, bold=True)\n ws.column_dimensions[letra].width = 20\n \n for letra in \"ABCDEFGHIJKLM\":\n ws[\"A\"+letra+str(contador)].alignment = Alignment(horizontal = \"center\", vertical = \"center\")\n ws[\"A\"+letra+str(contador)].border = Border(left = Side(border_style = \"thin\"), right = Side(border_style = \"thin\"),\n top = Side(border_style = \"thin\"), bottom= Side(border_style = \"thin\"))\n ws[\"A\"+letra+str(contador)].fill = PatternFill(start_color ='66FFCC',end_color='66FFCC', fill_type='solid')\n ws[\"A\"+letra+str(contador)].font = Font(name ='Calibri', size=12, bold=True)\n ws.column_dimensions[\"A\"+letra].width = 20\n ws.row_dimensions[1].height = 30\n\n pais= str(cand.pais_nacimiento)\n\n \n \n \n ws['A1'] = \"NOMBRE\"\n \n ws['A2'] = cand.nombre+\" \"+cand.apellido_paterno+\" \"+cand.apellido_materno\n ws['B1'] = \"ZONA\"\n ws['C1'] = \"Puesto\"\n ws['C2'] = cand.puesto_solicitado\n ws['D1'] = \"Home Office\"\n ws['E1'] = \"Jornada\"\n ws['F1'] = \"Jornada Real\"\n ws['G1'] = \"Jornada en contrato\"\n ws['H1'] = \"Nacionalidad\"\n ws['H2'] = pais\n ws['I1'] = \"Edo civil\"\n 
ws['I2'] = cand.estado_civil\n        ws['J1'] = \"Fecha de nacimiento\"\n        ws['J2'] = cand.fecha_nac\n        ws['K1'] = \"CURP\"\n        ws['K2'] = cand.curp\n        ws['L1'] = \"RFC\"\n        ws['L2'] = cand.rfc\n        ws['M1'] = \"Identificación expedida por\"\n        ws['N1'] = \"Con número\"\n        ws['O1'] = \"EMPRESA\"\n        ws['P1'] = \"NSS\"\n        ws['P2'] = cand.imss\n        ws['Q1'] = \"CALLE\"\n        ws['Q2'] = cand.calle\n        ws['R1'] = \"Número exterior\"\n        ws['R2'] = cand.num_ext\n        ws['S1'] = \"Número interior\"\n        ws['S2'] = cand.num_int\n        ws['T1'] = \"COLONIA\"\n        ws['T2'] = cand.colonia\n        ws['U1'] = \"MUNICIPIO\"\n        ws['V1'] = \"ESTADO\"\n        ws['V2'] = cand.esdo\n        ws['W1'] = \"CÓDIGO POSTAL\"\n        ws['W2'] = cand.cp\n        ws['X1'] = \"NÚMERO DE TARJETA\"\n        ws['Y1'] = \"NO DE CUENTA\"\n        ws['Z1'] = \"CLABE INTERBANCARIA\"\n        ws['AA1'] = \"NOMBRE DE BANCO\"\n        ws['AB1'] = \"Tel. casa\"\n        ws['AB2'] = cand.tel\n        ws['AC1'] = \"Tel. personal\"\n        ws['AD1'] = \"SD IMSS\"\n        ws['AE1'] = \"Ayuda diaria\"\n        ws['AF1'] = \"Sueldo neto\"\n        ws['AG1'] = \"Cantidad letra\"\n        ws['AH1'] = \"Bono\"\n        ws['AI1'] = \"% comisión\"\n        ws['AJ1'] = \"Fecha de ingreso\"\n        ws['AK1'] = \"Fecha de alta IMSS\"\n        ws['AL1'] = \"Correo empresa\"\n        ws['AM1'] = \"Correo personal\"\n        ws['AM2'] = cand.email_personal\n        \n\n        # set the output file name\n        nombre_archivo = \"ReportePersonalizadoExcel.xlsx\"\n        # define the response type\n        response = HttpResponse(content_type=\"application/ms-excel\")\n        contenido = \"attachment; filename = {0}\".format(nombre_archivo)\n        response[\"Content-Disposition\"] = contenido\n        wb.save(response)\n        return response\n\nclass ReporteLstPersonalizadoExcel(TemplateView):\n#def ReporteLstPersonalizadoExcel(request):\n    def get(self, request,*args,**kwargs):\n        candidatos = Candidato.objects.all()\n        wb = Workbook()\n        bandera = True\n        cont = 2\n        \n        ########\n        if bandera:\n            ws = wb.active\n            ws.title ='Hoja'+str(cont-1)\n            bandera = False\n        else:\n            ws = wb.create_sheet('Hoja'+str(cont-1))\n        \n        \n        ########\n        for cand in candidatos:\n            #controlador=4\n            #ws.title ='Hoja'+str(cont)\n            #ws = wb.create_sheet('Hoja'+str(cont))\n            contador=1\n            for letra in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n                ws[letra+str(contador)].alignment = Alignment(horizontal = \"center\", vertical = \"center\")\n                ws[letra+str(contador)].border = Border(left = Side(border_style = \"thin\"), right = Side(border_style = \"thin\"),\n                top = Side(border_style = \"thin\"), bottom= Side(border_style = \"thin\"))\n                ws[letra+str(contador)].fill = PatternFill(start_color ='66FFCC',end_color='66FFCC', fill_type='solid')\n                ws[letra+str(contador)].font = Font(name ='Calibri', size=12, bold=True)\n                ws.column_dimensions[letra].width = 20\n            \n            for letra in \"ABCDEFGHIJKLM\":\n                ws[\"A\"+letra+str(contador)].alignment = Alignment(horizontal = \"center\", vertical = \"center\")\n                ws[\"A\"+letra+str(contador)].border = Border(left = Side(border_style = \"thin\"), right = Side(border_style = \"thin\"),\n                top = Side(border_style = \"thin\"), bottom= Side(border_style = \"thin\"))\n                ws[\"A\"+letra+str(contador)].fill = PatternFill(start_color ='66FFCC',end_color='66FFCC', fill_type='solid')\n                ws[\"A\"+letra+str(contador)].font = Font(name ='Calibri', size=12, bold=True)\n                ws.column_dimensions[\"A\"+letra].width = 20\n            ws.row_dimensions[1].height = 30\n\n            pais= str(cand.pais_nacimiento)\n            if (cont==2):\n                ws['A1'] = \"NOMBRE\"\n                ws['B1'] = \"ZONA\"\n                ws['C1'] = \"Puesto\"\n                ws['D1'] = \"Home Office\"\n                ws['E1'] = \"Jornada\"\n                ws['F1'] = \"Jornada Real\"\n                ws['G1'] = \"Jornada en contrato\"\n                ws['H1'] = \"Nacionalidad\"\n                ws['I1'] = \"Edo 
civil\"\n ws['J1'] = \"Fecha de nacimiento\"\n ws['K1'] = \"CURP\"\n ws['L1'] = \"RFC\"\n ws['M1'] = \"Identificación expedida por\"\n ws['N1'] = \"Con número\"\n ws['O1'] = \"EMPRESA\"\n ws['P1'] = \"NSS\"\n ws['Q1'] = \"CALLE\"\n ws['R1'] = \"Número exterior\"\n ws['S1'] = \"Número interior\"\n ws['T1'] = \"COLONIA\"\n ws['U1'] = \"MUNICIPIO\"\n ws['V1'] = \"ESTADO\"\n ws['W1'] = \"CÓDIGO POSTAL\"\n ws['X1'] = \"NÚMERO DE TARJETA\"\n ws['Y1'] = \"NO DE CUENTA\"\n ws['Z1'] = \"CLABE INTERBANCARIA\"\n ws['AA1'] = \"NOMBRE DE BANCO\"\n ws['AB1'] = \"Tel. casa\"\n ws['AC1'] = \"Tel. personal\"\n ws['AD1'] = \"SD IMSS\"\n ws['AE1'] = \"Ayuda diaria\"\n ws['AF1'] = \"Sueldo neto\"\n ws['AG1'] = \"Cantidad letra\"\n ws['AH1'] = \"Bono\"\n ws['AI1'] = \"% comisión\"\n ws['AJ1'] = \"Fecha de ingreso\"\n ws['AK1'] = \"Fecha de alta IMSS\"\n ws['AL1'] = \"Correo empresa\"\n ws['AM1'] = \"Correo personal\"\n \n ws['A'+str(cont)] = cand.nombre+\" \"+cand.apellido_paterno+\" \"+cand.apellido_materno\n ws['C'+str(cont)] = cand.puesto_solicitado\n ws['H'+str(cont)] = pais\n ws['I'+str(cont)] = cand.estado_civil\n ws['J'+str(cont)] = cand.fecha_nac\n ws['K'+str(cont)] = cand.curp\n ws['L'+str(cont)] = cand.rfc\n ws['P'+str(cont)] = cand.imss\n ws['Q'+str(cont)] = cand.calle\n ws['R'+str(cont)] = cand.num_ext\n ws['S'+str(cont)] = cand.num_int\n ws['T'+str(cont)] = cand.colonia\n ws['V'+str(cont)] = cand.esdo\n ws['W'+str(cont)] = cand.cp\n ws['AB'+str(cont)] = cand.tel\n ws['AM'+str(cont)] = cand.email_personal\n cont += 1\n \n\n #establecer nombre de archivo\n print(\"Salio\")\n nombre_archivo = \"ListaCandidatosExcel.xlsx\"\n #definir tipo de respuesta\n response = HttpResponse(content_type=\"application/ms-excel\")\n contenido = \"attachment; filename = {0}\".format(nombre_archivo)\n response[\"Content-Disposition\"] = contenido\n wb.save(response)\n return response\n\n\n# Create your views here. 
\n\ndef login_view(request):\n    state = \"\"\n    username = \"\"\n    password = \"\"\n    next = \"\"\n\n    if request.GET:\n        # .get avoids a KeyError when 'next' is absent from the query string\n        next = request.GET.get('next', '')\n\n    if request.POST:\n        username = request.POST['username']\n        password = request.POST['password']\n\n        if '@' in username:\n            try:\n                check = User.objects.get(email=username)\n                username = check.username\n            except:\n                pass\n\n        user = authenticate(username=username, password=password)\n\n        \n\n        if user is not None:\n            if user.is_active:\n                if user.groups.filter(name__in=['Administrador']).exists():\n                    login(request, user)\n                    return HttpResponseRedirect('/sadmin/')\n                else:\n                    if user.groups.filter(name__in=['RRHH']).exists():\n                        login(request, user)\n                        return HttpResponseRedirect('/rrhh/')\n                    else:\n                        if user.groups.filter(name__in=['Empleados']).exists():\n                            login(request, user)\n                            return HttpResponseRedirect('/cola-home/')\n                        else:\n                            return HttpResponseRedirect(next)\n\n            else:# not active\n                state = 0\n            #\n            \n\n\n\n            \"\"\"if not user.is_superuser:\n                if not user.groups.filter(name__in=['Becario']).exists():\n                    if user.is_active:\n                        login(request, user)\n                        if next == \"\":\n                            return HttpResponseRedirect('/portafolio/')\n                        else:\n                            return HttpResponseRedirect(next)\n                    else:\n                        state = 0\n                else:\n                    if user.is_active:\n                        login(request, user)\n                        if next == \"\":\n                            return HttpResponseRedirect('/homebec/')\n\n\n\n            else:\n                if user.is_active:\n                    login(request, user)\n                    return HttpResponseRedirect('/sadmin/')\n                else:\n                    state = 0\"\"\"\n        else:\n            state = 0\n\n    return render(request, 'dashboard/login.html', {'state':state, 'username': username, 'next': next,},)\n\n\n####################################\n\n@login_required(login_url = '/login/')\ndef crear(request, template_name = \"dashboard/crear.html\"):\n    if request.method == 'POST':\n\n        form = CreaEmpleado(request.POST)\n        if form.is_valid():\n            usuario = request.POST['username']\n            first_name = request.POST['first_name']\n            last_name = request.POST['last_name']\n            correo = request.POST['email']\n            contrasena = form.cleaned_data['password']\n            try:\n                # .filter().first() returns None for a new email instead of raising DoesNotExist\n                usuario_inactivo = User.objects.filter(email = correo).first()\n                if usuario_inactivo:\n                    mensaje = 2\n                else:\n                    # Create the new USER (create_user already persists it)\n                    user = User.objects.create_user(password=contrasena,is_superuser=False,username=usuario,first_name=first_name,last_name=last_name,email=correo,is_staff=False,is_active=False)\n                    mensaje = 1\n                    return HttpResponseRedirect('/sadmin/')\n            except:\n                mensaje = 2\n        else:\n            print(form.errors)\n    else:\n        form = CreaEmpleado()\n    return render(request, template_name, locals(),)\n\n\n@login_required(login_url = '/login/')\ndef crear2(request, template_name = \"dashboard/empAct.html\"):\n    if request.method == 'POST':\n\n        form = CreaEmpleado2(request.POST)\n        is_admin = request.POST['is_admin']\n        username = request.POST['username']\n        correo = request.POST['email']\n        if form.is_valid():\n            try:\n                form.save() \n                \n                if is_admin==\"0\":\n                    group = Group.objects.get(name='Administrador')\n                elif is_admin==\"1\":\n                    group = Group.objects.get(name='Consultor')\n                elif is_admin==\"2\":\n                    group = Group.objects.get(name='Editor')\n                elif is_admin==\"3\":\n                    group = Group.objects.get(name='Empleados')\n                else:\n                    group = Group.objects.get(name='Becario')\n                us=User.objects.get(username = username, email= correo)\n                group.user_set.add(us)\n                form = CreaEmpleado2()\n                form.fields[\"username\"].initial=\"\"\n                form.fields[\"email\"].initial=\"\"\n                form.fields[\"first_name\"].initial=\"\"\n                form.fields[\"last_name\"].initial=\"\"\n                form.fields[\"password1\"].initial=\"\"\n                
form.fields[\"password2\"].initial=\"\"\n mensaje = 1\n except:\n print(\"Error al guardar\")\n print(form.errors)\n mensaje = 2\n else:\n print(\"Error de datos\")\n #print(form)\n print(form.errors) \n else:\n form = CreaEmpleado2()\n return render(request, template_name, locals(),) \n\n@login_required(login_url = '/login/')\ndef editarEmp(request, id, template_name = \"dashboard/editarEmp.html\"):\n if request.method == 'POST':\n\n form = CreaEmpleado2(request.POST)\n is_admin = request.POST['is_admin']\n username = request.POST['username']\n correo = request.POST['email']\n print(is_admin)\n if form.is_valid():\n try:\n form.save() \n if is_admin==0:\n group = Group.objects.get(name='Administrador')\n elif is_admin==1:\n group = Group.objects.get(name='Consultor')\n elif is_admin==2:\n group = Group.objects.get(name='Editor')\n elif is_admin==3:\n group = Group.objects.get(name='Empleados')\n else:\n group = Group.objects.get(name='Becario')\n us=User.objects.get(username = username, email= correo)\n group.user_set.add(us)\n mensaje = 1\n except:\n print(form.errors)\n mensaje = 2\n else:\n #print(form)\n print(form.errors)\n else:\n form = CreaEmpleado2()\n return render(request, template_name, locals(),)\n\n\n@login_required(login_url = '/login/')\ndef sadmin(request, template_name = \"dashboard/sadmin.html\"):\n # Verificamos si el usuario tiene foto, Si existe algún registro en\n # Tabla Empleados que referencie al usuario en turno y si este posee\n # foto cargada al sistema\n usuario = request.user\n try:\n empleado = Empleado.objects.get(user = usuario.pk)\n foto = 1\n etapa = empleado.status\n if Estudio.objects.filter(user = usuario.pk).exists():\n profesion = Estudio.objects.get(user=usuario.pk)\n #empid=empleado\n except:\n foto = 0\n etapa = 1\n pass\n\n return render(request, template_name, locals(),)\n\n\n\n@login_required(login_url = '/login/')\ndef rrhh(request, template_name = \"dashboard/rrhh.html\"):\n # Verificamos si el usuario tiene foto, Si existe algún registro en\n # Tabla Empleados que referencie al usuario en turno y si este posee\n # foto cargada al sistema\n usuario = request.user\n try:\n empleado = Empleado.objects.get(user = usuario.pk)\n foto = 1\n etapa = empleado.status\n if Estudio.objects.filter(user = usuario.pk).exists():\n profesion = Estudio.objects.get(user=usuario.pk)\n #empid=empleado\n except:\n foto = 0\n etapa = 1\n pass\n\n return render(request, template_name, locals(),)\n\ndef update_pro_docs(request):\n id=request.POST.get('est_Id')\n \n try:\n est=Estudios_pro.objects.get(pk=id)\n est.comprobante=est.comprobante=request.FILES['comprobante']\n est.save()\n response=\"OK\"\n except Exception as e:\n response=\"ERROR\"\n print(e)\n\n return HttpResponse(response)\n\ndef update_otro_docs(request):\n id=request.POST.get('est_Id')\n try:\n est=Estudios_otros.objects.get(pk=id)\n est.comprobante=request.FILES['comprobante']\n est.save()\n response=\"OK\"\n except Exception as e:\n response=\"ERROR\"\n print(e)\n\n return HttpResponse(response)\n\ndef update_docs(request):\n id=request.POST.get(\"cand_Id\")\n curriculum=False\n inefrente=False\n ineatras=False\n acta=False\n curp=False\n rfc=False\n comprobante_domicilio=False\n imss=False\n carta1=False\n carta2=False\n contrato=False\n infonavit=False\n permiso=False\n oficio=False\n carta_menor=False\n INE_frenteTut=False\n INE_atrasTut=False\n \n #investigar si trae files\n if (\"curriculum\" in request.FILES):\n curriculum = request.FILES['curriculum']\n if (\"INE_frente\" in 
request.FILES):\n inefrente = request.FILES['INE_frente']\n if (\"INE_atras\" in request.FILES):\n ineatras = request.FILES['INE_atras']\n if (\"acta\" in request.FILES):\n acta = request.FILES['acta']\n if (\"curp\" in request.FILES):\n curp = request.FILES['curp']\n if (\"rfc\" in request.FILES):\n rfc = request.FILES['rfc']\n if (\"comprobante_domicilio\" in request.FILES):\n comprobante_domicilio = request.FILES['comprobante_domicilio']\n if (\"imss\" in request.FILES):\n imss = request.FILES['imss']\n if (\"carta1\" in request.FILES):\n carta1 = request.FILES['carta1']\n if (\"carta2\" in request.FILES):\n carta2 = request.FILES['carta2']\n if (\"contrato\" in request.FILES):\n contrato = request.FILES['contrato']\n if (\"infonavit\" in request.FILES):\n infonavit = request.FILES['infonavit']\n #extranjero\n if (\"permiso\" in request.FILES):\n permiso = request.FILES['permiso']\n #becario\n if (\"oficio\" in request.FILES):\n oficio = request.FILES['oficio']\n #menor\n if (\"carta_menor\" in request.FILES):\n carta_menor = request.FILES['carta_menor']\n if (\"INE_frenteTut\" in request.FILES):\n INE_frenteTut = request.FILES['INE_frenteTut']\n if (\"INE_atrasTut\" in request.FILES):\n INE_atrasTut = request.FILES['INE_atrasTut']\n\n \n cand = Candidato.objects.get(emp_id=id)\n if curriculum:\n cand.curriculum=curriculum\n if inefrente:\n cand.docu_ident_front=inefrente\n if ineatras:\n cand.docu_ident_back=ineatras\n if acta:\n cand.acta_nacimiento=acta\n if curp:\n cand.imagen_curp=curp\n if rfc:\n cand.imagen_rfc=rfc\n if comprobante_domicilio:\n cand.comprobante_domicilio=comprobante_domicilio\n if imss:\n cand.imagen_imss=imss\n if carta1:\n cand.carta1_recomendacion=carta1\n if carta2:\n cand.carta2_recomendacion=carta2\n if contrato:\n cand.contrato=contrato\n if infonavit:\n cand.imagen_infonavit=infonavit\n if permiso:\n cand.permiso=permiso\n if oficio:\n cand.oficio=oficio\n if carta_menor:\n cand.carta_menor=carta_menor\n if INE_frenteTut:\n cand.INE_frenteTut=INE_frenteTut\n if INE_atrasTut:\n cand.INE_atrasTut=INE_atrasTut\n \n print(inefrente)\n try:\n cand.save()\n response=\"OK\"\n except Exception as e:\n response=\"ERROR\"\n print(e)\n return HttpResponse(response)\n \n\n\n@login_required(login_url = '/login/')\ndef candDocs(request,id, template_name = \"dashboard/candDocs.html\"):\n candId=id\n curriculum=False\n inefrente=False\n ineatras=False\n acta=False\n curp=False\n rfc=False\n comprobante=False\n imss=False\n carta1=False\n carta2=False\n contrato=False\n infonavit=False\n permiso=False\n oficio=False\n carta_menor=False\n INE_frenteTut=False\n INE_atrasTut=False\n #lifrente=False\n #licatras=False\n if (request.POST):\n #si hay post\n licatras=False\n\n \n else:\n user = request.user\n empleado = Empleado.objects.filter(user = user.pk)\n if (empleado):\n foto = 1\n else:\n foto = 0\n \n candidato = Candidato.objects.get(emp_id=id)\n #cand= Candidato.objects.get(emp_id=id)\n \n if (candidato.curriculum not in [None,'']):\n curriculum=candidato.curriculum\n if (candidato.docu_ident_front not in [None,'']):\n inefrente=candidato.docu_ident_front\n if (candidato.docu_ident_back not in [None,'']):\n ineatras=candidato.docu_ident_back\n if (candidato.acta_nacimiento not in [None,'']):\n acta=candidato.acta_nacimiento\n if (candidato.imagen_curp not in [None,'']):\n curp=candidato.imagen_curp\n if (candidato.imagen_rfc not in [None,'']):\n rfc=candidato.imagen_rfc\n if (candidato.comprobante_domicilio not in [None,'']):\n 
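# (added note) each flag is filled only when the candidate already has that document on file,\n            # so the template can show a preview instead of an empty upload control\n            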
comprobante=candidato.comprobante_domicilio\n        if (candidato.imagen_imss not in [None,'']):\n            imss=candidato.imagen_imss\n        if (candidato.carta1_recomendacion not in [None,'']):\n            carta1=candidato.carta1_recomendacion\n        if (candidato.carta2_recomendacion not in [None,'']):\n            carta2=candidato.carta2_recomendacion\n        if (candidato.contrato not in [None,'']):\n            contrato=candidato.contrato\n        if (candidato.imagen_infonavit not in [None,'']):\n            infonavit=candidato.imagen_infonavit\n        if (candidato.permiso not in [None,'']):\n            permiso=candidato.permiso\n        if (candidato.oficio not in [None,'']):\n            oficio=candidato.oficio\n        if (candidato.carta_menor not in [None,'']):\n            carta_menor=candidato.carta_menor\n        if (candidato.INE_frenteTut not in [None,'']):\n            INE_frenteTut=candidato.INE_frenteTut\n        if (candidato.INE_atrasTut not in [None,'']):\n            INE_atrasTut=candidato.INE_atrasTut\n        \n        if int(candidato.edad)<18:\n            menor=True\n        else:\n            menor=False\n        \n        if str(candidato.pais_nacimiento)==\"MÉXICO\":\n            extranjero=False\n        else:\n            extranjero=True\n        \n        if User.objects.filter(pk=id, groups__name='Becario').exists():\n            becario=True\n        else:\n            becario=False\n        \n\n        form_est=Formulario_est_comp()\n        form_est.fields['comprobante'].widget.attrs['accept'] ='image/*'\n        form_est.fields['comprobante'].widget.attrs['class'] ='comprobante'\n        \n        form=Formulario_cand_docs()\n        form.fields['curriculum'].widget.attrs['accept'] ='application/pdf'\n        form.fields['INE_frente'].widget.attrs['accept'] ='image/*'\n        form.fields['INE_atras'].widget.attrs['accept'] ='image/*'\n        form.fields['acta'].widget.attrs['accept'] ='image/*'\n        form.fields['curp'].widget.attrs['accept'] ='image/*'\n        form.fields['rfc'].widget.attrs['accept'] ='image/*'\n        form.fields['comprobante_domicilio'].widget.attrs['accept'] ='image/*'\n        form.fields['imss'].widget.attrs['accept'] ='image/*'\n        form.fields['carta1'].widget.attrs['accept'] ='image/*'\n        form.fields['carta2'].widget.attrs['accept'] ='image/*'\n        form.fields['contrato'].widget.attrs['accept'] ='image/*'\n        form.fields['infonavit'].widget.attrs['accept'] ='image/*'\n        # foreigner\n        form.fields['permiso'].widget.attrs['accept'] ='image/*'\n        # intern (becario)\n        form.fields['oficio'].widget.attrs['accept'] ='image/*'\n        # minor (under 18)\n        form.fields['carta_menor'].widget.attrs['accept'] ='image/*'\n        form.fields['INE_frenteTut'].widget.attrs['accept'] ='image/*'\n        form.fields['INE_atrasTut'].widget.attrs['accept'] ='image/*'\n        # professional studies\n        Est_pro=Estudios_pro.objects.filter(candidato=candidato)\n        # other studies\n        Est_otros=Estudios_otros.objects.filter(candidato=candidato)\n\n    \n    return render(request, template_name, locals(),)\n\n@login_required(login_url = '/login/')\ndef candLst(request, template_name = \"dashboard/candLst.html\"):\n    \n    user = request.user\n    empleado = Empleado.objects.filter(user = user.pk)\n    if (empleado):\n        foto = 1\n    else:\n        foto = 0\n    \n    candidatos = Candidato.objects.filter(emp_id=None)\n    \n    ### form for Docs\n    \n    #form=Formulario_cand_docs()\n    \n    #idiomas = Idioma.objects.filter(id=candidato)\n    #estudios = Estudios_pro.objects.filter(id=candidato)\n    #hermanos = Hermano_candidato.objects.filter(id=candidato)\n    #hijos = Hijo_candidato.objects.filter(id=candidato)\n    #hijos = Hijo_candidato.objects.filter(id=candidato)\n    #experiencias = Experiencia.objects.filter(id=candidato)\n    #referencias = Referencia.objects.filter(id=candidato)\n    #empleados = User.objects.filter(is_superuser=0,is_active=1)\n    return render(request, template_name, locals(),)\n\n\n\n\n@login_required(login_url = 
'/login/')\ndef candrrhh(request,id, template_name = \"candidatos/captura_cv.html\"):\n    rrhh=\"RRHH\"\n    status_con=\"True\"\n    con_id=id\n    # populate the candidate form\n    candidato= Candidato.objects.get(id=id)\n    #form_candidato=Formulario_candidato()\n    form_candidato=set_values_candidato(candidato)\n    form_candidato.fields['con_id'].initial = con_id\n\n    form_idioma=Formulario_idioma()\n    form_editidioma=Formulario_idioma()\n    form_hermano=Formulario_hermano_candidato()\n    form_edithermano=Formulario_hermano_candidato()\n    form_hijo=Formulario_hijo_candidato()\n    form_edithijo=Formulario_hijo_candidato()\n    form_referencia=Formulario_referencia()\n    form_editreferencia=Formulario_referencia()\n    form_experiencia=Formulario_experiencia()\n    form_editexperiencia=Formulario_experiencia()\n    form_estudio=Formulario_estudios()\n    form_editestudio=Formulario_estudios()\n    form_estudiootro=Formulario_estudiosotros()\n    form_editestudiootro=Formulario_estudiosotros()\n    \n    \n    experiencias = Experiencia.objects.filter(candidato=candidato)\n    referencias = Referencia.objects.filter(candidato=candidato)\n    hijos = Hijo_candidato.objects.filter(candidato=candidato)\n    hermanos = Hermano_candidato.objects.filter(candidato=candidato)\n    idiomas = Idioma_candidato.objects.filter(candidato=candidato)\n    estudios = Estudios_pro.objects.filter(candidato=candidato)\n    \n    form_idioma.fields['idioma'].widget.attrs['style'] =\"text-transform: uppercase;\"\n    form_editidioma.fields['idioma'].widget.attrs['style'] =\"text-transform: uppercase;\"\n\n    \n\n    form_candidato.fields['edad'].widget.attrs['readonly'] ='readonly'\n    form_candidato.fields['piso'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['depto'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['estudia_que'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['estudia_donde'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['estudia_horario'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['estudia_termino'].widget.attrs['disabled'] ='disabled'\n\n    \n    \n    form_candidato.fields['primaria_annios'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['primaria_inicio'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['primaria_termino'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['primaria_documento'].widget.attrs['disabled'] ='disabled'\n    \n    form_candidato.fields['secundaria_annios'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['secundaria_inicio'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['secundaria_termino'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['secundaria_documento'].widget.attrs['disabled'] ='disabled'\n    \n    form_candidato.fields['preparatoria_annios'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['preparatoria_inicio'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['preparatoria_termino'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['preparatoria_documento'].widget.attrs['disabled'] ='disabled'\n    \n    form_candidato.fields['tecnica_annios'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['tecnica_inicio'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['tecnica_termino'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['tecnica_documento'].widget.attrs['disabled'] ='disabled'\n\n    \n\n    form_candidato.fields['pago_infonavit'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['auto_marca'].widget.attrs['disabled'] ='disabled'\n    
form_candidato.fields['auto_modelo'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['seguro_monto'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['afianzado_monto'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['sindicato_nombre'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['sindicato_cargo'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['ingreso_monto'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['ingreso_fuente'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['conocido_nombre'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['conocido_depto'].widget.attrs['disabled'] ='disabled'\n\n    form_experiencia.fields['experiencia_supervision_num'].widget.attrs['disabled'] ='disabled'\n    \n\n    return render(request, template_name, locals(),)\n\ndef buscar_cand(request):\n    data = dict()\n    id=request.GET.get('id')\n    data['form_is_valid'] = False\n    try:\n        cand = Candidato.objects.get(emp_id = id)\n        data['form_is_valid'] = True\n    except Exception as e:\n        pass\n    \n    \n\n    return JsonResponse(data)\n\n@login_required(login_url = '/login/')\ndef empAct(request, template_name = \"dashboard/empAct.html\"):\n    user = request.user\n    \n    empleado = Empleado.objects.filter(user = user.pk)\n    if (empleado):\n        foto = 1\n    else:\n        foto = 0\n    if request.method == 'POST':\n        try:\n            tipoemp = TipoEmpleado(request.POST)\n            if tipoemp.is_valid():\n                tipo = request.POST[\"tipo_empleado\"]\n\n                if tipo==\"1\":\n                    grupo = Group.objects.get(name=\"Empleados\")\n                elif tipo==\"2\":\n                    grupo = Group.objects.get(name=\"Editor\")\n                elif tipo==\"3\":\n                    grupo = Group.objects.get(name=\"Consultor\")\n                elif tipo==\"4\":\n                    grupo = Group.objects.get(name=\"Becario\")\n                else:\n                    grupo = Group.objects.get(name=\"Administrador\")\n\n                empleados =grupo.user_set.all()\n                form = TipoEmpleado(request.POST)# TipoEmpleado()\n                form_emp = EditEmpleado()\n                nform = CreaEmpleado2()\n\n        except Exception as e:\n            print(\"group lookup failed\")\n            print(e) \n            pass\n        \n    else:\n        \n        grupo = Group.objects.get(name=\"Empleados\")\n        empleados = grupo.user_set.all()\n        form = TipoEmpleado()\n        form_emp = EditEmpleado()\n        nform = CreaEmpleado2()\n\n    #empleados = User.objects.filter(is_superuser=0,is_active=1)\n    \n\n    return render(request, template_name, locals(),)\n\n@login_required(login_url = '/login/')\ndef empLst(request, template_name = \"dashboard/empLst.html\"):\n    user = request.user\n    \n    empleado = Empleado.objects.filter(user = user.pk)\n    if (empleado):\n        foto = 1\n    else:\n        foto = 0\n    if request.method == 'POST':\n        try:\n            tipoemp = TipoEmpleado(request.POST)\n            if tipoemp.is_valid():\n                tipo = request.POST[\"tipo_empleado\"]\n\n                if tipo==\"1\":\n                    grupo = Group.objects.get(name=\"Empleados\")\n                elif tipo==\"2\":\n                    grupo = Group.objects.get(name=\"Editor\")\n                elif tipo==\"3\":\n                    grupo = Group.objects.get(name=\"Consultor\")\n                elif tipo==\"4\":\n                    grupo = Group.objects.get(name=\"Becario\")\n                elif tipo==\"5\":\n                    grupo = Group.objects.get(name=\"Administrador\")\n                else:\n                    grupo = Group.objects.get(name=\"RRHH\")\n\n                # True, not the undefined name 'true'\n                empleados =grupo.user_set.filter(is_active=True)\n                form = TipoEmpleado(request.POST)# TipoEmpleado()\n                form_emp = EditEmpleado()\n                nform = CreaEmpleado2()\n                ### form for Docs\n                form_doc=Formulario_cand_docs()\n\n        except Exception as e:\n            print(e) \n            pass\n        \n    else:\n        \n        grupo = Group.objects.get(name=\"Empleados\")\n        empleados = grupo.user_set.filter(is_active=True)\n        form = TipoEmpleado()\n        form_emp = EditEmpleado()\n        nform = CreaEmpleado2()\n        ### form for Docs\n        form_doc=Formulario_cand_docs()\n        ### form for a possible termination (baja)\n        form_baja=Formulario_baja()\n\n\n    #empleados = User.objects.filter(is_superuser=0,is_active=1)\n    \n\n    return render(request, template_name, locals(),)\n\n\n@login_required(login_url = '/login/')\ndef empIna(request, template_name = \"dashboard/empIna.html\"):\n    grupo = Group.objects.get(name=\"Empleados\")\n    empleados =grupo.user_set.all().filter(is_superuser=0,is_active=0)\n    #empleados = User.objects.filter(is_superuser=3 , is_active=0)\n    \n\n    return render(request, template_name, locals(),)\n\n@login_required(login_url = '/login/')\ndef admAct(request, template_name = \"dashboard/admAct.html\"):\n    grupo = Group.objects.get(name=\"Administrador\")\n    empleados =grupo.user_set.all().filter(is_superuser=1,is_active=1)\n    #empleados = User.objects.filter(is_superuser=1)\n    \n\n    return render(request, template_name, locals(),)\n\n@login_required(login_url = '/login/')\ndef admBec(request, template_name = \"dashboard/admBec.html\"):\n    grupo = Group.objects.get(name=\"Becario\")\n    empleados =grupo.user_set.all().filter(is_superuser=0)\n    #empleados = User.objects.filter(is_superuser=4)\n    \n\n    return render(request, template_name, locals(),)\n\n######################################## \n ############## Employee stage viewer\n@login_required(login_url = '/login/')\ndef dashAdm(request,id, template_name = \"dashboard/dashboardAdm.html\"):\n    #usuario = request.user\n    try:\n        #print(id)\n        empleado = Empleado.objects.get(user_id = id)\n        usuario = User.objects.get(id = empleado.pk)\n        etapa = empleado.status\n        print(etapa)\n        print(empleado.user_id)\n        #empleado.status = 2 \n        #empleado.save()\n    except:\n        pass\n    return render(request, template_name, locals(),)\n\n\n\n\n ############### \n\n\n \n@login_required(login_url = '/login/')\ndef login_out(request):\n    logout(request)\n    return HttpResponseRedirect('/login/')\n\n@login_required(login_url = '/login/')\ndef porta_emp(request, template_name = \"candidatos/captura_cv.html\"):\n    # Check whether the user has a photo: whether any Empleado record\n    # references the current user and whether that record has a photo\n    # uploaded to the system\n    usuario = request.user\n    try:\n        empleado = Empleado.objects.get(user = usuario.pk)\n        foto = 1\n        etapa = empleado.status\n        if Estudio.objects.filter(user = usuario.pk).exists():\n            profesion = Estudio.objects.get(user=usuario.pk)\n    except:\n        foto = 0\n        etapa = 1\n        pass\n    # check whether the user already has a candidate record\n    # populate the candidate form\n    rrhh=\"PORTA_EMP\"\n    emp_id=usuario.id\n    form_candidato=Formulario_candidato()\n    try:\n        candidato= Candidato.objects.get(emp_id=usuario.id)\n        status_con=\"True\"\n        con_id=candidato.id\n        form_candidato.fields['con_id'].initial = con_id\n        #form_candidato=Formulario_candidato()\n        form_candidato=set_values_candidato(candidato)\n        experiencias = Experiencia.objects.filter(candidato=candidato)\n        referencias = Referencia.objects.filter(candidato=candidato)\n        hijos = Hijo_candidato.objects.filter(candidato=candidato)\n        hermanos = Hermano_candidato.objects.filter(candidato=candidato)\n        idiomas = Idioma_candidato.objects.filter(candidato=candidato)\n        estudios = Estudios_pro.objects.filter(candidato=candidato)\n        estudiosotros=Estudios_otros.objects.filter(candidato=candidato)\n    except Exception as e:\n        pass\n    #### FORMS\n    form_idioma=Formulario_idioma()\n    form_editidioma=Formulario_idioma()\n    form_hermano=Formulario_hermano_candidato()\n    form_edithermano=Formulario_hermano_candidato()\n    
form_hijo=Formulario_hijo_candidato()\n    form_edithijo=Formulario_hijo_candidato()\n    form_referencia=Formulario_referencia()\n    form_editreferencia=Formulario_referencia()\n    form_experiencia=Formulario_experiencia()\n    form_editexperiencia=Formulario_experiencia()\n    form_estudio=Formulario_estudios()\n    form_editestudio=Formulario_estudios()\n    form_estudiootro=Formulario_estudiosotros()\n    form_editestudiootro=Formulario_estudiosotros()\n\n    form_idioma.fields['idioma'].widget.attrs['style'] =\"text-transform: uppercase;\"\n    form_editidioma.fields['idioma'].widget.attrs['style'] =\"text-transform: uppercase;\"\n\n    \n\n    form_candidato.fields['edad'].widget.attrs['readonly'] ='readonly'\n    form_candidato.fields['piso'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['depto'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['estudia_que'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['estudia_donde'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['estudia_horario'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['estudia_termino'].widget.attrs['disabled'] ='disabled'\n\n    \n    \n    form_candidato.fields['primaria_annios'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['primaria_inicio'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['primaria_termino'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['primaria_documento'].widget.attrs['disabled'] ='disabled'\n    \n    form_candidato.fields['secundaria_annios'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['secundaria_inicio'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['secundaria_termino'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['secundaria_documento'].widget.attrs['disabled'] ='disabled'\n    \n    form_candidato.fields['preparatoria_annios'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['preparatoria_inicio'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['preparatoria_termino'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['preparatoria_documento'].widget.attrs['disabled'] ='disabled'\n    \n    form_candidato.fields['tecnica_annios'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['tecnica_inicio'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['tecnica_termino'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['tecnica_documento'].widget.attrs['disabled'] ='disabled'\n\n    \n\n    form_candidato.fields['pago_infonavit'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['auto_marca'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['auto_modelo'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['seguro_monto'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['afianzado_monto'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['sindicato_nombre'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['sindicato_cargo'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['ingreso_monto'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['ingreso_fuente'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['conocido_nombre'].widget.attrs['disabled'] ='disabled'\n    form_candidato.fields['conocido_depto'].widget.attrs['disabled'] ='disabled'\n\n    form_experiencia.fields['experiencia_supervision_num'].widget.attrs['disabled'] ='disabled'\n    \n\n    \n    \n    return render(request, template_name, locals(),)\n\n@login_required(login_url = '/login/')\ndef 
portafolio(request, template_name = \"candidatos/captura_cv.html\"):\n # Check whether the user has a photo: whether any row in the\n # Empleado table references the current user and whether that row\n # has a photo uploaded to the system\n usuario = request.user\n try:\n empleado = Empleado.objects.get(user = usuario.pk)\n foto = 1\n etapa = empleado.status\n if Estudio.objects.filter(user = usuario.pk).exists():\n profesion = Estudio.objects.get(user=usuario.pk)\n except:\n foto = 0\n etapa = 1\n pass\n #check whether a candidate record already exists\n #populate the candidate form\n rrhh=\"PORTAFOLIO\"\n emp_id=usuario.id\n form_candidato=Formulario_candidato()\n try:\n candidato= Candidato.objects.get(emp_id=usuario.id)\n status_con=\"True\"\n con_id=candidato.id\n form_candidato.fields['con_id'].initial = con_id\n #form_candidato=Formulario_candidato()\n form_candidato=set_values_candidato(candidato)\n experiencias = Experiencia.objects.filter(candidato=candidato)\n referencias = Referencia.objects.filter(candidato=candidato)\n hijos = Hijo_candidato.objects.filter(candidato=candidato)\n hermanos = Hermano_candidato.objects.filter(candidato=candidato)\n idiomas = Idioma_candidato.objects.filter(candidato=candidato)\n estudios = Estudios_pro.objects.filter(candidato=candidato)\n estudiosotros=Estudios_otros.objects.filter(candidato=candidato)\n except Exception as e:\n pass\n ####FORMS\n form_idioma=Formulario_idioma()\n form_editidioma=Formulario_idioma()\n form_hermano=Formulario_hermano_candidato()\n form_edithermano=Formulario_hermano_candidato()\n form_hijo=Formulario_hijo_candidato()\n form_edithijo=Formulario_hijo_candidato()\n form_referencia=Formulario_referencia()\n form_editreferencia=Formulario_referencia()\n form_experiencia=Formulario_experiencia()\n form_editexperiencia=Formulario_experiencia()\n form_estudio=Formulario_estudios()\n form_editestudio=Formulario_estudios()\n form_estudiootro=Formulario_estudiosotros()\n form_editestudiootro=Formulario_estudiosotros()\n\n form_idioma.fields['idioma'].widget.attrs['style'] =\"text-transform: uppercase;\"\n form_editidioma.fields['idioma'].widget.attrs['style'] =\"text-transform: uppercase;\"\n\n \n\n form_candidato.fields['edad'].widget.attrs['readonly'] ='readonly'\n form_candidato.fields['piso'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['depto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_que'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_donde'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_horario'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_termino'].widget.attrs['disabled'] ='disabled'\n\n \n \n form_candidato.fields['primaria_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['primaria_inicio'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['primaria_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['primaria_documento'].widget.attrs['disabled'] ='disabled'\n \n form_candidato.fields['secundaria_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['secundaria_inicio'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['secundaria_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['secundaria_documento'].widget.attrs['disabled'] ='disabled'\n \n form_candidato.fields['preparatoria_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['preparatoria_inicio'].widget.attrs['disabled'] ='disabled'\n 
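# NOTE (editor): these schooling inputs render disabled by default; presumably client-side script re-enables them when the matching education level is selected (editor's assumption, not verified against the templates).\n 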
form_candidato.fields['preparatoria_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['preparatoria_documento'].widget.attrs['disabled'] ='disabled'\n \n form_candidato.fields['tecnica_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['tecnica_inicio'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['tecnica_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['tecnica_documento'].widget.attrs['disabled'] ='disabled'\n\n \n\n form_candidato.fields['pago_infonavit'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['auto_marca'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['auto_modelo'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['seguro_monto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['afianzado_monto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['sindicato_nombre'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['sindicato_cargo'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['ingreso_monto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['ingreso_fuente'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['conocido_nombre'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['conocido_depto'].widget.attrs['disabled'] ='disabled'\n\n form_experiencia.fields['experiencia_supervision_num'].widget.attrs['disabled'] ='disabled'\n \n\n \n \n return render(request, template_name, locals(),)\n\n\n\n\ndef register(request, template_name = \"dashboard/register.html\"):\n if request.method == 'POST':\n form = ActivaEmpleado(request.POST)\n if form.is_valid():\n correo = form.cleaned_data['email']\n usuario = form.cleaned_data['username']\n contrasena = form.cleaned_data['password']\n # Consultar si existe correo\n try:\n usuario_inactivo = User.objects.get(email = correo)\n if usuario_inactivo:\n # Verificar su status (ACTIVADO O NO ACTIVADO)\n if usuario_inactivo.is_active:\n mensaje = 0\n form = ActivaEmpleado()\n form.fields[\"username\"].initial=\"\"\n form.fields[\"email\"].initial=\"\"\n else:\n # Actualizar USERNAME\n usuario_inactivo.username = usuario\n usuario_inactivo.save()\n # Actualizar PASSWORD\n usuario_inactivo.set_password(contrasena)\n usuario_inactivo.save()\n # Cambiar status\n usuario_inactivo.is_active = True\n usuario_inactivo.save()\n mensaje = 1\n form = ActivaEmpleado()\n form.fields[\"username\"].initial=\"\"\n form.fields[\"email\"].initial=\"\"\n \n \n except:\n form = ActivaEmpleado()\n form.fields[\"username\"].initial=\"\"\n form.fields[\"email\"].initial=\"\"\n mensaje = 2\n \n \n else:\n form = ActivaEmpleado()\n form.fields[\"username\"].initial=\"\"\n form.fields[\"email\"].initial=\"\"\n print(form.errors)\n else:\n form = ActivaEmpleado()\n return render(request, template_name, locals(),)\n\ndef update_per(request):\n response =\"NONE\"\n if request.method == 'POST':\n password1 = request.POST.get('password1')\n password2 = request.POST.get('password2')\n emp_id = request.POST.get('id')\n if (password1==password2):\n username = request.POST.get('username')\n user_existente = User.objects.get(username = username)\n if (user_existente):\n if(user_existente.id != int(emp_id)):\n response=\"user_existe\"\n return HttpResponse(response)\n print(request.POST)\n email = request.POST['email']\n print(email)\n email_existente = User.objects.get(email = email)\n if (email_existente):\n if(email_existente.id != int(emp_id)):\n response=\"email_existe\"\n 
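# NOTE (editor): the User.objects.get(...) lookups above raise DoesNotExist when no row matches, which surfaces as a 500; User.objects.filter(...).exists(), as used by new_per below, is the safer check (editor's suggestion, not applied here).\n 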
return HttpResponse(response)\n try:\n username = request.POST.get('username')\n first_name = request.POST.get('first_name')\n last_name = request.POST.get('last_name')\n email = request.POST.get('email')\n is_active = request.POST.get('is_active')\n is_superuser = False\n password = request.POST.get('password1')\n per= User.objects.get(id=emp_id)\n per.username=username\n per.first_name=first_name\n per.last_name=last_name\n per.email=email\n per.is_active=is_active\n per.is_superuser=is_superuser\n per.set_password(password)\n per.save()\n response =\"OK\"\n except:\n print(per.errors)\n response=\"error\"\n else:\n response=\"passerror\"\n \n return HttpResponse(response)\n\ndef new_per(request):\n response=\"NONE\"\n if request.method == 'POST':\n form = CreaEmpleado2(request.POST)\n password1 = request.POST.get('password1')\n password2 = request.POST.get('password2')\n \n if (password1==password2):\n \n username = request.POST.get('username')\n user_existente = User.objects.filter(username = username).exists()\n \n if not user_existente:\n email = request.POST['email']\n email_existente = User.objects.filter(email = email).exists()\n if not email_existente:\n if form.is_valid():\n try:\n form.save()\n is_admin = request.POST['is_admin']\n if is_admin==\"0\":\n group = Group.objects.get(name='Administrador')\n elif is_admin==\"1\":\n group = Group.objects.get(name='RRHH')\n elif is_admin==\"2\":\n group = Group.objects.get(name='Editor')\n elif is_admin==\"3\":\n group = Group.objects.get(name='Empleados')\n elif is_admin==\"4\":\n group = Group.objects.get(name='Becario')\n else:\n group = Group.objects.get(name='Consultor')\n us=User.objects.get(username = username, email= email)\n group.user_set.add(us)\n data = {\n 'result': 'OK'\n }\n response=data\n except:\n print(form.errors)\n data = {\n 'result': 'error'\n }\n response=data\n else:\n print(form.errors)\n #pas_err=form.errors\n \n if form.errors['password2']:\n pas_err=form.errors['password2']\n data = {\n 'result': 'password2',\n 'message': pas_err\n }\n \n #pas_err=form.errors.as_text\n \n response=data\n else:\n print(form.errors)\n data = {\n 'result': 'email_existe',\n }\n response=data\n else:\n print(form.errors)\n data = {\n 'result': 'user_existe'\n }\n response=data\n else:\n print(form.errors)\n data = {\n 'result': 'passerror'\n }\n response=data\n \n \n \n #return HttpResponse(response)\n return JsonResponse(response)\n\n\n\ndef add_per2(request):\n if request.method == 'POST':\n print(\"entro a post\")\n\n form = CreaEmpleado2(request.POST)\n is_admin = request.POST['is_admin']\n username = request.POST['username']\n correo = request.POST['email']\n response=\"\"\n if form.is_valid():\n try:\n #form.save() \n \n if is_admin==\"0\":\n group = Group.objects.get(name='Administrador')\n elif is_admin==\"1\":\n group = Group.objects.get(name='Consultor')\n elif is_admin==\"2\":\n group = Group.objects.get(name='Editor')\n elif is_admin==\"3\":\n group = Group.objects.get(name='Empleados')\n else:\n group = Group.objects.get(name='Becario')\n #us=User.objects.get(username = username, email= correo)\n #group.user_set.add(us)\n \n #form = CreaEmpleado2()\n #form.fields[\"username\"].initial=\"\"\n #form.fields[\"email\"].initial=\"\"\n #form.fields[\"first_name\"].initial=\"\"\n #form.fields[\"last_name\"].initial=\"\"\n #form.fields[\"password1\"].initial=\"\"\n #form.fields[\"password2\"].initial=\"\"\n response=\"OK\"\n except:\n response=\"error\"\n pass\n \n else:\n response=\"existe\"\n return 
HttpResponse(response)\n #print(form)\n #print(\"error en valid\")\n #print(form.errors) \n \n # manda existe en add_per\n\n #return HttpResponseRedirect(self.get_success_url())\n #display_html_dict = self._displayhtml(request)\n #return render(request,self.display_html_dict)\n #return JsonResponse({\n # 'success':False,\n # 'err_code':'invalid_form',\n # 'err_msg':form.errors\n # })\n\n\ndef filtra_cand(request):\n data = dict()\n palabra=request.GET.get('dato')\n #name__iexact=\"beatles blog\" __icontains\n candidatos = Candidato.objects.filter(puesto_solicitado__icontains=palabra,emp_id=None)\n if candidatos:\n data['form_is_valid'] = True #si encontro registros\n data['html_candidatos_lista'] = render_to_string('dashboard/cand_cont.html', {\n 'candidatos': candidatos\n })\n else:\n data['form_is_valid'] = False #si no encontro registros\n \n return JsonResponse(data)\n\ndef filtra2_cand(request):\n data = dict()\n nom=request.GET.get('nom')\n pat=request.GET.get('pat')\n mat=request.GET.get('mat')\n #name__iexact=\"beatles blog\" __icontains\n candidatos = Candidato.objects.filter(nombre__icontains=nom,apellido_paterno__icontains=pat,apellido_materno__icontains=mat,emp_id=None)\n if candidatos:\n data['form_is_valid'] = True #si encontro registros\n data['html_candidatos_lista'] = render_to_string('dashboard/cand_cont.html', {\n 'candidatos': candidatos\n })\n else:\n data['form_is_valid'] = False #si no encontro registros\n \n return JsonResponse(data)\n\ndef filtra3_cand(request):\n data = dict()\n fec=request.GET.get('fec')\n fec=mod_fecha(fec)\n print(fec)\n candidatos = Candidato.objects.filter(fecha_solicitud__date=fec,emp_id=None)\n if candidatos:\n data['form_is_valid'] = True #si encontro registros\n data['html_candidatos_lista'] = render_to_string('dashboard/cand_cont.html', {\n 'candidatos': candidatos\n })\n else:\n data['form_is_valid'] = False #si no encontro registros\n \n return JsonResponse(data)\n\ndef all_cand(request):\n data = dict()\n candidatos = Candidato.objects.filter(emp_id=None)\n if candidatos:\n data['form_is_valid'] = True #si encontro registros\n data['html_candidatos_lista'] = render_to_string('dashboard/cand_cont.html', {\n 'candidatos': candidatos\n })\n else:\n data['form_is_valid'] = False #si no encontro registros\n \n return JsonResponse(data)\n\ndef del_candidato(request):#elimina candidato\n data = dict()\n id=request.GET.get('id')\n \n candidato= Candidato.objects.get(pk=id)\n candidato.delete()\n data['form_is_valid'] = True\n candidatos = Candidato.objects.filter(emp_id=None)\n data['html_candidatos_lista'] = render_to_string('dashboard/cand_cont.html', {\n 'candidatos': candidatos\n })\n return JsonResponse(data)\n\n@login_required(login_url = '/login/')\ndef pasar_emp(request,id, template_name = \"dashboard/pasar_emp.html\"):\n idcand=id\n nform = set_values_emp(idcand)\n\n return render(request, template_name, locals(),)\n\ndef new_emp(request):\n response=\"NONE\"\n if request.method == 'POST':\n form = CreaEmpleado2(request.POST)\n password1 = request.POST.get('password1')\n password2 = request.POST.get('password2')\n idcand = request.POST.get('idcand')\n \n if (password1==password2):\n \n username = request.POST.get('username')\n user_existente = User.objects.filter(username = username).exists()\n \n if not user_existente:\n email = request.POST['email']\n email_existente = User.objects.filter(email = email).exists()\n if not email_existente:\n if form.is_valid():\n try:\n #form.save()\n post=form.save()\n group = 
Group.objects.get(name='Empleados')\n us=User.objects.get(username = username, email= email)\n group.user_set.add(us)\n us.is_active=True\n us.save()\n data = {\n 'result': 'OK'\n }\n response=data\n #update the candidate record with the new emp_id\n cand=Candidato.objects.get(id=idcand)\n cand.emp_id=post.id\n cand.save()\n ################### send notification email\n message=\"Se ha generado una cuenta de acceso a su nombre usuario : \"+username+\" password: \"+password1\n name=\"Recursos humanos\"\n email='rrhh@springlabs.net'\n ##candidate's personal email address\n destino=cand.email_personal\n\n print(destino)\n \n body=render_to_string(\n 'email_content.html',{\n 'name':name,\n 'email':email,\n 'message':message,\n },\n )\n email_message = EmailMessage(\n subject='Mensaje de usuario',\n body=body,\n from_email=email,\n #to=['rrhh@springlabs.net'],\n to=[destino],\n )\n email_message.content_subtype = 'html'\n email_message.send()\n except Exception as e:\n print(e)\n print(form.errors)\n data = {\n 'result': 'error'\n }\n response=data\n else:\n #print(form.errors)\n for er in form.errors:\n print(er)\n pas_err=form.errors[er]\n data = {\n 'result': er,\n 'message': pas_err\n }\n response=data\n else:\n print(form.errors)\n data = {\n 'result': 'email_existe',\n }\n response=data\n else:\n print(form.errors)\n data = {\n 'result': 'user_existe'\n }\n response=data\n else:\n print(form.errors)\n data = {\n 'result': 'passerror'\n }\n response=data\n \n \n \n #return HttpResponse(response)\n return JsonResponse(response)\n\ndef baja_emp(request):#employee off-boarding\n data = dict()\n id=request.GET.get('emp_Id')\n motivo=request.GET.get('motivo')\n fecha = datetime.strptime(request.GET.get('fecha_baja'), '%d/%m/%Y').strftime('%Y-%m-%d')\n tipo=request.GET.get('tipo_baja')\n try:\n #mark the candidato row inactive (activo='NO')\n emp= Candidato.objects.get(emp_id=id)\n emp.motivo=motivo\n emp.fecha_baja=fecha\n emp.tipo_baja=tipo\n emp.activo='NO'\n emp.save()\n #set is_active=False on the user row\n usr= User.objects.get(pk=emp.emp_id)\n grupo = Group.objects.get(name=\"Ex-Empleados\")\n grupo.user_set.add(usr)\n grupo2 = Group.objects.get(name=\"Empleados\")\n grupo2.user_set.remove(usr)\n usr.is_active=False\n usr.save()\n\n data['form_is_valid'] = True\n grupo = Group.objects.get(name=\"Empleados\")\n empleados = grupo.user_set.filter(is_active=True)\n #empleados = Candidato.objects.filter(activo=\"SI\")\n data['html_empleados_lista'] = render_to_string('dashboard/emp_cont.html', {\n 'empleados': empleados\n })\n except Exception as e:\n data['form_is_valid'] = False\n print(e)\n \n \n\n \n return JsonResponse(data)\n\ndef home(request, template_name = \"dashboard/home.html\"):\n# Image by Gerd Altmann from Pixabay\n return render(request, template_name, locals(),)\n\n\n@login_required(login_url = '/login/')\ndef cola_home(request, template_name = \"dashboard/cola_home.html\"):\n # Check whether the user has a photo: whether any row in the\n # Empleado table references the current user and whether that row\n # has a photo uploaded to the system\n usuario = request.user\n try:\n empleado = Empleado.objects.get(user = usuario.pk)\n foto = 1\n # etapa = empleado.status\n if Estudio.objects.filter(user = usuario.pk).exists():\n profesion = Estudio.objects.get(user=usuario.pk)\n #empid=empleado\n except:\n foto = 0\n etapa = 1\n pass\n\n return render(request, template_name, locals(),)\n\n\n#### COLABORADOR#########################################################\n#### 
COLABORADOR#########################################################\n#### COLABORADOR#########################################################\n@login_required(login_url = '/login/')\ndef colaborador(request,id, template_name = \"colaboradores/colaborador.html\"):\n rrhh=\"COLABORADOR\"\n status_con=\"True\"\n #populate the candidate form\n candidato= Candidato.objects.get(emp_id=id)\n con_id=candidato.id\n #form_candidato=Formulario_candidato()\n form_candidato=set_values_candidato(candidato)\n\n \n form_candidato.fields['con_id'].initial = con_id\n\n form_idioma=Formulario_idioma()\n form_editidioma=Formulario_idioma()\n form_hermano=Formulario_hermano_candidato()\n form_edithermano=Formulario_hermano_candidato()\n form_hijo=Formulario_hijo_candidato()\n form_edithijo=Formulario_hijo_candidato()\n form_referencia=Formulario_referencia()\n form_editreferencia=Formulario_referencia()\n form_experiencia=Formulario_experiencia()\n form_editexperiencia=Formulario_experiencia()\n form_estudio=Formulario_estudios()\n form_editestudio=Formulario_estudios()\n \n form_estudiootro=Formulario_estudiosotros()\n form_editestudiootro=Formulario_estudiosotros()\n \n \n experiencias = Experiencia.objects.filter(candidato=candidato)\n referencias = Referencia.objects.filter(candidato=candidato)\n hijos = Hijo_candidato.objects.filter(candidato=candidato)\n hermanos = Hermano_candidato.objects.filter(candidato=candidato)\n idiomas = Idioma_candidato.objects.filter(candidato=candidato)\n estudios = Estudios_pro.objects.filter(candidato=candidato)\n estudiosotros=Estudios_otros.objects.filter(candidato=candidato)\n \n form_idioma.fields['idioma'].widget.attrs['style'] =\"text-transform: uppercase;\"\n form_editidioma.fields['idioma'].widget.attrs['style'] =\"text-transform: uppercase;\"\n\n \n\n form_candidato.fields['edad'].widget.attrs['readonly'] ='readonly'\n form_candidato.fields['piso'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['depto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_que'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_donde'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_horario'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_termino'].widget.attrs['disabled'] ='disabled'\n\n \n \n form_candidato.fields['primaria_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['primaria_inicio'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['primaria_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['primaria_documento'].widget.attrs['disabled'] ='disabled'\n \n form_candidato.fields['secundaria_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['secundaria_inicio'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['secundaria_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['secundaria_documento'].widget.attrs['disabled'] ='disabled'\n \n form_candidato.fields['preparatoria_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['preparatoria_inicio'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['preparatoria_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['preparatoria_documento'].widget.attrs['disabled'] ='disabled'\n \n form_candidato.fields['tecnica_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['tecnica_inicio'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['tecnica_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['tecnica_documento'].widget.attrs['disabled'] ='disabled'\n\n \n\n form_candidato.fields['pago_infonavit'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['auto_marca'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['auto_modelo'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['seguro_monto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['afianzado_monto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['sindicato_nombre'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['sindicato_cargo'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['ingreso_monto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['ingreso_fuente'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['conocido_nombre'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['conocido_depto'].widget.attrs['disabled'] ='disabled'\n\n form_experiencia.fields['experiencia_supervision_num'].widget.attrs['disabled'] ='disabled'\n \n\n return render(request, template_name, locals(),)\n\n@login_required(login_url = '/login/')\ndef colarrhh(request,id, template_name = \"candidatos/captura_cv.html\"):\n rrhh=\"COLABORADOR\"\n status_con=\"True\"\n #populate the candidate form\n candidato= Candidato.objects.get(emp_id=id)\n con_id=candidato.id\n #form_candidato=Formulario_candidato()\n form_candidato=set_values_candidato(candidato)\n\n \n form_candidato.fields['con_id'].initial = con_id\n\n form_idioma=Formulario_idioma()\n form_editidioma=Formulario_idioma()\n form_hermano=Formulario_hermano_candidato()\n form_edithermano=Formulario_hermano_candidato()\n form_hijo=Formulario_hijo_candidato()\n form_edithijo=Formulario_hijo_candidato()\n form_referencia=Formulario_referencia()\n form_editreferencia=Formulario_referencia()\n form_experiencia=Formulario_experiencia()\n form_editexperiencia=Formulario_experiencia()\n form_estudio=Formulario_estudios()\n form_editestudio=Formulario_estudios()\n \n form_estudiootro=Formulario_estudiosotros()\n form_editestudiootro=Formulario_estudiosotros()\n \n \n experiencias = Experiencia.objects.filter(candidato=candidato)\n referencias = Referencia.objects.filter(candidato=candidato)\n hijos = Hijo_candidato.objects.filter(candidato=candidato)\n hermanos = Hermano_candidato.objects.filter(candidato=candidato)\n idiomas = Idioma_candidato.objects.filter(candidato=candidato)\n estudios = Estudios_pro.objects.filter(candidato=candidato)\n estudiosotros=Estudios_otros.objects.filter(candidato=candidato)\n \n form_idioma.fields['idioma'].widget.attrs['style'] =\"text-transform: uppercase;\"\n form_editidioma.fields['idioma'].widget.attrs['style'] =\"text-transform: uppercase;\"\n\n \n\n form_candidato.fields['edad'].widget.attrs['readonly'] ='readonly'\n form_candidato.fields['piso'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['depto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_que'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_donde'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_horario'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['estudia_termino'].widget.attrs['disabled'] ='disabled'\n\n \n \n form_candidato.fields['primaria_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['primaria_inicio'].widget.attrs['disabled'] ='disabled'\n 
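# NOTE (editor): this read-only field set is duplicated across porta_emp, portafolio, colaborador and colarrhh; a shared helper such as the _disable_fields sketch in porta_emp above would keep the four views from drifting apart.\n 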
form_candidato.fields['primaria_documento'].widget.attrs['disabled'] ='disabled'\n \n form_candidato.fields['secundaria_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['secundaria_inicio'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['secundaria_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['secundaria_documento'].widget.attrs['disabled'] ='disabled'\n \n form_candidato.fields['preparatoria_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['preparatoria_inicio'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['preparatoria_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['preparatoria_documento'].widget.attrs['disabled'] ='disabled'\n \n form_candidato.fields['tecnica_annios'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['tecnica_inicio'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['tecnica_termino'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['tecnica_documento'].widget.attrs['disabled'] ='disabled'\n\n \n\n form_candidato.fields['pago_infonavit'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['auto_marca'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['auto_modelo'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['seguro_monto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['afianzado_monto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['sindicato_nombre'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['sindicato_cargo'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['ingreso_monto'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['ingreso_fuente'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['conocido_nombre'].widget.attrs['disabled'] ='disabled'\n form_candidato.fields['conocido_depto'].widget.attrs['disabled'] ='disabled'\n\n form_experiencia.fields['experiencia_supervision_num'].widget.attrs['disabled'] ='disabled'\n \n\n return render(request, template_name, locals(),)\n\ndef lstcol_idioma(request):#buscar idioma\n #print(request.GET.get('id'))\n id=request.GET.get('id')\n \n idioma= Idioma_candidato.objects.get(pk=id)\n data = {\n 'status':\"OK\",\n 'id': idioma.id,\n 'idioma': idioma.idioma,\n 'porcentaje': idioma.idioma_porcentaje,\n }\n\n return JsonResponse(data)\n\ndef delcol_estudio(request):#Eliminar estudio\n data = dict()\n id=request.GET.get('id')\n estudio= Estudios_pro.objects.get(pk=id)\n candidato=estudio.candidato\n estudio.delete()\n data['form_is_valid'] = True\n estudios = Estudios_pro.objects.filter(candidato=candidato)\n data['html_estudios_lista'] = render_to_string('candidatos/estudios_lista.html', {\n 'estudios': estudios\n })\n\n return JsonResponse(data)\n\ndef lstcol_estudio(request):#buscar estudio\n #print(request.GET.get('id'))\n id=request.GET.get('id')\n #id=13\n estudio= Estudios_pro.objects.get(pk=id)\n data = {\n 'status':\"OK\",\n 'id': estudio.id,\n 'tipo': estudio.estudios_tipo,\n 'escuela': estudio.estudios_escuela,\n 'nombre': estudio.estudios_nombre,\n 'annios': estudio.estudios_annios,\n 'inicio': estudio.estudios_inicio,\n 'termino': estudio.estudios_termino,\n 'documento': estudio.estudios_documento,\n 'tesis': estudio.estudios_tesis,\n 'forma': estudio.estudios_forma,\n 'cedula': estudio.estudios_cedula,\n }\n\n return JsonResponse(data)\n\ndef delcol_otroestudio(request):#Eliminar estudiootro\n data = dict()\n id=request.GET.get('id')\n estudio= 
Estudios_otros.objects.get(pk=id)\n candidato=estudio.candidato\n estudio.delete()\n data['form_is_valid'] = True\n estudiosotros = Estudios_otros.objects.filter(candidato=candidato)\n data['html_estudiosotros_lista'] = render_to_string('candidatos/estudiosotros_lista.html', {\n 'estudiosotros': estudiosotros\n })\n\n return JsonResponse(data)\n\ndef lstcol_otroestudio(request):#buscar estudiootro\n #print(request.GET.get('id'))\n id=request.GET.get('id')\n \n estudio= Estudios_otros.objects.get(pk=id)\n data = {\n 'status':\"OK\",\n 'id': estudio.id,\n 'tipo': estudio.estudios_tipo,\n 'nombre': estudio.estudios_nombre,\n 'inicio': estudio.estudios_inicio,\n 'termino': estudio.estudios_termino,\n 'documento': estudio.estudios_documento,\n }\n\n return JsonResponse(data)\n\ndef delcol_hijo(request):#borrar hijo\n data = dict()\n id=request.GET.get('id')\n hijo= Hijo_candidato.objects.get(pk=id)\n candidato=hijo.candidato\n hijo.delete()\n data['form_is_valid'] = True\n hijos = Hijo_candidato.objects.filter(candidato=candidato)\n data['html_hijos_lista'] = render_to_string('candidatos/hijos_lista.html', {\n 'hijos': hijos\n })\n return JsonResponse(data)\n\ndef lstcol_hijo(request):#buscar hijo\n id=request.GET.get('id')\n hijo= Hijo_candidato.objects.get(pk=id)\n\n data = {\n 'status':\"OK\",\n 'id': hijo.id,\n 'nombre': hijo.hijo_nombre,\n 'paterno': hijo.hijo_apellido_paterno,\n 'materno': hijo.hijo_apellido_materno,\n 'edad': hijo.hijo_edad,\n 'ocupacion': hijo.hijo_ocupacion,\n 'lugar': hijo.hijo_lugar_ocupacion,\n 'domicilio': hijo.hijo_domicilio,\n 'telefono': hijo.hijo_tel,\n }\n\n return JsonResponse(data)\n\ndef delcol_hermano(request):#borrar hermano\n data = dict()\n id=request.GET.get('id')\n hermano= Hermano_candidato.objects.get(pk=id)\n candidato=hermano.candidato\n hermano.delete()\n data['form_is_valid'] = True\n hermanos = Hermano_candidato.objects.filter(candidato=candidato)\n data['html_hermanos_lista'] = render_to_string('candidatos/hermanos_lista.html', {\n 'hermanos': hermanos\n })\n return JsonResponse(data)\n\ndef lstcol_hermano(request):#buscar hermano\n id=request.GET.get('id')\n hermano= Hermano_candidato.objects.get(pk=id)\n \n data = {\n 'status':\"OK\",\n 'id': hermano.id,\n 'nombre': hermano.hermano_nombre,\n 'paterno': hermano.hermano_apellido_paterno,\n 'materno': hermano.hermano_apellido_materno,\n 'edad': hermano.hermano_edad,\n 'ocupacion': hermano.hermano_ocupacion,\n 'lugar': hermano.hermano_lugar_ocupacion,\n 'domicilio': hermano.hermano_domicilio,\n 'telefono': hermano.hermano_tel,\n }\n\n return JsonResponse(data)\n\ndef delcol_experiencia(request):#borrar experiencia\n data = dict()\n id=request.GET.get('id')\n exp= Experiencia.objects.get(pk=id)\n candidato=exp.candidato\n exp.delete()\n data['form_is_valid'] = True\n experiencias = Experiencia.objects.filter(candidato=candidato)\n data['html_experiencias_lista'] = render_to_string('candidatos/experiencias_lista.html', {\n 'experiencias': experiencias\n })\n return JsonResponse(data)\n\ndef lstcol_experiencia(request):#buscar hermano\n try:\n # print(request.GET.get('id'))\n id=request.GET.get('id')\n exp= Experiencia.objects.get(pk=id)\n except Exception as e:\n print(e)\n \n \n try:\n data = {\n 'status':\"OK\",\n 'id': exp.id,\n 'nombre': exp.empresa_nombre,\n 'direccion': exp.empresa_direccion,\n 'telefono': exp.empresa_tel,\n 'giro': exp.empresa_giro,\n 'jefe': exp.empresa_nombre_jefe,\n 'puesto': exp.empresa_jefe_puesto,\n 'ingreso': exp.empresa_fecha_ingreso,\n 'salario_inicial': 
exp.empresa_salario_inicio,\n 'separacion': exp.empresa_fecha_separacion,\n 'salario_final': exp.empresa_salario_final,\n 'puesto_ultimo': exp.empresa_puesto_ultimo,\n 'puesto_ultimo_tiempo': exp.empresa_puesto_ultimo_tiempo,\n 'depto_ultimo': exp.empresa_puesto_ultimo_depto,\n 'puesto_anterior': exp.empresa_puesto_anterior,\n 'puesto_anterior_tiempo': exp.empresa_puesto_anterior_tiempo,\n 'depto_anterior': exp.empresa_puesto_anterior_depto,\n 'exp_supervision': exp.experiencia_supervision,\n 'num_supervision': exp.experiencia_supervision_num,\n 'motivo': exp.separacion_motivo,\n }\n \n except Exception as e:\n print(e)\n \n\n return JsonResponse(data)\n\ndef delcol_referencia(request):#borrar referencia\n data = dict()\n id=request.GET.get('id')\n ref= Referencia.objects.get(pk=id)\n candidato=ref.candidato\n ref.delete()\n data['form_is_valid'] = True\n referencias = Referencia.objects.filter(candidato=candidato)\n data['html_referencias_lista'] = render_to_string('candidatos/referencias_lista.html', {\n 'referencias': referencias\n })\n return JsonResponse(data)\n\ndef lstcol_referencia(request):#buscar referencia\n try:\n id=request.GET.get('id')\n ref= Referencia.objects.get(pk=id)\n except Exception as e:\n print(e)\n try:\n data = {\n 'status':\"OK\",\n 'id': ref.id,\n 'nombre': ref.referencia_nombre,\n 'domicilio': ref.referencia_domicilio,\n 'telefono': ref.referencia_tel,\n 'ocupacion': ref.referencia_ocupacion,\n 'annios': ref.referencia_annios_conocer,\n }\n \n except Exception as e:\n print(e)\n\n return JsonResponse(data)\n\ndef delcol_idioma(request):#elimina idioma\n data = dict()\n id=request.GET.get('id')\n \n idioma= Idioma_candidato.objects.get(pk=id)\n candidato=idioma.candidato\n idioma.delete()\n data['form_is_valid'] = True\n idiomas = Idioma_candidato.objects.filter(candidato=candidato)\n data['html_idiomas_lista'] = render_to_string('candidatos/idiomas_lista.html', {\n 'idiomas': idiomas\n })\n return JsonResponse(data)\n\ndef col_secuno(request):#agregar secuno\n if not request.POST._mutable:\n request.POST._mutable = True\n request.POST['fecha_nac']=mod_fecha(request.POST.get('fecha_nac'))\n form = CandidatoSecunoForm(request.POST)\n response=\"\"\n \n if form.is_valid():\n id_n = request.POST.get('id_n')\n con_id = request.POST.get('con_id')\n if con_id:\n id_n=con_id\n\n if id_n:\n candidato= Candidato.objects.get(id=id_n)\n \n candidato.fuente_recluta=form.cleaned_data['fuente_recluta']\n candidato.puesto_solicitado=form.cleaned_data['puesto_solicitado']\n candidato.sueldo_deseado=form.cleaned_data['sueldo_deseado']\n candidato.puesto_solicitado=form.cleaned_data['puesto_solicitado']\n candidato.nombre=form.cleaned_data['nombre']\n candidato.apellido_paterno=form.cleaned_data['apellido_paterno']\n candidato.apellido_materno=form.cleaned_data['apellido_materno']\n candidato.sexo=form.cleaned_data['sexo']\n candidato.estado_civil=form.cleaned_data['estado_civil']\n candidato.edad=form.cleaned_data['edad']\n candidato.fecha_nac=form.cleaned_data['fecha_nac']\n candidato.lugar_nac=form.cleaned_data['lugar_nac']\n candidato.pais_nacimiento=form.cleaned_data['pais_nacimiento']\n candidato.tel=form.cleaned_data['tel']\n candidato.cel=form.cleaned_data['cel']\n candidato.tipo=form.cleaned_data['tipo']\n candidato.calle=form.cleaned_data['calle']\n candidato.num_ext=form.cleaned_data['num_ext']\n candidato.num_int=form.cleaned_data['num_int']\n candidato.calle_uno=form.cleaned_data['calle_uno']\n candidato.calle_dos=form.cleaned_data['calle_dos']\n 
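# NOTE (editor): the field-by-field copy below could be written as a loop; an equivalent sketch (hypothetical, not applied):\n # for name in ('piso', 'depto', 'cp', 'colonia'):\n # setattr(candidato, name, form.cleaned_data[name])\n 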
candidato.piso=form.cleaned_data['piso']\n candidato.depto=form.cleaned_data['depto']\n candidato.cp=form.cleaned_data['cp']\n candidato.colonia=form.cleaned_data['colonia']\n candidato.esdo=form.cleaned_data['esdo']\n candidato.pais_direc=form.cleaned_data['pais_direc']\n candidato.referencia=form.cleaned_data['referencia']\n candidato.trayectoria_de_casa=form.cleaned_data['trayectoria_de_casa']\n #candidato.email_personal=request.POST.get('email_personal')\n candidato.email_personal=form.cleaned_data['email_personal']\n candidato.rfc=form.cleaned_data['rfc']\n candidato.curp=form.cleaned_data['curp']\n candidato.imss=form.cleaned_data['imss']\n candidato.cartilla=form.cleaned_data['cartilla']\n candidato.tipo_licencia=form.cleaned_data['tipo_licencia']\n candidato.licencia=form.cleaned_data['licencia']\n \n candidato.save()\n data = {\n 'result': 'OK',\n 'id':id_n\n }\n response=data\n else:\n #no existing id: create the candidate row and return its new id\n post=form.save()\n data = {\n 'result': 'OK',\n 'id':post.id\n }\n response=data\n\n else:\n #pas_err=form.errors['password2']\n print(form.errors)\n #data = {\n # 'result': 'datos',\n # 'message': pas_err\n #}\n data = {\n 'result': 'errores'\n }\n response=data\n return JsonResponse(response)\n\ndef col_secdos(request):#save section two\n #take the Id from the hidden input\n cand_id = request.POST.get('candId2')\n print(cand_id)\n #cand_id=13\n if not request.POST._mutable:\n request.POST._mutable = True\n \n \n request.POST['estudia_termino']=mod_fecha(request.POST.get('estudia_termino'))\n\n #print(request.POST)\n form = CandidatoSecdosForm(request.POST)\n #print(form)\n if form.is_valid():\n try:\n #look up the candidate in the database\n candidato= Candidato.objects.get(id=cand_id)\n #candidato= Candidato.objects.get(id=37)\n \n ##assign values\n candidato.primaria=form.cleaned_data['primaria']\n candidato.primaria_annios=form.cleaned_data['primaria_annios']\n candidato.primaria_inicio=form.cleaned_data['primaria_inicio']\n candidato.primaria_termino=form.cleaned_data['primaria_termino']\n candidato.primaria_documento=form.cleaned_data['primaria_documento']\n candidato.secundaria=form.cleaned_data['secundaria']\n candidato.secundaria_annios=form.cleaned_data['secundaria_annios']\n candidato.secundaria_inicio=form.cleaned_data['secundaria_inicio']\n candidato.secundaria_termino=form.cleaned_data['secundaria_termino']\n candidato.secundaria_documento=form.cleaned_data['secundaria_documento']\n candidato.preparatoria=form.cleaned_data['preparatoria']\n candidato.preparatoria_annios=form.cleaned_data['preparatoria_annios']\n candidato.preparatoria_inicio=form.cleaned_data['preparatoria_inicio']\n candidato.preparatoria_termino=form.cleaned_data['preparatoria_termino']\n candidato.preparatoria_documento=form.cleaned_data['preparatoria_documento']\n candidato.tecnica=form.cleaned_data['tecnica']\n candidato.tecnica_annios=form.cleaned_data['tecnica_annios']\n candidato.tecnica_inicio=form.cleaned_data['tecnica_inicio']\n candidato.tecnica_termino=form.cleaned_data['tecnica_termino']\n candidato.tecnica_documento=form.cleaned_data['tecnica_documento']\n \n candidato.estudia_actualmente=form.cleaned_data['estudia_actualmente']\n candidato.estudia_que=form.cleaned_data['estudia_que']\n candidato.estudia_donde=form.cleaned_data['estudia_donde']\n candidato.estudia_horario=form.cleaned_data['estudia_horario']\n 
candidato.estudia_termino=form.cleaned_data['estudia_termino']\n candidato.maquinas_equipos=form.cleaned_data['maquinas_equipos']\n \n candidato.save()\n \n data = {\n 'result': 'OK'\n }\n response=data\n except Exception as e:\n print(e)\n data = {\n 'result': 'errores'\n }\n response=data\n else:\n print(form.errors)\n data = {\n 'result': 'errores'\n }\n response=data\n\n return JsonResponse(response)\n\ndef col_sectres(request):#agregar sectres\n #tomar Id del input oculto\n cand_id = request.POST.get('candId3')\n #cand_id=13\n form = CandidatoSectresForm(request.POST)\n if form.is_valid():\n try:\n #buscar candidato en base de datos\n candidato= Candidato.objects.get(id=cand_id)\n ##asignar valores\n candidato.padre_nombre=form.cleaned_data['padre_nombre']\n candidato.padre_apellido_paterno=form.cleaned_data['padre_apellido_paterno']\n candidato.padre_apellido_materno=form.cleaned_data['padre_apellido_materno']\n candidato.padre_edad=form.cleaned_data['padre_edad']\n candidato.padre_ocupacion=form.cleaned_data['padre_ocupacion']\n candidato.padre_lugar_trabajo=form.cleaned_data['padre_lugar_trabajo']\n candidato.padre_domicilio=form.cleaned_data['padre_domicilio']\n candidato.padre_tel=form.cleaned_data['padre_tel']\n candidato.padre_vive=form.cleaned_data['padre_vive']\n\n candidato.madre_nombre=form.cleaned_data['madre_nombre']\n candidato.madre_apellido_paterno=form.cleaned_data['madre_apellido_paterno']\n candidato.madre_apellido_materno=form.cleaned_data['madre_apellido_materno']\n candidato.madre_edad=form.cleaned_data['madre_edad']\n candidato.madre_ocupacion=form.cleaned_data['madre_ocupacion']\n candidato.madre_lugar_trabajo=form.cleaned_data['madre_lugar_trabajo']\n candidato.madre_domicilio=form.cleaned_data['madre_domicilio']\n candidato.madre_tel=form.cleaned_data['madre_tel']\n candidato.madre_vive=form.cleaned_data['madre_vive']\n\n candidato.conyuge_nombre=form.cleaned_data['conyuge_nombre']\n candidato.conyuge_apellido_paterno=form.cleaned_data['conyuge_apellido_paterno']\n candidato.conyuge_apellido_materno=form.cleaned_data['conyuge_apellido_materno']\n candidato.conyuge_edad=form.cleaned_data['conyuge_edad']\n candidato.conyuge_ocupacion=form.cleaned_data['conyuge_ocupacion']\n candidato.conyuge_lugar_trabajo=form.cleaned_data['conyuge_lugar_trabajo']\n candidato.conyuge_domicilio=form.cleaned_data['conyuge_domicilio']\n candidato.conyuge_tel=form.cleaned_data['conyuge_tel']\n candidato.conyuge_vive=form.cleaned_data['conyuge_vive']\n candidato.save()\n \n data = {\n 'result': 'OK'\n }\n response=data\n except Exception as e:\n # print(e)\n data = {\n 'result': 'errores'\n }\n response=data\n else:\n print(form.errors)\n data = {\n 'result': 'errores'\n }\n response=data\n\n return JsonResponse(response)\n\ndef col_seccuatro(request):#agregar secdos\n #tomar Id del input oculto\n cand_id = request.POST.get('candId4')\n #cand_id =13\n if not request.POST._mutable:\n request.POST._mutable = True\n request.POST['fecha_disponible']=mod_fecha(request.POST.get('fecha_disponible'))\n form = CandidatoSeccuatroForm(request.POST)\n if form.is_valid():\n try:\n #buscar candidato en base de datos\n candidato= Candidato.objects.get(id=cand_id)\n ##asignar valores\n candidato.vivienda_propia=form.cleaned_data['vivienda_propia']\n candidato.credito_infonavit=form.cleaned_data['credito_infonavit']\n candidato.pago_infonavit=form.cleaned_data['pago_infonavit']\n candidato.auto_propio=form.cleaned_data['auto_propio']\n candidato.auto_marca=form.cleaned_data['auto_marca']\n 
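# NOTE (editor): each col_sec* endpoint follows the same pattern: look up the Candidato by the hidden form id, copy one section of cleaned_data onto it, save, and answer {'result': 'OK'} or {'result': 'errores'} as JSON (pattern summarized by the editor from this module).\n 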
candidato.auto_modelo=form.cleaned_data['auto_modelo']\n candidato.seguro_vida=form.cleaned_data['seguro_vida']\n candidato.seguro_monto=form.cleaned_data['seguro_monto']\n\n candidato.afianzado=form.cleaned_data['afianzado']\n candidato.afianzado_monto=form.cleaned_data['afianzado_monto']\n candidato.afiliado_sindicato=form.cleaned_data['afiliado_sindicato']\n candidato.sindicato_nombre=form.cleaned_data['sindicato_nombre']\n candidato.sindicato_cargo=form.cleaned_data['sindicato_cargo']\n candidato.tiempo_libre=form.cleaned_data['tiempo_libre']\n candidato.embarazo=form.cleaned_data['embarazo']\n candidato.religion=form.cleaned_data['religion']\n\n candidato.estado_salud=form.cleaned_data['estado_salud']\n candidato.fuma=form.cleaned_data['fuma']\n candidato.bebe=form.cleaned_data['bebe']\n candidato.tatuajes=form.cleaned_data['tatuajes']\n candidato.perforaciones=form.cleaned_data['perforaciones']\n candidato.disposicion_rolar=form.cleaned_data['disposicion_rolar']\n candidato.disposicion_viajar=form.cleaned_data['disposicion_viajar']\n candidato.ingreso_extra=form.cleaned_data['ingreso_extra']\n candidato.ingreso_monto=form.cleaned_data['ingreso_monto']\n candidato.ingreso_fuente=form.cleaned_data['ingreso_fuente']\n candidato.labora_conocido=form.cleaned_data['labora_conocido']\n candidato.conocido_nombre=form.cleaned_data['conocido_nombre']\n candidato.conocido_depto=form.cleaned_data['conocido_depto']\n candidato.fecha_disponible=form.cleaned_data['fecha_disponible']\n candidato.save()\n \n data = {\n 'result': 'OK'\n }\n response=data\n except Exception as e:\n # print(e)\n data = {\n 'result': 'errores'\n }\n response=data\n else:\n #print(form.errors)\n data = {\n 'result': 'errores'\n }\n response=data\n\n return JsonResponse(response)\n\ndef col_seccinco(request):#agregar experiencia sec cinco\n data = dict()\n if not request.POST._mutable:\n request.POST._mutable = True\n request.POST['empresa_fecha_ingreso']=mod_fecha(request.POST.get('empresa_fecha_ingreso'))\n request.POST['empresa_fecha_separacion']=mod_fecha(request.POST.get('empresa_fecha_separacion'))\n form = ExperienciaSeccincoForm(request.POST)\n cand_id = request.POST.get('candId5')\n #cand_id =13\n response=\"\"\n if form.is_valid():\n try:\n candidato= Candidato.objects.get(id=cand_id)\n experiencia = form.save(commit=False)\n experiencia.candidato=candidato\n experiencia.save()\n data['form_is_valid'] = True\n experiencias = Experiencia.objects.filter(candidato=candidato)\n data['html_experiencias_lista'] = render_to_string('candidatos/experiencias_lista.html', {\n 'experiencias': experiencias\n })\n response=data\n except Exception as e:\n #print(e)\n data = {\n 'result': 'errores'\n }\n response=data\n else:\n #print(form.errors)\n data = {\n 'result': 'errores'\n }\n response=data\n return JsonResponse(response)\n\ndef col_secseis(request):#agregar referencia sec seis\n data = dict()\n form = ReferenciaSecseisForm(request.POST)\n cand_id = request.POST.get('candId6')\n #cand_id =13\n response=\"\"\n if form.is_valid():\n try:\n candidato= Candidato.objects.get(id=cand_id)\n referencia = form.save(commit=False)\n referencia.candidato=candidato\n referencia.save()\n data['form_is_valid'] = True\n referencias = Referencia.objects.filter(candidato=candidato)\n data['html_referencias_lista'] = render_to_string('candidatos/referencias_lista.html', {\n 'referencias': referencias\n })\n response=data\n except Exception as e:\n #print(e)\n data = {\n 'result': 'errores'\n }\n response=data\n else:\n 
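# NOTE (editor): returning the validation messages would let the client show why the form failed; a minimal sketch (assumption, mirrors the per-field handling in new_emp above):\n # data = {'result': 'errores', 'message': form.errors.as_json()}\n 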
#print(form.errors)\n data = {\n 'result': 'errores'\n }\n response=data\n return JsonResponse(response)\n\ndef addcol_hijo(request):#agregar hijo sec tres\n data = dict()\n form = HijoSectresForm(request.POST)\n bnombre = request.POST.get('hijo_nombre')\n bpaterno = request.POST.get('hijo_apellido_paterno')\n bmaterno = request.POST.get('hijo_apellido_materno')\n \n cand_id = request.POST.get('candId3_hijo')\n #cand_id =13\n response=\"\"\n if form.is_valid():\n candidato= Candidato.objects.get(id=cand_id)\n try:\n hij=Hijo_candidato.objects.get(candidato=candidato,hijo_nombre=bnombre,hijo_apellido_paterno=bpaterno,hijo_apellido_materno=bmaterno)\n if hij:\n data={\n 'form_is_valid' : False,\n 'error':'Hijo ya existe!'\n }\n except Exception as e:\n #print(e)\n hijo = form.save(commit=False)\n hijo.candidato=candidato\n hijo.save()\n data['form_is_valid'] = True\n hijos = Hijo_candidato.objects.filter(candidato=candidato)\n data['html_hijos_lista'] = render_to_string('candidatos/hijos_lista.html', {\n 'hijos': hijos\n })\n response=data\n else:\n # print(form.errors)\n data = {\n 'result': 'errores'\n }\n response=data\n \n return JsonResponse(response)\n\ndef editcol_hijo(request):#editar hijo\n data = dict()\n id=request.POST.get('hijo_id')\n cand_id=request.POST.get('candId_edithijo')\n #cand_id=13\n bnombre = request.POST.get('hijo_nombre')\n candidato= Candidato.objects.get(id=cand_id)\n try:\n hij=Hijo_candidato.objects.filter(~Q(id=id),candidato=candidato,hijo_nombre=bnombre)\n if hij:\n data={\n 'form_is_valid' : False,\n 'error':'Hijo ya existe!'\n }\n else:\n hijo= Hijo_candidato.objects.get(pk=id)\n candidato=hijo.candidato\n hijo.hijo_nombre=bnombre\n hijo.hijo_edad=request.POST.get('hijo_edad')\n hijo.hijo_ocupacion=request.POST.get('hijo_ocupacion')\n \n hijo.save()\n data['form_is_valid'] = True\n hijos = Hijo_candidato.objects.filter(candidato=candidato)\n data['html_hijos_lista'] = render_to_string('candidatos/hijos_lista.html', {\n 'hijos': hijos\n })\n except Exception as e:\n print(e)\n \n \n return JsonResponse(data)\n\ndef addcol_hermano(request):#agregar hermano sec tres\n data = dict()\n form = HermanoSectresForm(request.POST)\n bnombre = request.POST.get('hermano_nombre')\n bpaterno = request.POST.get('hermano_apellido_paterno')\n bmaterno = request.POST.get('hermano_apellido_materno')\n cand_id = request.POST.get('candId3_hermano')\n #cand_id =13\n response=\"\"\n if form.is_valid():\n candidato= Candidato.objects.get(id=cand_id)\n try:\n her=Hermano_candidato.objects.get(candidato=candidato,hermano_nombre=bnombre,hermano_apellido_paterno=bpaterno,hermano_apellido_materno=bmaterno)\n if her:\n data={\n 'form_is_valid' : False,\n 'error':'Hermano ya existe!'\n }\n except Exception as e: \n hermano = form.save(commit=False)\n hermano.candidato=candidato\n hermano.save()\n data['form_is_valid'] = True\n hermanos = Hermano_candidato.objects.filter(candidato=candidato)\n data['html_hermanos_lista'] = render_to_string('candidatos/hermanos_lista.html', {\n 'hermanos': hermanos\n })\n response=data\n else:\n #print(form.errors)\n data = {\n 'result': 'errores'\n }\n response=data\n return JsonResponse(response)\n\ndef editcol_hermano(request):#editar hermano\n data = dict()\n id=request.POST.get('hermano_id')\n cand_id = request.POST.get('candId_edithermano')\n #cand_id =13\n candidato= Candidato.objects.get(id=cand_id)\n bnombre = request.POST.get('hermano_nombre')\n try:\n her=Hermano_candidato.objects.filter(~Q(id=id),candidato=candidato,hermano_nombre=bnombre)\n if 
her:\n data={\n 'form_is_valid' : False,\n 'error':'Hermano ya existe!'\n }\n else:\n hermano= Hermano_candidato.objects.get(pk=id)\n candidato=hermano.candidato\n hermano.hermano_nombre=request.POST.get('hermano_nombre')\n hermano.hermano_edad=request.POST.get('hermano_edad')\n hermano.hermano_ocupacion=request.POST.get('hermano_ocupacion')\n \n hermano.save()\n data['form_is_valid'] = True\n hermanos = Hermano_candidato.objects.filter(candidato=candidato)\n data['html_hermanos_lista'] = render_to_string('candidatos/hermanos_lista.html', {\n 'hermanos': hermanos\n })\n\n except Exception as e:\n print(e)\n \n \n return JsonResponse(data)\n\ndef addcol_idioma(request):#agregar o modificar idioma\n data = dict()\n if not request.POST._mutable:\n request.POST._mutable = True\n form = IdiomaSecdosForm(request.POST)\n request.POST['idioma']=request.POST.get('idioma').upper()\n bidioma = request.POST.get('idioma')\n cand_id = request.POST.get('candId2_idi')\n #cand_id =13\n response=\"\"\n if form.is_valid():\n id=request.POST.get('idi_id')\n candidato= Candidato.objects.get(id=cand_id)\n if (id in [None,'']):#nuevo idioma\n try:\n idi=Idioma_candidato.objects.get(candidato=candidato,idioma=bidioma)\n if idi:\n data={\n 'form_is_valid' : False,\n 'error':'Idioma ya existe!'\n }\n except Exception as e:\n idi = form.save(commit=False)\n idi.candidato=candidato\n idi.save()\n data['form_is_valid'] = True\n idiomas = Idioma_candidato.objects.filter(candidato=candidato)\n data['html_idiomas_lista'] = render_to_string('candidatos/idiomas_lista.html', {\n 'idiomas': idiomas\n },request=request)\n response=data \n else:#actualizar idioma\n try:\n \n idioma= Idioma_candidato.objects.get(pk=id)\n #idioma = form.save(commit=False)\n idioma.idioma=form.cleaned_data['idioma']\n idioma.idioma_porcentaje=form.cleaned_data['idioma_porcentaje']\n idioma.save()\n data['form_is_valid'] = True\n idiomas = Idioma_candidato.objects.filter(candidato=candidato)\n data['html_idiomas_lista'] = render_to_string('candidatos/idiomas_lista.html', {\n 'idiomas': idiomas\n },request=request)\n response=data\n except Exception as e:\n print(e)\n data = {\n 'result': 'errores'\n }\n\n else:\n print(form.errors)\n data = {\n 'result': 'errores'\n }\n response=data\n \n return JsonResponse(response)\n\ndef editcol_idioma(request):#editar idioma\n data = dict()\n if not request.POST._mutable:\n request.POST._mutable = True\n id=request.POST.get('idi_id')\n request.POST['idioma']=request.POST.get('idioma').upper()\n f_idioma=request.POST.get('idioma')\n #print(f_idioma)\n f_porcentaje=request.POST.get('idioma_porcentaje')\n cand_id = request.POST.get('candId_editidioma')\n #cand_id =13\n candidato= Candidato.objects.get(id=cand_id)\n #investiga si existe el nombre\n try:\n \n idi=Idioma_candidato.objects.filter(~Q(id=id),candidato=candidato,idioma=f_idioma)\n if idi:\n data={\n 'form_is_valid' : False,\n 'error':'Idioma ya existe!'\n }\n else:\n idioma= Idioma_candidato.objects.get(pk=id)\n candidato=idioma.candidato\n\n idioma.idioma=f_idioma.upper()\n idioma.idioma_porcentaje=f_porcentaje\n idioma.save()\n data['form_is_valid'] = True\n idiomas = Idioma_candidato.objects.filter(candidato=candidato)\n data['html_idiomas_lista'] = render_to_string('candidatos/idiomas_lista.html', {\n 'idiomas': idiomas\n })\n # print(\"encontro\")\n except Exception as e:\n print(e)\n \n \n \n return JsonResponse(data)\n\ndef addcol_otroestudio(request):#agregar estudiootro sec dos\n data = dict()\n form = EstudioOtroSecdosForm(request.POST)\n 
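# NOTE (editor): the add/edit/del endpoints in this block all reply with JSON containing 'form_is_valid' plus an 'html_*_lista' fragment rendered via render_to_string, which the page's AJAX handler swaps into the matching list container (pattern inferred from this module; the templates are not shown).\n 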
cand_id = request.POST.get('candId2_estotro')\n # cand_id =13\n response=\"\"\n if form.is_valid():\n candidato= Candidato.objects.get(id=cand_id)\n estudio = form.save(commit=False)\n estudio.candidato=candidato\n estudio.save()\n data['form_is_valid'] = True\n estudiosotros = Estudios_otros.objects.filter(candidato=candidato)\n data['html_estudiosotros_lista'] = render_to_string('candidatos/estudiosotros_lista.html', {\n 'estudiosotros': estudiosotros\n })\n response=data\n else:\n print(form.errors)\n data = {\n 'result': 'errores'\n }\n response=data\n \n return JsonResponse(response)\n\ndef editcol_otroestudio(request):#editar estudio\n data = dict()\n id=request.POST.get('estotro_id')\n\n try:\n estudio= Estudios_otros.objects.get(pk=id)\n candidato=estudio.candidato\n estudio.estudios_tipo=request.POST.get('estudios_tipo')\n estudio.estudios_nombre=request.POST.get('estudios_nombre')\n estudio.estudios_inicio=request.POST.get('estudios_inicio')\n estudio.estudios_termino=request.POST.get('estudios_termino')\n estudio.estudios_documento=request.POST.get('estudios_documento')\n \n estudio.save()\n data['form_is_valid'] = True\n estudiosotros = Estudios_otros.objects.filter(candidato=candidato)\n data['html_estudiosotros_lista'] = render_to_string('candidatos/estudiosotros_lista.html', {\n 'estudiosotros': estudiosotros\n })\n response=data\n except Exception as e:\n print(e)\n data = {\n 'result': 'errores'\n }\n response=data\n \n \n return JsonResponse(data)\n\ndef addcol_estudio(request):#agregar estudio sec dos\n data = dict()\n form = EstudioSecdosForm(request.POST)\n cand_id = request.POST.get('candId2_est')\n #cand_id =13\n response=\"\"\n if form.is_valid():\n candidato= Candidato.objects.get(id=cand_id)\n estudio = form.save(commit=False)\n estudio.candidato=candidato\n estudio.save()\n data['form_is_valid'] = True\n estudios = Estudios_pro.objects.filter(candidato=candidato)\n data['html_estudios_lista'] = render_to_string('colaboradores/estudios_lista.html', {\n 'estudios': estudios\n })\n response=data\n else:\n print(form.errors)\n data = {\n 'result': 'errores'\n }\n response=data\n \n return JsonResponse(response)\n\ndef editcol_estudio(request):#editar estudio\n data = dict()\n id=request.POST.get('est_id')\n\n try:\n estudio= Estudios_pro.objects.get(pk=id)\n candidato=estudio.candidato\n estudio.estudios_tipo=request.POST.get('estudios_tipo')\n estudio.estudios_escuela=request.POST.get('estudios_escuela')\n estudio.estudios_nombre=request.POST.get('estudios_nombre')\n estudio.estudios_annios=request.POST.get('estudios_annios')\n estudio.estudios_inicio=request.POST.get('estudios_inicio')\n estudio.estudios_termino=request.POST.get('estudios_termino')\n estudio.estudios_documento=request.POST.get('estudios_documento')\n estudio.estudios_tesis=request.POST.get('estudios_tesis')\n estudio.estudios_forma=request.POST.get('estudios_forma')\n estudio.estudios_cedula=request.POST.get('estudios_cedula')\n \n estudio.save()\n data['form_is_valid'] = True\n estudios = Estudios_pro.objects.filter(candidato=candidato)\n data['html_estudios_lista'] = render_to_string('candidatos/estudios_lista.html', {\n 'estudios': estudios\n })\n response=data\n except Exception as e:\n print(e)\n data = {\n 'result': 'errores'\n }\n response=data\n \n \n return JsonResponse(data)\n\ndef editcol_experiencia(request):#editar experiencia\n data = dict()\n #form = IdiomaSecdosForm(request.POST)\n if not request.POST._mutable:\n request.POST._mutable = True\n 
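# NOTE (editor): flipping the private QueryDict._mutable flag works but relies on an internal attribute; the documented route is request.POST.copy(), e.g. (hypothetical rewrite, not applied):\n # post = request.POST.copy()\n # post['empresa_fecha_ingreso'] = mod_fecha(post.get('empresa_fecha_ingreso'))\n 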
request.POST['empresa_fecha_ingreso']=mod_fecha(request.POST.get('empresa_fecha_ingreso'))\n request.POST['empresa_fecha_separacion']=mod_fecha(request.POST.get('empresa_fecha_separacion'))\n id=request.POST.get('exp_id')\n exp= Experiencia.objects.get(pk=id)\n candidato=exp.candidato\n exp.empresa_nombre=request.POST.get('empresa_nombre')\n exp.empresa_direccion=request.POST.get('empresa_direccion')\n exp.empresa_tel=request.POST.get('empresa_tel')\n exp.empresa_giro=request.POST.get('empresa_giro')\n exp.empresa_nombre_jefe=request.POST.get('empresa_nombre_jefe')\n exp.empresa_jefe_puesto=request.POST.get('empresa_jefe_puesto')\n exp.empresa_fecha_ingreso=request.POST.get('empresa_fecha_ingreso')\n exp.empresa_salario_inicio=request.POST.get('empresa_salario_inicio')\n exp.empresa_fecha_separacion=request.POST.get('empresa_fecha_separacion')\n exp.empresa_salario_final=request.POST.get('empresa_salario_final')\n exp.empresa_puesto_ultimo=request.POST.get('empresa_puesto_ultimo')\n exp.empresa_puesto_ultimo_tiempo=request.POST.get('empresa_puesto_ultimo_tiempo')\n exp.empresa_puesto_ultimo_depto=request.POST.get('empresa_puesto_ultimo_depto')\n exp.empresa_puesto_anterior=request.POST.get('empresa_puesto_anterior')\n exp.empresa_puesto_anterior_tiempo=request.POST.get('empresa_puesto_anterior_tiempo')\n exp.empresa_puesto_anterior_depto=request.POST.get('empresa_puesto_anterior_depto')\n exp.experiencia_supervision=request.POST.get('experiencia_supervision')\n exp.experiencia_supervision_num=request.POST.get('experiencia_supervision_num')\n exp.separacion_motivo=request.POST.get('separacion_motivo')\n\n \n exp.save()\n data['form_is_valid'] = True\n experiencias = Experiencia.objects.filter(candidato=candidato)\n data['html_experiencias_lista'] = render_to_string('candidatos/experiencias_lista.html', {\n 'experiencias': experiencias\n })\n response=data\n \n return JsonResponse(data)\n\ndef editcol_referencia(request):#editar referencia\n data = dict()\n #form = IdiomaSecdosForm(request.POST)\n \n id=request.POST.get('ref_id')\n ref= Referencia.objects.get(pk=id)\n candidato=ref.candidato\n ref.referencia_nombre=request.POST.get('referencia_nombre')\n ref.referencia_domicilio=request.POST.get('referencia_domicilio')\n ref.referencia_tel=request.POST.get('referencia_tel')\n ref.referencia_ocupacion=request.POST.get('referencia_ocupacion')\n ref.referencia_annios_conocer=request.POST.get('referencia_annios_conocer')\n \n ref.save()\n data['form_is_valid'] = True\n referencias = Referencia.objects.filter(candidato=candidato)\n data['html_referencias_lista'] = render_to_string('candidatos/referencias_lista.html', {\n 'referencias': referencias\n })\n response=data\n \n return JsonResponse(data)\n\n\n\n####","repo_name":"juvenciolugo/springrrhh","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":141803,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69986612961","text":"import spiceypy\nfrom spice4mertis.core.geometry import pixel_geometry\nimport numpy as np\nimport pandas as pd\nimport datetime\nfrom spice4mertis.utils.sensor import ccd_center, pixel_lines, pixel_samples\n\ndef run(mk, time_start='', time_finish='', step=60, target='MERCURY',\n frame='', sensor='MPO_MERTIS_TIR_PLANET', pixel_line='',\n pixel_sample='', observer='MPO'):\n\n spiceypy.furnsh(mk)\n\n target = target.upper()\n if not time_start: time_start = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n if not frame: 
frame = f'IAU_{target}'\n if not time_finish: time_finish = time_start\n if not pixel_sample: pixel_sample = np.floor(ccd_center(sensor)[0])\n if not pixel_line: pixel_line = np.floor(ccd_center(sensor)[1])\n\n if pixel_sample == 'all':\n pixel_sample = np.arange(1, pixel_samples(sensor), 1)\n else:\n pixel_sample = [pixel_sample]\n if pixel_line == 'all':\n pixel_line = np.arange(1, pixel_lines(sensor), 1)\n else:\n pixel_line = [pixel_line]\n\n\n et_start = spiceypy.utc2et(time_start)\n et_finish = spiceypy.utc2et(time_finish)\n\n if et_start != et_finish:\n interval = np.arange(et_start, et_finish, step)\n else:\n interval = [et_start]\n\n # Time tag [UTC]\n # pixel id [(x,y)]\n # corner id [(x,y)]\n\n # Requested geometry\n\n # lat lon intersection (planetocentric)\n # lat lon subspacecraft\n # lat lon subsolar\n # target distance intersection\n # target angular diameter\n # local solar time intersection\n # phase angle intersection\n # emission angle intersection\n # incidence angle intersection\n\n with open('spice4mertis.csv', 'w') as o:\n o.write('utc,et,pixlin,pixsam,tarlon,tarlat,sublon,sublat,sunlon,sunlat,tardis,tarang,ltime,phase,emissn,incdnc\\n')\n for et in interval:\n utc = spiceypy.et2utc(et, 'ISOC', 3)\n for line in pixel_line:\n for sample in pixel_sample:\n pixelGeometry = pixel_geometry(et, sensor, line, sample, target, frame, observer=observer)\n print(utc,line,sample,str(pixelGeometry)[1:-1].replace(',',' '))\n o.write(f'{utc},{et},{line},{sample},{str(pixelGeometry)[1:-1].replace(\" \",\"\")}\\n')\n return\n\n","repo_name":"esaSPICEservice/spice4mertis","sub_path":"spice4mertis/core/director.py","file_name":"director.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37720055709","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n\n\n\nimport json\nimport codecs\nimport os\n\n\nclass JsonWithEncodingPipeline(object):\n\n def __init__(self):\n self.file = codecs.open('graped_data_utf8.json', 'w', encoding='utf-8')\n self.file.write('[')\n\n def process_item(self, item, spider):\n line = json.dumps(dict(item), ensure_ascii=False) + \"\\n\"\n self.file.write(line+',')\n return item\n\n def close_spider(self, spider):\n self.file.seek(-1, os.SEEK_END)\n self.file.truncate();\n self.file.write(']')\n self.file.close()\n","repo_name":"loading21th/tutorial-66law","sub_path":"tutorial/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73064188961","text":"import hashlib\nimport random\nimport time\nfrom urllib.parse import parse_qs\n\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom app.models import Wheel,Nav,Mustbuy,Shop,Mainshow,Foodtypes,Goods,User,Cart,OrderGoods,Order\n\n# Create your views here.\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django_redis import cache\nfrom django.core.cache import cache\n\nfrom app.alipay import alipay\n\n\ndef home(request):\n #轮播图\n wheels = Wheel.objects.all()\n navs = Nav.objects.all()\n mustbuys = Mustbuy.objects.all()\n shoplist = Shop.objects.all()\n shophead = shoplist[0]\n shoptabs = shoplist[1:3]\n shopclass= shoplist[3:7]\n shopcommends = shoplist[7:11]\n mainshows = Mainshow.objects.all()\n\n response_dir = {\n 
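# template context: the carousel, nav and must-buy entries plus the shop/main-show slices prepared above\n        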
'wheels':wheels,\n        'navs':navs,\n        'mustbuys':mustbuys,\n        'shophead':shophead,\n        'shoptabs':shoptabs,\n        'shopclass':shopclass,\n        'shopcommends':shopcommends,\n        'mainshows':mainshows,\n\n    }\n    return render(request,'home/home.html',context=response_dir)\n\ndef market(request, childid='0' ,sortid='0'):\n    foodtypes = Foodtypes.objects.all()\n\n\n    # fetch the matching goods from the selected category ID\n    # goods_list = Goods.objects.all()[0:5] # show all goods\n    # the best-sellers tab opens by default (shown goods should be filtered by that typeid)\n    # goods_list = Goods.objects.filter(categoryid=categoryid)\n\n    # the client records the index of the clicked category [carried automatically via cookies]\n    # jquery.cookie.js must be imported (it provides set/get cookie); configured in the js file\n    index = int(request.COOKIES.get('index','0')) # no cookie before the first click, so fall back to the default\n\n    # resolve the corresponding category ID from the index\n    categoryid = foodtypes[index].typeid # the index has to be converted to a number\n\n    # fetch the goods of this category by its id\n    goods_list = Goods.objects.filter(categoryid=categoryid)\n\n    # fetch the sub-category info\n    childtypenames= foodtypes[index].childtypenames\n\n    # split the sub-categories apart\n    childtype_list=[] # stores the sub-category list\n    for item in childtypenames.split('#'):\n        # item >> all categories:0\n        # item >> sub-category name: sub-category id\n        item_arr = item.split(':')\n        temp_dir = {\n            'name':item_arr[0],\n            'id':item_arr[1]\n        }\n        childtype_list.append(temp_dir)\n\n\n    if childid == '0':\n        goods_list = Goods.objects.filter(categoryid=categoryid)\n    else:\n        goods_list = Goods.objects.filter(categoryid=categoryid).filter(childcid=childid)\n\n    # sorting\n    if sortid == '1':\n        goods_list = goods_list.order_by('-productnum')\n    elif sortid == '2':\n        goods_list = goods_list.order_by('price')\n    elif sortid == '3':\n        goods_list = goods_list.order_by('-price')\n\n    response_dir = {\n        'foodtypes': foodtypes,\n        'goods_list': goods_list,\n        'childtype_list': childtype_list,\n        'childid': childid,\n\n\n    }\n\n\n    # fetch the cart info (requires login)\n    token = request.session.get('token')\n    userid = cache.get(token) # resolve the user from the token\n    if userid:\n        user = User.objects.get(pk=userid)# a logged-in user always has a cart\n        carts = user.cart_set.all()# behaves like an object manager\n        response_dir['carts'] = carts\n\n\n\n\n\n    return render(request,'market/market.html',context=response_dir)\n\ndef cart(request):\n    # carts =Cart.objects.all() # fetch all cart rows\n    # carts = Cart.objects.filter(number__gt=0)\n    # return render(request,'cart/cart.html',context={'carts':carts})\n    token = request.session.get('token')\n    userid = cache.get(token)\n    if userid:  # only shown when logged in\n        user = User.objects.get(pk=userid)\n        carts = user.cart_set.filter(number__gt=0)\n\n        isall = True\n        for cart in carts:\n            if not cart.isselect:\n                isall = False\n\n        return render(request, 'cart/cart.html', context={'carts': carts, 'isall': isall})\n    else:  # hidden when not logged in\n        return render(request, 'cart/no-login.html')\n\ndef mine(request):\n    token = request.session.get('token')\n    userid = cache.get(token)\n    response_data = {\n        'user': None\n    }\n    if userid:\n        user = User.objects.get(pk=userid)\n        response_data['user'] = user\n\n        orders = user.order_set.all()\n        # awaiting payment\n        response_data['waitpay'] = orders.filter(status=0).count()\n        # awaiting shipment\n        response_data['paydone'] = orders.filter(status=1).count()\n\n    return render(request, 'mine/mine.html', context=response_data)\n\ndef login(request):\n    if request.method == 'GET':\n        return render(request, 'mine/login.html')\n    elif request.method == 'POST':\n        email = request.POST.get('email')\n        password = request.POST.get('password')\n        back = request.COOKIES.get('back') # redirect target\n        print(back)\n        user = User.objects.filter(email=email)\n        if user.exists():\n            user = user.first()\n            if user.password == generate_password(password):\n\n                token = generate_token() # refresh the token\n                cache.set(token,user.id,60*60*24*3) # keep the login state\n                # hand the token to the client\n                request.session['token']=token\n                # return 
redirect('axf:mine')\n\n if back == 'mine':\n return redirect('axf:mine')\n else:\n return redirect('axf:marketbase')\n\n else:\n return render(request, 'mine/login.html', context={'ps_err': '密码错误'})\n else:\n return render(request,'mine/login.html',context={'user_err':'用户不存在' })\n # return render(request,'mine/mine.html')\n\ndef logout(request):\n request.session.flush()\n\n return render(request,'mine/mine.html')\n\n\ndef generate_password(param):\n md5 = hashlib.md5()\n md5.update(param.encode('utf-8'))\n return md5.hexdigest()\n\ndef generate_token():\n temp = str(time.time()) + str(random.random())\n md5 = hashlib.md5()\n md5.update(temp.encode('utf-8'))\n return md5.hexdigest()\n\n\ndef register(request):\n if request.method == 'GET':\n return render(request, 'mine/register.html')\n elif request.method == 'POST':\n email = request.POST.get('email')\n password = generate_password(request.POST.get('password'))\n name = request.POST.get('name')\n\n user=User()\n user.email = email\n user.password = password\n user.name = name\n user.save()\n\n token = generate_token()\n cache.set(token, user.id, 60 * 60 * 24 * 3)\n\n request.session['token'] = token\n return render(request,'mine/mine.html')\n\n\ndef addcart(request):\n # 获取token\n token = request.session.get('token')\n\n # 响应数据\n response_data = {}\n\n # 缓存\n if token:\n userid = cache.get(token)\n # print(userid) #获取用户id\n if userid: # 已经登录\n user = User.objects.get(pk=userid)\n goodsid = request.GET.get('goodsid')\n goods = Goods.objects.get(pk=goodsid)\n # print(user,goodsid) #点击商品后查看是否能获取对应id\n # 商品不存在: 添加新记录 商品存在: 修改number\n carts = Cart.objects.filter(user=user).filter(goods=goods)\n if carts.exists():\n cart = carts.first()\n cart.number = cart.number + 1\n cart.save()\n else:\n cart = Cart()\n cart.user = user\n cart.goods = goods\n cart.number = 1\n cart.save()\n\n response_data['status'] = 1 #添加成功\n response_data['number'] = cart.number\n response_data['msg'] = '添加 {} 购物车成功: {}'.format(cart.goods.productlongname, cart.number)\n\n return JsonResponse(response_data)\n # return HttpResponse('添加商品到购物车')\n response_data['status'] = -1 #未登录状态\n response_data['msg'] = '请登录后操作'\n return JsonResponse(response_data)\n\ndef checkemail(request):\n email = request.GET.get('email')\n\n # 去数据库中查找\n users = User.objects.filter(email=email)\n if users.exists(): # 账号被占用 1可用, 0不可用\n response_data = {\n 'status': 0,\n 'msg': '账号被占用!'\n }\n else: # 账号可用\n response_data = {\n 'status':1,\n 'msg': '账号可用!'\n }\n return JsonResponse(response_data)\n\ndef subcart(request):\n goodsid = request.GET.get('goodsid') #获取ajax请求参数\n goods = Goods.objects.get(pk=goodsid) #通过商品id获取商品\n\n #用户 #减用户已经登录\n token = request.session.get('token')\n userid = cache.get(token)\n user = User.objects.get(pk=userid)\n\n #获取对应的购物车信息\n cart = Cart.objects.filter(user=user).filter(goods=goods).first()\n cart.number = cart.number -1\n cart.save()\n\n print((goodsid))\n response_data ={\n 'msg':'删减成功',\n 'status':1,\n 'number':cart.number\n }\n return JsonResponse(response_data)\n\n# def changecartselect(requset):\n# cartid = requset.GET.get('cartid')\n# print(cartid)\n# response_data = {\n# 'msg':'状态修改成功',\n# 'status':1,\n# }\n# return JsonResponse(response_data)\ndef changecartselect(request):\n cartid = request.GET.get('cartid')\n\n cart = Cart.objects.get(pk=cartid)\n cart.isselect = not cart.isselect\n cart.save()\n\n response_data = {\n 'msg': '状态修改成功',\n 'status': 1,\n 'isselect': cart.isselect\n }\n\n return JsonResponse(response_data)\n\ndef changecartall(request):\n 
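# select-all / deselect-all: flip the isselect flag on every cart row owned by the session user\n    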
isall = request.GET.get('isall')\n\n token = request.session.get('token')\n userid = cache.get(token)\n user = User.objects.get(pk=userid)\n carts = user.cart_set.all()\n\n if isall == 'true':\n isall = True\n else:\n isall = False\n\n for cart in carts:\n cart.isselect = isall\n cart.save()\n\n response_data = {\n 'msg': '全选/取消全选 成功',\n 'status': 1\n }\n\n return JsonResponse(response_data)\n\ndef generate_identifier():\n temp = str(time.time()) + str(random.randrange(1000,10000))\n return temp\n\n\n\ndef generateorder(request):\n token = request.session.get('token')\n userid = cache.get(token)\n user = User.objects.get(pk=userid)\n\n # 订单\n order = Order()\n order.user = user\n order.identifier = generate_identifier()\n order.save()\n\n # 订单商品(购物车中选中)\n carts = user.cart_set.filter(isselect=True)\n for cart in carts:\n orderGoods = OrderGoods()\n orderGoods.order = order\n orderGoods.goods = cart.goods\n orderGoods.number = cart.number\n orderGoods.save()\n # 购买后从购物车中移除\n cart.delete()\n\n return render(request, 'order/orderdetail.html', context={'order': order})\n\ndef orderlist(request):\n token = request.session.get('token')\n userid = cache.get(token)\n user = User.objects.get(pk=userid)\n orders = user.order_set.all()\n # status_list = ['未付款', '待发货', '待收货', '待评价', '已评价']\n return render(request, 'order/orderlist.html', context={'orders':orders})\n\ndef orderdetail(request, identifier):\n order = Order.objects.filter(identifier=identifier).first()\n return render(request, 'order/orderdetail.html', context={'order': order})\n\ndef returnurl(request):\n return redirect('axf:mine')\n\n\n@csrf_exempt\ndef appnotifyurl(request):\n if request.method == 'POST':\n # 获取到参数\n body_str = request.body.decode('utf-8')\n # 通过parse_qs函数\n post_data = parse_qs(body_str)\n # 转换为字典\n post_dic = {}\n for k,v in post_data.items():\n post_dic[k] = v[0]\n # 获取订单号\n out_trade_no = post_dic['out_trade_no']\n # 更新状态\n Order.objects.filter(identifier=out_trade_no).update(status=1)\n return JsonResponse({'msg':'success'})\n\n\ndef pay(request):\n orderid = request.GET.get('orderid')\n order = Order.objects.get(pk=orderid)\n\n sum = 0\n for orderGoods in order.ordergoods_set.all():\n sum += orderGoods.goods.price * orderGoods.number\n\n # 支付地址信息\n data = alipay.direct_pay(\n subject='支付', # 显示标题\n out_trade_no=order.identifier, #订单号\n total_amount=str(sum), # 支付金额\n return_url='http://127.0.0.1:8000/axf/returnurl/'\n )\n\n # 支付地址\n alipay_url = 'https://openapi.alipaydev.com/gateway.do?{data}'.format(data=data)\n\n response_data = {\n 'msg': '调用支付接口',\n 'alipayurl': alipay_url,\n 'status': 1\n }\n return JsonResponse(response_data)\n","repo_name":"yaoxi123/axf","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71807265123","text":"\"\"\" Python wrapper for getting water consumption data from MyWaterToronto. 
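\n\n    A minimal usage sketch (illustrative only: the account strings below are placeholders, not real data, and last_payment_method stands for some LastPaymentMethod member):\n\n        async with ClientSession() as session:\n            mwt = MyWaterToronto(session, \"123456789\", \"01\", \"Lastname\", \"M1M 1M1\", last_payment_method)\n            if await mwt.async_validate_account():\n                details = await mwt.async_get_account_details()\n                usage = await mwt.async_get_consumption()\n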
\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport logging\nfrom datetime import date, datetime\nfrom http import HTTPStatus\nfrom typing import Any, cast\n\nfrom aiohttp import ClientResponse, ClientSession\nfrom aiohttp_retry import ExponentialRetry, RetryClient\nfrom pytz import timezone, utc\n\nfrom .const import (\n AIOHTTP_RETRY_ATTEMPTS,\n API_ACCOUNTDETAILS_URL,\n API_CONSUMPTION_URL,\n API_OP_VALIDATE,\n API_VALIDATE_URL,\n BAD_REQUEST,\n CONSUMPTION_RESULT_OK,\n HTTP_HEADERS,\n HTTP_MOVED_TEMPORARILY,\n INTERVAL_DAY,\n INTERVAL_HOUR,\n INTERVAL_MONTH,\n KEY_ADDRESS,\n KEY_CONSUMPTION,\n KEY_CONSUMPTION_DATA,\n KEY_CONSUMPTION_END_DATE,\n KEY_CONSUMPTION_INTERVAL_TYPE,\n KEY_CONSUMPTION_START_DATE,\n KEY_CONSUMPTION_SUMMARY,\n KEY_CONSUMPTION_TOTAL,\n KEY_CONSUMPTION_UNITOFMEASURE,\n KEY_CONSUMPTION_VALUE_TYPE,\n KEY_ERROR_MESSAGE,\n KEY_ERROR_STRING,\n KEY_METER_FIRST_READ_DATE,\n KEY_METER_LAST_READ_DATE,\n KEY_METER_LAST_READING,\n KEY_METER_LIST,\n KEY_METER_MIU,\n KEY_METER_NUMBER,\n KEY_METER_UNIT_OF_MEASURE,\n KEY_PREMISE_ID,\n KEY_PREMISE_LIST,\n KEY_REF_TOKEN,\n KEY_RESULT_CODE,\n KEY_STATUS,\n KEY_VALIDATE_RESPONSE,\n STATUS_FAILURE,\n STATUS_VALIDATION_ERROR,\n)\nfrom .enums import ConsumptionBuckets, LastPaymentMethod\nfrom .errors import (\n AccountDetailsError,\n AccountNotValidatedError,\n ApiError,\n GetConsumptionError,\n SessionValidationError,\n ValidateAccountInfoError,\n)\nfrom .format_date import (\n format_date,\n format_start_month,\n format_start_week,\n format_start_year,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass MyWaterToronto:\n \"\"\"Main class to perform MyWaterToronto API requests.\"\"\"\n\n def __init__(\n self,\n session: ClientSession,\n account_number: str,\n client_number: str,\n last_name: str,\n postal_code: str,\n last_payment_method: LastPaymentMethod,\n ) -> None:\n \"\"\"Initialize.\"\"\"\n self._session = session\n self._retry_client = RetryClient(session)\n self._account_number = account_number\n self._client_number = client_number\n self._last_name = last_name\n self._postal_code = postal_code\n self._last_payment_method = last_payment_method\n\n self._ref_token: str = None\n\n self._account_details: dict[str, Any] = None\n\n self._consumption_buckets = None\n\n self._retry_options = ExponentialRetry(\n attempts=AIOHTTP_RETRY_ATTEMPTS,\n statuses={BAD_REQUEST},\n evaluate_response_callback=self._async_evaluate_response,\n )\n\n async def async_validate_account(self) -> bool:\n \"\"\"Validate account information with MyWaterToronto.\"\"\"\n\n url = API_VALIDATE_URL\n\n payload = {\n \"API_OP\": API_OP_VALIDATE,\n \"ACCOUNT_NUMBER\": self.account_number_full,\n \"LAST_NAME\": self._last_name,\n \"POSTAL_CODE\": self._postal_code,\n \"LAST_PAYMENT_METHOD\": self._last_payment_method.value,\n }\n\n async with self._retry_client.post(\n url=url,\n retry_options=self._retry_options,\n headers=HTTP_HEADERS,\n json=payload,\n verify_ssl=False,\n ) as resp:\n if (resp.status == HTTP_MOVED_TEMPORARILY) or (\n resp.real_url.name == \"something-went-wrong.html\"\n ):\n raise ValidateAccountInfoError(\"Invalid account information\")\n if resp.status != HTTPStatus.OK:\n json.loads(await resp.text())\n raise ApiError(\n f\"Invalid response from MyWaterToronto API: {resp.status}\"\n )\n _LOGGER.debug(\"Data retrieved from %s, status: %s\", url, resp.status)\n data = await resp.json()\n\n _LOGGER.debug(\"Data retrieved from validate --> %s\", json.dumps(data, indent=4))\n\n if KEY_VALIDATE_RESPONSE not in data:\n raise (\n 
f\"{KEY_VALIDATE_RESPONSE} key could not be found in \"\n f\"MyWaterToronto Validation Response: {data}\"\n )\n\n if KEY_REF_TOKEN not in data[KEY_VALIDATE_RESPONSE]:\n raise ApiError(\n f\"{KEY_REF_TOKEN} key could not be found in \"\n f\"MyWaterToronto Validation Response: {data}\"\n )\n self._ref_token = data[KEY_VALIDATE_RESPONSE][KEY_REF_TOKEN]\n\n _LOGGER.debug(\"Ref token retrieved from validate --> %s\", self._ref_token)\n\n return True\n\n async def _async_evaluate_response(self, response: ClientResponse) -> bool:\n data = await response.json()\n if KEY_RESULT_CODE in data:\n result_code = data[KEY_RESULT_CODE]\n if result_code == BAD_REQUEST:\n return False\n\n return True\n\n async def async_get_account_details(self) -> dict[str, Any]:\n \"\"\"Get the account details from MyWaterToronto.\"\"\"\n\n # Check if there is a ref token\n if not self._ref_token:\n raise AccountNotValidatedError(\"The account has not been validated yet\")\n\n params_json = {\n \"API_OP\": \"ACCOUNTDETAILS\",\n \"ACCOUNT_NUMBER\": self.account_number_full,\n }\n params = {\"refToken\": self._ref_token, \"json\": json.dumps(params_json)}\n\n url = API_ACCOUNTDETAILS_URL\n async with self._retry_client.get(\n url=url,\n retry_options=self._retry_options,\n headers=HTTP_HEADERS,\n params=params,\n verify_ssl=False,\n ) as resp:\n\n if resp.status != HTTPStatus.OK:\n json.loads(await resp.text())\n raise ApiError(\n f\"Invalid response from MyWaterToronto API: {resp.status}\"\n )\n _LOGGER.debug(\"Data retrieved from %s, status: %s\", url, resp.status)\n data = await resp.json()\n\n _LOGGER.debug(\n \"Data retrieved from account details --> %s\",\n json.dumps(data, indent=4),\n )\n\n if KEY_PREMISE_LIST not in data:\n raise AccountDetailsError(\n f\"Premise list could not be found in \"\n f\"MyWaterToronto Account Details response: {data}\"\n )\n\n if KEY_METER_LIST not in data[KEY_PREMISE_LIST][0]:\n raise AccountDetailsError(\n f\"Meter list could not be found in \"\n f\"MyWaterToronto Account Details response: {data}\"\n )\n\n self._account_details = cast(\n dict[str, Any], data if isinstance(data, dict) else data[0]\n )\n return self.account_details\n\n async def _async_update_consumption_buckets(\n self,\n meter: dict[str, Any],\n current_date: date | None = None,\n ) -> None:\n\n if current_date:\n _current_date = current_date\n else:\n _current_date = utc.localize(datetime.utcnow()).astimezone(\n timezone(\"Canada/Eastern\")\n )\n\n if _current_date.month == 1:\n ytd_interval = INTERVAL_DAY\n else:\n ytd_interval = INTERVAL_MONTH\n\n self._consumption_buckets = {\n ConsumptionBuckets.TOTAL_USAGE: {\n KEY_CONSUMPTION_VALUE_TYPE: ConsumptionBuckets.TOTAL_USAGE.value,\n KEY_CONSUMPTION_INTERVAL_TYPE: INTERVAL_MONTH,\n KEY_CONSUMPTION_START_DATE: meter[KEY_METER_FIRST_READ_DATE],\n KEY_CONSUMPTION_END_DATE: format_date(_current_date),\n },\n ConsumptionBuckets.TODAY_USAGE: {\n KEY_CONSUMPTION_VALUE_TYPE: ConsumptionBuckets.TODAY_USAGE.value,\n KEY_CONSUMPTION_INTERVAL_TYPE: INTERVAL_HOUR,\n KEY_CONSUMPTION_START_DATE: format_date(_current_date),\n KEY_CONSUMPTION_END_DATE: format_date(_current_date),\n },\n ConsumptionBuckets.WEEK_TO_DATE_USAGE: {\n KEY_CONSUMPTION_VALUE_TYPE: ConsumptionBuckets.WEEK_TO_DATE_USAGE.value,\n KEY_CONSUMPTION_INTERVAL_TYPE: INTERVAL_DAY,\n KEY_CONSUMPTION_START_DATE: format_start_week(_current_date),\n KEY_CONSUMPTION_END_DATE: format_date(_current_date),\n },\n ConsumptionBuckets.MONTH_TO_DATE_USAGE: {\n KEY_CONSUMPTION_VALUE_TYPE: 
ConsumptionBuckets.MONTH_TO_DATE_USAGE.value, # pylint: disable=line-too-long\n KEY_CONSUMPTION_INTERVAL_TYPE: INTERVAL_DAY,\n KEY_CONSUMPTION_START_DATE: format_start_month(_current_date),\n KEY_CONSUMPTION_END_DATE: format_date(_current_date),\n },\n ConsumptionBuckets.YEAR_TO_DATE_USAGE: {\n KEY_CONSUMPTION_VALUE_TYPE: ConsumptionBuckets.YEAR_TO_DATE_USAGE.value,\n KEY_CONSUMPTION_INTERVAL_TYPE: ytd_interval,\n KEY_CONSUMPTION_START_DATE: format_start_year(_current_date),\n KEY_CONSUMPTION_END_DATE: format_date(_current_date),\n },\n }\n\n async def _async_get_meter_info(\n self,\n meter_number: str,\n ) -> dict[str, Any]:\n\n selected_meter = None\n\n for premise in self._account_details[KEY_PREMISE_LIST]:\n for meter in premise[KEY_METER_LIST]:\n if meter[KEY_METER_NUMBER] == meter_number:\n selected_meter = meter\n\n return cast(\n dict[str, Any],\n selected_meter if isinstance(selected_meter, dict) else selected_meter[0],\n )\n\n async def async_get_consumption(self, buckets=None) -> dict[str, Any]:\n \"\"\"Get the meter consumption from MyWaterToronto.\"\"\"\n\n _LOGGER.debug(\"Getting consumption data for account\")\n\n account_consumption = {}\n account_consumption[KEY_PREMISE_LIST] = {}\n\n for premise in self.account_details[KEY_PREMISE_LIST]:\n _LOGGER.debug(\n \"Getting consumption data for Premise ID: %s, address: %s\",\n premise[KEY_PREMISE_ID],\n premise[KEY_ADDRESS],\n )\n\n premise_consumption = {\n KEY_ADDRESS: premise[KEY_ADDRESS],\n KEY_METER_LIST: {},\n }\n\n for meter in premise[KEY_METER_LIST]:\n meter_consumption = await self.async_get_meter_consumption(\n meter, buckets=buckets\n )\n\n premise_consumption[KEY_METER_LIST][\n meter[KEY_METER_NUMBER]\n ] = meter_consumption\n\n account_consumption[KEY_PREMISE_LIST][\n premise[KEY_PREMISE_ID]\n ] = premise_consumption\n\n return account_consumption\n\n async def async_get_meter_consumption(\n self, meter: dict[str, Any], buckets=None\n ) -> dict[str, Any]:\n \"\"\"Get the meter consumption from MyWaterToronto for the specified meter.\"\"\"\n\n _LOGGER.debug(\"Getting consumption data for meter: %s\", meter[KEY_METER_NUMBER])\n\n meter_data = {\n KEY_METER_FIRST_READ_DATE: meter[KEY_METER_FIRST_READ_DATE],\n KEY_METER_LAST_READ_DATE: meter[KEY_METER_LAST_READ_DATE],\n KEY_CONSUMPTION_DATA: {},\n }\n\n for bucket in ConsumptionBuckets:\n # If a subset of buckets was selected, check if current bucket is\n # selected otherwise skip bucket\n if buckets:\n if bucket not in buckets:\n continue\n\n try:\n consumption = await self.async_get_meter_consumption_for_bucket(\n meter, consumption_bucket=bucket\n )\n except Exception as error:\n raise GetConsumptionError(f\"Error '{error}'\") from error\n\n _LOGGER.debug(\n \"Consumption for bucket %s is %s%s\",\n bucket.value,\n consumption[KEY_CONSUMPTION],\n consumption[KEY_CONSUMPTION_UNITOFMEASURE],\n )\n meter_data[KEY_CONSUMPTION_DATA][bucket.value] = consumption\n\n return meter_data\n\n async def async_get_meter_consumption_for_bucket(\n self,\n meter: dict[str, Any],\n consumption_bucket: ConsumptionBuckets | None,\n ) -> dict[str, Any]:\n # pylint: disable=too-many-branches\n \"\"\"Get the meter consumption from MyWaterToronto for the specified bucket.\"\"\"\n\n consumption_data = None\n\n if not consumption_bucket:\n _consumption_bucket = ConsumptionBuckets.TOTAL_USAGE\n else:\n _consumption_bucket = consumption_bucket\n\n if _consumption_bucket == ConsumptionBuckets.TOTAL_USAGE:\n # Add the current reading from account details\n consumption_data = {\n KEY_CONSUMPTION: 
meter[KEY_METER_LAST_READING].lstrip(\"0\"),\n KEY_CONSUMPTION_UNITOFMEASURE: meter[KEY_METER_UNIT_OF_MEASURE],\n }\n else:\n # Check if there is a ref token\n if not self._ref_token:\n raise AccountNotValidatedError(\"The account has not been validated yet\")\n\n # Check if the consumption buckets have been defined\n if not self._consumption_buckets:\n await self._async_update_consumption_buckets(meter)\n\n consumption_bucket = self._consumption_buckets[_consumption_bucket]\n\n params_json = {\n \"API_OP\": \"CONSUMPTION\",\n \"ACCOUNT_NUMBER\": self.account_number_full,\n \"MIU_ID\": meter[KEY_METER_MIU],\n \"START_DATE\": consumption_bucket[KEY_CONSUMPTION_START_DATE],\n \"END_DATE\": consumption_bucket[KEY_CONSUMPTION_END_DATE],\n \"INTERVAL_TYPE\": consumption_bucket[KEY_CONSUMPTION_INTERVAL_TYPE],\n }\n\n params = {\n \"refToken\": self._ref_token,\n \"json\": json.dumps(params_json),\n }\n\n _LOGGER.debug(\"Params to retrieve consumption data: %s\", params)\n\n url = API_CONSUMPTION_URL\n\n async with self._retry_client.get(\n url=url,\n retry_options=self._retry_options,\n headers=HTTP_HEADERS,\n params=params,\n verify_ssl=False,\n ) as resp:\n if resp.status != HTTPStatus.OK:\n error_text = json.loads(await resp.text())\n raise ApiError(\n f\"Invalid response from MyWaterToronto \"\n f\"Consumption API: {error_text}\"\n )\n if resp.content_type != \"application/json\":\n raise ApiError(\n \"Response is not in application/json \"\n \"format form MyWaterToronto Consumption API\"\n )\n\n _LOGGER.debug(\"Data retrieved from %s, status: %s\", url, resp.status)\n data = await resp.json()\n\n _LOGGER.debug(\n \"Data retrieved from meter consumption --> %s\",\n json.dumps(data, indent=4),\n )\n\n if KEY_RESULT_CODE not in data:\n if KEY_VALIDATE_RESPONSE in data:\n validate_response = data[KEY_VALIDATE_RESPONSE]\n status = validate_response[KEY_STATUS]\n if status == STATUS_FAILURE:\n error_message = validate_response[KEY_ERROR_MESSAGE]\n if error_message == STATUS_VALIDATION_ERROR:\n raise SessionValidationError(\n \"Session has timed out or it has not been validated yet\"\n )\n\n raise ApiError(\"Invalid consumption data returned\")\n\n result_code = data[KEY_RESULT_CODE]\n\n if result_code == BAD_REQUEST:\n # The MyWaterToronto data returns a 'Bad Request' error string\n # when there is no data, typically for hourly data request\n\n consumption_value = 0\n elif result_code != CONSUMPTION_RESULT_OK:\n raise ApiError(\n f\"Error returned from consumption data \"\n f\"- resultCode: {result_code}, \"\n f\"errorString: {data[KEY_ERROR_STRING]}\"\n )\n else:\n if KEY_CONSUMPTION_SUMMARY not in data:\n raise ApiError(\n f\"Consumption summary could not be found \"\n f\"in MyWaterToronto Consumption response: {data}\"\n )\n\n if KEY_CONSUMPTION_TOTAL in data[KEY_CONSUMPTION_SUMMARY]:\n consumption_value = data[KEY_CONSUMPTION_SUMMARY][\n KEY_CONSUMPTION_TOTAL\n ]\n else:\n consumption_value = 0\n\n consumption_data = {\n KEY_CONSUMPTION: consumption_value,\n KEY_CONSUMPTION_UNITOFMEASURE: meter[KEY_METER_UNIT_OF_MEASURE],\n }\n\n return consumption_data\n\n @property\n def account_number_full(self) -> str | None:\n \"\"\"Return full account number.\"\"\"\n return self._account_number + \"-\" + self._client_number\n\n @property\n def account_details(self) -> dict[str, Any] | None:\n \"\"\"Return account details.\"\"\"\n return 
self._account_details\n","repo_name":"davecpearce/pymywatertoronto","sub_path":"pymywatertoronto/mywatertoronto.py","file_name":"mywatertoronto.py","file_ext":"py","file_size_in_byte":17564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14071864772","text":"import sys\nimport time\n\ndef spinning_cursor():\n    while True:\n        for cursor in '|/-\\\\':\n            yield cursor\n\n\ndef loading_animation():\n    animation = \"|/-\\\\\"\n    for i in range(20):\n        time.sleep(0.1)\n        print(\"\\r\" + \"Loading :) \" + animation[i % len(animation)], end=\"\")\n\n\n# use the infinite generator above: loading_animation() runs its own loop and\n# returns None, so calling next() on its result would raise TypeError\nspinner = spinning_cursor()\nfor _ in range(50):\n    sys.stdout.write(next(spinner))\n    sys.stdout.flush()\n    time.sleep(0.1)\n    sys.stdout.write('\\b')","repo_name":"RIcki-cpu/AI-Proyects","sub_path":"spinningcursor.py","file_name":"spinningcursor.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6752490445","text":"import torch.nn as nn\nimport torch\nimport math\nimport pdb\n\n\nclass PoolingLayer(nn.Module):\n    \"\"\"\n    PoolingLayer: convert a 3-dim tensor to a 2-dim tensor;\n    \"\"\" \n\n    def __init__(self, pooling_type='last-pooling', sequence_len=None, seq_mask=None):\n        super().__init__()\n        self.pooling_type = pooling_type\n        self.sequence_len = sequence_len\n        self.seq_mask = seq_mask #(bs, sl)\n        self.labels = None\n\n    def set_pooling_type(self, pooling_type):\n        self.pooling_type = pooling_type\n        return\n    \n    def set_sequence_len(self, sequence_length):\n        self.sequence_len = sequence_length\n        return\n\n    def forward(self, x):\n        \"\"\"\n        input: \n            x: (bs, sl, dim)\n\n        return \n            result: (bs, dim)\n        \"\"\"\n        if self.pooling_type == 'max-pooling':\n            seq_mask_expand = self.seq_mask.unsqueeze(-1).expand(x.size()).float()\n            x[seq_mask_expand == 0.] 
= -1e9 #(bs, sl, dim)\n return torch.max(x, 1)[0]\n if self.pooling_type == 'mean-pooling':\n # seq_mask_expand = self.seq_mask.unsqueeze(-1).expand(x.size()).float() #(bs, sl, dim)\n seq_mask_expand = self.seq_mask.unsqueeze(-1).repeat(1, 1, x.size()[-1]).float() #(bs, sl, dim)\n x_sum_embeddings = torch.sum(x * seq_mask_expand, 1) #(bs, dim)\n x_len = torch.sum(self.seq_mask, -1).unsqueeze(-1) #(bs)\n # pdb.set_trace()\n return x_sum_embeddings/x_len\n if self.pooling_type == 'last-pooling':\n assert self.sequence_len != None\n # pdb.set_trace()\n # return torch.index_select(x, 1, self.sequence_len -1) #(bs, dim)\n # return x[torch.arange(x.size(0)), self.sequence_len-1] #(bs, dim)\n # indices = torch.unsqueeze(self.sequence_len-1, 1)\n # indices = torch.unsqueeze(indices, 2)\n # indices = torch.repeat_interleave(indices, x.size(-1), dim=2) #(bs, 1, dim)\n # return torch.gather(x, 1, indices).squeeze() #(bs, dim)\n #由于是左padding, 因此只选取最后一个item即可;\n return x[:, -1, :]\n \n if self.pooling_type == 'item_level_mean_pooling':\n assert self.labels != None #(bs, sl)\n labels_mask = self.labels.unsqueeze(-1).float() #(bs, sl, 1)\n x_sum_embeddings = torch.sum(labels_mask * x, 1) #(bs, dim)\n return x_sum_embeddings / torch.sum(self.labels + 0.1, -1).unsqueeze(-1)\n\n \n if self.pooling_type == 'cls_pooling':\n assert self.sequence_len != None #(bs)\n return x[torch.arange(x.size(0)), self.sequence_len] #(bs, dim)\n return None","repo_name":"paulpig/sequentialRec","sub_path":"meantime/models/transformer_models/utils/pooling.py","file_name":"pooling.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18517458365","text":"from fastapi import HTTPException, status\n\n\nclass UnAuthorized(HTTPException):\n def __init__(self, message: str = \"Could not validate credentials\"):\n super().__init__(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=message,\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n","repo_name":"kseriakov/fastapi-mongo-jwt","sub_path":"backend/auth/auth_exceptions.py","file_name":"auth_exceptions.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35076323647","text":"#!/usr/bin/env python3\n\"\"\" Expectation \"\"\"\n\n\nimport numpy as np\npdf = __import__('5-pdf').pdf\n\n\ndef expectation(X, pi, m, S):\n \"\"\" Function that calculates the expectation step\n in the EM algorithm for a GMM\n \"\"\"\n\n if not isinstance(X, np.ndarray) or len(X.shape) != 2:\n return None, None\n if not isinstance(m, np.ndarray) or len(m.shape) != 2:\n return None, None\n if not isinstance(S, np.ndarray) or len(S.shape) != 3:\n return None, None\n if not isinstance(pi, np.ndarray) or len(pi.shape) != 1:\n return None, None\n\n n, d = X.shape\n\n if d != S.shape[1] or S.shape[1] != S.shape[2]:\n return (None, None)\n if d != m.shape[1] or m.shape[0] != S.shape[0]:\n return (None, None)\n if pi.shape[0] != m.shape[0]:\n return (None, None)\n\n if not np.isclose(np.sum(pi), 1):\n return None, None\n\n k = S.shape[0]\n auxiliar = np.zeros((k, n))\n\n for i in range(k):\n PDF = pdf(X, m[i], S[i])\n auxiliar[i] = pi[i] * PDF\n\n g = auxiliar / np.sum(auxiliar, axis=0)\n tll = np.sum(np.log(np.sum(auxiliar, axis=0)))\n\n return g, 
tll\n","repo_name":"linkjavier/holbertonschool-machine_learning","sub_path":"unsupervised_learning/0x01-clustering/6-expectation.py","file_name":"6-expectation.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72536959521","text":"def is_winning_board(board):\n winning_line = len(board) * [\"X\"]\n for line in board:\n if line == winning_line:\n return True\n for x in range(0, len(board)):\n column = []\n for line in board:\n column.append(line[x])\n if column == winning_line:\n return True\n column = []\n return False\n\n\ndef get_board_result(board, called_number):\n result = 0\n for line in board:\n for number in line:\n if number != \"X\":\n result = result + int(number)\n return result * int(called_number)\n\n\ndef first():\n f = open(\"4.input\", \"r\", encoding=\"UTF-8\")\n drawn_numbers = f.readline().replace(\"\\n\", \"\").split(\",\")\n matrix = []\n array = []\n for line in f:\n if line != \"\\n\":\n line = line.replace(\"\\n\", \"\").replace(\" \", \" \")\n if line[0] == \" \":\n line = line[1:]\n array.append(line.split(\" \"))\n elif array:\n matrix.append(array)\n array = []\n\n for number in drawn_numbers:\n for board in matrix:\n for line in board:\n if number in line:\n line[line.index(number)] = \"X\"\n if is_winning_board(board):\n print(get_board_result(board, number))\n return 0\n\n\ndef second():\n f = open(\"4.input\", \"r\", encoding=\"UTF-8\")\n drawn_numbers = f.readline().replace(\"\\n\", \"\").split(\",\")\n matrix = []\n array = []\n for line in f:\n if line != \"\\n\":\n line = line.replace(\"\\n\", \"\").replace(\" \", \" \")\n if line[0] == \" \":\n line = line[1:]\n array.append(line.split(\" \"))\n elif array:\n matrix.append(array)\n array = []\n\n for number in drawn_numbers:\n for board in matrix:\n for line in board:\n if number in line:\n line[line.index(number)] = \"X\"\n if is_winning_board(board):\n if len(matrix) != 1:\n matrix.remove(board)\n drawn_numbers.insert(drawn_numbers.index(number), number)\n else:\n print(get_board_result(board, number))\n return 0\n\n\nfirst()\n\nsecond()\n","repo_name":"Koty97/Advent_of_Code","sub_path":"2021/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12843382118","text":"import pandas as pd\nimport itertools\nimport spacy\nfrom scripts.constants import KEEP_COLUMNS, TITLE, MODELNOQ, FEATURES\n\n\ndef apply(sample:pd.DataFrame, model, lowercase:bool=False) -> None:\n if lowercase:\n sample['doc'] = sample.title.apply(lambda x: model(x.lower()))\n else:\n sample['doc'] = sample.title.apply(model)\n sample['span'] = sample['doc'].apply(lambda x: x.spans['sc'])\n sample['tags'] = sample['span'].apply(lambda x: [tag.label_ for tag in x])\n for _, row in sample.iterrows():\n print(row.title,dict(zip(row.tags, row.span)), sep='\\n--->',end='\\n\\n')\n return sample\n\ndef extract_tags(df:pd.DataFrame) -> pd.DataFrame:\n for tag in FEATURES:\n df[tag.upper()] = df['span'].apply(lambda x: x.get(tag.upper())).fillna('')\n df = df.drop(columns=['span'])\n df = df[[TITLE]+FEATURES+[feature.upper() for feature in FEATURES]]\n return df\n\nif __name__ == '__main__':\n dataset = 'final_dataset.csv'\n model = spacy.load('training/model-best')\n df = pd.read_csv(f'assets/{dataset}',on_bad_lines='warn')[KEEP_COLUMNS]\n df = df.query('~title.isnull()')\n df = df.fillna('')\n df = 
df.groupby([TITLE,MODELNOQ]).first().reset_index(drop=False)\n sample = df\n sample['doc'] = sample.title.apply(model)\n sample['span'] = sample.doc.apply(lambda x: dict([(span.label_,str(span)) for span in x.spans['sc'] if str(span)]))\n sample = sample.drop(columns=['doc'])\n sample = extract_tags(sample)\n # print('-------\\n')\n # for _, row in sample.iterrows():\n # print('========= TITLE ==============')\n # print(row.title)\n # print()\n # print('========= ANOTATIONS =========')\n # print(row[FEATURES])\n # print()\n # print('========= DETECTED ===========')\n # print(row.span)\n # print('=============================')\n # print()\n # print('-------\\n')","repo_name":"NaelsonDouglas/NER","sub_path":"src/playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24606738673","text":"\"\"\"\nBase class for storing a S3 File\n\"\"\"\nfrom ..utils.exceptions import ETLInputError\nfrom ..utils.helpers import parse_path\nfrom .s3_path import S3Path\nfrom .utils import read_from_s3\nfrom .utils import upload_to_s3\n\nDEFAULT_FILE_NAME = 'file'\n\n\nclass S3File(object):\n \"\"\"S3 File object that provides functions to operate with a file on S3\n\n The S3 file unifies the file concept, which could be stored on the\n local file system, as a string, or already in s3.\n\n \"\"\"\n def __init__(self, path=None, text=None, s3_path=None):\n \"\"\"Constructor for the S3 File object\n\n Args:\n path (str): Local path to file\n text (str): Text of a file\n s3_path (S3Path, optional): s3_path of the file\n\n \"\"\"\n\n if path or text:\n assert (path and not text) or (text and not path), \\\n 'Cannot specify both path and text for s3 file.'\n\n # Initialize all the values\n self._path = parse_path(path)\n self._text = text\n self._s3_path = s3_path\n\n def upload_to_s3(self):\n \"\"\"Sends file to URI. This action is idempotent.\n\n Raises:\n ETLInputError: If no URL is provided\n \"\"\"\n if self._s3_path:\n if self._path or self._text:\n # There exists something locally to store\n upload_to_s3(self._s3_path, self._path, self._text)\n else:\n raise ETLInputError('No URI provided for the file to be uploaded')\n\n @property\n def text(self):\n \"\"\"Outputs the text of the associated file\n\n Returns:\n result(str): The text of the file. Can be local or on S3\n \"\"\"\n if self._text:\n # The text attribute is populated; return it.\n return self._text\n elif self._path:\n # The path attribute is populated. 
Read the file contents\n with open(self._path, 'r') as f:\n return f.read()\n return read_from_s3(self._s3_path)\n\n @property\n def file_name(self):\n \"\"\"The file name of this file\n\n Returns:\n file_name(str): The file_name of this file\n \"\"\"\n if self._path:\n return self._path.split('/').pop()\n else:\n return DEFAULT_FILE_NAME\n\n @property\n def s3_path(self):\n \"\"\"Outputs the s3_path\n \"\"\"\n return self._s3_path\n\n @s3_path.setter\n def s3_path(self, s3_path):\n \"\"\"Set the S3 path for the file\n\n Args:\n s3_path(S3Path): If the path is a directory, a\n name will be assigned based on the path variable.\n If there is no path, the name \"file\" will be applied.\n \"\"\"\n\n if not isinstance(s3_path, S3Path):\n raise ETLInputError('Input path should be of type S3Path')\n\n # Copy the object as we would change it for the file\n self._s3_path = S3Path(\n key=s3_path.key,\n bucket=s3_path.bucket,\n is_directory=s3_path.is_directory,\n )\n if s3_path.is_directory:\n # This is a directory; add a file name.\n self._s3_path.append(self.file_name)\n","repo_name":"coursera/dataduct","sub_path":"dataduct/s3/s3_file.py","file_name":"s3_file.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"54"} +{"seq_id":"73957616482","text":"import io\nimport logging\nimport os\nimport time\nfrom contextlib import redirect_stderr\nfrom unittest.mock import patch\n\nimport pytest\n\nimport ray\nfrom ray import tune\nfrom ray.air.constants import MAX_REPR_LENGTH\nfrom ray.data.preprocessor import Preprocessor\nfrom ray.tune.impl import tuner_internal\nfrom ray.train.data_parallel_trainer import DataParallelTrainer\nfrom ray.train.gbdt_trainer import GBDTTrainer\nfrom ray.train.trainer import BaseTrainer\nfrom ray.air.config import ScalingConfig\nfrom ray.util.placement_group import get_current_placement_group\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture\ndef ray_start_4_cpus():\n address_info = ray.init(num_cpus=4)\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\nclass DummyPreprocessor(Preprocessor):\n def __init__(self):\n self.fit_counter = 0\n\n def fit(self, ds):\n self.fit_counter += 1\n\n def transform(self, ds):\n return ds.map(lambda x: x + 1)\n\n\nclass DummyTrainer(BaseTrainer):\n _scaling_config_allowed_keys = BaseTrainer._scaling_config_allowed_keys + [\n \"num_workers\",\n \"use_gpu\",\n \"resources_per_worker\",\n \"placement_strategy\",\n ]\n\n def __init__(self, train_loop, custom_arg=None, **kwargs):\n self.custom_arg = custom_arg\n self.train_loop = train_loop\n super().__init__(**kwargs)\n\n def training_loop(self) -> None:\n self.train_loop(self)\n\n\nclass DummyGBDTTrainer(GBDTTrainer):\n _dmatrix_cls: type = None\n _ray_params_cls: type = None\n _tune_callback_report_cls: type = None\n _tune_callback_checkpoint_cls: type = None\n _init_model_arg_name: str = None\n\n\ndef test_trainer_fit(ray_start_4_cpus):\n def training_loop(self):\n tune.report(my_metric=1)\n\n trainer = DummyTrainer(train_loop=training_loop)\n result = trainer.fit()\n assert result.metrics[\"my_metric\"] == 1\n\n\ndef test_preprocess_datasets(ray_start_4_cpus):\n def training_loop(self):\n assert self.datasets[\"my_dataset\"].take() == [2, 3, 4]\n\n datasets = {\"my_dataset\": ray.data.from_items([1, 2, 3])}\n trainer = DummyTrainer(\n training_loop, datasets=datasets, preprocessor=DummyPreprocessor()\n )\n trainer.fit()\n\n\ndef 
test_resources(ray_start_4_cpus):\n def check_cpus(self):\n assert ray.available_resources()[\"CPU\"] == 2\n\n assert ray.available_resources()[\"CPU\"] == 4\n trainer = DummyTrainer(\n check_cpus, scaling_config=ScalingConfig(trainer_resources={\"CPU\": 2})\n )\n trainer.fit()\n\n\n@pytest.mark.parametrize(\"gen_dataset\", [True, False])\ndef test_preprocess_fit_on_train(ray_start_4_cpus, gen_dataset):\n def training_loop(self):\n # Fit was only called once.\n assert self.preprocessor.fit_counter == 1\n # Datasets should all be transformed.\n assert self.datasets[\"train\"].take() == [2, 3, 4]\n assert self.datasets[\"my_dataset\"].take() == [2, 3, 4]\n\n if gen_dataset:\n datasets = {\n \"train\": lambda: ray.data.from_items([1, 2, 3]),\n \"my_dataset\": lambda: ray.data.from_items([1, 2, 3]),\n }\n else:\n datasets = {\n \"train\": ray.data.from_items([1, 2, 3]),\n \"my_dataset\": ray.data.from_items([1, 2, 3]),\n }\n trainer = DummyTrainer(\n training_loop, datasets=datasets, preprocessor=DummyPreprocessor()\n )\n trainer.fit()\n\n\ndef test_preprocessor_already_fitted(ray_start_4_cpus):\n def training_loop(self):\n # Make sure fit is not called if preprocessor is already fit.\n assert self.preprocessor.fit_counter == 1\n # Datasets should all be transformed.\n assert self.datasets[\"train\"].take() == [2, 3, 4]\n assert self.datasets[\"my_dataset\"].take() == [2, 3, 4]\n\n datasets = {\n \"train\": ray.data.from_items([1, 2, 3]),\n \"my_dataset\": ray.data.from_items([1, 2, 3]),\n }\n preprocessor = DummyPreprocessor()\n preprocessor.fit(ray.data.from_items([1]))\n trainer = DummyTrainer(\n training_loop, datasets=datasets, preprocessor=DummyPreprocessor()\n )\n trainer.fit()\n\n\ndef test_arg_override(ray_start_4_cpus):\n def check_override(self):\n assert self.scaling_config.num_workers == 1\n # Should do deep update.\n assert not self.custom_arg[\"outer\"][\"inner\"]\n assert self.custom_arg[\"outer\"][\"fixed\"] == 1\n # Should merge with base config.\n assert self.preprocessor.original\n\n pg = get_current_placement_group()\n assert len(pg.bundle_specs) == 2 # 1 trainer, 1 worker\n\n preprocessor = DummyPreprocessor()\n preprocessor.original = True\n scale_config = ScalingConfig(num_workers=4)\n trainer = DummyTrainer(\n check_override,\n custom_arg={\"outer\": {\"inner\": True, \"fixed\": 1}},\n preprocessor=preprocessor,\n scaling_config=scale_config,\n )\n\n new_config = {\n \"custom_arg\": {\"outer\": {\"inner\": False}},\n \"scaling_config\": ScalingConfig(num_workers=1),\n }\n\n tune.run(trainer.as_trainable(), config=new_config)\n\n\ndef test_reserved_cpus(ray_start_4_cpus):\n def train_loop(self):\n ray.data.range(10).show()\n\n # Will deadlock without reserved CPU fraction.\n scale_config = ScalingConfig(num_workers=1, _max_cpu_fraction_per_node=0.9)\n trainer = DummyTrainer(\n train_loop,\n scaling_config=scale_config,\n )\n tune.run(trainer.as_trainable(), num_samples=4)\n\n # Needs to request 0 CPU for the trainer otherwise the pg\n # will require {CPU: 1} * 2 resources, which means\n # _max_cpu_fraction_per_node == 0.01 cannot schedule it\n # (because this only allows to have 1 CPU for pg per node).\n scale_config = ScalingConfig(\n num_workers=1, _max_cpu_fraction_per_node=0.01, trainer_resources={\"CPU\": 0}\n )\n trainer = DummyTrainer(\n train_loop,\n scaling_config=scale_config,\n )\n tune.run(trainer.as_trainable(), num_samples=4)\n\n\ndef test_reserved_cpu_warnings(ray_start_4_cpus):\n def train_loop(config):\n pass\n\n class MockLogger:\n def 
__init__(self):\n self.warnings = []\n\n def warning(self, msg):\n self.warnings.append(msg)\n\n def warn(self, msg, **kwargs):\n self.warnings.append(msg)\n\n def info(self, msg):\n print(msg)\n\n def clear(self):\n self.warnings = []\n\n try:\n old = tuner_internal.warnings\n tuner_internal.warnings = MockLogger()\n\n # Fraction correctly specified.\n trainer = DummyTrainer(\n train_loop,\n scaling_config=ScalingConfig(num_workers=1, _max_cpu_fraction_per_node=0.9),\n datasets={\"train\": ray.data.range(10)},\n )\n trainer.fit()\n assert not tuner_internal.warnings.warnings\n\n # No datasets, no fraction.\n trainer = DummyTrainer(\n train_loop,\n scaling_config=ScalingConfig(num_workers=1),\n )\n trainer.fit()\n assert not tuner_internal.warnings.warnings\n\n # Should warn.\n trainer = DummyTrainer(\n train_loop,\n scaling_config=ScalingConfig(num_workers=3),\n datasets={\"train\": ray.data.range(10)},\n )\n trainer.fit()\n assert (\n len(tuner_internal.warnings.warnings) == 1\n ), tuner_internal.warnings.warnings\n assert \"_max_cpu_fraction_per_node\" in tuner_internal.warnings.warnings[0]\n tuner_internal.warnings.clear()\n\n # Warn if num_samples is configured\n trainer = DummyTrainer(\n train_loop,\n scaling_config=ScalingConfig(num_workers=1),\n datasets={\"train\": ray.data.range(10)},\n )\n tuner = tune.Tuner(trainer, tune_config=tune.TuneConfig(num_samples=3))\n tuner.fit()\n assert (\n len(tuner_internal.warnings.warnings) == 1\n ), tuner_internal.warnings.warnings\n assert \"_max_cpu_fraction_per_node\" in tuner_internal.warnings.warnings[0]\n tuner_internal.warnings.clear()\n\n # Don't warn if resources * samples < 0.8\n trainer = DummyTrainer(\n train_loop,\n scaling_config=ScalingConfig(num_workers=1, trainer_resources={\"CPU\": 0}),\n datasets={\"train\": ray.data.range(10)},\n )\n tuner = tune.Tuner(trainer, tune_config=tune.TuneConfig(num_samples=3))\n tuner.fit()\n assert not tuner_internal.warnings.warnings\n\n # Don't warn if Trainer is not used\n tuner = tune.Tuner(train_loop, tune_config=tune.TuneConfig(num_samples=3))\n tuner.fit()\n assert not tuner_internal.warnings.warnings\n finally:\n tuner_internal.warnings = old\n\n\ndef test_setup(ray_start_4_cpus):\n def check_setup(self):\n assert self._has_setup\n\n class DummyTrainerWithSetup(DummyTrainer):\n def setup(self):\n self._has_setup = True\n\n trainer = DummyTrainerWithSetup(check_setup)\n trainer.fit()\n\n\ndef test_fail(ray_start_4_cpus):\n def fail(self):\n raise ValueError\n\n trainer = DummyTrainer(fail)\n with pytest.raises(ValueError):\n trainer.fit()\n\n\n@patch.dict(os.environ, {\"RAY_LOG_TO_STDERR\": \"1\"})\ndef _is_trainable_name_overriden(trainer: BaseTrainer):\n trainable = trainer.as_trainable()\n output = io.StringIO()\n\n def say(self):\n logger.warning(\"say\")\n\n trainable.say = say\n with redirect_stderr(output):\n remote_trainable = ray.remote(trainable)\n remote_actor = remote_trainable.remote()\n ray.get(remote_actor.say.remote())\n time.sleep(1) # make sure logging gets caught\n output = output.getvalue()\n print(output)\n assert trainable().__repr__() in output\n\n\ndef test_trainable_name_is_overriden_data_parallel_trainer(ray_start_4_cpus):\n trainer = DataParallelTrainer(\n lambda x: x, scaling_config=ScalingConfig(num_workers=1)\n )\n\n _is_trainable_name_overriden(trainer)\n\n\ndef test_trainable_name_is_overriden_gbdt_trainer(ray_start_4_cpus):\n trainer = DummyGBDTTrainer(\n params={},\n label_column=\"__values__\",\n datasets={\"train\": ray.data.from_items([1, 2, 3])},\n 
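# with num_samples=3 below, three concurrent trials (1 worker CPU + 1 trainer CPU each) oversubscribe the 4-CPU cluster, so the _max_cpu_fraction_per_node warning is expected\n            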
scaling_config=ScalingConfig(num_workers=1),\n )\n\n _is_trainable_name_overriden(trainer)\n\n\ndef test_repr():\n def training_loop(self):\n pass\n\n trainer = DummyTrainer(\n training_loop,\n datasets={\n \"train\": ray.data.from_items([1, 2, 3]),\n },\n )\n\n representation = repr(trainer)\n\n assert \"DummyTrainer\" in representation\n assert len(representation) < MAX_REPR_LENGTH\n\n\nif __name__ == \"__main__\":\n import sys\n\n import pytest\n\n sys.exit(pytest.main(sys.argv[1:] + [\"-v\", \"-x\", __file__]))\n","repo_name":"Eashurox/CPDP_ML","sub_path":"Dataset/ML Projects/ray_versions/ray-ray-2.0.0/python/ray/train/tests/test_base_trainer.py","file_name":"test_base_trainer.py","file_ext":"py","file_size_in_byte":10761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19248643472","text":"# Code from https://www.kaggle.com/code/billiemage/object-detection\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nfrom config import CFG\n\nclass XMLParser:\n def __init__(self,xml_file):\n\n self.xml_file = xml_file\n self._root = ET.parse(self.xml_file).getroot()\n self._objects = self._root.findall(\"object\")\n # path to the image file as describe in the xml file\n self.img_path = os.path.join(CFG.img_path, self._root.find('filename').text)\n # image id \n self.image_id = self._root.find(\"filename\").text\n # names of the classes contained in the xml file\n self.names = self._get_names()\n # coordinates of the bounding boxes\n self.boxes = self._get_bndbox()\n\n def parse_xml(self):\n \"\"\"\"Parse the xml file returning the root.\"\"\"\n \n tree = ET.parse(self.xml_file)\n return tree.getroot()\n\n def _get_names(self):\n\n names = []\n for obj in self._objects:\n name = obj.find(\"name\")\n names.append(name.text)\n\n return np.array(names)\n\n def _get_bndbox(self):\n\n boxes = []\n for obj in self._objects:\n coordinates = []\n bndbox = obj.find(\"bndbox\")\n coordinates.append(np.int32(bndbox.find(\"xmin\").text))\n coordinates.append(np.int32(np.float32(bndbox.find(\"ymin\").text)))\n coordinates.append(np.int32(bndbox.find(\"xmax\").text))\n coordinates.append(np.int32(bndbox.find(\"ymax\").text))\n boxes.append(coordinates)\n\n return np.array(boxes)\n\ndef xml_files_to_df(xml_files):\n \n \"\"\"\"Return pandas dataframe from list of XML files.\"\"\"\n \n names = []\n boxes = []\n image_id = []\n xml_path = []\n img_path = []\n for f in xml_files:\n xml = XMLParser(f)\n names.extend(xml.names)\n boxes.extend(xml.boxes)\n image_id.extend([xml.image_id] * len(xml.names))\n xml_path.extend([xml.xml_file] * len(xml.names))\n img_path.extend([xml.img_path] * len(xml.names))\n a = {\"image_id\": image_id,\n \"names\": names,\n \"boxes\": boxes,\n \"xml_path\":xml_path,\n \"img_path\":img_path}\n \n df = pd.DataFrame.from_dict(a, orient='index')\n df = df.transpose()\n \n df['xmin'] = -1\n df['ymin'] = -1\n df['xmax'] = -1\n df['ymax'] = -1\n\n df[['xmin','ymin','xmax','ymax']] = np.stack([df['boxes'][i] for i in range(len(df['boxes']))])\n\n df.drop(columns=['boxes'], inplace=True)\n df['xmin'] = df['xmin'].astype('float32')\n df['ymin'] = df['ymin'].astype('float32')\n df['xmax'] = df['xmax'].astype('float32')\n df['ymax'] = df['ymax'].astype('float32')\n \n df['id'] = df['image_id'].map(lambda x: x.split(\".jpg\")[0])\n \n return df\n\ndef concat_gt(row):\n label = row['label']\n\n xmin = row['xmin']\n xmax = row['xmax']\n ymin = row['ymin']\n ymax = row['ymax']\n\n return [label, 
xmin, ymin, xmax, ymax]\n\ndef group_objects(df):\n df['concatenated'] = df.apply(concat_gt, axis=1)\n\n df = df.groupby('id')[['concatenated', 'img_path']].agg({'concatenated': list, \n 'img_path': np.unique}).reset_index(drop=True)\n return df\n\n\ndef build_df(xml_files):\n # parse xml files and create pandas dataframe\n df = xml_files_to_df(xml_files)\n \n\n classes = sorted(df['names'].unique())\n cls2id = {cls_name: i for i, cls_name in enumerate(classes)}\n df['label'] = df['names'].map(cls2id)\n \n # in this df, each object of a given image is in a separate row\n df = df[['id', 'label', 'xmin', 'ymin', 'xmax', 'ymax', 'img_path']]\n \n return df, classes","repo_name":"moein-shariatnia/Pix2Seq","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"54"} +{"seq_id":"33064477961","text":"import scrapy\nfrom scrapy import Selector, Request\nfrom ..items import MyspiderItem\n\nclass sb(scrapy.Spider):\n name = \"sb\"\n start_urls = []\n # start_urls=['http://www.agronet.com.cn/answer/detail/3309']\n for i in range(1,4150):\n start_urls.append('http://www.agronet.com.cn/answer/detail/' + str(i) )\n def parse(self, response):\n\n # 使用 XPath 表达式来定位和提取数据\n #使用text\n print(response.url)\n\n que = response.xpath(\"//span[@class='mxn_problem_h1']/text()\").extract_first()\n ans = response.xpath(\"//div[@class='mxn_wyhd']/text()\").extract_first()\n caina_ans = response.xpath(\"//div[@class='mxn_da_detail']/text()\").extract_first()\n\n # 上面的是要搜索的,想还是直接用完整的xpath更省时间\n # que = response.xpath(\"/html/body/div[3]/div[@class='wid']/div[@class='mxnlist_layer1']/div[@class='mxnlist_left']/div[@class='iproblem mxn_10']/span[@class='mxn_problem_h1']/text()\").extract_first()\n # ans = response.xpath(\"/html/body/div[3]/div[@class='wid']/div[@class='mxnlist_layer1']/div[@class='mxnlist_left']/div[@class='mxn_da mxn_10']/dl[@class='mxn_da_dl']/dt/div[@class='mxn_wyhd']/text()\").extract_first()\n # caina_ans = response.xpath(\"/html/body/div[3]/div[@class='wid']/div[@class='mxnlist_layer1']/div[@class='mxnlist_left']/div[@class='mxn_da mxn_10']/div[@class='mxn_da_detail']/text()\").extract_first()\n\n if caina_ans != None:\n ans = caina_ans\n # 优先选择采纳回答\n\n if ans == None or que == None:\n return\n\n #使用string\n # que = response.xpath(\"string(//span[@class='mxn_problem_h1'])\").extract_first()\n # ans = response.xpath(\"string(//div[@class='mxn_wyhd']/text())\").extract_first()\n # ans = ans.replace(' ', '')\n # ans = ans.replace('\\n', ' ')\n # ans = ans.replace('\\r', '')\n # que = que.replace(' ', '')\n # que = que.replace('\\n', ' ')\n # que = que.replace('\\r', '')\n\n # 处理一下格式,替换特定字符\n ans = ans.strip() # 去除首尾的空白字符和换行符\n que = que.strip()\n que = que.replace('\\u3000', ' ')# 替换u3000为空格\n ans = ans.replace('\\u3000', ' ')\n ans = ans.replace('\\r', '')# 去除\\r保留\\n\n que = que.replace('\\r', '')\n # ans = \" \".join(ans.split()) # 合并连续的空白字符为一个空格\n\n items = MyspiderItem(que=que, ans=ans)\n\n if ans!=None and que!=None:\n yield items","repo_name":"REXWindW/Scrapy_for_Agronet","sub_path":"myspider/myspider/spiders/sb.py","file_name":"sb.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"12197340963","text":"import pandas as pd\n\n\ndef calculate_demographic_data(print_data=True):\n # Read data from file\n df = pd.read_csv(\"adult.data.csv\")\n\n\n # How many of 
each race are represented in this dataset? This should be a Pandas series with race names as the index labels.\n listOfCount=[]\n def getRace(df):\n indexes=df.index\n for index in indexes:\n listOfCount.append(df[index])\n listOfCount.sort(reverse=True)\n getRace(df.groupby(\"race\").count()[\"age\"])\n race_count = listOfCount\n\n # What is the average age of men?\n average_age_men = float(f\"{df[df['sex']=='Male']['age'].mean():.1f}\")\n\n # What is the percentage of people who have a Bachelor's degree?\n withFloat=df[df[\"education\"]==\"Bachelors\"].count()[\"age\"]/df[\"education\"].count()*100\n percentage_bachelors = float(f\"{withFloat:.1f}\")\n\n # What percentage of people with advanced education (`Bachelors`, `Masters`, or `Doctorate`) make more than 50K?\n # What percentage of people without advanced education make more than 50K?\n\n # with and without `Bachelors`, `Masters`, or `Doctorate`\n higher_education = ((df[((df[\"education\"]==\"Bachelors\")|(df[\"education\"]==\"Masters\")|(df[\"education\"]==\"Doctorate\"))][\"age\"]).count()/df[\"education\"].count())*100\n lower_education =((df[\"education\"].count()-df[(df[\"education\"]==\"Bachelors\")|(df[\"education\"]==\"Masters\")|(df[\"education\"]==\"Doctorate\")][\"age\"].count())/df[\"education\"].count())*100\n\n # percentage with salary >50K\n\n higher_education_rich = round(len(df[df[\"education\"].isin([\"Bachelors\", \"Masters\", \"Doctorate\"])][df[df[\n \"education\"].isin([\"Bachelors\", \"Masters\", \"Doctorate\"])].salary == \">50K\"]) / len(\n df[df[\"education\"].isin([\"Bachelors\", \"Masters\", \"Doctorate\"])]) * 100, 1)\n\n lower_education_rich = round((len(df[~df[\"education\"].isin([\"Bachelors\", \"Masters\", \"Doctorate\"])][df[~df[\n \"education\"].isin([\"Bachelors\", \"Masters\", \"Doctorate\"])].salary == \">50K\"])) / len(\n df[~df[\"education\"].isin([\"Bachelors\", \"Masters\", \"Doctorate\"])]) * 100, 1)\n\n # What is the minimum number of hours a person works per week (hours-per-week )?\n min_work_hours = df[\"hours-per-week\"].min()\n\n # What percentage of the people who work the minimum number of hours per week have a salary of >50K?\n num_min_workers =(df[(df[\"hours-per-week\"]==1)&(df[\"salary\"]==\">50K\")])[\"age\"].count()\n rich_percentage = ((df[(df[\"hours-per-week\"]==1)&(df[\"salary\"]==\">50K\")])[\"age\"].count()/df[df[\"hours-per-week\"]==1][\"age\"].count())*100\n\n # What country has the highest percentage of people that earn >50K?\n maxRate=((df[df[\"salary\"]==\">50K\"].groupby(\"native-country\").count())/(df.groupby(\"native-country\").count()))[\"salary\"].max()\n countriesRate=((df[df[\"salary\"]==\">50K\"].groupby(\"native-country\").count())/(df.groupby(\"native-country\").count()))\n listOfCountry=countriesRate[countriesRate[\"salary\"]==maxRate].index\n highest_earning_country =listOfCountry[0]\n \n withFloat2= maxRate*100\n highest_earning_country_percentage = float(f\"{withFloat2:.1f}\")\n\n # Identify the most popular occupation for those who earn >50K in India.\n IndiaData= df.groupby(\"native-country\").get_group(\"India\")\n numbersOfOccupation=df.groupby(\"native-country\").get_group(\"India\").groupby(\"occupation\").count()\n listOfOccupation=numbersOfOccupation[numbersOfOccupation==40].dropna().index\n top_IN_occupation =listOfOccupation[0]\n #(((df.groupby(\"native-country\").get_group(\"India\").groupby(\"occupation\")).count())[\"age\"])\n\n # DO NOT MODIFY BELOW THIS LINE\n \n if print_data:\n print(\"Number of each race:\\n\", race_count) \n 
print(\"Average age of men:\", average_age_men)\n print(f\"Percentage with Bachelors degrees: {percentage_bachelors}%\")\n print(f\"Percentage with higher education that earn >50K: {higher_education_rich}%\")\n print(f\"Percentage without higher education that earn >50K: {lower_education_rich}%\")\n print(f\"Min work time: {min_work_hours} hours/week\")\n print(f\"Percentage of rich among those who work fewest hours: {rich_percentage}%\")\n print(\"Country with highest percentage of rich:\", highest_earning_country)\n print(f\"Highest percentage of rich people in country: {highest_earning_country_percentage}%\")\n print(\"Top occupations in India:\", top_IN_occupation)\n \n return {\n 'race_count': race_count,\n 'average_age_men': average_age_men,\n 'percentage_bachelors': percentage_bachelors,\n 'higher_education_rich': higher_education_rich,\n 'lower_education_rich': lower_education_rich,\n 'min_work_hours': min_work_hours,\n 'rich_percentage': rich_percentage,\n 'highest_earning_country': highest_earning_country,\n 'highest_earning_country_percentage':\n highest_earning_country_percentage,\n 'top_IN_occupation': top_IN_occupation\n }\n\n\n#print(calculate_demographic_data())","repo_name":"codesigned4/Data-Analysis-with-Python","sub_path":"Demographic-data-analyzer/demographic_data_analyzer.py","file_name":"demographic_data_analyzer.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43696581911","text":"#####################################################\r\n# Preprocess pixel-based common indicators #\r\n#####################################################\r\n\r\nimport pandas as pd\r\nimport geopandas as gpd\r\nfrom netCDF4 import Dataset, num2date\r\n# Our tailored module\r\nimport PreprocessUtils as ut\r\n\r\n# Read NetCDF\r\nfile = 'Data/air.mon.mean.nc'\r\nnc = Dataset(file, mode='r')\r\n# We store lat and lon in separate variables\r\nlat = nc.variables['lat'][:]\r\nlon = nc.variables['lon'][:]\r\n\r\n# We convert to pandas df and fix the longitudes (from -180 to 180)\r\nnames = ['lat', 'lon']\r\nindex = pd.MultiIndex.from_product([lat, lon], names=names)\r\ndfu = pd.DataFrame(index=index).reset_index()\r\n# Fix longitudes with the function we defined in the PreprocessUtils module\r\ndfu.lon = dfu.lon.transform(ut.convert_lons)\r\n\r\n# Create geopandas spatial object from pandas table (WGS84) as pixel grid\r\ndfu = gpd.GeoDataFrame(dfu,\r\n geometry=gpd.points_from_xy(dfu.lon, dfu.lat),\r\n crs={'init': 'epsg:4326'})\r\n\r\n# Read country data, select and rename country names\r\ncountry = gpd.read_file('Data/countries.geojson').loc[:, ['NAME_EN', 'geometry']]\r\ncountry = country.rename(columns={'NAME_EN': 'countryname'})\r\n# Do spatial join to extract the country of each pixel\r\ndfu = gpd.sjoin(dfu, country, how='left').loc[:, ['lon', 'lat', 'countryname', 'geometry']]\r\n# Pixels for which we got no result fall on water, fill with string\r\ndfu.countryname = dfu.countryname.fillna('Water body')\r\n\r\n# Next, let's add climate zones. Read file.\r\nclimatezones = gpd.read_file('Data/climatezones.geojson')\r\n# And perform spatial join\r\ndfu = gpd.sjoin(dfu, climatezones, how='left').loc[:, ['lon', 'lat', 'countryname', 'climate']]\r\n# For some record we got duplicates (more than one climate zone per point). 
Take the last match.\r\ndfu = dfu.drop_duplicates(subset=['lon', 'lat'], keep='last')","repo_name":"carlesmila/GeotechClimateChange","sub_path":"PreprocessCommon.py","file_name":"PreprocessCommon.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
+{"seq_id":"2830352205","text":"from collections import Counter\n\nclass Solution:\n    def findSubstring(self, s: str, words: list[str]) -> list[int]:\n        # steps\n        # 1. every valid window has length len(words) * word length\n        # 2. slide a window of that length over s, one index at a time\n        # 3. count the fixed-size words inside the window in a hashmap\n        # 4. when the counts equal the target counts, store the left index\n        word_length = len(words[0])\n        window_size = word_length * len(words)\n        target = Counter(words)\n        result = []\n\n        for left in range(len(s) - window_size + 1):\n            window = s[left:left + window_size]\n            seen = Counter(window[i:i + word_length]\n                           for i in range(0, window_size, word_length))\n            if seen == target:\n                result.append(left)\n\n        return result\n\nmyClass = Solution()\nprint(myClass.findSubstring(\"aabda\", [\"da\", \"bd\"]))  # no concatenation present -> []\n","repo_name":"MussieT/Leetcode","sub_path":"DS_and_Algos/Python/Algorithms/SlidingWindows/substring_with_concatnation_of_all_words.py","file_name":"substring_with_concatnation_of_all_words.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"7953317329","text":"from PIL import Image\nimport random\n\ndef make_collage(images, width, height):\n    collage = Image.new(\"RGB\", (width, height), (255, 255, 255))\n    images = random.sample(images, 9)\n\n    x_offset = 0\n    y_offset = 0\n    for img in images:\n        collage.paste(img, (x_offset, y_offset))\n        x_offset += img.width\n        if x_offset >= collage.width:\n            x_offset = 0\n            y_offset += img.height\n    return collage\n\nif __name__ == \"__main__\":\n    # Load the images you want to use in the collage\n    images = [Image.open(f\"image{i}.jpg\") for i in range(1, 10)]\n    # Specify the size of the final collage\n    width = 3 * images[0].width\n    height = 3 * images[0].height\n\n    collage = make_collage(images, width, height)\n    collage.show()\n    collage.save(\"collage.jpg\")\n","repo_name":"dustinober1/Python-Scripts","sub_path":"combine_random_pics_3x3.py","file_name":"combine_random_pics_3x3.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"23772236176","text":"import ast\n\ndef collectProgramDesign(path: str):\n    with open(path, \"r\") as f:\n        source = f.read()\n\n    myast = ast.parse(source)\n    #print(ast.dump(myast))\n    root_funcs = set([anode.name for anode in myast.body if isinstance(anode, ast.FunctionDef)])\n\n    root_classes = set([anode for anode in myast.body if isinstance(anode, ast.ClassDef)])\n\n    ret_classes = {}\n\n    for aclass in root_classes:\n        class_name = aclass.name\n\n        func_visitor = MyFuncDefVisitor()\n\n        func_visitor.visit(aclass)\n        funcs = func_visitor.getFunctions()\n\n        attr_visitor = MyAttrVisitor(funcs)\n        attr_visitor.visit(aclass)\n\n        attrs = attr_visitor.getFields()\n\n        ret_classes[class_name] = {\"attrs\": attrs, \"funcs\": funcs}\n\n    
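# Annotation (not part of the original source): at this point ret_classes maps\n    # each top-level class name to the set of its method names (\"funcs\") and the\n    # set of \"self.<attr>\" fields (\"attrs\") gathered by the two ast.NodeVisitor\n    # passes above. For a file containing only \"class Foo\" with\n    # \"def bar(self): self.x = 1\", the function returns:\n    #   (set(), {\"Foo\": {\"attrs\": {\"x\"}, \"funcs\": {\"bar\"}}})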
return (root_funcs, ret_classes)\n\n\nclass MyAttrVisitor(ast.NodeVisitor):\n def __init__(self, funcs):\n super().__init__()\n self.fields = []\n self.funcs = funcs\n\n def visit_Attribute(self, node):\n if isinstance(node.value, ast.Name) and node.value.id == \"self\" and node.attr not in self.funcs:\n self.fields.append(node.attr)\n return self.generic_visit(node)\n\n def getFields(self):\n return set(self.fields)\n\n\nclass MyFuncDefVisitor(ast.NodeVisitor):\n def __init__(self):\n super().__init__()\n self.funcs = []\n\n def visit_FunctionDef(self, node):\n self.funcs.append(node.name)\n return self.generic_visit(node)\n\n def getFunctions(self):\n return set(self.funcs)\n\nif __name__ == \"__main__\":\n collectProgramDesign(\"./input.py\")\n","repo_name":"darkrsw/knu2023design-midterm-template","sub_path":"design_extractor.py","file_name":"design_extractor.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17298043700","text":"import smtplib\nimport urllib\nimport re\nimport json\nimport time\n\nclass Registro(object):\n def __init__(self, filename):\n self.filename = filename\n self.file = json.load(open(self.filename))\n \n def checkAddress(self):\n site = urllib.urlopen(self.file[\"urls\"][\"check_ip\"])\n grab = re.findall('([0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+)', site.read())\n address = grab[0]\n old_ip = self.getLastIP()\n if old_ip != address:\n self.file[\"urls\"][\"last_ip\"] = address\n self.file[\"urls\"][\"last_ip_updated\"] = time.strftime(\"%c\")\n json.dump(self.file, open(self.filename, \"w\"), sort_keys = False, indent = 4)\n return True\n else:\n return False\n \n def getLastIP(self):\n return self.file[\"urls\"][\"last_ip\"] \n\n\nclass Gmail(object):\n def __init__(self, filename):\n self.file = json.load(open(filename))\n self.email = self.file[\"Gmail\"][\"email\"]\n self.emailto = self.file[\"Gmail\"][\"emailto\"]\n self.password = self.file[\"Gmail\"][\"password\"]\n self.server = 'smtp.gmail.com'\n self.port = 587\n session = smtplib.SMTP(self.server, self.port) \n session.ehlo()\n session.starttls()\n session.ehlo\n session.login(self.email, self.password)\n self.session = session\n\n def send_message(self, subject, body):\n ''' This must be removed '''\n headers = [\n \"From: \" + self.email,\n \"Subject: \" + subject,\n \"To: \" + self.emailto,\n \"MIME-Version: 1.0\",\n \"Content-Type: text/html\"]\n headers = \"\\r\\n\".join(headers)\n self.session.sendmail(\n self.email,\n self.email,\n headers + \"\\r\\n\\r\\n\" + body)\n\n\ngm = Gmail('./checkip.json')\nreg = Registro('./checkip.json')\n\nif reg.checkAddress():\n gm.send_message('New IP', reg.getLastIP())","repo_name":"moraprj/Automatic-IP-Change-Notifier","sub_path":"checkip.py","file_name":"checkip.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73969357921","text":"import sys\nfrom PyQt5 import QtWidgets\n\nimport AppLoop\nimport qdarkstyle\nfrom ui.UIMainViewControl import UIMainViewControl\n\n\ndef my_exception_hook(exctype, value, traceback):\n print(exctype, value, traceback)\n sys._excepthook(exctype, value, traceback)\n sys.exit(1)\n\n\ndef show_main():\n app = QtWidgets.QApplication(sys.argv)\n sys._excepthook = sys.excepthook\n sys.excepthook = my_exception_hook\n AppLoop.start()\n\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n\n w = UIMainViewControl()\n w.show()\n\n 
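# Annotation (not part of the original source): app.exec_() on the next line\n    # starts the Qt event loop and blocks until the last window closes; wrapping\n    # it in sys.exit() forwards the loop's exit status to the shell. The custom\n    # excepthook installed above prints unhandled exceptions from event handlers\n    # and exits instead of letting them pass silently.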
sys.exit(app.exec_())\n","repo_name":"Beugeny/game_test","sub_path":"ui/MainView.py","file_name":"MainView.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31686573881","text":"# DL6G.py CS5173/6073 cheng 2019\r\n# from tensorflow.org/tutorials/keras\r\n# three-layer neural network for fashion_mnist data classification\r\n# Usage: python DL6G.py\r\n\r\nimport tensorflow as tf\r\nmnist = tf.keras.datasets.mnist\r\n\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\nx_train, x_test = x_train / 255.0, x_test / 255.0\r\n\r\nmodel = tf.keras.models.Sequential([\r\n tf.keras.layers.Flatten(input_shape=(28, 28)),\r\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\r\n tf.keras.layers.Dropout(0.2),\r\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\r\n])\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\nmodel.fit(x_train, y_train, epochs=5)\r\n\r\nmodel.evaluate(x_test, y_test)\r\ngraph = tf.get_default_graph()\r\noperations = graph.get_operations()\r\nprint(operations)\r\nprint(len(operations))\r\nprint(operations[9])\r\n\r\n","repo_name":"domfarolino/deep-learning","sub_path":"Assignment6/DL6G.py","file_name":"DL6G.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11067341960","text":"import heapq\nfrom collections import defaultdict\n\nimport aocutils\n\n\ndef step_time(maxflow, flows, state):\n if state[3] > 0 and state[5] > 0:\n missed, t, node, remaining, enode, ermaining, opened, flow, total = state\n future = flow * t\n if node != 'IDLE':\n assert remaining <= t\n future += (t - remaining) * flows[node]\n if enode != 'IDLE':\n assert ermaining <= t\n future += (t - ermaining) * flows[enode]\n new_missed = maxflow * 26 - total - future\n stept = min(remaining, ermaining, t)\n new_total = total + flow * stept\n\n return new_missed, t - stept, node, remaining - stept, enode, ermaining - stept, opened, flow, new_total\n else:\n return state\n\n\ndef main(file):\n def distp(node):\n return [(1, n) for n in neighbors[node]]\n\n print(\"RUNNING\", file)\n neighbors = {}\n flows = {}\n for line in aocutils.readlines(file):\n splits = line.split(' ', 9)\n valve = splits[1]\n flow = aocutils.parseints(splits[4])[0]\n paths = splits[9].split(', ')\n flows[valve] = flow\n neighbors[valve] = paths\n maxflow = sum(flow for flow in flows.values())\n\n dists = defaultdict(dict)\n interesting = {n for n in neighbors.keys() if flows[n]}\n for n in interesting:\n for othern, dist in aocutils.dijkstra(distp, n, None).items():\n if othern in interesting:\n if othern != n:\n dists[n][othern] = dist\n for othern, dist in aocutils.dijkstra(distp, 'AA', None).items():\n if othern in interesting:\n dists['AA'][othern] = dist\n dists['IDLE'] = dict()\n\n bitfornode = {n: 1 << i for i, n in enumerate(interesting)}\n states = [(maxflow * 26, 26, 'AA', 0, 'AA', 0, 0, 0, 0)]\n best = 0\n mint = 0\n while states:\n missed, t, node, remaining, enode, ermaining, opened, flow, total = heapq.heappop(states)\n if t < mint:\n mint = t\n print(f\"t={t} states={len(states)}\")\n if node == 'IDLE' and enode == 'IDLE':\n final_value = total + t * flow\n if final_value > best:\n best = final_value\n continue\n\n # TODO tighter upper bound\n best_possible_result = total + t * maxflow\n if best_possible_result < best:\n continue\n\n if 
remaining == 0:\n new_flow = flow + flows[node]\n actions = 0\n for n, dist in dists[node].items():\n if bitfornode[n] & opened != 0:\n continue\n action_time = dist + 1\n if t - action_time < 0:\n continue\n actions += 1\n heapq.heappush(\n states,\n step_time(maxflow, flows, (missed, t, n, action_time, enode, ermaining, opened | bitfornode[n], new_flow, total)),\n )\n if actions < 2:\n heapq.heappush(\n states,\n step_time(maxflow, flows, (missed, t, 'IDLE', 99, enode, ermaining, opened, new_flow, total))\n )\n elif ermaining == 0:\n new_flow = flow + flows[enode]\n actions = 0\n for n, dist in dists[enode].items():\n if bitfornode[n] & opened != 0:\n continue\n action_time = dist + 1\n if t - action_time < 0:\n continue\n\n actions += 1\n heapq.heappush(\n states,\n step_time(maxflow, flows, (missed, t, node, remaining, n, action_time, opened | bitfornode[n], new_flow, total))\n )\n if actions < 2:\n heapq.heappush(\n states,\n step_time(maxflow, flows, (missed, t, node, remaining, 'IDLE', 99, opened, new_flow, total))\n )\n else:\n assert False\n\n print(best)\n\n\nif __name__ == '__main__':\n import time\n\n t0 = time.monotonic()\n\n main(\"example.txt\")\n t1 = time.monotonic()\n print(\"in \", t1 - t0)\n main(\"input.txt\")\n t2 = time.monotonic()\n print(\"in \", t2 - t1)\n","repo_name":"martenbr/aoc","sub_path":"aoc2022/dec16/valve2.py","file_name":"valve2.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8343800294","text":"# These settings are NOT here for users. Changing these may break the game. Change at your own risk.\n\nimport sys\n\nwindowsize = 854, 480\nfullscreen = \"--fullscreen\" in sys.argv\nnoaudio = \"--noaudio\" in sys.argv\nnonumpy = \"--nonumpy\" in sys.argv\ngamename = \"Testing for PyWeek\"\nmaxfps = 60\n\n\n","repo_name":"cosmologicon/pyjam","sub_path":"test/src/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"32870504810","text":"#URL: https://leetcode.com/explore/learn/card/data-structure-tree/133/conclusion/942/\n#Description\n\"\"\"\nGiven two integer arrays inorder and postorder where inorder is the inorder traversal of a binary \ntree and postorder is the postorder traversal of the same tree, construct and return the binary \ntree.\n\n\nExample 1:\n\nInput: inorder = [9,3,15,20,7], postorder = [9,15,7,20,3]\nOutput: [3,9,20,null,null,15,7]\n\n\nExample 2:\n\nInput: inorder = [-1], postorder = [-1]\nOutput: [-1]\n\n\nConstraints:\n\n1 <= inorder.length <= 3000\npostorder.length == inorder.length\n-3000 <= inorder[i], postorder[i] <= 3000\ninorder and postorder consist of unique values.\nEach value of postorder also appears in inorder.\ninorder is guaranteed to be the inorder traversal of the tree.\npostorder is guaranteed to be the postorder traversal of the tree.\n\"\"\"\n\nfrom binary_tree.node import Node\n\ndef buildTreeRecursive(inorder, inStart, inEnd, postorder, postStart, postEnd):\n sz = inEnd - inStart + 1\n assert sz == postEnd - postStart + 1\n if sz <= 0:\n return None\n elif sz == 1:\n return Node(postorder[postEnd])\n else:\n rootVal = postorder[postEnd]\n rootInorderIndex = -1\n for i in range(inStart, inEnd + 1):\n if inorder[i] == rootVal:\n rootInorderIndex = i\n break\n assert rootInorderIndex >= 0\n leftSz = rootInorderIndex - inStart\n rightSz = inEnd - rootInorderIndex\n leftNode = 
buildTreeRecursive(inorder, inStart, inStart + leftSz - 1,\n postorder, postStart, postStart + leftSz - 1)\n rightNode = buildTreeRecursive(inorder, rootInorderIndex + 1, inEnd,\n postorder, postStart + leftSz, postStart + leftSz + rightSz -1)\n root = Node(rootVal, leftNode, rightNode)\n return root\n \n\n\ndef buildTree(inorder, postorder):\n sz = len(inorder)\n assert sz == len(postorder)\n if sz == 0:\n return None\n \n root = buildTreeRecursive(inorder, 0, sz - 1, postorder, 0, sz - 1)\n return root","repo_name":"saurabh-pandey/AlgoAndDS","sub_path":"leetcode/binary_tree/build_from_in_post_order.py","file_name":"build_from_in_post_order.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41420091528","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution(object):\n def reverseList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head != None and head.next != None:\n tempNode = head.next\n head.next = tempNode.next\n tempNode.next = head\n tempHead = tempNode\n\n while head.next != None:\n tempNode = head.next\n head.next = tempNode.next\n tempNode.next = tempHead\n tempHead = tempNode\n \n head = tempHead\n\n return head","repo_name":"Cytherr1/daily-leetcode","sub_path":"easy/Reverse Linked List.py","file_name":"Reverse Linked List.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3839494880","text":"import os\nimport re\nfrom setuptools import setup\n\n__name__ = 'gohlkegrabber'\n\nversion_fn = os.path.join(__name__, \"_version.py\")\n__version__ = \"unknown\"\ntry:\n version_line = open(version_fn, \"rt\").read()\nexcept EnvironmentError:\n pass # no version file\nelse:\n version_regex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n m = re.search(version_regex, version_line, re.M)\n if m:\n __version__ = m.group(1)\n else:\n print(f'unable to find version in {version_fn}')\n raise RuntimeError(f'If {version_fn} exists, it is required to be well-formed')\n\nwith open(\"README.md\", \"r\") as rm:\n long_description = rm.read()\n\nsetup(\n name=__name__,\n packages=['gohlkegrabber'],\n version=__version__,\n license='MIT',\n description='Simple script to download .whl packages from www.lfd.uci.edu/~gohlke/pythonlibs.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='BMT, Jaap van der Velde',\n author_email='jaap.vandervelde@bmtglobal.com',\n url='https://github.com/jaapvandervelde/gohlkegrabber',\n download_url='https://github.com/jaapvandervelde/gohlkegrabber/archive/v'+__version__+'.tar.gz',\n keywords=['package', 'download', 'gohlke', 'wheel'],\n install_requires=[\n 'lxml>=4.4.2'\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n ],\n entry_points={\n 'console_scripts': ['ggrab=gohlkegrabber:cli_entry_point'],\n }\n)\n","repo_name":"jaapvandervelde/gohlkegrabber","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"30153682887","text":"# 
https://www.acmicpc.net/problem/2108\n\n# arithmetic mean : the sum of the N numbers divided by N\n# median : the value in the middle when the N numbers are listed in increasing order\n# mode : the value that appears most often among the N numbers\n# range : the difference between the maximum and the minimum of the N numbers\n\n# line 1 of the output: the arithmetic mean, rounded off at the first decimal place\n# line 2: the median\n# line 3: the mode; if there are several, print the second smallest among them\n# line 4: the range\n\nimport sys\n\ninput = sys.stdin.readline\n\nlength = int(input())\nnums = []\nnums_count = {}\n\nfor i in range(length):\n    number = int(input())\n    nums.append(number)\n    if number not in nums_count.keys():\n        nums_count[number] = 0\n    nums_count[number] += 1\n\nnums.sort()\n\nmax_occur = 9999\nmax_occur_count = max(nums_count.values())\n\nfor k, v in sorted(nums_count.items(), key=lambda x:x[0]):\n    if v == max_occur_count:\n        if max_occur != 9999:\n            max_occur = k\n            break\n        max_occur = k\n    \nprint(f'{sum(nums)/length:.0f}')\nprint(nums[(length-1)//2])\nprint(max_occur)\nprint(max(nums) - min(nums))\n\n# =========================================\n# sample input:\n# 5\n# 1\n# 3\n# 8\n# -2\n# 2\n\n# sample output:\n# 2\n# 2\n# 1\n# 10","repo_name":"eagerithm/algorithms","sub_path":"bugoverdose/sort/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"6711443857","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom typing import List, Dict, Iterable, Tuple, Callable\nfrom matplotlib import pyplot as plt\nfrom collections import Counter\nimport sys\nimport os\nimport string\nimport urllib.request\nimport requests\nimport curl\nimport pycurl\nimport webbrowser\nimport numpy as np\nimport math\nimport pandas as pd\nfrom matplotlib.pyplot import figure\nfrom py.xml import raw\nfrom requests.api import get\n\n\n# In[2]:\n\n\n# Initialize KF\n\n# Number of samples\nN = 1000\n\n# Sampling size\nTs = 0.001\n\n# Sample range\nsample_range = np.linspace(0,1,N)\n\n# System matrix - state\nF = np.array([[1, Ts], [0,1]])\n\n# System matrix - input\nG = np.array([[-0.5*(Ts**2)], [-Ts]])\n# G = [[-0.5*(Ts**2)], [-Ts]]\n\n# Input vector\nu = 9.80665\n\n# Observation matrix\nH = np.array([1,0])\n\n# Sigma - Standard Deviations\nQ = np.array([[0,0],[0,0]])\n\n# Process noise covariance matrix\n# Q_0 = np.array([[(Ts**3)/3, (Ts**2)/2], [(Ts**2)/2, Ts]])\n# Q = sigma2Q * (Q_0)\n# P = 10 *Q\n\n# R = sigma2R * 1\n\n# Identity matrix\nI = np.identity(2)\n\n\n# In[3]:\n\n\n# Define the initial position and velocity\n# Position - m\ny0 = 100\n# Velocity - m/s\nv0 = 0\n\n\n# In[4]:\n\n\n# Initialize the state vector\nx_t = np.zeros((2,N))\nx_t[:,0] = [y0,v0]\nx_t\n\n\n# In[5]:\n\n\n# Time update - prediction\nfor k in range(1,N):\n    x_t[:,k] = np.dot(F,x_t[:,k-1]) + np.dot(G.T,u)\n\nprint(x_t)\n# print(x_t + G*u)\n\n\n# In[6]:\n\n\n# Generate noisy measurements from state\n# m^2/s^2\nfor k in range(1,N):\n    x_t[:,k] = np.dot(F,x_t[:,k-1]) + np.dot(G.T,u)\nR = 0\n# Measurement noise\nv = np.sqrt(R) + np.random.randn(N)\n# Noisy measurement\nz = np.matmul(H,x_t) + v\n\n\n# In[7]:\n\n\n# Activate Kalman Filter\n\n# Initialize state vector\nx = np.zeros((2,N))\nx[:,0] = [105,0]\n\n# Initialize the covariance matrix\n# P = np.array([[10,0],[0, 0.01]])\nP = np.array([[0.01,0.01],[0.01, 0.01]])\n\nfor k in range(1,N):\n    x[:,k] = np.dot(F,x[:,k-1]) + np.dot(G.T,u)\n    P = np.matmul(F, np.matmul(P,F.T)) + Q\n    S = np.matmul(H,np.matmul(P,H.T)) + R\n    K = np.matmul(P,H.T)/S\n    nu = z[k] - np.matmul(H,x[:,k-1])\n    x[:,k] = x[:,k-1] + np.dot(K,nu)\n    P = P - np.dot(K,np.dot(S,K.T))\n# P = np.matmul((I - 
np.matmul(K,H)),P)\n\n\n# In[8]:\n\n\n# # # Activate Kalman Filter\n\n# # # Initialize state vector\n# # x = np.zeros((2,N))\n# # x[:,0] = [105,0]\n\n# # # Initialize the covariance matrix\n# # P = np.array([[10,0],[0, 0.01]])\n\n\n# # Perform Kalman filter\n# for k in range(1,N):\n# # Predict the state vector\n# x[:,k] = np.matmul(F,x[:,k-1]) + (G*u).T\n# # Predict the covariance\n# P_0 = np.matmul(F,P)\n# P = np.matmul(P_0,np.transpose(F)) + Q\n# # Calculate Kalman Gain\n# S = np.matmul(np.matmul(H,P),np.transpose(H)) + R\n# K = np.matmul(P,np.transpose(H))/S\n# # Update the state vector\n# nu = z[k] - np.matmul(H,x[:,k-1])\n# x[:,k] = x[:,k-1] + np.dot(K,nu)\n# # Update the covariance\n# P = np.matmul((I - np.matmul(K,H)),P)\n \n\n\n# In[9]:\n\n\n# Plot\n\nplot1 = plt.figure(1)\nplt.plot(sample_range, z, sample_range,x[0,:],sample_range,x_t[0,:])\nplt.grid()\n\nplot2 = plt.figure(2)\nplt.plot(sample_range, z, sample_range,x[0,:],sample_range,x_t[0,:])\nplt.grid()\n\nplot3 = plt.figure(3)\nplt.plot(sample_range,x_t[1,:], sample_range, x[1,:])\nplt.grid()\n\n# plot4 = plt.figure(4)\n# plt.plot(sample_range,x_t[0,:], sample_range, x)\n# plt.grid()\n\nplot5 = plt.figure(5)\nplt.plot(sample_range,x_t[0,:] - x[0,:])\nplt.grid()\n\nplot6 = plt.figure(6)\nplt.plot(sample_range,x_t[1,:] - x[1,:])\nplt.grid()\n\n\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"DizzleMoon/Example-Codes-Edit-V2","sub_path":"Kalman Filter/Test_KalmanFilter_v3-Copy22.py","file_name":"Test_KalmanFilter_v3-Copy22.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74499338080","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 8 15:10:51 2020\r\n\r\n@author: David\r\n\"\"\"\r\n\r\nimport scipy.sparse as sparse\r\nimport scipy.sparse.linalg as spalg\r\nimport scipy.fft as scifft\r\nimport numpy as np\r\nclass LU:\r\n def __init__(self, M):\r\n self.LU = spalg.splu(M)\r\n \r\n def solve(self, x):\r\n y = self.LU.solve(x)\r\n return y\r\n\r\n\r\nclass Factorize:\r\n def __init__(self, M):\r\n self.factorized = spalg.factorized(M)\r\n \r\n def solve(self, x):\r\n y = self.factorized(x)\r\n return y\r\n\r\nclass FFT:\r\n def __init__(self, M):\r\n self.diag_inv = np.reciprocal(np.real(scifft.fft(M[:,0].todense().T)[0]))\r\n\r\n def solve(self, x):\r\n v = scifft.fft(x)\r\n u = self.diag_inv*v\r\n y = scifft.ifft(u)\r\n return np.real(y)\r\n \r\n \r\nclass FFTInv:\r\n def __init__(self, M):\r\n diag_inv = sparse.diags(np.reciprocal(np.real(scifft.fft(M[:,0].todense().T)[0]))).todense()\r\n A = scifft.ifft(diag_inv).T\r\n self.inv = np.real(scifft.fft(A))\r\n\r\n def solve(self, x):\r\n y = np.dot(self.inv, x)\r\n return y\r\n\r\nif __name__ == '__main__':\r\n import create_matrix as cm\r\n n = 4 \r\n M = cm.create_matrix(n,0.1)\r\n x = np.array([*range(n)])\r\n y = np.linalg.solve(M.todense(), x)\r\n solver = FFTInv(M)\r\n print(solver.inv)\r\n print(solver.solve(x))\r\n print(y)\r\n\r\n\r\n","repo_name":"davidy2911/Research-Internship","sub_path":"solvers.py","file_name":"solvers.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18627939358","text":"import base64\nimport prob0103\nfrom prob0105 import *\n\ntest_1 = b\"this is a test\"\ntest_2 = b\"wokka wokka!!!\"\ndata = base64.b64decode(open('6.txt', 'r').read())\n\ndef hamming_distance(string1, string2):\n score = 0\n xorTuples = 
zip(string1,string2)\n for x,y in xorTuples:\n a = bin(x^y)\n numberstring = a[2:]\n for i in numberstring:\n score += int(i)\n return score\n\n\ndef find_keylength(input_bytes):\n score_tuple = (0,0)\n for KEYSIZE in range(2,40):\n score = 0\n bytes_array = []\n for i in range(0, len(input_bytes), KEYSIZE):\n bytes_array.append(input_bytes[i:i+KEYSIZE])\n for x in range(0, len(bytes_array), 2):\n try:\n score += hamming_distance(bytes_array[x], bytes_array[x+1])\n except:\n pass\n if score_tuple == (0,0) or score_tuple[1] > score:\n score_tuple = (KEYSIZE, score)\n return score_tuple[0]\n\ndef get_key(size, input_bytes):\n answer = \"\"\n chunk_array = []\n repeating_chunk_array = []\n for i in range(0, len(input_bytes), size):\n chunk_array.append(input_bytes[i:i+size])\n for j in range(0,size):\n byte_array = []\n byte_string=b\"\"\n for i in chunk_array:\n if(len(i) > j):\n byte_string += bytes([i[j]])\n byte_array.append(i[j])\n else:\n pass\n answer += sort_scored(single_byte_xor(byte_array))[1][1]\n repeating_chunk_array.append(byte_string)\n return answer\n\ndef make_repeating(key, string):\n return (key * int(len(string)/len(key))+ key[0:(len(string)%len(key))])\n\ndef equal_string_xor(rep_key, phrase):\n return bytes([phrase[i]^rep_key[i] for i in range(len(phrase))])\n\nif __name__ == \"__main__\":\n print('\\n','this is a test for hamming space. the test answer is ------', hamming_distance(test_1, test_2), '\\n')\n print(equal_string_xor(bytes(make_repeating((get_key(find_keylength(data), data)), data), 'utf-8'), data).decode('utf-8'))\n","repo_name":"zalazalaza/cryptopals","sub_path":"prob0106.py","file_name":"prob0106.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12308978775","text":"import pandas as pd \nimport plotly.graph_objects as go\nimport plotly.express as px\n\n# Users over time \nusers_data = pd.read_csv('data/total_users.csv') \nuniswap_users_data = pd.read_csv('data/uniswap_users.csv') \ncompound_users_data = pd.read_csv('data/compound_users.csv') \nyearn_users_data = pd.read_csv('data/yearn_users.csv') \nuniswap_retention = pd.read_csv('data/uniswap_retention.csv') \nsushiswap_retention = pd.read_csv('data/sushiswap_retention.csv')\ncream_users_data = pd.read_csv('data/cream_users.csv')\npolygon_users_data = pd.read_csv('data/daily_polygon_users.csv')\nusers_by_project = pd.read_csv('data/users_by_project.csv')\nprint(uniswap_retention)\n# users\n\nfig = go.Figure()\n'''\nfig.add_trace(go.Scatter(x=users_by_project['date'], y=polygon_users_data['users'],\n mode='lines',\n name='lines',\n line_color='blue'))\n'''\n#retention \nfig = px.bar(users_by_project, x=users_by_project['date'], y=users_by_project[\"users\"], color=\"project\")\n\n\n\nfig.update_layout(\n xaxis=dict(\n showline=True,\n showgrid=False,\n showticklabels=True,\n linewidth=2,\n zeroline=True,\n linecolor='#F4F4F4',\n ticks='outside',\n tickfont=dict(\n family='Arial',\n size=22,\n color='rgb(82, 82, 82)',\n ),\n ),\n yaxis=dict(\n showgrid=True,\n zeroline=True,\n showline=True,\n showticklabels=True,\n gridcolor='#F4F4F4',\n tickfont=dict(\n family='Arial',\n size=22,\n color='blue',\n ),\n ),\n legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n ),\n autosize=True,\n\n plot_bgcolor='white'\n)\n\n'''\nfig.add_layout_image(\n dict(\n source=\"https://images.plot.ly/language-icons/api-home/python-logo.png\",\n xref=\"x\",\n yref=\"y\",\n x=0,\n 
y=3,\n sizex=2,\n sizey=2,\n sizing=\"stretch\",\n opacity=0.5,\n layer=\"below\")\n)\n'''\nfig.show()\n","repo_name":"aphrodite999/eth_analytics_main","sub_path":"defi_historical/misc/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71892550883","text":"# -*- coding: utf-8 -*-\n\n# 1st-run initialisation\n# designed to be called from Crontab's @reboot\n# however this isn't reliable (doesn't work on Win32 Service) so still in models for now...\n\n# Deployments can change settings live via appadmin\n\n# Set deployment_settings.base.prepopulate to False in Production (to save 1x DAL hit every page)\nif not deployment_settings.get_base_prepopulate() or db(db[\"s3_setting\"].id > 0).count():\n populate = False\nelse:\n populate = True\n\nif populate:\n\n # Themes\n tablename = \"admin_theme\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(\n name = T(\"Sahana Blue\"),\n logo = \"img/sahanapy_logo.png\",\n #header_background = \"img/header_bg.png\",\n #footer = \"footer.html\",\n col_background = \"336699\",\n col_menu = \"0066cc\",\n col_highlight = \"0077aa\",\n col_txt_background = \"f3f6ff\",\n col_txt_border = \"c6d1f5\",\n col_txt_underline = \"003366\",\n col_txt = \"006699\",\n col_input = \"ffffcc\",\n col_border_btn_out = \"6699cc\",\n col_border_btn_in = \"4589ce\",\n col_btn_hover = \"3377bb\",\n )\n table.insert(\n name = T(\"Sahana Green\"),\n logo = \"img/sahanapy_logo_green.png\",\n #header_background = \"img/header_bg.png\",\n #footer = \"footer.html\",\n col_background = \"337733\",\n col_menu = \"cc7722\",\n col_highlight = \"338833\",\n col_txt_background = \"f3f6ff\",\n col_txt_border = \"c6d1f5\",\n col_txt_underline = \"003366\",\n col_txt = \"006699\",\n col_input = \"ffffcc\",\n col_border_btn_out = \"6699cc\",\n col_border_btn_in = \"4589ce\",\n col_btn_hover = \"3377bb\",\n )\n table.insert(\n # Needs work\n # - some colours need changing independently of each other\n # - logo size needs storing\n name = T(\"Sahana Steel\"),\n logo = \"img/sahanapy_logo_ideamonk.png\",\n #header_background = \"img/header_bg.png\",\n #footer = \"footer.html\",\n col_background = \"dbdbdb\",\n col_menu = \"0066cc\",\n col_highlight = \"0077aa\",\n col_txt_background = \"f3f6ff\",\n col_txt_border = \"c6d1f5\",\n col_txt_underline = \"003366\",\n col_txt = \"eeeeee\",\n col_input = \"ffffcc\",\n col_border_btn_out = \"c6d1f5\",\n col_border_btn_in = \"4589ce\",\n col_btn_hover = \"3377bb\",\n )\n\n # Global Settings\n tablename = \"s3_setting\"\n table = db[tablename]\n # Ensure that the theme we defined is in the DB ready to be used as a FK\n db.commit()\n if not db(table.id > 0).count():\n table.insert(\n admin_name = T(\"Sahana Administrator\"),\n admin_email = \"support@Not Set\",\n admin_tel = T(\"Not Set\"),\n theme = 1\n )\n\n # Organisation Registry \n tablename = \"org_cluster\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(\n abrv = T(\"Agriculture\"),\n name = T(\"Agriculture\")\n )\n table.insert(\n abrv = T(\"Camp\"),\n name = T(\"Camp Coordination/Management\")\n )\n table.insert(\n abrv = T(\"Recovery\"),\n name = T(\"Early Recovery\")\n )\n table.insert(\n abrv = T(\"Education\"),\n name = T(\"Education\")\n )\n table.insert(\n abrv = T(\"Shelter\"),\n name = T(\"Emergency Shelter\")\n )\n table.insert(\n abrv = T(\"Telecommunications\"),\n name = T(\"Emergency Telecommunications\")\n 
)\n table.insert(\n abrv = T(\"Health\"),\n name = T(\"Health\")\n )\n table.insert(\n abrv = T(\"Logistics\"),\n name = T(\"Logistics\")\n )\n table.insert(\n abrv = T(\"Nutrition\"),\n name = T(\"Nutrition\")\n )\n table.insert(\n abrv = T(\"Protection\"),\n name = T(\"Protection\")\n )\n table.insert(\n abrv = T(\"WASH\"),\n name = T(\"Water Sanitation Hygiene\")\n )\n tablename = \"org_cluster_subsector\"\n table = db[tablename]\n # Ensure that the clusters we defined are in the DB ready to be used as a FK\n db.commit()\n if not db(table.id > 0).count():\n cluster_shelter = db(db.org_cluster.abrv == \"Shelter\").select(db.org_cluster.id, limitby=(0, 1)).first().id\n cluster_nutrition = db(db.org_cluster.abrv == \"Nutrition\").select(db.org_cluster.id, limitby=(0, 1)).first().id\n cluster_wash = db(db.org_cluster.abrv == \"WASH\").select(db.org_cluster.id, limitby=(0, 1)).first().id\n table.insert(\n cluster_id = cluster_shelter,\n abrv = T(\"Clothing\")\n )\n table.insert(\n cluster_id = cluster_shelter,\n abrv = T(\"Shelter\")\n )\n table.insert(\n cluster_id = cluster_nutrition,\n abrv = T(\"Cooking NFIs\")\n )\n table.insert(\n cluster_id = cluster_nutrition,\n abrv = T(\"Food Supply\")\n )\n table.insert(\n cluster_id = cluster_wash,\n abrv = T(\"Aggravating factors\")\n )\n table.insert(\n cluster_id = cluster_wash,\n abrv = T(\"Disease vectors\")\n )\n table.insert(\n cluster_id = cluster_wash,\n abrv = T(\"Drainage\")\n )\n table.insert(\n cluster_id = cluster_wash,\n abrv = T(\"Excreta disposal\")\n )\n table.insert(\n cluster_id = cluster_wash,\n abrv = T(\"Hygiene NFIs\")\n )\n table.insert(\n cluster_id = cluster_wash,\n abrv = T(\"Hygiene practice\")\n )\n table.insert(\n cluster_id = cluster_wash,\n abrv = T(\"Solid waste\")\n )\n table.insert(\n cluster_id = cluster_wash,\n abrv = T(\"Water supply\")\n )\n\n # Person Registry\n tablename = \"pr_person\"\n table = db[tablename]\n # Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL\n field = \"first_name\"\n db.executesql(\"CREATE INDEX %s__idx on %s(%s);\" % (field, tablename, field))\n field = \"middle_name\"\n db.executesql(\"CREATE INDEX %s__idx on %s(%s);\" % (field, tablename, field))\n field = \"last_name\"\n db.executesql(\"CREATE INDEX %s__idx on %s(%s);\" % (field, tablename, field))\n\n # Synchronisation\n tablename = \"sync_setting\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(proxy=\"\")\n\n # Incident Reporting System\n if \"irs\" in deployment_settings.modules:\n # Categories visible to ends-users by default\n tablename = \"irs_icategory\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(code = \"flood\")\n table.insert(code = \"geophysical.landslide\")\n table.insert(code = \"roadway.bridgeClosure\")\n table.insert(code = \"roadway.roadwayClosure\")\n table.insert(code = \"other.buildingCollapsed\")\n table.insert(code = \"other.peopleTrapped\")\n table.insert(code = \"other.powerFailure\")\n\n # Messaging Module\n if \"msg\" in deployment_settings.modules:\n tablename = \"msg_email_settings\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(\n inbound_mail_server = \"imap.gmail.com\",\n inbound_mail_type = \"imap\",\n inbound_mail_ssl = True,\n inbound_mail_port = 993,\n inbound_mail_username = \"username\",\n inbound_mail_password = \"password\",\n inbound_mail_delete = False,\n #outbound_mail_server = \"mail:25\",\n #outbound_mail_from = \"demo@sahanafoundation.org\",\n )\n # Need entries for the 
Settings/1/Update URLs to work\n tablename = \"msg_setting\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert( outgoing_sms_handler = \"Gateway\" )\n tablename = \"msg_modem_settings\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert( modem_baud = 115200 )\n tablename = \"msg_gateway_settings\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert( to_variable = \"to\" )\n tablename = \"msg_tropo_settings\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert( token_messaging = \"\" )\n tablename = \"msg_twitter_settings\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert( pin = \"\" )\n\n # Assessment\n if \"assess\" in deployment_settings.modules:\n tablename = \"assess_baseline_type\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert( name = \"# of population\")\n table.insert( name = \"# of households\" )\n table.insert( name = \"# of children under 5\" )\n table.insert( name = \"# of children\" )\n table.insert( name = \"# of cattle\" )\n table.insert( name = \"Ha. of fields\" )\n\n # Impacts\n if deployment_settings.has_module(\"irs\") or deployment_settings.has_module(\"assess\"): \n tablename = \"impact_type\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert( name = \"# of People Affected\" )\n table.insert( name = \"# People Needing Food\",\n cluster_id = \\\n shn_get_db_field_value(db = db,\n table = \"org_cluster\",\n field = \"id\",\n look_up = \"Food\",\n look_up_field = \"abrv\") \n )\n table.insert( name = \"# People at Risk From Vector-Borne Diseases\",\n cluster_id = \\\n shn_get_db_field_value(db = db,\n table = \"org_cluster\",\n field = \"id\",\n look_up = \"Health\",\n look_up_field = \"abrv\") \n )\n table.insert( name = \"# People without Access to Safe Drinking-Water\",\n cluster_id = \\\n shn_get_db_field_value(db = db,\n table = \"org_cluster\",\n field = \"id\",\n look_up = \"WASH\",\n look_up_field = \"abrv\") \n )\n table.insert( name = \"# Houses Damaged\",\n cluster_id = \\\n shn_get_db_field_value(db = db,\n table = \"org_cluster\",\n field = \"id\",\n look_up = \"Shelter\",\n look_up_field = \"abrv\") \n )\n table.insert( name = \"# Houses Flooded\",\n cluster_id = \\\n shn_get_db_field_value(db = db,\n table = \"org_cluster\",\n field = \"id\",\n look_up = \"Shelter\",\n look_up_field = \"abrv\") \n )\n table.insert( name = \"Water Level still high?\",\n cluster_id = \\\n shn_get_db_field_value(db = db,\n table = \"org_cluster\",\n field = \"id\",\n look_up = \"Shelter\",\n look_up_field = \"abrv\") \n )\n table.insert( name = \"Ha. 
Fields Flooded\",\n                          cluster_id = \\\n                        shn_get_db_field_value(db = db,\n                                                table = \"org_cluster\",\n                                                field = \"id\",\n                                                look_up = \"Agriculture\",\n                                                look_up_field = \"abrv\") \n                         )\n\n    # Supply / Inventory\n    tablename = \"supply_item_category\"\n    table = db[tablename]\n    if not db(table.id > 0).count():\n        #shn_import_table(\"supply_item_category\")\n        table.insert( name = \"Agriculture\" )\n        #table.insert( name = \"Clothing\" )\n        #table.insert( name = \"Equipment\" )\n        table.insert( name = \"Food\" )\n        table.insert( name = \"Health\" )\n        #table.insert( name = \"NFIs\" )\n        table.insert( name = \"Shelter\" )\n        #table.insert( name = \"Transport\" )\n        table.insert( name = \"WASH\" )\n    tablename = \"supply_item\"\n    table = db[tablename]\n    if not db(table.id > 0).count():\n        #shn_import_table(\"supply_item_pakistan\")\n        agriculture = db(db.supply_item_category.name == \"Agriculture\").select(db.supply_item_category.id, limitby=(0, 1)).first().id\n        food = db(db.supply_item_category.name == \"Food\").select(db.supply_item_category.id, limitby=(0, 1)).first().id\n        health = db(db.supply_item_category.name == \"Health\").select(db.supply_item_category.id, limitby=(0, 1)).first().id\n        shelter = db(db.supply_item_category.name == \"Shelter\").select(db.supply_item_category.id, limitby=(0, 1)).first().id\n        wash = db(db.supply_item_category.name == \"WASH\").select(db.supply_item_category.id, limitby=(0, 1)).first().id\n        table.insert(\n            item_category_id = agriculture,\n            name = \"Rice Seed\",\n            unit = \"sack20kg\",\n            comments = \"This should provide enough seed for 1 Hectare of land\"\n        )\n        table.insert(\n            item_category_id = food,\n            name = \"Rice\",\n            unit = \"sack50kg\",\n            comments = \"This should feed 125 people for 1 day\"\n        )\n        table.insert(\n            item_category_id = food,\n            name = \"Cooking Utensils\",\n            unit = \"kit\",\n            comments = \"Cooking Utensils for a Household\"\n        )\n        table.insert(\n            item_category_id = health,\n            name = \"First Aid Kit\",\n            unit = \"kit\",\n            comments = \"This should provide basic first aid (bandages, oral rehydration salts, etc) for 100 people to self-administer\"\n        )\n        table.insert(\n            item_category_id = health,\n            name = \"Medical Kit\",\n            unit = \"kit\",\n            comments = \"This should provide medical supplies (medicines, vaccines) for a professional clinic to provide assistance to a total community of 10,000 people.\"\n        )\n        table.insert(\n            item_category_id = shelter,\n            name = \"Shelter Kit\",\n            unit = \"kit\",\n            comments = \"This kit is suitable to provide emergency repair to a damaged home. It contains a tarpaulin, zinc sheet, wooden poles, hammer & nails\"\n        )\n        table.insert(\n            item_category_id = shelter,\n            name = \"Tent\",\n            unit = \"piece\",\n            comments = \"This should house a family of up to 8 people\"\n        )\n        table.insert(\n            item_category_id = wash,\n            name = \"Hygiene Kit\",\n            unit = \"kit\",\n            comments = \"Personal Hygiene supplies for 100 Households (5 persons/household): Each gets 2x Buckets, 10x Soap, Cotton cloth\"\n        )\n        table.insert(\n            item_category_id = wash,\n            name = \"Water Purification Sachets\",\n            unit = \"kit\",\n            comments = \"Designed to provide a 1st phase drinking water purification solution at the household level. 
Contains 600 sachets to provide sufficient drinking water (4l) for 100 people for 30 days.\"\n )\n\n\n # Budget Module\n if \"budget\" in deployment_settings.modules:\n tablename = \"budget_parameter\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(\n )\n\n # Logistics (old)\n if \"lms\" in deployment_settings.modules:\n tablename = \"lms_catalog\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(\n name=\"Default\",\n description=\"Default Catalog\",\n comments=\"All items are by default added to this Catalog\"\n )\n\n # Ticketing System\n if \"ticket\" in deployment_settings.modules:\n tablename = \"ticket_category\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert( name = \"Report Missing Person\" )\n table.insert( name = \"Report Security Incident\" )\n table.insert( name = \"Report Information\" )\n table.insert( name = \"Request for Assistance\" )\n table.insert( name = \"Offer of Help\" )\n\n # GIS Module\n tablename = \"gis_marker\"\n table = db[tablename]\n # Can't do sub-folders :/\n # need a script to read in the list of default markers from the filesystem, copy/rename & populate the DB 1 by 1\n if not db(table.id > 0).count():\n # We want to start at ID 1, but postgres won't let us truncate() & not needed anyway this is only run on 1st_run.\n #table.truncate()\n table.insert(\n name = \"marker_red\",\n height = 34,\n width = 20,\n image = \"gis_marker.image.marker_red.png\"\n )\n table.insert(\n name = \"marker_yellow\",\n height = 34,\n width = 20,\n image = \"gis_marker.image.marker_yellow.png\"\n )\n table.insert(\n name = \"marker_amber\",\n height = 34,\n width = 20,\n image = \"gis_marker.image.marker_amber.png\"\n )\n table.insert(\n name = \"marker_green\",\n height = 34,\n width = 20,\n image = \"gis_marker.image.marker_green.png\"\n )\n table.insert(\n name = \"person\",\n height = 50,\n width = 50,\n image = \"gis_marker.image.Civil_Disturbance_Theme.png\"\n )\n table.insert(\n name = \"school\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Edu_Schools_S1.png\"\n )\n table.insert(\n name = \"food\",\n height = 40,\n width = 40,\n image = \"gis_marker.image.Emergency_Food_Distribution_Centers_S1.png\"\n )\n table.insert(\n name = \"office\",\n height = 40,\n width = 40,\n image = \"gis_marker.image.Emergency_Operations_Center_S1.png\"\n )\n table.insert(\n name = \"shelter\",\n height = 40,\n width = 40,\n image = \"gis_marker.image.Emergency_Shelters_S1.png\"\n )\n table.insert(\n name = \"activity\",\n height = 40,\n width = 40,\n image = \"gis_marker.image.Emergency_Teams_S1.png\"\n )\n table.insert(\n name = \"hospital\",\n height = 40,\n width = 40,\n image = \"gis_marker.image.E_Med_Hospital_S1.png\"\n )\n table.insert(\n name = \"earthquake\",\n height = 50,\n width = 50,\n image = \"gis_marker.image.Geo_Earth_Quake_Epicenter.png\"\n )\n table.insert(\n name = \"volcano\",\n height = 50,\n width = 50,\n image = \"gis_marker.image.Geo_Volcanic_Threat.png\"\n )\n table.insert(\n name = \"tsunami\",\n height = 50,\n width = 50,\n image = \"gis_marker.image.Hydro_Meteor_Tsunami_ch.png\"\n )\n table.insert(\n name = \"church\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Public_Venue_Church_S1.png\"\n )\n table.insert(\n name = \"mosque\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Public_Venue_Mosque_S1.png\"\n )\n table.insert(\n name = \"temple\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Public_Venue_Temple_S1.png\"\n )\n 
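# Annotation (not part of the original source): a sketch of how these\n        # repetitive marker inserts could be driven from a small data table instead;\n        # the names, sizes and image files below are copied from the calls that follow:\n        #   for name, height, width, image in [\n        #       (\"phone\", 10, 5, \"gis_marker.image.SMS_Message_Phone.png\"),\n        #       (\"orphanage\", 33, 44, \"gis_marker.image.Special_Needs_Child_Day_Care_S1.png\"),\n        #   ]:\n        #       table.insert(name=name, height=height, width=width, image=image)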
table.insert(\n name = \"phone\",\n height = 10,\n width = 5,\n image = \"gis_marker.image.SMS_Message_Phone.png\"\n )\n table.insert(\n name = \"orphanage\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Special_Needs_Child_Day_Care_S1.png\"\n )\n table.insert(\n name = \"airport\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Trans_Airport_S1.png\"\n )\n table.insert(\n name = \"bridge\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Trans_Bridge_S1.png\"\n )\n table.insert(\n name = \"helicopter\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Trans_Helicopter_Landing_Site_S1.png\"\n )\n table.insert(\n name = \"port\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Trans_Port_S1.png\"\n )\n table.insert(\n name = \"rail_station\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Trans_Rail_Station_S1.png\"\n )\n table.insert(\n name = \"vehicle\",\n height = 50,\n width = 50,\n image = \"gis_marker.image.Transport_Vehicle_Theme.png\"\n )\n table.insert(\n name = \"water\",\n height = 33,\n width = 44,\n image = \"gis_marker.image.Water_Supply_Infrastructure_Theme_S1.png\"\n )\n table.insert(\n name = \"volunteer\",\n height = 40,\n width = 39,\n image = \"gis_marker.image.Volunteer.png\"\n )\n tablename = \"gis_symbology\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(\n name = \"Australasia\"\n )\n table.insert(\n name = \"Canada\"\n )\n table.insert(\n name = \"US\"\n )\n tablename = \"gis_projection\"\n table = db[tablename]\n if not db(table.id > 0).count():\n # We want to start at ID 1, but postgres won't let us truncate() & not needed anyway this is only run on 1st_run.\n #table.truncate()\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-PROJECTION-900913\",\n name = \"Spherical Mercator\",\n epsg = 900913,\n maxExtent = \"-20037508, -20037508, 20037508, 20037508.34\",\n maxResolution = 156543.0339,\n units = \"m\"\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-PROJECTION-4326\",\n name = \"WGS84\",\n epsg = 4326,\n maxExtent = \"-180,-90,180,90\",\n maxResolution = 1.40625,\n units = \"degrees\"\n # OSM use these:\n #maxResolution = 156543.0339,\n #units = \"m\"\n )\n\n tablename = \"gis_config\"\n table = db[tablename]\n # Ensure that the projection/marker we defined are in the DB ready to be used as FKs\n db.commit()\n symbology_us = db(db.gis_symbology.name == \"US\").select(db.gis_symbology.id, limitby=(0, 1)).first().id\n if not db(table.id > 0).count():\n # We want to start at ID 1\n table.truncate()\n table.insert(\n lat = \"51.8\",\n lon = \"-1.3\",\n zoom = 7,\n projection_id = 1,\n marker_id = 1,\n map_height = 600,\n map_width = 800,\n symbology_id = symbology_us,\n wmsbrowser_url = \"http://geo.eden.sahanafoundation.org/geoserver/wms?service=WMS&request=GetCapabilities\"\n )\n\n tablename = \"gis_feature_class\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-TRACK\",\n name = \"Track\",\n gps_marker = \"TracBack Point\",\n resource = \"gis_track\"\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-L0\",\n name = \"Country\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-L1\",\n name = \"Province\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-L2\",\n name = \"District\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-L3\",\n name = \"Town\",\n gps_marker = \"City 
(Medium)\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-L4\",\n name = \"Village\",\n gps_marker = \"City (Small)\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-AIRPORT\",\n name = \"Airport\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"airport\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Airport\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-BRIDGE\",\n name = \"Bridge\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"bridge\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Bridge\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-CHURCH\",\n name = \"Church\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"church\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Church\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-FOOD\",\n name = \"Food\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"food\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Restaurant\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-HOSPITAL\",\n name = \"Hospital\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"hospital\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Medical Facility\",\n resource = \"hms_hospital\"\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-INCIDENT\",\n name = \"Incident\",\n gps_marker = \"Danger Area\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-OFFICE\",\n name = \"Office\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"office\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Building\",\n resource = \"org_office\"\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-PERSON\",\n name = \"Person\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"person\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Contact, Dreadlocks\",\n resource = \"pr_person\"\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-PORT\",\n name = \"Port\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"port\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Marina\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-PROJECT\",\n name = \"Project\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-SCHOOL\",\n name = \"School\",\n marker_id = db(db.gis_marker.name == \"school\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"School\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-SHELTER\",\n name = \"Shelter\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"shelter\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Campground\",\n resource = \"cr_shelter\"\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-SMS\",\n name = \"SMS\",\n marker_id = db(db.gis_marker.name == \"phone\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-VEHICLE\",\n name = \"Vehicle\",\n symbology_id = symbology_us,\n marker_id = 
db(db.gis_marker.name == \"vehicle\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Car\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-VOLUNTEER\",\n name = \"Volunteer\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"volunteer\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Contact, Dreadlocks\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-WAREHOUSE\",\n name = \"Warehouse\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"office\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Building\",\n )\n table.insert(\n uuid = \"www.sahanafoundation.org/GIS-FEATURE-CLASS-WATER\",\n name = \"Water\",\n symbology_id = symbology_us,\n marker_id = db(db.gis_marker.name == \"water\").select(db.gis_marker.id, limitby=(0, 1)).first().id,\n gps_marker = \"Drinking Water\",\n )\n tablename = \"gis_apikey\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(\n name = \"google\",\n apikey = \"ABQIAAAAgB-1pyZu7pKAZrMGv3nksRRi_j0U6kJrkFvY4-OX2XYmEAa76BSH6SJQ1KrBv-RzS5vygeQosHsnNw\",\n description = \"localhost\"\n )\n table.insert(\n name = \"yahoo\",\n apikey = \"euzuro-openlayers\",\n description = \"trial - replace for Production use\"\n )\n table.insert(\n name = \"multimap\",\n apikey = \"metacarta_04\",\n description = \"trial - replace for Production use\"\n )\n tablename = \"gis_layer_feature\"\n table = db[tablename]\n if not db(table.id > 0).count():\n table.insert(\n name = \"Incident Reports\",\n module = \"irs\",\n resource = \"ireport\",\n popup_label = \"Incident\",\n # Default (but still better to define here as otherwise each feature needs to check it's feature_class)\n marker_id = db(db.gis_marker.name == \"marker_red\").select(db.gis_marker.id, limitby=(0, 1)).first().id\n )\n table.insert(\n name = \"Shelters\",\n module = \"cr\",\n resource = \"shelter\",\n popup_label = \"Shelter\",\n marker_id = db(db.gis_marker.name == \"shelter\").select(db.gis_marker.id, limitby=(0, 1)).first().id\n )\n table.insert(\n name = \"Requests\",\n module = \"rms\",\n resource = \"req\",\n popup_label = \"Request\",\n marker_id = db(db.gis_marker.name == \"marker_yellow\").select(db.gis_marker.id, limitby=(0, 1)).first().id\n )\n table.insert(\n name = \"Assessments\",\n module = \"assess\",\n resource = \"rat\",\n popup_label = \"Rapid Assessment\",\n marker_id = db(db.gis_marker.name == \"marker_green\").select(db.gis_marker.id, limitby=(0, 1)).first().id\n )\n table.insert(\n name = \"Activities\",\n module = \"project\",\n resource = \"activity\",\n popup_label = \"Activity\",\n marker_id = db(db.gis_marker.name == \"activity\").select(db.gis_marker.id, limitby=(0, 1)).first().id\n )\n table.insert(\n name = \"Warehouses\",\n module = \"inventory\",\n resource = \"store\",\n popup_label = \"Warehouse\",\n marker_id = db(db.gis_marker.name == \"office\").select(db.gis_marker.id, limitby=(0, 1)).first().id\n )\n tablename = \"gis_layer_openstreetmap\"\n table = db[tablename]\n if not db(table.id > 0).count():\n # Populate table\n for subtype in gis_layer_openstreetmap_subtypes:\n if subtype in [\"Taiwan\"]:\n # Local OSM layers should be disabled by default in default builds\n table.insert(\n name = \"OSM \" + subtype,\n subtype = subtype,\n enabled = False\n )\n else:\n table.insert(\n name = \"OSM \" + subtype,\n subtype = subtype\n )\n tablename = \"gis_layer_google\"\n table = db[tablename]\n if not 
db(table.id > 0).count():\n # Populate table\n for subtype in gis_layer_google_subtypes:\n table.insert(\n name = \"Google \" + subtype,\n subtype = subtype,\n enabled = False\n )\n tablename = \"gis_layer_yahoo\"\n table = db[tablename]\n if not db(table.id > 0).count():\n # Populate table\n for subtype in gis_layer_yahoo_subtypes:\n table.insert(\n name = \"Yahoo \" + subtype,\n subtype = subtype,\n enabled = False\n )\n #tablename = \"gis_layer_bing\"\n #table = db[tablename]\n #if not db(table.id > 0).count():\n # Populate table\n # for subtype in gis_layer_bing_subtypes:\n # table.insert(\n # name = \"Bing \" + subtype,\n # subtype = subtype,\n # enabled = False\n # )\n tablename = \"gis_layer_mgrs\"\n table = db[tablename]\n if not db(table.id > 0).count():\n # Populate table\n table.insert(\n name = \"MGRS Atlas PDFs\",\n description = \"http://en.wikipedia.org/wiki/Military_grid_reference_system\",\n url = \"http://www.sharedgeo.org/datasets/shared/maps/usng/pdf.map?VERSION=1.0.0&SERVICE=WFS&request=GetFeature&typename=wfs_all_maps\",\n enabled = False\n )\n tablename = \"gis_layer_wms\"\n table = db[tablename]\n if not db(table.id > 0).count():\n # Populate table\n table.insert(\n name = \"VMap0\",\n description = \"A Free low-resolution Vector Map of the whole world\",\n url = \"http://labs.metacarta.com/wms/vmap0\",\n #projection_id = db(db.gis_projection.epsg == 4326).select(limitby=(0, 1)).first().id,\n layers = \"basic\",\n enabled = False\n )\n table.insert(\n name = \"Blue Marble\",\n description = \"A composite of four months of MODIS observations with a spatial resolution (level of detail) of 1 square kilometer per pixel.\",\n url = \"http://maps.opengeo.org/geowebcache/service/wms\",\n #projection_id = db(db.gis_projection.epsg == 4326).select(limitby=(0, 1)).first().id,\n layers = \"bluemarble\",\n enabled = False\n )\n tablename = \"gis_layer_georss\"\n table = db[tablename]\n if not db(table.id > 0).count():\n # Populate table\n table.insert(\n name = \"Earthquakes\",\n description = \"USGS: Global 7-day\",\n url = \"http://earthquake.usgs.gov/eqcenter/catalogs/eqs7day-M2.5.xml\",\n projection_id = db(db.gis_projection.epsg == 4326).select(limitby=(0, 1)).first().id,\n marker_id = db(db.gis_marker.name == \"earthquake\").select(limitby=(0, 1)).first().id,\n enabled = False\n )\n table.insert(\n name = \"Volcanoes\",\n description = \"USGS: US recent\",\n url = \"http://volcano.wr.usgs.gov/rss/vhpcaprss.xml\",\n projection_id = db(db.gis_projection.epsg == 4326).select(limitby=(0, 1)).first().id,\n marker_id = db(db.gis_marker.name == \"volcano\").select(limitby=(0, 1)).first().id,\n enabled = False\n )\n\n tablename = \"gis_location\"\n table = db[tablename]\n if not db(table.id > 0).count():\n # L0 Countries\n import_file = os.path.join(request.folder,\n \"private\", \"import\",\n \"countries.csv\")\n table.import_from_csv_file(open(import_file,\"r\"))\n # Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL\n field = \"name\"\n db.executesql(\"CREATE INDEX %s__idx on %s(%s);\" % (field, tablename, field))\n\n # Authorization\n # User Roles (uses native Web2Py Auth Groups)\n table = auth.settings.table_group_name\n if not db(db[table].id > 0).count():\n # The 1st 4 permissions are hard-coded for performance reasons\n # This must stay as id=1\n auth.add_group(\"Administrator\", description = \"System Administrator - can access & make changes to any data\")\n # This must stay as id=2\n auth.add_group(\"Authenticated\", description = \"Authenticated - 
all logged-in users\")\n # This must stay as id=3\n auth.add_group(\"Creator\", description = \"Creator - dummy role which isn't meant to have users added to it. Used to restrict records to just those created by the user\")\n # Optional roles for delegating access\n # This must stay as id=4\n auth.add_group(\"Editor\", description = \"Editor - can access & make changes to any unprotected data\")\n auth.add_group(\"UserAdmin\", description = \"UserAdmin - allowed to manage the membership of the Editor role\")\n #auth.add_group(\"Restricted\", description = \"Restricted - is given a simplified full-screen view so as to minimise the possibility of errors\")\n # GIS\n auth.add_group(\"MapAdmin\", description = \"MapAdmin - allowed access to edit the MapService Catalogue\")\n # DVI\n auth.add_group(\"DVI\", description = \"DVI - allowed access to the DVI module\")\n # HMS\n auth.add_group(\"HMSAdmin\", description = \"HMSAdmin - full access to HMS\")\n auth.add_group(\"HMSOfficer\", description = \"HMSOfficer - permission to edit requests and pledges\")\n auth.add_group(\"HMSFacility\", description = \"HMSFacility - permission to submit status and requests\")\n auth.add_group(\"HMSOrg\", description = \"HMSOrg - permission to submit pledges\")\n auth.add_group(\"HMSViewer\", description = \"HMSViewer - permission to access HMS\")\n # Ticketing\n auth.add_group(\"TicketAdmin\", description = \"TicketAdmin - full access to Ticketing\")\n\n # Security Defaults for all tables (if using 'full' security policy)\n if session.s3.security_policy != 1:\n table = auth.settings.table_permission_name\n if not db(db[table].id > 0).count():\n # For performance we only populate this once (at system startup)\n # => need to populate manually when adding new tables to the database! 
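# The comments above pin the first four groups to ids 1-4, so a defensive
# check right after bootstrap can catch a mis-ordered setup early. A minimal
# sketch, assuming web2py Auth's id_group (role name -> numeric group id):
expected_ids = {"Administrator": 1, "Authenticated": 2,
                "Creator": 3, "Editor": 4}
for role, role_id in expected_ids.items():
    assert auth.id_group(role) == role_id, \
        "role %s did not get the expected id %d" % (role, role_id)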
(less RAD)\n authenticated = auth.id_group(\"Authenticated\")\n editors = auth.id_group(\"Editor\") # needed by the add_permission calls below\n for tablename in db.tables:\n table = db[tablename]\n # allow all registered users the ability to Read all records\n auth.add_permission(authenticated, \"read\", table)\n # allow anonymous users the ability to Read all records\n #auth.add_permission(anonymous, \"read\", table)\n # Editors can make changes\n auth.add_permission(editors, \"create\", table)\n auth.add_permission(editors, \"update\", table)\n auth.add_permission(editors, \"delete\", table)\n\n # Module-specific defaults can be set here\n #table = pr_person\n # Clear out defaults\n #auth.del_permission(authenticated, \"read\", table)\n #auth.del_permission(editors, \"create\", table)\n #auth.del_permission(editors, \"update\", table)\n #auth.del_permission(editors, \"delete\", table)\n # Add specific Role(s)\n #id = auth.id_group(\"myrole\")\n #auth.add_permission(id, \"read\", table)\n #auth.add_permission(id, \"create\", table)\n #auth.add_permission(id, \"update\", table)\n #auth.add_permission(id, \"delete\", table)\n\n # Ensure DB population committed when running through shell\n db.commit()\n","repo_name":"ksetyadi/Sahana-Eden","sub_path":"models/zzz_1st_run.py","file_name":"zzz_1st_run.py","file_ext":"py","file_size_in_byte":42166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72955953440","text":"#Author Ciaran Coady\n# Student Number 17326951\n# Python Program for Lowest Common Ancestor in a Directed Acyclic Graph\n# A Node in the graph \nclass Node: \n\t# Constructor to create a new binary node \n\tdef __init__(self, key): \n\t\tself.key = key \n\t\tself.left = None\n\t\tself.right = None\n\t\tself.visited = False\n\ndef findLCA(root, key1, key2):\n\t# Check if the initial root node is null, if so don't recurse\n\tif(root is not None):\n\t\tif(root.left is None and root.right is None and (key1 != key2)):\n\t\t\treturn -1\n\treturn findLCARecursive(root, key1, key2, [False], [False])\n\ndef findLCARecursive(root, key1, key2, found1, found2):\n\n\t# If the node is null return\n\tif(root is None):\n\t\treturn -1\n\n\t# If the node has been visited return as we have found a cycle\n\t#if(root.visited is True):\n\t#\treturn -1\n\n\t# Otherwise visit this node and search its left and right subtrees\n\t#root.visited = True\n\tleft_subtree = findLCARecursive(root.left, key1, key2, found1, found2)\n\tright_subtree = findLCARecursive(root.right, key1, key2, found1, found2)\n\t# After we've made the recursive call we are no longer checking for cycles\n\t# so leave the node the way we found it \n\t#root.visited = False\n\n\t# If we find a key, set its boolean and return the key\n\t# (compare values with ==, not 'is': identity checks on ints are unreliable)\n\tif(root.key == key1):\n\t\tfound1[0] = True\n\t\treturn root.key\n\t\t\n\tif(root.key == key2):\n\t\tfound2[0] = True\n\t\treturn root.key\n\n\t# If we have found both keys do a case analysis to return the correct value\t\n\tif(found1[0] and found2[0]):\n\t\tif(left_subtree != -1 and right_subtree != -1):\n\t\t\treturn root.key\n\t\telif(left_subtree != -1):\n\t\t\treturn left_subtree\n\t\telse:\n\t\t\treturn right_subtree\n\n\t# If execution has fallen through to here it means the keys lie in the same\n\t# subtree, return the LCA\n\tif(left_subtree != -1):\n\t\treturn left_subtree\n\t\n\tif(right_subtree != -1):\n\t\treturn right_subtree\n\t\n\t# No keys found return failure\n\treturn -1\n\n
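# A quick sanity check of the flag-based search above on a small hand-built
# tree (a sketch; reuses the Node class defined at the top of this file):
#
#        1
#       / \
#      2   3
#     / \
#    4   5
root = Node(1)
root.left, root.right = Node(2), Node(3)
root.left.left, root.left.right = Node(4), Node(5)
print(findLCA(root, 4, 5))   # 2 -- both keys sit under node 2
print(findLCA(root, 4, 3))   # 1 -- the keys split at the root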
##### A second implementation of finding LCA for a binary tree\n##### which is sourced from geeks4geeks\n\n# Finds the path from the root node to the given node of the tree. \n# Stores the path in a list path[], returns true if path \n# exists otherwise false \ndef findPath( root, path, k): \n\n\t# Base Case \n\tif root is None: \n\t\treturn False\n\n\t# Store this node in the path list. The node will be \n\t# removed if not in path from root to k \n\tpath.append(root.key) \n\n\t# See if the k is same as root's key \n\tif root.key == k : \n\t\treturn True\n\n\t# Check if k is found in left or right sub-tree \n\tif ((root.left != None and findPath(root.left, path, k)) or\n\t\t\t(root.right != None and findPath(root.right, path, k))): \n\t\treturn True\n\n\t# If not present in subtree rooted with root, remove \n\t# root from path and return False \n\t\n\tpath.pop() \n\treturn False\n\n# Returns LCA if nodes n1, n2 are present in the given \n# binary tree otherwise return -1 \ndef findLCA2(root, n1, n2): \n\n\t# To store paths to n1 and n2 from the root \n\tpath1 = [] \n\tpath2 = [] \n\n\t# Find paths from root to n1 and root to n2. \n\t# If either n1 or n2 is not present, return -1 \n\tif (not findPath(root, path1, n1) or not findPath(root, path2, n2)): \n\t\treturn -1\n\n\t# Compare the paths to get the first different value \n\ti = 0\n\twhile(i < len(path1) and i < len(path2)): \n\t\tif path1[i] != path2[i]: \n\t\t\tbreak\n\t\ti += 1\n\treturn path1[i-1] \n","repo_name":"ciarancoady98/Lowest-Common-Ancestor","sub_path":"src/BinaryTree.py","file_name":"BinaryTree.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33918305548","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nfrom common import *\nfrom search import *\nfrom web import *\n\n\nDIRS = {'N': (0, 1), 'S': (0, -1), 'W': (-1, 0), 'E': (1, 0)}\n\n\ng = defaultdict(set)\nx = y = 0\ns = []\nfor c in dt[1:-1]:\n if c in DIRS:\n dx, dy = DIRS[c]\n nx, ny = x + dx, y + dy\n g[(x, y)].add((nx, ny))\n g[(nx, ny)].add((x, y))\n x, y = nx, ny\n continue\n if c == '(':\n s.append((x, y))\n continue\n if c == '|':\n x, y = s[-1]\n continue\n if c == ')':\n x, y = s.pop()\n continue\n print(c)\n\nmx = 0\nfar = set()\nclass State(BaseSearchState):\n def __init__(self, loc, dist):\n super().__init__()\n self.loc = loc\n self.dist = dist\n\n def is_valid(self):\n return True\n\n def is_finished(self):\n return False\n\n def get_neighbors(self):\n return [State(e, self.dist + 1) for e in g[self.loc]]\n\n def get_dist_from_start(self):\n return self.dist\n\n def process(self):\n global mx\n mx = max(mx, self.dist)\n if self.dist >= 1000:\n far.add(self.loc)\n\nState((0, 0), 0).search()\n\n\n# part 1\nsm(mx)\n\n\n# part 2\nsm(len(far))\n","repo_name":"zswaff/advent","sub_path":"2018/20/sln.py","file_name":"sln.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24837271795","text":"import array\nimport time\n\ndata = [l.strip() for l in open('2021/25/input.txt', 'rb')]\nheight = len(data)\nwidth = len(data[0])\n\nseafloor = array.array('B')\nfor row in data:\n seafloor.frombytes(row)\n\ndef print_floor():\n for y in range(height):\n p = y*width\n line = seafloor[p:p+width]\n print(line.tobytes().decode('ascii'))\n\nprint('initial state:')\nprint_floor()\n\nt = 0\ni = 0\nwhile True:\n i += 1\n\n changed = False\n for herd in (ord(b'>'), ord(b'v')):\n newfloor = array.array('B', seafloor)\n for y in 
range(height):\n for x in range(width):\n if seafloor[y*width+x] == herd:\n nx,ny = x,y\n if herd == ord(b'>'):\n nx += 1\n if nx >= width:\n nx = 0\n else:\n ny += 1\n if ny >= height:\n ny = 0\n if seafloor[ny*width+nx] == ord('.'):\n newfloor[ny*width+nx] = herd\n newfloor[y*width+x] = ord('.')\n if seafloor != newfloor:\n changed = True\n seafloor = newfloor\n\n tn = time.time()\n if tn-t > 1:\n t = tn\n print('\\nafter {} steps:'.format(i))\n print_floor()\n\n if not changed:\n break\n\nprint('\\nfinal state:')\nprint_floor()\nprint('stable after {} steps'.format(i))","repo_name":"kapet/adventofcode","sub_path":"2021/25/one.py","file_name":"one.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14037567396","text":"# classes\nclass Employee(object):\n # attribute = age, address, name\n # behavior = pass\n pass\n\nemployee1 = Employee()\n\n##############################################################################################################\n\n# attribute\nclass Footballer:\n football_club = \"Fenerbahce\"\n age = 30\n \nf1 = Footballer()\n\n#print(f1)\n#print(f1.age)\n#print(f1.football_club)\n\nf1.football_club = \"Goztepe\"\n\n#print(f1.football_club)\n\n##############################################################################################################\n\n# methods\nclass Square(object):\n edge = 5 #attribute\n area = 0\n \n def area1(self):\n self.area = self.edge * self.edge \n # print(\"Area: \",self.area)\n \ns1 = Square()\n#print(s1)\n#print(s1.edge)\n#print(s1.area())\n\ns1.edge = 7\ns1.area1()\n#print(s1.edge)\n\n##############################################################################################################\n\n# methods vs function\n\nclass Emp(object):\n \n age = 25\n salary = 1000\n \n def ageSalaryRatio(self):\n return self.age / self.salary\n \ne1 = Emp()\ne1.ageSalaryRatio()\n\n#print(e1.ageSalaryRatio())\n\n\n# function\ndef ageSalaryRatio(age, salary):\n a = age / salary\n #print(\"Function: \", a)\n\nageSalaryRatio(25, 1000)\n\n\ndef findArea(a, b) :\n area = a*b**2\n # print(area)\n return area \n \npi = 3.14\nr = 5\n\nresult1 = findArea(pi, r)\nresult2 = findArea(pi, 10)\n\n#print(result1 + result2)\n\n##############################################################################################################\n\n# initializer or constructor\nclass Animal(object):\n\n def __init__(self, name, age): # (name, age) = (\"dog\", 2) = (a, b)\n self.name = name\n self.age = age\n \n def getAge(self):\n return self.age\n \n def getName(self):\n return self.name\n \na1 = Animal(\"dog\", 2)\na2 = Animal(\"cat\", 3)\na3 = Animal(\"bird\", 1)\n\n","repo_name":"batuhansenn/100DaysOfCode","sub_path":"Days1/OOPCodes/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"674446332","text":"import os\nfrom PIL import Image\nfrom pyparsing import original_text_for\nfrom torch.utils.data import Dataset\nimport torch\nimport torchvision.transforms as transforms\nimport numpy as np\nimport cv2\n\ndef get_transforms(cfg):\n \n train_transform = transforms.Compose([\n transforms.Resize((cfg.DATA.RESIZE_SIZE, cfg.DATA.RESIZE_SIZE)),\n transforms.RandomCrop((cfg.DATA.CROP_SIZE, cfg.DATA.CROP_SIZE)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(list(map(float, cfg.DATA.IMAGE_MEAN)), 
list(map(float, cfg.DATA.IMAGE_STD)))\n ])\n test_transform = transforms.Compose([\n transforms.Resize((cfg.DATA.CROP_SIZE, cfg.DATA.CROP_SIZE)),\n transforms.ToTensor(),\n transforms.Normalize(list(map(float, cfg.DATA.IMAGE_MEAN)), list(map(float, cfg.DATA.IMAGE_STD)))\n ])\n\n orig_transform = transforms.Compose([\n transforms.Resize((cfg.DATA.CROP_SIZE, cfg.DATA.CROP_SIZE)),\n transforms.ToTensor(),\n # transforms.Normalize(list(map(float, cfg.DATA.IMAGE_MEAN)), list(map(float, cfg.DATA.IMAGE_STD)))\n ])\n \n test_tencrops_transform = transforms.Compose([\n transforms.Resize((cfg.DATA.RESIZE_SIZE, cfg.DATA.RESIZE_SIZE)),\n transforms.TenCrop(cfg.DATA.CROP_SIZE),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(cfg.DATA.IMAGE_MEAN, cfg.DATA.IMAGE_STD)\n (transforms.ToTensor()(crop)) for crop in crops])),\n ])\n return train_transform, test_transform, test_tencrops_transform, orig_transform\n\n\nclass CUBDataset(Dataset):\n \"\"\"CUB-200-2011 dataset.\n\n Args:\n root (string): Root directory of dataset where directory \"CUB_200_2011\" exists.\n cfg (dict): Hyperparameter configuration.\n is_train (bool): If True, creates dataset from training set, otherwise creates from test set.\n val (bool): validation dataset for finetuning hyperparameters.\n \"\"\"\n def __init__(self, root, cfg, is_train, val=False):\n\n self.root = root\n self.cfg = cfg\n self.is_train = is_train\n self.resize_size = cfg.DATA.RESIZE_SIZE\n self.crop_size = cfg.DATA.CROP_SIZE\n\n with open(os.path.join(root, 'images.txt'), 'r') as o:\n self.image_list = self.remove_1st_column(o.readlines())\n with open(os.path.join(root, 'image_class_labels.txt'), 'r') as o:\n self.label_list = self.remove_1st_column(o.readlines())\n with open(os.path.join(root, 'train_test_split.txt'), 'r') as o:\n self.split_list = self.remove_1st_column(o.readlines())\n with open(os.path.join(root, 'bounding_boxes.txt'), 'r') as o:\n self.bbox_list = self.remove_1st_column(o.readlines())\n \n self.train_transform, self.onecrop_transform, self.tencrops_transform, self.orig_transform = get_transforms(cfg)\n if cfg.TEST.TEN_CROPS:\n self.test_transform = self.tencrops_transform\n else:\n self.test_transform = self.onecrop_transform\n\n if is_train:\n self.index_list = self.get_index(self.split_list, '1')\n else:\n self.index_list = self.get_index(self.split_list, '0')\n \n self.val = val\n if val:\n self.image_dir = os.path.join(self.root, 'CUBV2')\n # val2/1/1.jpeg,1\n datalist = os.path.join(self.root, 'CUBV2', 'val', 'image_ids.txt')\n labelList = os.path.join(self.root, 'CUBV2', 'val', 'class_labels.txt')\n bboxlist = os.path.join(self.root, 'CUBV2', 'val', 'localization.txt')\n class_labels = {}\n boxes = {}\n dataList = []\n with open(datalist) as f:\n for line in f.readlines():\n dataList.append(line.strip('\\n')) \n with open(labelList) as f:\n for line in f.readlines():\n image_id, class_label = line.strip('\\n').split(',')\n class_labels[image_id] = int(class_label)\n with open(bboxlist) as f:\n for line in f.readlines():\n image_id, x0s, x1s, y0s, y1s = line.strip('\\n').split(',')\n x0, x1, y0, y1 = int(x0s), int(x1s), int(y0s), int(y1s)\n if image_id in boxes:\n boxes[image_id].append([x0, x1, y0, y1])\n else:\n boxes[image_id] = [[x0, x1, y0, y1]] \n \n self.val2_class_labels = class_labels\n self.val2_boxes = boxes\n self.val2_names = dataList\n\n def get_index(self, list, value):\n index = []\n for i in range(len(list)):\n if list[i] == value:\n index.append(i)\n return index\n\n
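# When cfg.TEST.TEN_CROPS is set, every sample becomes a stack of ten crops,
# so evaluation code has to fold the crop axis into the batch and average the
# outputs. A minimal sketch (generic `model`; not taken from this repository):
def forward_tencrop(model, images):
    # images: (batch, ncrops, C, H, W), as produced by transforms.TenCrop
    b, ncrops, c, h, w = images.size()
    logits = model(images.view(-1, c, h, w))   # fuse crops into the batch
    return logits.view(b, ncrops, -1).mean(1)  # average over the ten crops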
def remove_1st_column(self, input_list):\n output_list = []\n for i in range(len(input_list)):\n if len(input_list[i][:-1].split(' '))==2:\n output_list.append(input_list[i][:-1].split(' ')[1])\n else:\n output_list.append(input_list[i][:-1].split(' ')[1:])\n return output_list\n\n def __getitem__(self, idx):\n name = self.image_list[self.index_list[idx]]\n image_path = os.path.join(self.root, 'images', name)\n \n label = int(self.label_list[self.index_list[idx]])-1\n \n if self.val:\n name = self.val2_names[idx]\n label = self.val2_class_labels[name]\n image = Image.open(os.path.join(self.image_dir, name)).convert('RGB')\n bbox = self.val2_boxes[name][0] # only one is available\n else: \n image = Image.open(image_path).convert('RGB')\n bbox = self.bbox_list[self.index_list[idx]]\n bbox = [int(float(value)) for value in bbox]\n \n image_size = list(image.size) \n \n if self.is_train:\n image = self.train_transform(image)\n return image, label\n else:\n orig = self.orig_transform(image)\n image = self.test_transform(image)\n\n [x, y, bbox_width, bbox_height] = bbox\n # if self.is_train:\n # resize_size = self.resize_size\n # crop_size = self.crop_size\n # shift_size = (resize_size - crop_size) // 2\n resize_size = self.crop_size\n crop_size = self.crop_size\n shift_size = 0\n [image_width, image_height] = image_size\n left_bottom_x = int(max(x / image_width * resize_size - shift_size, 0))\n left_bottom_y = int(max(y / image_height * resize_size - shift_size, 0))\n right_top_x = int(min((x + bbox_width) / image_width * resize_size - shift_size, crop_size - 1))\n right_top_y = int(min((y + bbox_height) / image_height * resize_size - shift_size, crop_size - 1))\n\n # gt_bbox = [left_bottom_x, left_bottom_y, right_top_x, right_top_y]\n # gt_bbox = torch.tensor(gt_bbox)\n gt_bbox = np.array([left_bottom_x, left_bottom_y, right_top_x, right_top_y]).reshape(-1)\n gt_bbox = \" \".join(list(map(str, gt_bbox)))\n \n return image, label, gt_bbox, name, orig\n\n def __len__(self):\n if self.val:\n return len(self.val2_names)\n else:\n return len(self.index_list)\n\n\n\n\n\n\n\n\n","repo_name":"164140757/SCM","sub_path":"lib/datasets/cub.py","file_name":"cub.py","file_ext":"py","file_size_in_byte":7461,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"54"} +{"seq_id":"31846021065","text":"# https://practice.geeksforgeeks.org/problems/nth-fibonacci-number1335/1\nn = int(input(\"Fib? \"))\n\n# O(2^n) time complexity.\n\n\ndef recursion(n):\n if n == 0 or n == 1:\n return n\n return recursion(n-1) + recursion(n-2)\n\n\n# recursion+table\n# overlapping subproblems reduced.\ndp = [-1 for _ in range(n+1)]\n\n\ndef topDown(n):\n if n == 0 or n == 1:\n return n\n\n if dp[n] != -1:\n return dp[n]\n\n dp[n] = topDown(n-1) + topDown(n-2)\n return dp[n]\n\n# tabulation with no recursion, just iteration\n# O(n) time complexity and space complexity.\n\n\ndef bottomUp(n):\n if n == 0 or n == 1:\n return n\n dp = [0 for _ in range(n+1)]\n dp[0] = 0\n dp[1] = 1\n for i in range(2, n+1):\n dp[i] = dp[i-1] + dp[i-2]\n return dp[n]\n\n\nprint(f\"Pure recursion: {recursion(n)}. \")\nprint(f\"TopDown dp: {topDown(n)}. \")\nprint(f\"BottomUp dp: {bottomUp(n)}. \")\n","repo_name":"okmd/leetcode","sub_path":"450Q/nth-fibonacci-number1335.py","file_name":"nth-fibonacci-number1335.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"71482869281","text":"# -*- coding: utf-8 -*-\n# Created: 4 July 2018\n# Author: Ashton S. 
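# The bottom-up table in the Fibonacci snippet above only ever reads its last
# two entries, so it reduces to O(1) space; a small sketch:
def fib_const_space(n):
    a, b = 0, 1            # fib(0), fib(1)
    for _ in range(n):
        a, b = b, a + b    # slide the two-value window forward
    return a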
Reimer\n\nimport re\nimport os\nimport glob\nimport tables\nimport numpy as np\nfrom datetime import datetime\nimport tempfile\nimport configparser as ConfigParser\nimport traceback\nimport sys\nimport argparse\nfrom .plot_nenotr import do_plots\nfrom .make_nenotr_plots import replot_pcolor_all\nfrom .repack import Repackh5\nfrom .datahandler import DataHandler\n\n# output file definition\nh5paths = [['/ProcessingParams','Experiment Parameters'],\n ['/NeFromPower','Electron density From Power'],\n ['/Site','Site Parameters'],\n ['/Time','Time Information'],\n #['Antenna','/Antenna','Antenna Motion Parameters'],\n ]\n\nh5attribs = {'/BeamCodes' : [('TITLE','BeamCodes'),('Description','Beamcode array'),('Size','Nbeams x 4 (Beamcode, Azimuth (degrees), Elevation (degrees), System constant (m^5/s)')],\n '/NeFromPower/Altitude' : [('TITLE','Altitude'),('Unit','Meters'),('Description','Altitude assuming local flat Earth.')],\n '/NeFromPower/Range' : [('TITLE','Range'),('Unit','Meters')],\n '/NeFromPower/Ne_NoTr' : [('TITLE','Raw Electron Density'),('Description','Electron density from power. May contain range aliased F region and/or other range dependent noise sources.'),('Unit','m^{-3}'),('Size','Nrecords x Nbeams x Nranges')],\n '/NeFromPower/errNe_NoTr' : [('TITLE','Error in Raw Electron Density'),('Unit','m^{-3}'),('Size','Nrecords x Nbeams x Nranges')],\n '/NeFromPower/SNR' : [('TITLE','Signal to Noise Ratio'),('Description','SNR from power'),('Size','Nrecords x Nbeams x Nranges')],\n '/NeFromPower/errSNR' : [('TITLE','Error in Signal to Noise Ratio'),('Size','Nrecords x Nbeams x Nranges')],\n '/ProcessingParams/ProcessingTimeStamp' : [('TITLE','Processing Time Stamp')],\n '/ProcessingParams/BaudLength' : [('TITLE','Baud Length'),('Unit','Seconds')],\n '/ProcessingParams/PulseLength' : [('TITLE','Pulse Length'),('Unit','Seconds')],\n '/ProcessingParams/RxFrequency' : [('TITLE','Rx Frequency'),('Description','Receive frequency'),('Unit','Hertz')],\n '/ProcessingParams/TxFrequency' : [('TITLE','Tx Frequency'),('Description','Transmit frequency'),('Unit','Hertz')],\n '/ProcessingParams/TxPower' : [('TITLE','Tx Power'),('Unit','Watts'),('Description','Average transmit power over integration'),('Size','Nrecords')],\n '/ProcessingParams/AeuRx' : [('TITLE','Rx AEUs'),('Description','Number of AEUs on receive'),('Size','Nrecords')],\n '/ProcessingParams/AeuTx' : [('TITLE','Tx AEUs'),('Description','Number of AEUs on transmit'),('Size','Nrecords')],\n '/ProcessingParams/AeuTotal' : [('TITLE','Total AEUs'),('Description','Total number of system AEUs'),('Size','Nrecords')],\n '/Site/Altitude' : [('TITLE','Altitude'),('Description','Altitude of site'),('Unit','Meters')],\n '/Site/Code' : [('TITLE','Site Code')],\n '/Site/Latitude' : [('TITLE','Latitude'),('Description','Latitude of site'),('Unit','Degrees North')],\n '/Site/Longitude' : [('TITLE','Longitude'),('Description','Longitude of site'),('Unit','Degrees East')],\n '/Site/Name' : [('TITLE','Name'),('Description','Site Name')],\n '/Time/Day' : [('TITLE','Day of Month'),('Size','Nrecords x 2 (Start and end of integration')],\n '/Time/Month' : [('TITLE','Month'),('Size','Nrecords x 2 (Start and end of integration')],\n '/Time/Year' : [('TITLE','Year'),('Size','Nrecords x 2 (Start and end of integration')],\n '/Time/doy' : [('TITLE','Day of Year'),('Size','Nrecords x 2 (Start and end of integration')],\n '/Time/UnixTime' : [('TITLE','Unix Time'),('Size','Nrecords x 2 (Start and end of integration'),('Unit','Seconds')],\n # '/Antenna/AvgAzimuth' : 
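# These path -> (attribute, value) tables are later replayed onto the output
# file with PyTables' set_node_attr. A minimal sketch of that pattern on a
# throwaway file (demo.h5 and the /Data node are made up for illustration):
import tables
attribs = {'/Data': [('TITLE', 'Demo'), ('Unit', 'm')]}
with tables.open_file('demo.h5', 'w') as h5:
    h5.create_array('/', 'Data', [1, 2, 3])
    for path, pairs in attribs.items():
        for name, value in pairs:
            h5.set_node_attr(path, name, value)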
[('TITLE','Average Azimuth Angle'),('Description','Average azimuth angle over integration'),('Size','Nrecords'),('Unit','Degrees')],\n # '/Antenna/AvgElevation' : [('TITLE','Average Elevation Angle'),('Description','Average elevation angle over integration'),('Size','Nrecords'),('Unit','Degrees')],\n # '/Antenna/Azimuth' : [('TITLE','Azimuth Angle'),('Description','Azimuth angle range over integration'),('Size','Nrecords x 2'),('Unit','Degrees')],\n # '/Antenna/Elevation' : [('TITLE','Elevation Angle'),('Description','Elevation angle range over integration'),('Size','Nrecords x 2'),('Unit','Degrees')],\n # '/Antenna/Event' : [('TITLE','Event'),('Description','Antenna event over integration'),('Size','Nrecords')],\n # '/Antenna/Mode' : [('TITLE','Mode'),('Description','Antenna mode over integration'),('Size','Nrecords')],\n }\n\n\n\nclass CalcNeNoTr(object):\n def __init__(self,configfile):\n\n # read the config file\n print(\"Reading configuration file...\")\n self.configfile = configfile\n self.config = self.parse_config()\n\n # find files using config information\n print(\"Finding files...\")\n self.filelists = self.find_files()\n\n # initialize the data handlers\n print(\"Checking available data...\")\n self.num_freqs = len(self.filelists)\n self.datahandlers = [DataHandler(self.filelists[i]) for i in range(self.num_freqs)]\n\n # get experiment mode name\n self.mode_name = self.get_mode_name()\n print(\"Experiment mode is %s\" % self.mode_name)\n\n # if a ksys file was provided, try to load it.\n print(\"Checking ksys file...\")\n if self.config['input'].get('ksys_file', None) is None:\n self.ksys = self.__load_ksys_from_data()\n self.calibrated = False\n print(\"No file provided. Ksys loaded from data.\")\n else:\n self.ksys = self.__load_ksys_from_file()\n self.calibrated = True\n print(\"Ksys file loaded.\")\n\n # find all unique times from the data handlers and then\n # determine the integration periods to calculate Ne_NoTr for\n print(\"Calculating integration periods...\")\n self.times = self.get_unique_times()\n self.integration_periods = self.get_integration_periods()\n\n # if no ambiguity function file is in the config, check if one is in \n # the input files\n if self.config['input'].get('amb_path',None) is None:\n self.ambiguity = self.__load_amb_from_data() # TODO! 
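# The ksys table is an Nbeams x 4 array (beamcode, azimuth, elevation, system
# constant), so the per-beam lookup used later reduces to matching on the
# first column. A toy illustration (the values are made up):
import numpy as np
ksys = np.array([[64157.0,  0.0, 90.0, 4.0e-20],
                 [64280.0, 15.0, 70.0, 3.8e-20]])
row = np.where(ksys[:, 0] == 64280)[0][0]
print(ksys[row, 3])   # system constant for that beam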
TEST THIS ON MSWINDS26.v03\n else:\n self.ambiguity = self.__load_amb_from_file()\n\n # make sure output directory is available and if not create it\n print(\"Validating output directory...\")\n output_dir = self.config['output']['output_path']\n if not os.path.exists(output_dir):\n print(\" Output directory doesn't exist!\")\n print(\" Attempting to create one...\")\n os.makedirs(output_dir)\n\n\n def __load_ksys_from_data(self):\n fname = self.datahandlers[0].filelist[0]\n \n with tables.open_file(fname,'r') as h5:\n beamcodemap = h5.root.Setup.BeamcodeMap.read()\n\n return np.array(beamcodemap)\n\n\n def __load_ksys_from_file(self):\n return np.loadtxt(self.config['input']['ksys_file'])\n\n\n\n def __load_amb_from_data(self):\n fname = self.datahandlers[0].filelist[0]\n amb_path = \"%s/Data/\" % self.datahandlers[0].nenotr_mode\n \n with tables.open_file(fname,'r') as h5:\n node = h5.get_node(amb_path)\n bandwidth = node.Ambiguity.Bandwidth.read()\n wlag =node.Ambiguity.Wlag.read()\n\n return {'bandwidth': bandwidth, 'wlag': wlag}\n\n\n def __load_amb_from_file(self):\n fname = self.config['input']['amb_path']\n with tables.open_file(fname,'r') as h5:\n bandwidth = h5.root.Bandwidth.read()\n wlag = h5.root.Wlag.read()\n\n return {'bandwidth': bandwidth, 'wlag': wlag}\n\n\n def parse_config(self):\n required_sections_options = {'default': {'integ': str},\n 'input': {'file_paths': str},\n 'output': {'output_name':str,\n 'output_path': str,\n },\n 'nenotr_options': {'mean_or_median': str,\n 'recs2integrate': int},\n }\n\n optional_sections_options = {'input': {'amb_path': str,\n 'ksys_file': str\n },\n }\n\n # read the config file and convert to dictionary\n parser = ConfigParser.ConfigParser()\n parser.read(self.configfile)\n parsed_config = self.__config_to_dict_helper(parser)\n\n # check the config file to make sure we have all required information\n for section in required_sections_options.keys():\n if parsed_config.get(section,None) is None:\n msg = 'Required section: \"%s\" is missing from config.' % section\n raise AttributeError(msg)\n for option in required_sections_options[section].keys():\n if parsed_config[section].get(option,None) is None:\n msg = 'Required option: \"%s\" is missing' % option\n msg += ' from the \"%s\" section in the config.' 
% section\n raise AttributeError(msg)\n\n # convert the input config data to the required format\n type_func = required_sections_options[section][option]\n converted = type_func(parsed_config[section][option])\n parsed_config[section][option] = converted\n\n # make sure optional options are formatted as required\n for section in optional_sections_options.keys():\n for option in optional_sections_options[section].keys():\n # convert the input config data to the required format\n type_func = optional_sections_options[section][option]\n try:\n converted = type_func(parsed_config[section][option])\n parsed_config[section][option] = converted\n except KeyError:\n pass\n\n return parsed_config\n\n\n def get_unique_times(self):\n all_times = list()\n for i in range(self.num_freqs):\n all_times.extend(list(self.datahandlers[i].times))\n all_times = [tuple(x) for x in all_times]\n unique_times = np.array(sorted(list(set(list(all_times)))))\n\n # now detect time pairs that have 0 difference in start or end time\n # sometimes raw files don't have exactly the same time windows...\n if self.num_freqs > 1:\n diffs = np.diff(unique_times,axis=0) # diff the start and end times\n diffs = np.array([[x[0].total_seconds(),x[1].total_seconds()] for x in diffs])\n inds = np.where(~((diffs[:,0] == 0) | (diffs[:,1] == 0)))[0] # exclude times where start or end diffs are 0\n unique_times = unique_times[inds,:]\n\n return unique_times\n\n\n def get_integration_periods(self):\n integration_periods = list()\n start_time = None\n integration_time = self.config['nenotr_options']['recs2integrate']\n num_times = len(self.times)\n for i,time_pair in enumerate(self.times):\n temp_start_time, temp_end_time = time_pair\n if start_time is None:\n start_time = temp_start_time\n time_diff = (temp_end_time - start_time).total_seconds()\n\n if time_diff >= integration_time:\n integration_periods.append([start_time,temp_end_time])\n start_time = None\n continue\n\n # Add an integration period for when we are at the end of the files\n # but we haven't reached the requested integration time\n if (i == num_times -1):\n integration_periods.append([start_time,temp_end_time])\n\n return np.array(integration_periods)\n\n\n def find_files(self):\n # we need to find all files that match the search strings\n # and check every input file path for them\n search_paths_by_freq = self.config['input']['file_paths'].split(',')\n num_freqs = len(search_paths_by_freq)\n\n filelists = [[] for x in range(num_freqs)]\n for i in range(num_freqs):\n paths = search_paths_by_freq[i].split(':')\n temp = list()\n for path in paths:\n files_found = glob.glob(path)\n num_files_found = len(files_found)\n if num_files_found == 0:\n print('No files matching \"%s\"' % str(path))\n temp.extend(glob.glob(path))\n\n filelists[i].extend(sorted(temp))\n\n # Now trim frequencies with no files in them\n i = 0\n while i < len(filelists):\n if len(filelists[i]) == 0:\n filelists.pop(i)\n else:\n i += 1\n num_freqs = len(filelists)\n\n # calculate the total number of files\n num_files = 0\n for i in range(len(filelists)):\n num_files += len(filelists[i])\n\n if num_files == 0:\n raise Exception('No files found!')\n\n filestr = \"files\" if num_files > 1 else \"file\"\n freqstr = \"frequencies\" if num_freqs > 1 else \"frequency\"\n print(\"Found %s %s for %s %s.\" % (num_files,filestr,num_freqs,freqstr))\n\n return filelists\n\n\n def get_mode_name(self):\n # could add a check to make sure all files are from the same experiment mode?\n fname = 
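# get_integration_periods walks the record time pairs and closes a period once
# the accumulated span reaches Recs2integrate seconds. A toy illustration with
# three 10 s records and a 20 s target:
from datetime import datetime, timedelta
t0 = datetime(2018, 7, 4)
recs = [(t0 + timedelta(seconds=10 * k), t0 + timedelta(seconds=10 * k + 10))
        for k in range(3)]
periods, start = [], None
for s, e in recs:
    if start is None:
        start = s
    if (e - start).total_seconds() >= 20:
        periods.append([start, e])
        start = None
if start is not None:          # flush the tail, as the code above does
    periods.append([start, recs[-1][1]])
print(periods)   # [[00:00, 00:20], [00:20, 00:30]]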
self.datahandlers[0].filelist[0]\n print(\"fname in get_mode_name\",fname)\n try:\n with tables.open_file(fname) as h5file:\n expname = h5file.get_node('/Setup/Experimentfile').read()\n print(\"type(expname)\",type(expname))\n if type(expname)==np.ndarray:\n expname=expname[0]\n print(\"type(expname)\",type(expname))\n expname=expname.splitlines()[1].split(b'=')[1] # byte-like obj. python3\n except Exception as e:\n print(\"Could not determine mode name because: %s\" % (str(e)))\n print(\"Defaulting to 'unknown'.\")\n try:\n expname=bytes('unknown') #python2\n except:\n expname=bytes('unknown','utf-8') #python3\n\n return expname\n\n\n @staticmethod\n def __config_to_dict_helper(configparserclass):\n # based on https://gist.github.com/amitsaha/3065184\n # converts a config parser object to a dictionary\n config = dict()\n defaults = configparserclass.defaults()\n sections = configparserclass.sections()\n\n temp = dict()\n for key in defaults:\n temp[key] = defaults[key]\n config['default'] = temp\n default_options = temp.keys()\n\n for section in sections:\n opts = configparserclass.options(section)\n options = [x for x in opts if not x in default_options]\n temp = dict()\n for option in options:\n temp[option] = configparserclass.get(section,option)\n config[section.lower()] = temp\n\n return config\n\n\n # Take all data from all frequencies for each integrated time step,\n # pass this to a function that formats things correctly and then \n # pass the output of that into the Ne_NoTr calculation code\n # This function gets averaged quantities per frequency from another\n # function. Then it combines everything, calculates density, and \n # then calculates the variances in everything.\n # Code doesn't write anything to disk, uses RAM.\n def calculate_nenotr(self):\n if self.config['nenotr_options']['mean_or_median'] == 'mean':\n func = np.nanmean\n if self.config['nenotr_options']['mean_or_median'] == 'median':\n func = np.nanmedian \n\n array_keys = ['signal','snr','calsignal','noise','uncal_signal',\n 'power','calwithnoise','noisepower']\n sample_keys = ['noisepowersamples','powsamples','calsamples']\n nenotr = dict()\n nenotr['snr'] = None\n signal_ranges = None\n num_integrations = len(self.integration_periods)\n\n for i,integration_period in enumerate(self.integration_periods):\n print(\"Integration period %s/%s\" % (str(i+1),str(num_integrations)))\n for j,datahandler in enumerate(self.datahandlers):\n data, _ = datahandler.get_records(integration_period[0],integration_period[1])\n data['ambiguity'] = self.ambiguity\n\n output = self.get_signal_and_snr(data,self.config)\n\n if nenotr['snr'] is None:\n num_beams, num_ranges = output['snr'].shape\n \n for key in array_keys:\n nenotr[key] = np.zeros((num_integrations,num_beams,num_ranges,self.num_freqs))\n for key in sample_keys:\n nenotr[key] = np.zeros((num_integrations,num_beams,num_ranges,self.num_freqs))\n\n nenotr['txpower'] = np.zeros((num_integrations,))\n nenotr['aeurx'] = np.zeros((num_integrations,))\n nenotr['aeutx'] = np.zeros((num_integrations,))\n nenotr['aeutotal'] = np.zeros((num_integrations,))\n nenotr['txfreq'] = np.zeros((num_integrations,self.num_freqs))\n nenotr['rxfreq'] = np.zeros((num_integrations,self.num_freqs))\n\n # quantities that do not vary with time \n nenotr['range'] = data['data']['range']\n nenotr['pulsewidth'] = data['data']['pulsewidth']\n nenotr['baudlength'] = data['data']['txbaud']\n nenotr['bmcodes'] = data['bmcodes']\n\n # save data from different DTCs\n for key in array_keys:\n 
nenotr[key][i,:,:,j] = output[key]\n for key in sample_keys:\n nenotr[key][i,:,:,j] = output[key]\n\n nenotr['txfreq'][i,j] = func(data['txfreq'])\n nenotr['rxfreq'][i,j] = func(data['rxfreq'])\n\n # assume txpower is the same for each channel\n nenotr['txpower'][i] = func(data['txpower'])\n nenotr['aeurx'][i] = func(data['aeurx'])\n nenotr['aeutx'][i] = func(data['aeutx'])\n nenotr['aeutotal'][i] = func(data['aeutotal'])\n\n # figure out the average tx and rx frequency\n nenotr['txfreq'] = func(nenotr['txfreq'])\n nenotr['rxfreq'] = func(nenotr['rxfreq'])\n\n ## NOW CALCULATE DENSITY, SNR, and ERRORS\n\n # first txpower arrays\n nenotr['txpower'] = np.repeat(nenotr['txpower'][:,np.newaxis],num_beams,axis=1)\n nenotr['txpower'] = np.repeat(nenotr['txpower'][:,:,np.newaxis],num_ranges,axis=2)\n nenotr['txpower'] = np.repeat(nenotr['txpower'][:,:,:,np.newaxis],self.num_freqs,axis=3)\n\n # ksys\n required_inds = list()\n for bmcode in nenotr['bmcodes'][0,:]:\n required_inds.append(np.where(self.ksys[:,0] == bmcode)[0][0])\n nenotr['ksys'] = self.ksys[required_inds,3]\n nenotr['ksys'] = np.repeat(nenotr['ksys'][:,np.newaxis],num_ranges,axis=1)\n nenotr['ksys'] = np.repeat(nenotr['ksys'][np.newaxis,:,:],num_integrations,axis=0)\n nenotr['ksys'] = np.repeat(nenotr['ksys'][:,:,:,np.newaxis],self.num_freqs,axis=3)\n elevation = self.ksys[required_inds,2] * np.pi / 180.\n\n # convert received power to nenotr and scale by ambiguity function lag 0\n signal_to_density = np.ones((num_integrations,num_beams,num_ranges,self.num_freqs))\n ranges = np.repeat(nenotr['range'],num_beams,axis=0)\n ranges = np.repeat(ranges[np.newaxis,:,:],num_integrations,axis=0)\n ranges = np.repeat(ranges[:,:,:,np.newaxis],self.num_freqs,axis=3)\n signal_to_density *= 2.0 * ranges**2 \n signal_to_density /= (nenotr['pulsewidth'] * nenotr['txpower'] * nenotr['ksys'])\n nenotr['density'] = nenotr['signal'] * signal_to_density\n nenotr['density'] /= np.sum(np.abs(data['ambiguity']['wlag'][0,:]))\n\n # determine the variances for everything\n uncal_signal = nenotr['uncal_signal']\n noisepower = nenotr['noisepower']\n noise_samples = nenotr['noisepowersamples']\n power = nenotr['power']\n power_samples = nenotr['powsamples']\n calplusnoise = nenotr['calwithnoise']\n cal_samples = nenotr['calsamples']\n cal = nenotr['calsignal']\n\n # variance of noise power estimate\n var_noise = noisepower**2 / noise_samples\n # variance of power estimates with noise removed\n var_uncal_signal = power**2 / power_samples + var_noise\n var_cal = calplusnoise**2 / cal_samples + var_noise\n\n # variance of received (power - noise) / (cal - noise) and density\n var_signal_cal = var_uncal_signal / cal**2 + (uncal_signal**2 * var_cal) / cal**4\n ambiguity_factor = np.sum(np.abs(data['ambiguity']['wlag'][0,:]))\n var_density = var_signal_cal * signal_to_density**2 / ambiguity_factor**2\n\n # variance of signal to noise ratio\n var_snr = var_uncal_signal / noisepower**2 + (uncal_signal**2 * var_noise) / noisepower**4\n\n # save standard deviations for output\n nenotr['edensity'] = np.sqrt(np.sum(var_density,axis=3)/self.num_freqs)\n nenotr['esnr'] = np.sqrt(np.sum(var_snr,axis=3)/self.num_freqs)\n\n # save average electron density and average snr\n nenotr['density'] = func(nenotr['density'],axis=3)\n nenotr['snr'] = func(nenotr['snr'],axis=3)\n\n # trim unnecessary dimensions\n nenotr['txpower'] = np.squeeze(nenotr['txpower'][:,0,0,0])\n\n # convert range into altitude for each beam\n altitude = np.zeros((num_beams,num_ranges))\n elevation = 
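# The scaling above is the usual power-to-density relation,
# Ne = 2 * R**2 * P_signal / (tau * P_tx * ksys), before the lag-0 ambiguity
# normalization. Round numbers give a feel for the magnitudes involved
# (illustrative values only, not from any real experiment):
R, tau = 300e3, 480e-6        # range (m), pulse length (s)
Ptx, ksys = 1.5e6, 4e-20      # transmit power (W), system constant (m^5/s)
Psig = 1e-16                  # calibrated signal power (W)
print(2.0 * R**2 * Psig / (tau * Ptx * ksys))   # ~6.2e11 m^-3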
np.repeat(elevation[:,np.newaxis],num_ranges,axis=1)\n for i in range(len(required_inds)):\n altitude[i,:] = nenotr['range'] * np.sin(elevation[i,:])\n nenotr['altitude'] = altitude\n\n nenotr['bmcodes'] = self.ksys[required_inds,:]\n\n return nenotr\n\n\n @staticmethod\n def get_signal_and_snr(data,config):\n boltzmann = 1.38064852e-23\n # setup the function that will perform mean or median\n if config['nenotr_options']['mean_or_median'] == 'mean':\n func = np.nanmean\n if config['nenotr_options']['mean_or_median'] == 'median':\n func = np.nanmedian\n\n # cal source power in watts\n cal_source_power = data['ambiguity']['bandwidth'] * data['caltemp'] * boltzmann \n\n # get an estimate of the mean/median noise power\n noise = data['noise']['power']\n noise_pi = data['noise']['pulsesintegrated']\n num_times, num_beams, num_ranges = noise.shape\n mean_noise = func(func(noise,axis=2)/noise_pi,axis=0)\n num_noise_samples = np.sum(noise_pi, axis=0) * num_ranges\n\n # get an estimate of the mean/median cal source+noise power (then do measured cal - noise)\n cal = data['cal']['power']\n cal_pi = data['cal']['pulsesintegrated']\n num_times, num_beams, num_ranges = cal.shape\n mean_cal_plus_noise = func(func(cal,axis=2)/cal_pi,axis=0)\n num_cal_samples = np.sum(cal_pi,axis=0) * num_ranges\n mean_cal = (mean_cal_plus_noise - mean_noise) / cal_source_power\n\n # get an estimate of the mean/median signal+noise power (then do sig - noise)\n power = data['data']['power']\n power_pi = data['data']['pulsesintegrated']\n num_times, num_beams, num_ranges = power.shape\n power_pi = np.repeat(power_pi[:,:,np.newaxis],num_ranges,axis=2)\n mean_signal_plus_noise = func(power/power_pi,axis=0)\n\n # make all arrays have the same dimensions\n mean_noise = np.repeat(mean_noise[:,np.newaxis],num_ranges,axis=1)\n num_noise_samples = np.repeat(num_noise_samples[:,np.newaxis],num_ranges,axis=1)\n mean_cal = np.repeat(mean_cal[:,np.newaxis],num_ranges,axis=1)\n num_cal_samples = np.repeat(num_cal_samples[:,np.newaxis],num_ranges,axis=1)\n\n mean_cal_plus_noise = np.repeat(mean_cal_plus_noise[:,np.newaxis],num_ranges,axis=1)\n\n mean_signal = mean_signal_plus_noise - mean_noise\n num_sig_samples = np.sum(power_pi,axis=0)\n\n # get snr\n mean_snr = mean_signal / mean_noise\n\n # get signal and noise in Watts\n mean_signal_cal = mean_signal / mean_cal\n mean_noise_cal = mean_noise / mean_cal\n\n output = dict()\n key_list = ['signal','uncal_signal','snr','calsignal','noise','power',\n 'powsamples','calwithnoise','calsamples','noisepower',\n 'noisepowersamples']\n output_list = [mean_signal_cal,mean_signal,mean_snr,mean_cal,\n mean_noise_cal,mean_signal_plus_noise,num_sig_samples,\n mean_cal_plus_noise,num_cal_samples,mean_noise,\n num_noise_samples]\n for key,outarray in zip(key_list,output_list):\n output[key] = outarray\n\n return output\n\n\n def get_site(self):\n site = dict()\n fname = self.datahandlers[0].filelist[0]\n with tables.open_file(fname,'r') as h5:\n site['altitude'] = h5.root.Site.Altitude.read()\n site['code'] = h5.root.Site.Code.read()\n site['latitude'] = h5.root.Site.Latitude.read()\n site['longitude'] = h5.root.Site.Longitude.read()\n site['name'] = h5.root.Site.Name.read()\n\n return site\n\n\n def get_time(self):\n epoch = datetime(1970,1,1)\n time_shape = self.integration_periods.shape\n time = dict()\n keys = ['day','month','year','doy','unixtime']\n for key in keys:\n time[key] = np.zeros(time_shape,dtype=np.int)\n\n for i,pair in enumerate(self.integration_periods):\n time['day'][i,:] = 
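# The cal source power used above is Johnson noise, P = k_B * T_cal * B.
# For example (illustrative temperature and bandwidth, not instrument values):
boltzmann = 1.38064852e-23               # J/K
print(boltzmann * 1690.0 * 25.0e3)       # T=1690 K, B=25 kHz -> ~5.8e-16 W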
np.array([pair[0].day,pair[1].day])\n time['month'][i,:] = np.array([pair[0].month,pair[1].month])\n time['year'][i,:] = np.array([pair[0].year,pair[1].year])\n time['doy'][i,:] = np.array([pair[0].timetuple().tm_yday,pair[1].timetuple().tm_yday])\n diff_pair = [(pair[0]-epoch).total_seconds(),(pair[1]-epoch).total_seconds()]\n time['unixtime'][i,:] = diff_pair\n\n return time\n\n\n def run(self, do_calc = True, plot_type = \"0\"):\n\n output_file = self.config['output']['output_name']\n\n if do_calc:\n # First check if output file is able to be created\n temp_file = tempfile.mktemp()\n # Run the calculator and write output to a file\n output = self.calculate_nenotr()\n\n # get Site information\n site = self.get_site()\n # get Time information\n time = self.get_time()\n\n # Get current date and time\n date = datetime.utcnow()\n processing_time = date.strftime(\"%a, %d %b %Y %H:%M:%S +0000\")\n\n # Write the output\n # set up the output file\n print(\"Writing data to file...\")\n with tables.open_file(temp_file,'w') as h5:\n for h5path in h5paths:\n group_path, group_name = os.path.split(h5path[0])\n h5.create_group(group_path,group_name,title=h5path[1],createparents=True)\n\n node_path = '/ProcessingParams'\n h5.create_array(node_path,'AeuRx',output['aeurx'],createparents=True)\n h5.create_array(node_path,'AeuTotal',output['aeutotal'],createparents=True)\n h5.create_array(node_path,'AeuTx',output['aeutx'],createparents=True)\n h5.create_array(node_path,'BaudLength',output['baudlength'],createparents=True)\n h5.create_array(node_path,'ProcessingTimeStamp',np.array(processing_time),createparents=True)\n h5.create_array(node_path,'PulseLength',output['pulsewidth'],createparents=True)\n h5.create_array(node_path,'RxFrequency',output['rxfreq'],createparents=True)\n h5.create_array(node_path,'TxFrequency',output['txfreq'],createparents=True)\n h5.create_array(node_path,'TxPower',output['txpower'],createparents=True)\n\n node_path = '/Site'\n h5.create_array(node_path,'Altitude',site['altitude'],createparents=True)\n h5.create_array(node_path,'Code',site['code'],createparents=True)\n h5.create_array(node_path,'Latitude',site['latitude'],createparents=True)\n h5.create_array(node_path,'Longitude',site['longitude'],createparents=True)\n h5.create_array(node_path,'Name',site['name'],createparents=True)\n\n node_path = '/NeFromPower'\n h5.create_array(node_path,'Altitude',output['altitude'],createparents=True)\n h5.create_array(node_path,'Range',output['range'],createparents=True)\n h5.create_array(node_path,'Ne_NoTr',output['density'],createparents=True)\n h5.create_array(node_path,'errNe_NoTr',output['edensity'],createparents=True)\n h5.create_array(node_path,'SNR',output['snr'],createparents=True)\n h5.create_array(node_path,'errSNR',output['esnr'],createparents=True)\n\n node_path = '/Time'\n h5.create_array(node_path,'Day',time['day'],createparents=True)\n h5.create_array(node_path,'Month',time['month'],createparents=True)\n h5.create_array(node_path,'Year',time['year'],createparents=True)\n h5.create_array(node_path,'doy',time['doy'],createparents=True)\n h5.create_array(node_path,'UnixTime',time['unixtime'],createparents=True)\n\n # beamcodes\n h5.create_array('/','BeamCodes',output['bmcodes'],createparents=True)\n\n # radar mode\n h5.create_array('/','RadarMode',self.mode_name,createparents=True)\n\n # Add calibration information\n print(\"Adding calibration information...\")\n if self.calibrated:\n cal_file = self.config['input']['ksys_file']\n cal_method = 
self.config['input']['calibration_method']\n else:\n cal_file = 'None'\n cal_method = 'uncalibrated'\n cal_data = self.ksys\n self.add_calibration_info(temp_file,cal_data,cal_file,cal_method)\n\n # Add configuration information\n print(\"Adding configuration information...\")\n rawfiles = [x.filelist for x in self.datahandlers]\n self.write_config_info(temp_file,rawfiles)\n\n with tables.open_file(temp_file,'r+') as h5:\n for key in h5attribs.keys():\n for attr in h5attribs[key]:\n # print \n h5.set_node_attr(key,attr[0],attr[1])\n # try: h5.set_node_attr(key,attr[0],attr[1])\n # except: ''\n\n # repack the file with compression\n print(\"Repacking the file with compression...\")\n repack = Repackh5(temp_file,output_file)\n repack.repack()\n # remove the temporary file\n print(\"Cleaning up...\")\n os.remove(temp_file)\n print(\"Making plots...\")\n try:\n output_dir = self.config['output']['output_path']\n dirname = 'plots_nenotr_' + self.config['default']['integ']\n plots_dir = os.path.join(output_dir,dirname)\n os.makedirs(plots_dir, exist_ok=True)\n if plot_type == \"0\":\n replot_pcolor_all(output_file,saveplots=1,opath=plots_dir)\n elif plot_type == \"1\":\n do_plots(output_file,plots_dir)\n except Exception as e:\n print(\"Plotting failed: %s\" % str(e))\n print(''.join(traceback.format_exception(*sys.exc_info())))\n\n print(\"Done!\")\n\n\n\n def add_calibration_info(self,fname,cal_data,cal_fname,cal_method):\n t = datetime.utcnow()\n datestr = t.strftime(\"%Y-%m-%d\")\n # corrChirp=True; chirpCorr=-20.0 #Only used for old RISR-N data that had a transmitter problem\n\n # Open the fitted h5 file\n with tables.open_file(fname,'r+') as h5:\n # Add the current date to state when the calibration was done\n node_path = '/Calibration'\n h5.create_array(node_path,'CalDate',np.array(datestr),title='Calibration Date',createparents=True)\n # Include the calibration info in the calibration file\n h5.create_array(node_path,'CalDataBeam',cal_data,title='Calibration (ksys) Array',createparents=True)\n # Include the calibration filename\n h5.create_array(node_path,'CalFileBeam',np.array(os.path.basename(cal_fname)),title='Calibration (ksys) filename',createparents=True)\n # Specify the calibration method\n h5.create_array(node_path,'CalibrationMethod',np.array(cal_method),title='Method used for calibration.',createparents=True)\n\n\n def write_config_info(self,h5name,raw_files):\n import platform\n import getpass\n\n # Configuration Information\n #Version Number: Follows convention: major.minor.year.month.day\n from . 
import __version__\n version = __version__\n\n # Computer information:\n PythonVersion = platform.python_version()\n Type = platform.machine()\n System = \"%s %s %s\" % (platform.system(),platform.release(),platform.version())\n User = getpass.getuser()\n Hostname = platform.node()\n if len(Hostname) == 0:\n import socket\n Hostname = socket.gethostname()\n\n # Get the config file used\n cf = self.configfile\n Path = os.path.dirname(os.path.abspath(cf))\n Name = os.path.basename(cf)\n\n with open(cf,'r') as f:\n Contents = \"\".join(f.readlines())\n\n # Record the raw files used\n # Make a string listing all the files\n RawFiles = ''\n for i,files in enumerate(raw_files):\n temp = \"\\n\".join(files)\n if i != 0:\n RawFiles += '\\n'\n RawFiles += temp\n\n # Record the directory where fitted files can be found\n OutputPath = os.path.abspath(self.config['output']['output_path'])\n\n # Open the fitted h5 file\n with tables.open_file(h5name,'r+') as h5:\n node_path = '/ProcessingParams'\n h5.create_group(node_path,'ComputerInfo',title='Processing Computer Information',createparents=True)\n h5.create_group(node_path,'ConfigFiles',title='Config File Information',createparents=True)\n h5.create_array(node_path,'SoftwareVersion',np.array(version),title='Version of software that made this file',createparents=True)\n h5.create_array(node_path,'RawFiles',np.array(RawFiles),title='The raw files used to produce this file',createparents=True)\n h5.create_array(node_path,'OutputPath',np.array(OutputPath),title='Path where this file was originally made',createparents=True)\n node_path = '/ProcessingParams/ComputerInfo'\n h5.create_array(node_path,'PythonVersion',np.array(PythonVersion),title='Version of python used',createparents=True)\n h5.create_array(node_path,'Type',np.array(Type),title='Type of operating system',createparents=True)\n h5.create_array(node_path,'System',np.array(System),title='System information',createparents=True)\n h5.create_array(node_path,'User',np.array(User),title='Username',createparents=True)\n h5.create_array(node_path,'Host',np.array(Hostname),title='Hostname of the computer',createparents=True)\n node_path = '/ProcessingParams/ConfigFiles/File1'\n h5.create_array(node_path,'Name',np.array(Name),createparents=True)\n h5.create_array(node_path,'Path',np.array(Path),createparents=True)\n h5.create_array(node_path,'Contents',np.array(Contents),createparents=True)\n\n\nconfig_file_help = \"\"\"Calculate electron density from level 1 coherently coded data\nfiles. The code requires power estimates of signal, noise, and cal.\n\nRequires a configuration file containing the following example format:\n[DEFAULT]\n#optional variables to use in substitutions below\nEXPNAME=20171003.001\nINTEG=20sec\n[NENOTR_OPTIONS]\n#number of seconds of data to integrate\nRecs2integrate=20\n#use mean or median\nmean_or_median=median\n[INPUT]\n#input paths (separate frequencies with comma,\n# separate searches within same frequency with colons)\n# example 2 frequency, 2 search path per frequency\nfile_paths=/path1/*.dt0.h5:/path2/*.dt0.h5,/path1/*.dt1.h5:/path2/*.dt1.h5\n# Optional: Path to file containing lag ambiguity function\nAMB_PATH=/home/asreimer/temp/ne_pow/AmbFunc.h5\n# Optional: Path to the system constant file. 
If not provided, loaded from data\n#ksys_file=/home/asreimer/temp/ne_pow/cal-201710-calibration-ksys-10.05.2017.txt\n#calibration_method=plasma line\n[OUTPUT]\n# Output path\nOUTPUT_PATH=/path/to/output/directory/%%(EXPNAME)s\n# Output filename\nOUTPUT_NAME=%%(OUTPUT_PATH)s/%%(EXPNAME)s_nenotr_%%(INTEG)s.h5\n\"\"\"\n\n\n# a function to run this file as a script\ndef main():\n\n # Build the argument parser tree\n parser = argparse.ArgumentParser(description=config_file_help,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('config_file',help='A configuration file.')\n parser.add_argument('--only_plot', action=argparse.BooleanOptionalAction,\n help='Read an existing file and only replot.')\n parser.add_argument('--plot_type', default='0',\n help='0: original plot routine, 1: shared x axis.')\n args = vars(parser.parse_args())\n nenotr = CalcNeNoTr(args['config_file'])\n if args['only_plot']:\n nenotr.run(do_calc=False, plot_type = args['plot_type'])\n else:\n nenotr.run(do_calc=True, plot_type = args['plot_type'])\n\n\n\n# Run as a script\nif __name__ == '__main__':\n main()\n","repo_name":"amisr/nenotr","sub_path":"nenotr/nenotr.py","file_name":"nenotr.py","file_ext":"py","file_size_in_byte":38607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"40227488095","text":"import sys\r\n\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import *\r\n\r\nclass book (QWidget) :\r\n def __init__(self, name) : # data needed at creation is given as constructor parameters\r\n super().__init__()\r\n\r\n self.name = str(name) # this widget's unique name, set at creation\r\n \r\n self.order = 0 # widget identification number; unique per widget, set at creation, immutable\r\n\r\n # set by the user for each widget kind, cannot change at runtime\r\n self.kind = \"book\" # the widget kind, same as the widget file name\r\n self.size_x = 200 # widget width\r\n self.size_y = 80 # widget height\r\n\r\n # set by the user for each widget kind, may change at runtime\r\n self.is_connected = False # whether this widget has been connected\r\n self.is_connecting = True # whether this widget needs widgets to connect to\r\n self.list_widget_connected = [] # list of connected widgets in use; only needed for connecting widgets\r\n\r\n self.resize(self.size_x, self.size_y) # set the size\r\n\r\n ######################## widget body #######################\r\n self.font = QFont(\"Arial\", 10, QFont.Bold)\r\n self.font.setPixelSize(14)\r\n self.setStyleSheet(\"background-color : #FFFFFFFF\")\r\n\r\n self.max_page = 0\r\n self.now_page = 0\r\n self.widget_memo = None\r\n self.widget_paint = None\r\n self.book_data = []\r\n\r\n self.label = QLabel(\"\", self)\r\n self.label.setFont(self.font)\r\n self.label.resize(120, 30)\r\n self.label.move(40, 10)\r\n self.label.setAlignment(Qt.AlignCenter)\r\n\r\n self.new_btn = QPushButton(\"New Page\",self)\r\n self.new_btn.clicked.connect(self.new_page)\r\n self.new_btn.setFont(self.font)\r\n self.new_btn.resize(100, 30)\r\n self.new_btn.move(50, 40)\r\n\r\n self.prev_btn = QPushButton(\"Prev\",self)\r\n self.prev_btn.clicked.connect(self.prev_page)\r\n self.prev_btn.setFont(self.font)\r\n self.prev_btn.resize(50, 30)\r\n self.prev_btn.move(10, 40)\r\n\r\n self.next_btn = QPushButton(\"Next\",self)\r\n self.next_btn.clicked.connect(self.next_page)\r\n self.next_btn.setFont(self.font)\r\n self.next_btn.resize(50, 30)\r\n self.next_btn.move(140, 40)\r\n\r\n\r\n ##################### end of widget body #######################\r\n\r\n\r\n ######################### required functions #############################\r\n # Required functions: editData, getData, setData, getOrder, setOrder, getInfo, \r\n # getSize, getName, setName, getKind\r\n # These functions must always exist, and \r\n # except for editData their parameters and return values must not be changed\r\n # Their bodies may be extended as needed\r\n\r\n def editData(self, name) :\r\n # edit specific pieces of widget data\r\n self.name = str(name)\r\n #self.size_x = int(width)\r\n #self.size_y = int(height)\r\n\r\n # adjust the widget according to the incoming data\r\n #self.resize(self.size_x, self.size_y)\r\n\r\n def getData(self) : # return the widget's data as a dictionary\r\n self.save_page()\r\n\r\n data = {}\r\n data[\"name\"] = self.name\r\n data[\"order\"] = self.order\r\n data[\"kind\"] = self.kind\r\n data[\"size_x\"] = self.size_x\r\n data[\"size_y\"] = self.size_y\r\n data[\"is_connected\"] = self.is_connected\r\n data[\"is_connecting\"] = self.is_connecting\r\n\r\n list_connection = []\r\n dict_memo = {}\r\n dict_memo[\"name\"] = self.widget_memo.getName()\r\n dict_memo[\"kind\"] = self.widget_memo.getKind()\r\n list_connection.append(dict_memo)\r\n dict_paint = {}\r\n dict_paint[\"name\"] = self.widget_paint.getName()\r\n dict_paint[\"kind\"] = self.widget_paint.getKind()\r\n list_connection.append(dict_paint)\r\n data[\"connection\"] = list_connection\r\n\r\n data[\"etc\"] = self.book_data\r\n return data\r\n\r\n def setData(self, data) : # reset all widget data from a dictionary\r\n self.name = data[\"name\"]\r\n self.order = data[\"order\"]\r\n #self.kind = data[\"kind\"] # data that does not need to be reset\r\n #self.size_x = data[\"size_x\"]\r\n #self.size_y = data[\"size_y\"]\r\n self.is_connected = data[\"is_connected\"]\r\n #self.is_connecting = data[\"is_connecting\"] # data that does not need to be reset\r\n\r\n # widget connections are set by the function that loads saved data\r\n self.book_data = data[\"etc\"]\r\n self.max_page = len(self.book_data)\r\n # adjust the widget according to the incoming data\r\n #self.resize(self.size_x, self.size_y)\r\n self.now_page = 0\r\n self.show_page(self.now_page)\r\n\r\n def getOrder(self) : return self.order # return the order value\r\n\r\n def setOrder(self, order) : self.order = int(order) # set the order value\r\n\r\n def getInfo(self) :\r\n info = \"An encyclopedia widget holding pictures and their descriptions\\n\"\r\n info = info + \"Requires connections to memo and paint widgets\"\r\n return info\r\n\r\n def getSize(self) : return (self.size_x, self.size_y)\r\n\r\n def getName(self) : return self.name\r\n\r\n def setName(self, name) : self.name = str(name)\r\n\r\n def getKind(self) : return self.kind\r\n\r\n\r\n ######################## connecting-widget required functions #########################\r\n # functions that any widget which uses other widgets' features must have\r\n # runs after the constructor __init__ finishes, whenever is_connecting is true\r\n # the widgets to connect are passed as parameters\r\n # parameter names must match the file names of the widgets being connected\r\n\r\n def setConnection(self, memo, paint) :\r\n self.widget_memo = memo\r\n self.widget_paint = paint\r\n self.new_page()\r\n\r\n def getConnection(self) :\r\n connected_widget = []\r\n connected_widget.append(self.widget_memo)\r\n connected_widget.append(self.widget_paint)\r\n return connected_widget\r\n\r\n\r\n ##################### user functions #########################\r\n # show a specific page\r\n def show_page(self, n) :\r\n self.widget_memo.set_text(self.book_data[n][\"memo\"])\r\n self.widget_paint.set_image(self.book_data[n][\"paint\"])\r\n self.set_label_page(n)\r\n\r\n def prev_page(self) :\r\n if self.now_page - 1 >= 0 : # a previous page must exist\r\n # save the current page data\r\n self.save_page()\r\n\r\n # show the previous page\r\n self.now_page -= 1\r\n self.show_page(self.now_page)\r\n else : # no previous page exists, so do nothing\r\n #print(\"not have prev page\")\r\n pass \r\n\r\n def next_page(self) :\r\n if self.now_page + 1 < self.max_page : # a next page must exist\r\n # save the current page data\r\n self.save_page()\r\n\r\n # show the next page\r\n self.now_page += 1\r\n self.show_page(self.now_page)\r\n else : # no next page exists, so do nothing\r\n #print(\"not have next page\")\r\n pass \r\n\r\n 
def new_page(self) :\r\n if self.max_page != 0 :\r\n self.save_page()\r\n\r\n self.max_page = self.max_page + 1\r\n self.now_page = self.max_page - 1\r\n \r\n self.widget_memo.clear_text()\r\n self.widget_paint.clear_image()\r\n \r\n page_data = {}\r\n page_data[\"memo\"] = self.widget_memo.get_text()\r\n page_data[\"paint\"] = self.widget_paint.get_image()\r\n \r\n self.book_data.append(page_data)\r\n self.set_label_page(self.now_page)\r\n\r\n def set_label_page(self, page) :\r\n s = \"현재 페이지 : \" + str(page + 1)\r\n self.label.setText(s)\r\n\r\n def save_page(self) :\r\n if self.now_page in range(0, self.max_page) :\r\n self.book_data[self.now_page][\"memo\"] = self.widget_memo.get_text()\r\n self.book_data[self.now_page][\"paint\"] = self.widget_paint.get_image()","repo_name":"kimdonghyun805/ASDF","sub_path":"WidgetFiles/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":8379,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17191339413","text":"from os.path import abspath\nfrom os import walk\n\nclass Avatars:\n def __init__(self):\n # Номера доступных и недоступных образов\n self.public = self.secret = 0\n\n # Назначаем путь к изображениям аватаров\n path = abspath(__file__).split('\\\\')[:-2]\n self.path = '\\\\'.join(path + ['GUI', 'Avatars'])\n\n self.PublicAvatar = self.get_AvatarList('Public')\n self.SecretAvatar = self.get_AvatarList('Secret')\n\n def get_AvatarList(self, type):\n for path, dirnames, filenames in walk(f'{self.path}/{type}/'):\n avatarsID = sorted({int(i[:-4]) for i in filenames})\n\n Avatars = {}\n for i in range(len(avatarsID)):\n Avatars[i] = {'id': avatarsID[i],\n 'image': f'{self.path}\\\\{type}\\\\{avatarsID[i]}.png'}\n\n return Avatars\n\n def get_PublicAvatar(self, increment=1):\n self.public = (self.public + increment) % len(self.PublicAvatar)\n\n return self.PublicAvatar[self.public]\n\n def get_SecretAvatar(self, increment=1):\n self.secret = (self.secret + increment) % len(self.SecretAvatar)\n\n return self.SecretAvatar[self.secret]","repo_name":"DDTE2/ClearMode_Help","sub_path":"Algorithms/AvatarList.py","file_name":"AvatarList.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3703913505","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport attr\n\nfrom .clip import AudioClip, InstrumentClip\nfrom .instrument import AudioTrack, MidiChannel, CvChannel, Sound, Kit\nfrom .util import ElementGetter, SAMPLE_RATE_HZ, PPQN, SECONDS_PER_MINUTE\n\n\n@attr.s\nclass Project(object):\n tempo = attr.ib()\n instruments = attr.ib(factory=list)\n sections = attr.ib(factory=list)\n clips = attr.ib(factory=list)\n arrange_only_clips = attr.ib(factory=list)\n\n @classmethod\n def from_element(cls, element):\n with ElementGetter(element) as e:\n deluge_project = Project(\n tempo=deluge_timer_to_tempo(\n e.get_attrib(\"timePerTimerTick\", int),\n e.get_attrib(\"timerTickFraction\", int)),\n instruments=e.get_child(\"instruments\", Project._parse_instruments,\n []),\n sections=e.get_child(\"sections\", Project._parse_sections, []),\n clips=e.get_child(\"sessionClips\", Project._parse_clips, []),\n arrange_only_clips=e.get_child(\"arrangementOnlyTracks\",\n Project._parse_clips, []))\n\n return deluge_project\n\n @staticmethod\n def _parse_instruments(element):\n with ElementGetter(element) as e:\n return e.get_any_children({\n \"audioTrack\": AudioTrack.from_element,\n \"midiChannel\": MidiChannel.from_element,\n \"cvChannel\": CvChannel.from_element,\n \"sound\": Sound.from_element,\n \"kit\": Kit.from_element,\n })\n\n @staticmethod\n def _parse_sections(element):\n # Ignore for now.\n return []\n\n @staticmethod\n def _parse_clips(element):\n with ElementGetter(element) as e:\n return e.get_any_children(\n {\n \"audioClip\": AudioClip.from_element,\n \"instrumentClip\": InstrumentClip.from_element,\n },\n # Append an empty clip to keep the instance indices in order.\n unknown_converter=lambda: None)\n\n\ndef deluge_timer_to_tempo(time_per_timer_tick, timer_tick_fraction):\n # Based on Downrush's calculation.\n # Get the tick fraction in floating point (0 = .0, 0xFFFFFFFF = .9999...)\n timer_tick_fraction = timer_tick_fraction / 0x100000000\n # How many samples are in each pulse.\n samples_per_pulse = time_per_timer_tick + timer_tick_fraction\n\n # Deluge uses 48 PPQN (pulses per quarter note), and a 44100Hz sampling rate.\n #\n # Example for 120 BPM:\n # 44100 samples 1 quarter note 60 seconds 1 pulse 120 quarter notes\n # --------------- * --------------- * ---------- * ------------------- = --------------------\n # 1 second 48 pulses 1 minute 459.375 samples 1 minute\n\n tempo = SAMPLE_RATE_HZ / PPQN * SECONDS_PER_MINUTE / samples_per_pulse\n\n return tempo\n","repo_name":"dcower/pydel","sub_path":"pydel/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"16627606934","text":"#!/bin/python\nfrom Pegasus.DAX3 import *\nimport os\nimport sys\nimport pandas\n\nclass Component(object):\n \"\"\"\n Component parent class.\n \"\"\"\n def __init__(self, matrix, hierarchy=[\"PROJECT\", \"ID\"], name=\"Project\", transferflag=True):\n self.name = name\n self.messages = []\n self.matrix = matrix.copy()\n self.files = {}\n self.hierarchy = hierarchy\n self.initial_steps = []\n self.final_steps = []\n self.transferflag = transferflag\n\n @classmethod\n def get_arg_mappings(cls):\n args = {}\n return args\n\n def add_to_dax(self, dax, process):\n return dax\n\n def save_files(self, root):\n for filename, contents in self.files.iteritems():\n with open(root+\"/\"+filename, \"w\") as f:\n f.write(contents)\n\n def reset_messages(self):\n self.messages = 
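# Hedged, standalone sketch (not part of the original pydel module) that checks the
# 120 BPM worked example from the deluge_timer_to_tempo comment above. The constant
# values are assumptions consistent with that comment (44100 Hz sample rate, 48 PPQN,
# 60 seconds per minute); pydel.util defines names of the same spelling.
SAMPLE_RATE_HZ = 44100
PPQN = 48
SECONDS_PER_MINUTE = 60

def tempo_from_samples_per_pulse(samples_per_pulse):
    # tempo [BPM] = (samples/s) / (pulses per quarter note) * (s/min) / (samples per pulse)
    return SAMPLE_RATE_HZ / PPQN * SECONDS_PER_MINUTE / samples_per_pulse

# 459.375 samples per pulse should come out at exactly 120 BPM
assert abs(tempo_from_samples_per_pulse(459.375) - 120.0) < 1e-9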
[]\n","repo_name":"pegasus-isi/dipa-workflow","sub_path":"components/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"30931259606","text":"import tensorflow as tf\nimport time\nimport math\nimport numpy as np\n\nimport Cifar10Reader\nimport ResNet_Model\n\nflags = tf.flags\nIMAGE_PIXELS = 28\n# 定义默认训练参数和数据路径\nflags.DEFINE_string('train_dir', 'tmp/train', '')\nflags.DEFINE_string('data_dir', 'cifar-10-python/cifar-10-batches-py/', 'Directory for storing mnist data')\nflags.DEFINE_integer('train_steps', 10000, 'Number of training steps to perform')\nflags.DEFINE_integer('batch_size', 100, 'Training batch size ')\nflags.DEFINE_float('learning_rate', 0.01, 'Learning rate')\n# 定义分布式参数\n# 参数服务器parameter server节点\nflags.DEFINE_string('ps_hosts', '192.168.32.145:22221', 'Comma-separated list of hostname:port pairs')\n# 两个worker节点\nflags.DEFINE_string('worker_hosts', '192.168.32.146:22221, 192.168.32.160:22221',\n 'Comma-separated list of hostname:port pairs')\n# 设置job name参数\nflags.DEFINE_string('job_name', None, 'job name: worker or ps')\n# 设置任务的索引\nflags.DEFINE_integer('task_index', None, 'Index of task within the job')\n# 选择异步并行,同步并行\nflags.DEFINE_integer(\"sync\", 1, \"是否采用分布式的同步模式,1表示同步模式,0表示异步模式\")\n# 选择计算设备\nflags.DEFINE_string('device', '/gpu:0', 'format like \"/cpu:1\" or \"/gpu:2\"')\n\nFLAGS = flags.FLAGS\n\n\ndef train(logits, labels):\n with tf.device(FLAGS.device):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n labels=labels, logits=logits, name='cross_entropy_per_example')\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n train_step = tf.train.AdamOptimizer(0.01).minimize(cross_entropy_mean)\n return train_step, cross_entropy_mean\n\n\ndef test(logits, labels):\n with tf.device(FLAGS.device):\n correct_prediction = tf.equal(tf.argmax(labels, 1), tf.argmax(tf.reshape(logits, (32, 10)), 1))\n count = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))\n return count\n\n\ndef main():\n if FLAGS.job_name is None or FLAGS.job_name == '':\n raise ValueError('Must specify an explicit job_name !')\n else:\n print('job_name : %s' % FLAGS.job_name)\n if FLAGS.task_index is None or FLAGS.task_index == '':\n raise ValueError('Must specify an explicit task_index!')\n else:\n print('task_index : %d' % FLAGS.task_index)\n\n ps_spec = FLAGS.ps_hosts.split(',')\n worker_spec = FLAGS.worker_hosts.split(',')\n\n # 创建集群\n cluster = tf.train.ClusterSpec({'ps': ps_spec, 'worker': worker_spec})\n server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)\n if FLAGS.job_name == 'ps':\n server.join()\n\n is_chief = (FLAGS.task_index == 0)\n # worker_device = '/job:worker/task%d/cpu:0' % FLAGS.task_index\n train_reader = Cifar10Reader.Reader(['cifar-10-python\\\\cifar-10-batches-py\\\\data_batch_1',\n 'cifar-10-python\\\\cifar-10-batches-py\\\\data_batch_2',\n 'cifar-10-python\\\\cifar-10-batches-py\\\\data_batch_3',\n 'cifar-10-python\\\\cifar-10-batches-py\\\\data_batch_4',\n 'cifar-10-python\\\\cifar-10-batches-py\\\\data_batch_5'])\n if is_chief is True:\n test_reader = Cifar10Reader.Reader(['cifar-10-python\\\\cifar-10-batches-py\\\\test_batch'])\n with tf.device(tf.train.replica_device_setter(cluster=cluster)):\n # step\n global_step = tf.Variable(0, name='global_step', trainable=False) # 创建纪录全局训练步数变量\n # input\n x = tf.placeholder(tf.float32, [None, 32, 32, 3])\n y = 
tf.placeholder(tf.int32, [None, 10])\n        # train\n        logits, _ = ResNet_Model.resnet_v2_50(x, 10)\n        train_op, loss_op = train(logits, y)\n        # test\n        test_op = test(logits, y)\n        # create the local parameter initialization op init_op\n        init_op = tf.global_variables_initializer()\n\n    sv = tf.train.Supervisor(is_chief=is_chief, logdir=FLAGS.train_dir, init_op=init_op, recovery_wait_secs=1,\n                             global_step=global_step)\n    if is_chief:\n        print('Worker %d: Initializing session...' % FLAGS.task_index)\n    else:\n        print('Worker %d: Waiting for session to be initialized...' % FLAGS.task_index)\n    sess = sv.prepare_or_wait_for_session(server.target)\n    print('Worker %d: Session initialization complete.' % FLAGS.task_index)\n\n    local_step = 0\n    start_time = time.time()\n    while True:\n        train_images, train_labels = train_reader.next_batch(FLAGS.batch_size)\n        train_images = tf.cast(train_images, tf.float32)\n        train_labels = tf.cast(tf.one_hot(train_labels, 10), tf.int32)\n        train_images, train_labels = sess.run([train_images, train_labels])\n        _, loss, step = sess.run([train_op, loss_op, global_step], feed_dict={x: train_images, y: train_labels})\n        local_step += 1\n        if local_step % 100 == 0:\n            duration = time.time() - start_time\n            print('Worker %d: training step %d done (global step:%d)' % (FLAGS.task_index, local_step, step))\n            if is_chief is True:\n                correct_count = 0.0\n                input_count = 0\n                while test_reader.epoch < 1:\n                    test_images, test_labels = test_reader.next_batch(FLAGS.batch_size)\n                    test_images = tf.cast(test_images, tf.float32)\n                    test_labels = tf.cast(tf.one_hot(test_labels, 10), tf.int32)\n                    test_images, test_labels = sess.run([test_images, test_labels])\n                    input_count += len(test_labels)\n                    correct_count += sess.run(test_op, feed_dict={x: test_images, y: test_labels})\n                print('time: %.5f, loss: %.3f, acc: %.3f' % (duration, loss, correct_count/input_count))\n                test_reader.clear()\n            start_time = time.time()\n        if step >= FLAGS.train_steps:\n            break\n    sess.close()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Grade-Two/GCCoopForTrain","sub_path":"main_multi.py","file_name":"main_multi.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23195639995","text":"\"\"\"\nThe elastic search communication library.\nIt is a utility thin wrapper around the python elastic search library\n\nTasks:\n  - Create index\n  - Destroy index\n  - Search in radius\n  - Search in polygon\n\n\"\"\"\n# Python\nimport json\n# Packages\nimport requests\n\n\nclass Elastic(object):\n\n    def __init__(self, host='0.0.0.0:9200', index='geo'):\n        \"\"\"Init the elastic connection\"\"\"\n        self.host = host\n        self.index = index\n        self.request_url = 'http://{0}/{1}/'.format(host, index)\n\n    def create_index(self):\n        \"\"\"\n        Creates the geo index\n        A check is made to make sure that the\n        index does not yet exist.
If the index\n        already exists, a False is returned\n\n        Returns True if the index is created,\n        False otherwise\n        \"\"\"\n        success = False  # Pessimism\n        # Checking if the index exists\n        r = requests.head(self.request_url)\n        if r.status_code == 404:\n            # Index does not exist, it is safe to create it\n            # @TODO add support for index setting\n            try:\n                r = requests.put(self.request_url)\n                if r.status_code < 300:\n                    success = True\n            except Exception as e:\n                print(e)\n        return success\n\n    def delete_index(self):\n        \"\"\"Delete the index\"\"\"\n        try:\n            requests.delete(self.request_url)\n        except Exception as e:\n            print(e)\n            return False\n        return True\n\n    def search_radius(self, point, radius, types=None, exclude=False):\n        \"\"\"\n        Perform a geo distance search around point within the given radius\n\n        point: An array, in GeoJSON style of the point to check around\n               GeoJSON is [lon, lat]\n        radius: A number (int, float and str accepted) of the distance in\n                meters\n\n        The function has two optional parameters:\n        types: A list of types to use in the query.
This is to limit the\n result set.\n exclude: Reverse the list of types to use as exclude types instead of\n include types\n \"\"\"\n # Verifying input\n if len(limits) != 4:\n return False\n # The query dict\n # https://www.elastic.co/guide/en/elasticsearch/reference/2.3/query-dsl-geo-distance-query.html\n query = {\n \"query\": {\n \"bool\": {\n \"must\": {\n \"match_all\": {}\n },\n \"filter\": {\n \"geo_bounding_box\": {\n \"center\" : {}\n }\n }\n }\n }\n }\n for key, value in limits.items():\n try:\n query['query']['bool']['filter']['geo_bounding_box']['center'][key] = value\n except KeyError:\n # Missing a limit\n return False\n r = requests.post(self.request_url + '_search', data=json.dumps(query))\n return r.json()\n\n","repo_name":"tekinuslu/datapunt_geosearch","sub_path":"web/docker_dir/datapunt_geosearch/elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"22785363884","text":"def letterCombinationsPhoneAll(digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n if len(digits) == 0:\n return []\n dic = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl', '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}\n res = []\n for i in digits:\n if len(res) == 0:\n for j in dic[i]:\n res.append(j)\n else:\n tmp = []\n for j in dic[i]:\n for k in res:\n tmp.append(k + j)\n res = tmp\n return res\n\nprint(letterCombinationsPhoneAll('23'))","repo_name":"saraswatpuneet/leetcode_stuff","sub_path":"leetcode_python/lettercombo.py","file_name":"lettercombo.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16545513518","text":"class Solution:\n def isFlipedString(self, s1: str, s2: str) -> bool:\n if len(s1) == len(s2) == 0:\n return True\n s3 = s2+ s2\n for i in range(len(s3)):\n if s3[i] == s1[0]:\n if s3[i:i+len(s2)] == s1:\n return True\n return False\n","repo_name":"RosieYC/LCCI","sub_path":"LCCI_01.09._String_Rotation_LCCI.py","file_name":"LCCI_01.09._String_Rotation_LCCI.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18745602584","text":"# coding=utf-8\n\n# input: array with multiple strings\n# expected output: rank of the 3 most often repeated words in given set of strings and number of times they occured, case insensitive\n\nsentences = [\n 'Taki mamy klimat',\n 'Wszędzie dobrze ale w domu najlepiej',\n 'Wyskoczył jak Filip z konopii',\n 'Gdzie kucharek sześć tam nie ma co jeść',\n 'Nie ma to jak w domu',\n 'Konduktorze łaskawy zabierz nas do Warszawy',\n 'Jeżeli nie zjesz obiadu to nie dostaniesz deseru',\n 'Bez pracy nie ma kołaczy',\n 'Kto sieje wiatr ten zbiera burzę',\n 'Być szybkim jak wiatr',\n 'Kopać pod kimś dołki',\n 'Gdzie raki zimują',\n 'Gdzie pieprz rośnie',\n 'Swoją drogą to gdzie rośnie pieprz?',\n 'Mam nadzieję, że poradzisz sobie z tym zadaniem bez problemu',\n 'Nie powinno sprawić żadnego problemu, bo Google jest dozwolony',\n]\n\n# Example result:\n# 1. \"mam\" - 12\n# 2. \"tak\" - 5\n# 3. \"z\" - 2\n\n\n# Good luck! 
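# Hedged alternative, not part of the original solution: the phone-keypad
# expansion above builds the cartesian product by hand; itertools.product
# expresses the same idea directly. Same digit mapping as above.
from itertools import product

def letter_combinations(digits):
    if not digits:
        return []
    dic = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
           '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
    # one character drawn from each digit's letter set, in order
    return [''.join(p) for p in product(*(dic[d] for d in digits))]

assert sorted(letter_combinations('23')) == sorted(
    ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf'])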
You can write all the code in this file.\nwords = {}\n\n\ndef clear_sentence(sentence):\n    sentence_without_dots = sentence.replace(\".\", \"\")\n    sentence_without_commas_and_dots = sentence_without_dots.replace(\",\", \"\")\n    sentence_without_punctuation = sentence_without_commas_and_dots.replace(\"?\", \"\")\n    sentence_lower_signs = sentence_without_punctuation.lower()\n    return sentence_lower_signs\n\n\nfor sentence in sentences:\n    cleared_sentence = clear_sentence(sentence)\n    split_sentence = cleared_sentence.split()\n    for word in split_sentence:\n        if word in words:\n            words[word] += 1\n        else:\n            words[word] = 1\n\nsorted_words = sorted(words.items(), key=lambda x: x[1])\nsorted_words.reverse()\n\nfor i in range(3):\n    print(f\"{i + 1}. \\\"{sorted_words[i][0]}\\\" - {sorted_words[i][1]}\")","repo_name":"mikosovsky/AKAI_application","sub_path":"word-rank/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20771436262","text":"import rtfm.settings as my_settings\nfrom django.conf import settings\nimport django\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rtfm.settings')\ndjango.setup()\n\nfrom core.models import *\nimport random as rnd\nimport time\n\n\ndef make_sessions():\n    drivers = Driver.objects.all()\n    transports = Transport.objects.all()\n    clients = Passenger.objects.all()\n    statuses = [Status.objects.get(status_name=\"Success\"),\n                Status.objects.get(status_name=\"Failed\")]\n    traces = Trace.objects.all()\n    for driver in drivers:\n        for i in range(1, 10):\n            trace = rnd.choice(traces)\n            session = DriveSession(driver_id=driver,\n                                   tr_id=rnd.choice(transports),\n                                   start_time=int(time.time() - 43000 * i),\n                                   is_continues=False,\n                                   end_time=int(time.time() - 43000 * (i - 1)),\n                                   trace_id=trace)\n            session.save()\n\n\ndef make_active_session():\n    drivers = Driver.objects.all()\n    transports = Transport.objects.all()\n    clients = Passenger.objects.all()\n    statuses = [Status.objects.get(status_name=\"Success\"),\n                Status.objects.get(status_name=\"Failed\")]\n    traces = Trace.objects.all()\n    driver = rnd.choice(drivers)\n    transport = rnd.choice(transports)\n    trace = rnd.choice(traces)\n    session = DriveSession(driver_id=driver, tr_id=transport,\n                           trace_id=trace, start_time=int(time.time()),\n                           is_continues=True\n                           )\n    print(session)\n    session.save()\n\n\ndef make_transactions():\n    drivers = Driver.objects.all()\n    transports = Transport.objects.all()\n    clients = Passenger.objects.all()\n    statuses = [Status.objects.get(status_name=\"Success\"),\n                Status.objects.get(status_name=\"Failed\")]\n    sessions = DriveSession.objects.all()\n\n    for client in clients:\n        for j in range(10):\n            for i in range(rnd.randint(1, 4)):\n                session = rnd.choice(sessions)\n                value = session.trace_id.cost\n                # renamed from 'time' to avoid shadowing the time module,\n                # which made time.time() below raise UnboundLocalError\n                if (session.end_time is not None):\n                    tx_time = session.start_time + rnd.randint(0, session.end_time - session.start_time)\n                else:\n                    tx_time = session.start_time + rnd.randint(0, int(time.time()) - session.start_time)\n                tran_id = (i * 10000) + rnd.randint(0, 10000000)\n                tran = Transaction(client_id=client,\n                                   session_id=session,\n                                   value=value,\n                                   time=tx_time,\n                                   transaction_id=tran_id,\n                                   status=statuses[rnd.randint(0, 1)]\n                                   )\n                tran.save()\n\n\ndef erase_sessions_and_transactions():\n    DriveSession.objects.all().delete()\n
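# Hedged sketch, not part of the original script: collections.Counter gives the
# same top-3 ranking as the manual dictionary counting above. It reuses the
# clear_sentence() helper defined in that script.
from collections import Counter

def top_words(sentences, n=3):
    counts = Counter(w for s in sentences for w in clear_sentence(s).split())
    return counts.most_common(n)   # list of (word, count), highest count first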
Transaction.objects.all().delete()\n\nerase_sessions_and_transactions()\nmake_sessions()\nmake_transactions()\nmake_active_session()\n","repo_name":"Rexarrior/rtfm_backend","sub_path":"rtfm/repeated_db_fetch.py","file_name":"repeated_db_fetch.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72358607843","text":"\"\"\"Perform BLEU scoring for given model as described in the paper\"\"\"\n\nfrom typing import Dict, List\n\nimport torch\nfrom fairseq import utils\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.translate.bleu_score import sentence_bleu\nfrom tqdm import tqdm\n\nfrom anp4nlg.models.np.neural_process import NeuralProcess\n\n\ndef load_data(path: str='wikitext-103/wiki.test.tokens') -> str:\n    txt = ''\n    with open(path, 'r') as datafile:\n        line = datafile.readline()\n        while line:\n            txt += line.replace('\\n', '')\n            line = datafile.readline()\n\n    return txt\n\n\ndef load_model(\n    model_dir: str='checkpoints',\n    checkpoint_file: str='checkpoint_32.pt',\n    data_dir: str='data-bin/wikitext-103'\n) -> NeuralProcess:\n    lm = NeuralProcess.from_pretrained(\n        model_dir,\n        checkpoint_file=checkpoint_file,\n        data_name_or_path=data_dir\n    )\n\n    lm.eval()\n\n    return lm\n\n\ndef data_to_tokens(\n    dataset: str, model: NeuralProcess, sentence_length: int=64\n) -> torch.Tensor:\n    dictionary = model.models[0].decoder.dictionary\n    tokens = dictionary.encode_line(\n        dataset, append_eos=False, add_if_not_exist=False)\n    num_sentences = len(tokens) // sentence_length\n    tokens = tokens[:num_sentences * sentence_length]\n\n    return tokens.reshape(num_sentences, sentence_length, 1)\n\n\ndef genereate_sentence_pairs(\n    dataset: torch.Tensor, model: NeuralProcess, context_size: int=21,\n    sampling_topk: int=20, no_repeat_ngram_size: int=3, tempearture: int=1.5,\n    min_len: int=64\n) -> List[Dict[str, str]]:\n    dictionary = model.models[0].decoder.dictionary\n\n    sentences = []\n\n    def to_string(tokens: torch.Tensor) -> str:\n        return dictionary.string(utils.strip_pad(tokens, dictionary.pad()),\n                                 bpe_symbol=\"@@ \")\n\n    # counters moved out of the loop: they previously reset on every sentence,\n    # so the summary print below reported at most one success/failure\n    num_successful = 0\n    num_failed = 0\n\n    for sentence_tokens in tqdm(dataset):\n        context_tokens = sentence_tokens[:context_size]\n        target_tokens = sentence_tokens[context_size:]\n\n        try:\n            pred = model.sample(\n                dictionary.string(context_tokens),\n                sampling=True, sampling_topk=sampling_topk,\n                no_repeat_ngram_size=no_repeat_ngram_size, tempearture=tempearture,\n                min_len=min_len)\n\n            sentences.append({\n                \"ctx\": to_string(context_tokens),\n                \"tgt\": to_string(target_tokens),\n                \"pred\": pred[min_len:],\n            })\n\n            num_successful += 1\n        except RuntimeError:\n            num_failed += 1\n\n    print(f'Successful: {num_successful}, failed: {num_failed}, total: {num_successful + num_failed}')\n\n    return sentences\n\n\ndef score(sentences: List[Dict[str, str]]) -> float:\n    stop_words = set(stopwords.words('english'))\n    stop_words.update(['.', ',', '\"', \"'\", '?', '!', ':', ';', '(', ')',\n                       '[', ']', '{', '}', '<', 'unk', '>', \"''\"])\n\n    def preprocess(sentence):\n        tokens = word_tokenize(sentence)\n        tokens = [t for t in tokens if t not in stop_words]\n        return tokens\n\n    scores = []\n    highscore_sentence = ''\n    highscore = 0.0\n\n    for sentence in tqdm(sentences):\n        reference, hypothesis = preprocess(sentence[\"tgt\"]), preprocess(sentence[\"pred\"])\n        score = sentence_bleu([reference], hypothesis, weights=(1, 0))\n        scores.append(score)\n\n        if score > highscore:\n            highscore = 
score\n highscore_sentence = sentence\n\n print('Highest scoring sentence has score', highscore)\n print('Highscore sentence context:', highscore_sentence[\"ctx\"])\n print('Highscore sentence reference:', highscore_sentence[\"tgt\"])\n print('Highscore sentence hypothesis:', highscore_sentence[\"pred\"])\n\n return sum(scores) / len(scores)\n\n\nmodel = load_model()\ndataset = data_to_tokens(load_data(), model)\nsentence_pairs = genereate_sentence_pairs(dataset, model)\nbleu_score = score(sentence_pairs)\n\nprint('BLEU:', bleu_score)\n","repo_name":"lodewijkvankeizerswaard/ANP4NLG","sub_path":"bleu.py","file_name":"bleu.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10770357960","text":"import os\nimport subprocess\nfrom moviepy.editor import *\nimport time\n\n\ndef shell():\n os.system(\"cls\")\n\n while True:\n print(\n \"\"\"\n ·▄▄▄▄ ▄▄▌ ▐ ▄▌ ▐ ▄ ▄▄▌ ▄▄▄· ·▄▄▄▄ ▄▄▄· ▄▄▄▄▄ ▄▄▄ \n ██▪ ██ ▪ ██· █▌▐█•█▌▐███• ▪ ▐█ ▀█ ██▪ ██ ▐█ ▀█ •██ ▪ ▀▄ █·\n ▐█· ▐█▌ ▄█▀▄ ██▪▐█▐▐▌▐█▐▐▌██▪ ▄█▀▄ ▄█▀▀█ ▐█· ▐█▌▄█▀▀█ ▐█.▪ ▄█▀▄ ▐▀▀▄ \n ██. ██ ▐█▌.▐▌▐█▌██▐█▌██▐█▌▐█▌▐▌▐█▌.▐▌▐█ ▪▐▌██. ██ ▐█ ▪▐▌ ▐█▌·▐█▌.▐▌▐█•█▌\n ▀▀▀▀▀• ▀█▄▀▪ ▀▀▀▀ ▀▪▀▀ █▪.▀▀▀ ▀█▄▀▪ ▀ ▀ ▀▀▀▀▀• ▀ ▀ ▀▀▀ ▀█▄▀▪.▀ ▀ \n \n \n \"\"\")\n\n print(\"1. Mp4\")\n print(\"2. Mp3\")\n print(\"3. Exit\")\n\n try:\n inp = input()\n if int(inp) == 1:\n os.system(\"cls\")\n print(\"[+] Incolla il link youtube di una playlist o di una canzone.\")\n url = input(\"\")\n time.sleep(3)\n os.system(\"mkdir mp4 & copy youtube-dl.exe mp4 & cd mp4 & youtube-dl.exe -i -f mp4 --yes-playlist \" + url)\n os.system(\"del mp4\\youtube-dl.exe\")\n print(\"\\n\\n\\n[+] Tutto fatto!\")\n continue\n\n elif int(inp) == 2:\n os.system(\"cls\")\n print(\"[+] Incolla il link youtube di una playlist o di una canzone.\")\n url = input(\"\")\n time.sleep(3)\n os.system(\"mkdir mp4 & copy youtube-dl.exe mp4 & cd mp4 & youtube-dl.exe -i -f mp4 --yes-playlist \" + url)\n print(\"[+] Tutto fatto, converto ora i file in mp3\")\n foldermp3 = 'mp3\\\\'\n foldermp4 = 'mp4\\\\'\n os.system(\"mkdir mp3\")\n os.system(\"del mp4\\youtube-dl.exe\")\n for file in os.listdir(foldermp4):\n mp4 = VideoFileClip(foldermp4 + file)\n mp3 = mp4.audio\n mp3.write_audiofile(foldermp3 + (file.split(\".mp4\")[0] + \".mp3\"))\n mp3.close()\n mp4.close()\n print(\"finito di convertire\")\n\n os.system(\"cls\")\n print(\"\\n\\nVuoi eliminare i file mp4? \")\n\n if str(input(\"\\n\\n\\n1. Si\\n2. 
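# Hedged aside on the scoring above, not part of the original script: with
# weights=(1, 0), sentence_bleu can still collapse to 0.0 when the hypothesis
# shares no n-grams with the reference. NLTK ships smoothing functions for
# exactly this case; a tiny standalone example:
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

smooth = SmoothingFunction().method1
reference = ['the', 'cat', 'sat']
hypothesis = ['the', 'dog', 'sat']
print(sentence_bleu([reference], hypothesis, weights=(1, 0),
                    smoothing_function=smooth))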
No\\n\")) == \"1\":\n os.system(\"rmdir /Q /S \" + foldermp4)\n print(\"[+] Tutto finito!\")\n else:\n os.system(\"cls\")\n print(\"[+] Tutto finito!\")\n elif int(inp) == 3:\n os.system(\"cls\")\n print(\"Auf wiedersehen!\")\n exit()\n else:\n os.system(\"cls\")\n print(\"Cio che hai digitato non è incluso nelle possibili scelte.\")\n\n except Exception:\n os.system(\"cls\")\n print(\"[-] Qualcosa è andato storto, riprova :(\\n\\n\")\n\n continue\n\n\nif __name__ == \"__main__\":\n shell()\n","repo_name":"nessunonessuno/YoutubePlaylistDownloaderWIN","sub_path":"runWIN.py","file_name":"runWIN.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12784664800","text":"# -*- coding: utf-8 -*-\n'''\n# Created on 2021/04/13 17:47:01\n# @filename: train.py\n# @author: tcxia\n'''\n\n\nfrom data.dataset import QADataset\nfrom model.xlnet import XLNetQA\nfrom model.bert import BertForMultipleChoice\nfrom util.optimation import BertAdam\n\nfrom transformers import XLNetTokenizer, BertTokenizer\nfrom transformers import AdamW\n\nimport torch\nimport torch.utils.data as tud\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef pred_correct(output_prob, targets):\n _, output_label = output_prob.max(dim=1)\n correct = (output_label == targets).sum()\n return correct.item()\n\n\ndef Train(model, train_loader, optimizer, epoches, device):\n\n for epoch in range(epoches):\n model.train()\n\n train_loss = 0.\n correct_num = 0\n for i, batch in enumerate(train_loader):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_masks, segment_ids, labels = batch\n\n optimizer.zero_grad()\n\n loss = model(input_ids, segment_ids, input_masks, labels)\n\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), 5.)\n optimizer.step()\n\n train_loss += loss.item()\n\n epoch_loss = train_loss / len(train_loader)\n print(\"Epoch: {} | Loss: {} \".format(epoch, epoch_loss))\n\n\nif __name__ == '__main__':\n max_seq_len = 128\n batch_size = 4\n epoches = 10\n gradient_acc_step = 1\n\n device = torch.device(\"cuda:3\" if torch.cuda.is_available() else \"cpu\")\n\n train_file = '/data/nlp_dataset/qa-public/train.json'\n\n # pretrained_path = '/data/nlp_dataset/pre_train_models/chinese-xlnet-base'\n pretrained_path = '/data/nlp_dataset/pre_train_models/bert-base-chinese'\n tokenizer = BertTokenizer.from_pretrained(pretrained_path, do_lower_case=True)\n\n train_set = QADataset(train_file, tokenizer, max_seq_len)\n train_loader = tud.DataLoader(train_set, batch_size=batch_size, shuffle=True)\n\n # model = XLNetQA(pretrained_path)\n\n bert_name = 'bert-base-chinese'\n model = BertForMultipleChoice.from_pretrained(\n bert_name,\n num_choices=4,\n cache_dir='/data/nlp_dataset/pre_train_models')\n model.to(device)\n # print(model)\n\n param_optimizer = list(model.named_parameters())\n # print(param_optimizer)\n\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {\n 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n t_total = int(len(train_set) / batch_size / gradient_acc_step * epoches)\n optimizer = BertAdam(optimizer_grouped_parameters, lr=5e-5, warmup=0.1, t_total=t_total)\n # optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5)\n\n # optimizer = 
torch.optim.Adam(model.parameters(), lr=0.001)\n\n\n\n Train(model, train_loader, optimizer, epoches, device)\n","repo_name":"tcxia/nlpdemo","sub_path":"competition/qa-public/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"41316965001","text":"# -*- coding: utf-8\nfrom emoji import emojize\nkiss_mark = emojize(\":kiss_mark:\")\nlove_letter = emojize(\":love_letter:\")\nheart_with_arrow = emojize(\":heart_with_arrow:\")\nheart_with_ribbon = emojize(\":heart_with_ribbon:\")\nsparkling_heart = emojize(\":sparkling_heart:\")\ngrowing_heart = emojize(\":growing_heart:\")\nbeating_heart = emojize(\":beating_heart:\")\nrevolving_hearts = emojize(\":revolving_hearts:\")\ntwo_hearts = emojize(\":two_hearts:\")\nheart_decoration = emojize(\":heart_decoration:\")\nheart_exclamation = emojize(\":heart_exclamation:\")\nbroken_heart = emojize(\":broken_heart:\")\nheart_on_fire = emojize(\":heart_on_fire:\")\nmending_heart = emojize(\":mending_heart:\")\nred_heart = emojize(\":red_heart:\")\norange_heart = emojize(\":orange_heart:\")\nyellow_heart = emojize(\":yellow_heart:\")\ngreen_heart = emojize(\":green_heart:\")\nblue_heart = emojize(\":blue_heart:\")\npurple_heart = emojize(\":purple_heart:\")\nbrown_heart = emojize(\":brown_heart:\")\nblack_heart = emojize(\":black_heart:\")\nwhite_heart = emojize(\":white_heart:\")\nhundred_points = emojize(\":hundred_points:\")\nanger_symbol = emojize(\":anger_symbol:\")\ncollision = emojize(\":collision:\")\ndizzy = emojize(\":dizzy:\")\nsweat_droplets = emojize(\":sweat_droplets:\")\ndashing_away = emojize(\":dashing_away:\")\nhole = emojize(\":hole:\")\nbomb = emojize(\":bomb:\")\nspeech_balloon = emojize(\":speech_balloon:\")\neye_in_speech_bubble = emojize(\":eye_in_speech_bubble:\")\nleft_speech_bubble = emojize(\":left_speech_bubble:\")\nright_anger_bubble = emojize(\":right_anger_bubble:\")\nthought_balloon = emojize(\":thought_balloon:\")\nzzz = emojize(\":zzz:\")\n","repo_name":"numengo/python-vishuda","sub_path":"vishuda/models/emojis/smileys_emotion/emotion.py","file_name":"emotion.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2680500253","text":"import torch.multiprocessing as mp\nfrom multiprocessing import Process\nfrom multiprocessing import Queue as queue\nfrom queue import Queue\n#for Alphaposeinfo\n#for AlphaPoseQ\nfrom AlphaPose.yolo.preprocess import prep_frame\n\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.utils.data\nimport numpy as np\nfrom AlphaPose.dataloader import DetectionLoader, DetectionProcessor, Mscoco\nfrom AlphaPose.SPPE.src.main_fast_inference import *\nfrom AlphaPose.SPPE.src.utils.eval import *\n\nimport ntpath\nimport os\nimport sys\nimport time\nfrom AlphaPose.fn import getTime,vis_frame\nimport cv2\nfrom AlphaPose.pPose_nms import pose_nms\n\n\nfrom threading import Thread\n\nclass AlphaPoseInfo:\n def __init__(self):\n self.dataset = 'coco'\n self.fast_inference = True\n self.nThreads = 30\n self.sp = True\n self.use_pyranet = True\n self.inputResH = 320\n self.inputResW = 256\n self.outputResH = 80\n self.outputResW = 64\n self.scale = 0.25\n self.rotate = 30\n self.hmGauss = 1\n self.baseWidth = 9\n self.cardinality = 5\n self.nResidual = 1\n self.net = 'res152'\n self.mode = \"normal\"\n self.inp_dim = 608\n self.confidence = 
0.05\n self.nms_thesh = 0.6\n self.num_classes = 80\nclass AlphaPoseQ:\n def __init__(self, queueSize=50):\n# self.Q = mp.Queue(maxsize=queueSize)\n self.Q = Queue(maxsize=queueSize)\n def push(self,item,inputSize=608):\n frame = item\n img, orig_img, im_dim_list = prep_frame(frame, inputSize)\n with torch.no_grad():\n im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)\n self.Q.put((img,[orig_img],[\"temp\"],im_dim_list))\n def getitem(self):\n return self.Q.get()\n def length(self):\n return self.Q.qsize() \n\nclass AlphaPoseLoader:\n def __init__(self,queueSize=1024):\n self.alphaPoseInfo = AlphaPoseInfo()\n self.inputQ = AlphaPoseQ()\n self.det_loader = DetectionLoader(self.alphaPoseInfo, self.inputQ, batchSize=1)\n self.det_processor = DetectionProcessor(self.alphaPoseInfo, self.det_loader)\n self.pose_dataset = Mscoco(self.alphaPoseInfo)\n\n if self.alphaPoseInfo.fast_inference:\n self.pose_model = InferenNet_fast(4 * 1 +1, self.pose_dataset)\n else:\n self.pose_model = InferenNet(4*1+1, self.pose_dataset)\n self.pose_model.cuda()\n self.pose_model.eval()\n\n self.outputQ = Queue(maxsize=queueSize)\n self.startTime = time.time()\n def start(self):\n# p = mp.Process(target=self.run, args=())\n# p.daemon = True\n# p.start()\n t = Thread(target=self.run, args=())\n t.start()\n t.join()\n return self\n\n def run(self):\n startTime = time.time()\n det_results = self.det_loader.update()\n self.det_processor.update(det_results)\n runtime_profile = {\n 'dt': [],\n 'pt': [],\n 'pn': []\n }\n start_time = getTime()\n with torch.no_grad():\n (inps, orig_img, im_name, boxes, scores, pt1, pt2) = self.det_processor.read()\n if orig_img is None:\n return\n ckpt_time, det_time = getTime(start_time)\n runtime_profile['dt'].append(det_time)\n datalen = inps.size(0)\n batchSize = 1\n num_batches = datalen//batchSize\n \n hm = []\n for j in range(num_batches):\n inps_j = inps[j*batchSize:min((j+1)*batchSize, datalen)].cuda()\n hm_j = self.pose_model(inps_j)\n hm.append(hm_j)\n hm = torch.cat(hm)\n ckpt_time, pose_time = getTime(ckpt_time)\n runtime_profile['pt'].append(pose_time)\n hm = hm.cpu().data\n ckpt_time, post_time = getTime(ckpt_time)\n runtime_profile['pn'].append(post_time)\n self.outputQ.put((boxes, scores, hm, pt1, pt2))\n preds_hm, preds_img, preds_scores = getPrediction(hm, pt1, pt2, 320, 256, 80, 64)\n result = pose_nms(boxes, scores, preds_img, preds_scores)\n result = {\n 'imgname': 1,\n 'result': result\n }\n #finalimg = vis_frame(orig_img, result)\n #cv2.imwrite(\"finalresult.png\", finalimg)\n endTime = time.time()\n print(\"\\tAlphaPose Inference Time : \", endTime-startTime)\n self.startTime = endTime\n\n","repo_name":"kyungjin-lee/Streaming3","sub_path":"StreamingServer/AlphaPoseLoader.py","file_name":"AlphaPoseLoader.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30513822652","text":"# Importando libería Math\n# Las funciones trigonométricas están en radianes en la librería Math.\n# Los ángulos deben ser convertidos a Radianes. convierte con: math.radians(angulo)\n# LA librería math no trabaja con numeros complejos. Debe importar cmath para complejos\n# math.sqrt(9) ->->->->-> cmath.sqrt(-9)\n\nimport math\n\nprint(math.log(4))\n\n\n# Ejercicio 1: \n# Un amigo quiere regalar a otro dos libros y los quiere legir entre los 15 que le gustan. \n# ¿De cuantas formas puede hacerlo?. 
Construya un Scrip que, al ejecutarlo, arroje el resultado.\n\n# Función Comb...\nLibros = 15\nRegalos = 2\nComb = math.comb(Libros, Regalos)\nprint(\"\\n l número de combinaciones es:\",Comb)\n#Impresión con el formato: (En caso de tener float, cammbiar a %f)\nprint(\"\\nEl número de combinaciones de %d elementos entre %d elementos, es %d.\" %(Regalos, Libros, Comb))\n\n#Calculando la combinatoria sin funcion comb, n!/(r!*(n-r)!)\nCombF = (math.factorial(Libros)) / ((math.factorial(Regalos))*(math.factorial(Libros-Regalos)))\nprint(\"\\nEl número de combinaciones de %d elementos entre %d elementos, es %d.\" %(Regalos, Libros, CombF))\n\n\n# Ejercicio 2:\n# Elabore un Scrip que muestre el factorial de un número, Considere que este número es de coma flotante\n\nnumero = 10.4\nnumeroApxA = math.ceil(numero)\nnumeroApxB = math.floor(numero)\nfactorial1 = math.factorial(numeroApxA)\nfactorial2 = math.factorial(numeroApxB)\nprint(\"\\nEl factorial de %f redondeado hacia abajo es %d.\" %(numero,factorial2))\nprint(\"El factorial de %f redondeado hacia arriba es %d.\" %(numero,factorial1))\n\n\n# Ejercicio 3:\n# El radio de una circunferencia es de 5m. Escriba un código que, al ejecutarlo, determine el área del circulo. \n# Use la librería Math y defina la dimensión geométrica dada como la variable 'radio = \"5\"'. A=Pi*r**2\n\nradio = \"5\"\nradio =int(radio)\narea = (math.pi)*(radio**2)\nprint(\"\\nEl Area del circulo de radio %dm es %fm2.\" %(radio,area))\n\n\n# Ejercicio 4:\n# Construya un código que, dado un ángulo en angulos, calcule el valor de las seis variables trigonométricas principales.\n\nangulo = 90\n# Convierte el ángulo de grados a radianes.\nangulorad = math.radians(angulo)\nseno = math.sin(angulorad)\ncoseno = math.cos(angulorad)\ntangente = math.tan(angulorad)\ncotangente = 1/tangente\nsecante = 1/coseno\ncosecante = 1/seno\n# Convierte el ángulo de radianes a grados.\nangulorad = math.degrees(angulorad)\nprint(\"\\nEl seno de %d° es %f rad.\" %(angulo,seno))\nprint(\"El coseno de %d° es %f rad.\" %(angulo,coseno))\nprint(\"La tangente de %d° es %f rad.\" %(angulo,tangente))\nprint(\"La cotangente de %d° es %f rad.\" %(angulo,cotangente))\nprint(\"La secante de %d° es %f rad.\" %(angulo,secante))\nprint(\"La cosecate de %d° es %f rad.\" %(angulo,cosecante))\n\n\n# Ejercicio 5:\n# La siguiente ecuación permite calcular el factor de fricción (f) para un fluido a través de una tubería:\n# 1/(raiz(f)) = -1,8log((6,9/Re)+(5x10^-6)).\n# El parámetro Re depende del diámetro de la tubería (D), la velocidad del fluido (v), su densidad (p) y viscosidad (u):\n# Re = (p*v*D)/u\n# p = 1000 \n# v = 8,5 \n# D = 0,2 \n# u = 1x10^-6\n# Construya un scrip cuyo objetivo sea el cálculo del factor de fricción.\n\ndensidad = 1000\nvelocidad = 8.5\ndiametro = 0.2\nviscosidad = 1e-6 # Uno por diez a la menos seis\n\nRe = densidad * velocidad * diametro / viscosidad\nx = -1.8*(math.log10(6.9/Re+5e-6))\nfriccion = (1/x)**2\nprint(\"\\nEl factor de fricción es: %f.\" %(friccion))\nprint(\"El factor de fricción es: \", friccion)\nprint(\"El factor de fricción es: \", \"{:.4f}\".format(friccion)) #Impresión formateada en 4 digitos decimales.","repo_name":"djotalorab/MisionTIC2022","sub_path":"MisionTIC_Ciclo1_python/Sesion2/2_1.py","file_name":"2_1.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"7786324817","text":"# Sometimes you’ll want to accept an arbitrary number of arguments, but\n# you won’t 
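# Hedged cross-check, not part of the original tutorial: math.comb(n, r)
# (Python 3.8+) must agree with the factorial formula n! / (r! * (n - r)!)
# used in Ejercicio 1, and 15 choose 2 is 105.
import math

n, r = 15, 2
assert math.comb(n, r) == math.factorial(n) // (
    math.factorial(r) * math.factorial(n - r)) == 105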
know ahead of time what kind of information will be passed\n# to the function. In this case, you can write functions that accept as\n# many key-value pairs as the calling statement provides.\ndef build_profile(first, last, **user_info):\n \"\"\"Build a dictionary containing everything we know about a user.\"\"\"\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info\n\nuser_profile = build_profile('albert', 'einstein',\nlocation='princeton',\nfield='physics')\n\nprint(user_profile)\n","repo_name":"xerifeazeitona/PCC_Basics","sub_path":"chapter_08/examples/8_user_profile.py","file_name":"8_user_profile.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38985778593","text":"import logging\nfrom threading import Thread, Condition\n\nfrom zeroconf import ServiceBrowser, Zeroconf\n\n\nGOOGLE_CAST_IDENTIFIER = \"_googlecast._tcp.local.\"\n\n\nclass DiscoveryCallback:\n\n def on_chromecast_appeared(self, device_name, model_name, ip_address, port):\n pass\n\n def on_chromecast_disappeared(self, device_name):\n pass\n\n\nclass ChromecastDiscovery(Thread):\n \"\"\"\n Original code borrowed from pychromecast discovery, adapted to run in background all the time.\n \"\"\"\n\n def __init__(self, discovery_callback):\n super().__init__()\n\n self.logger = logging.getLogger(\"discovery\")\n self.discovery_callback = discovery_callback\n self.run_condition = Condition()\n self.services = {}\n\n def start_discovery(self):\n self.logger.debug(\"starting discovery\")\n self.start()\n\n def stop_discovery(self):\n self.logger.debug(\"stopping discovery\")\n\n with self.run_condition:\n self.run_condition.notify_all()\n\n def run(self):\n zeroconf = Zeroconf()\n browser = ServiceBrowser(zeroconf, GOOGLE_CAST_IDENTIFIER, self)\n\n try:\n with self.run_condition:\n self.run_condition.wait()\n\n self.logger.debug(\"end of run-body (discovery)\")\n\n finally:\n browser.cancel()\n zeroconf.close()\n\n def remove_service(self, zconf, typ, name):\n \"\"\" Remove a service from the collection. \"\"\"\n\n # easy filtering\n if not name.endswith(GOOGLE_CAST_IDENTIFIER):\n return\n\n self.logger.info(\"removing chromecast with name \\\"%s\\\"\" % name)\n\n if name in self.services:\n self.discovery_callback.on_chromecast_disappeared(self.services[name])\n self.services.pop(name, None)\n\n def add_service(self, zconf, typ, name):\n \"\"\" Add a service to the collection. 
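# Hedged follow-on to the **kwargs example above; make_admin_profile is a
# hypothetical name, not part of the original lesson. It shows that arbitrary
# key-value pairs also forward cleanly to another call.
def make_admin_profile(first, last, **extra):
    # pass any extra key-value pairs through to build_profile unchanged
    return build_profile(first, last, role='admin', **extra)

profile = make_admin_profile('marie', 'curie', field='chemistry')
# -> contains role='admin' and field='chemistry' alongside the names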
\"\"\"\n # easy filtering\n if not name.endswith(GOOGLE_CAST_IDENTIFIER):\n return\n\n self.logger.info(\"adding chromecast with name \\\"%s\\\"\" % name)\n\n service = None\n tries = 0\n while service is None and tries < 4:\n try:\n service = zconf.get_service_info(typ, name)\n except IOError:\n # If the zeroconf fails to receive the necessary data we abort adding the service\n break\n tries += 1\n\n if not service:\n self.logger.warn(\"services not discovered for device\")\n return\n\n address = service.parsed_scoped_addresses()[0]\n\n def get_value(key):\n value = service.properties.get(key.encode('utf-8'))\n\n return value.decode('utf-8')\n\n model_name = get_value('md')\n device_name = get_value('fn')\n self.logger.info(\"chromecast device name \\\"%s\\\"\" % device_name)\n\n self.services[name] = device_name\n self.discovery_callback.on_chromecast_appeared(device_name, model_name, address, service.port)\n","repo_name":"nohum/chromecast-mqtt-connector","sub_path":"helper/discovery.py","file_name":"discovery.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"54"} +{"seq_id":"40543692154","text":"import argparse\nimport os\nfrom numpy import random\nfrom pathlib import Path\nimport shutil\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', required=True, help='input folder with images to process')\n parser.add_argument('--test', required=False, default=0.3, type=float, help='proportion in 0.XX format how many percentage of images should be moved to test folder (default is 0.3)')\n parser.add_argument('--train', required=False, default=0.7, type=float, help='proportion in 0.XX format how many percentage of images should be moved to train folder (default is 0.7')\n parser.add_argument('--randomize', required=False, default=False, type=bool)\n\n args = parser.parse_args()\n \n print('Generating %0.1f%% train images and %0.1f%% test images...' 
% (args.train * 100, args.test * 100))\n\n if not os.path.isdir(args.input):\n exit('Input is not a folder')\n\n test_dataset_dir_path = os.path.join(args.input, 'test')\n train_dataset_dir_path = os.path.join(args.input, 'train')\n\n # Cleanup of previous test dataset\n if os.path.isdir(test_dataset_dir_path):\n shutil.rmtree(test_dataset_dir_path)\n\n # Cleanup of previous train dataset\n if os.path.isdir(train_dataset_dir_path):\n shutil.rmtree(train_dataset_dir_path)\n\n print('Test dataset will be placed here:', test_dataset_dir_path)\n print('Train dataset will be placed here:', train_dataset_dir_path)\n \n # Get all absolute paths for files in given directory\n files_paths = [os.path.abspath(os.path.join(dirpath, filename)) for dirpath, dirnames, filenames in os.walk(args.input) for filename in filenames]\n\n # Check if all images have xmls\n images_paths = sorted([x for x in files_paths if not x.endswith('.xml')])\n xmls_paths = sorted([x for x in files_paths if x.endswith('.xml')])\n\n images_paths_length = len(images_paths)\n xmls_paths_length = len(xmls_paths)\n\n print('Images count', images_paths_length)\n print('Xmls count', xmls_paths_length)\n\n if images_paths_length != xmls_paths_length:\n untagged_images = set([image_name.split('.')[0] for image_name in images_paths]) - set([xml_name.split('.')[0] for xml_name in xmls_paths])\n print(untagged_images)\n exit(\"Some images were untagged\")\n \n # dictionary in format (path_to_image, path_to_xml_corresponding_to_that_image)\n images_xmls_dict = (dict(zip(images_paths, xmls_paths)))\n\n test_images_paths = []\n if not args.randomize:\n test_images_paths = images_paths[0:int(args.test * images_paths_length)]\n else:\n # take random images paths to test taking into account proportion\n test_images_paths = random.choice(images_paths, int(args.test * images_paths_length), replace=False)\n\n os.mkdir(test_dataset_dir_path)\n\n # copy randomly choosen images and xml to test dataset folder\n for image_path in test_images_paths:\n xml_path = images_xmls_dict[image_path]\n shutil.copyfile(image_path, os.path.join(test_dataset_dir_path, Path(image_path).name))\n shutil.copyfile(xml_path, os.path.join(test_dataset_dir_path, Path(xml_path).name))\n\n os.mkdir(train_dataset_dir_path)\n\n # get paths to images that were not inluded in test dataset\n train_images_paths = list(set(images_paths) - set(test_images_paths))\n\n # copy randomly choosen images and xml to train dataset folder\n for image_path in train_images_paths:\n xml_path = images_xmls_dict[image_path]\n shutil.copyfile(image_path, os.path.join(train_dataset_dir_path, Path(image_path).name))\n shutil.copyfile(xml_path, os.path.join(train_dataset_dir_path, Path(xml_path).name))\n\n print('Done...')\n print('All images count:', images_paths_length)\n print('Test images count:', len(test_images_paths))\n print('Train images count:', len(train_images_paths))\n\n","repo_name":"przemo2174/CNNTools","sub_path":"CNNTools/test_train_split.py","file_name":"test_train_split.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17867516637","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# 使用一个flag 标志层的结束,只有最后一层所有子节点都为空才会结束遍历\n# 一系列自然数相乘,有一个是0就是0,所有都为正才是正\nclass Solution:\n def levelOrder(self, root):\n if root == None:\n return []\n\n levels = [[root]] # 按层存点\n level = [] # 存一层的点\n ret = 
[[root.val]] # 按层存值\n t = [] # 存一层的值\n while True:\n for node in levels[-1]:\n if node.left or node.right:\n if node.left:\n level.append(node.left)\n t.append(node.left.val)\n if node.right:\n level.append(node.right)\n t.append(node.right.val)\n if t == []:\n # 如果最后是[] 说明上一层是最后一层\n return ret\n levels.append(level)\n ret.append(t)\n t = []\n level = []","repo_name":"Nobody0321/MyCodes","sub_path":"OJ/LeetCode/102. Binary Tree Level Order Traversal.py","file_name":"102. Binary Tree Level Order Traversal.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34491966810","text":"#!/usr/bin/python\n#loading up modules\nfrom w1thermsensor import W1ThermSensor\nimport os\nimport time\nfrom influxdb.influxdb08 import InfluxDBClient\n\nwhile True:\n\n sensor1 = W1ThermSensor(W1ThermSensor.THERM_SENSOR_DS18B20, \"041501a634ff\")\n sensor1_val = sensor1.get_temperature()\n\n sensor2 = W1ThermSensor(W1ThermSensor.THERM_SENSOR_DS18B20, \"041501aedaff\")\n sensor2_val = sensor2.get_temperature()\n\n sensor3 = W1ThermSensor(W1ThermSensor.THERM_SENSOR_DS18B20, \"031501c516ff\")\n sensor3_val = sensor3.get_temperature()\n\n sensor4 = W1ThermSensor(W1ThermSensor.THERM_SENSOR_DS18B20, \"031501c51fff\")\n sensor4_val = sensor4.get_temperature()\n\n sensor5 = W1ThermSensor(W1ThermSensor.THERM_SENSOR_DS18B20, \"031501c1e1ff\")\n sensor5_val = sensor5.get_temperature()\n\n\n print(\"posting data\")\n\n\n sensor1_json_body =[\n {\n \"name\" : \"sensor1_temp_c\",\n \"columns\" : [\"value\", \"sensor\"],\n \"points\" : [\n [sensor1_val, \"sensor01\"]\n ]\n }\n ]\n\n sensor2_json_body =[\n {\n \"name\" : \"sensor2_temp_c\",\n \"columns\" : [\"value\", \"sensor\"],\n \"points\" : [\n [sensor2_val, \"sensor02\"]\n ]\n }\n ]\n\n sensor3_json_body =[\n {\n \"name\" : \"sensor3_temp_c\",\n \"columns\" : [\"value\", \"sensor\"],\n \"points\" : [\n [sensor3_val, \"sensor03\"]\n ]\n }\n ]\n\n sensor4_json_body =[\n {\n \"name\" : \"sensor4_temp_c\",\n \"columns\" : [\"value\", \"sensor\"],\n \"points\" : [\n [sensor4_val, \"sensor04\"]\n ]\n }\n ]\n\n sensor5_json_body =[\n {\n \"name\" : \"sensor5_temp_c\",\n \"columns\" : [\"value\", \"sensor\"],\n \"points\" : [\n [sensor5_val, \"sensor05\"]\n ]\n }\n ]\n\n\n client = InfluxDBClient('SEVRVERIP', 8086, 'USERNAME', 'PASSWORD', 'DB_NAME')\n client.write_points(sensor1_json_body)\n client.write_points(sensor2_json_body)\n client.write_points(sensor3_json_body)\n client.write_points(sensor4_json_body)\n client.write_points(sensor5_json_body)\n print(sensor1_val)\n print(sensor2_val)\n print(sensor3_val)\n print(sensor4_val)\n print(sensor5_val)\n time.sleep(2)\n","repo_name":"ainsey11/SendPiTempToGrafana","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"32218163673","text":"# Ewerton Vieira de Silles\n# 01/05/2019\n#\n# Programa que exibe um menu de uma calculadora que realiza as operacoes de multiplicacao; divisao;\n# mmc e mdc entre dois numeros; e todos os numeros primos ate um determinado limite\n\n# Funcao - calcula o produto de uma multiplicacao somando um fator a ele mesmo tantas vezes quanto vale o outro fator\n\ndef multiplicar(fator1, fator2):\n multiplicador = fator1\n if fator2 == 0:\n multiplicador = 0\n else:\n for cont in range(1, fator2):\n multiplicador += fator1\n return multiplicador\n\n# Funcao - calcula o 
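# Hedged alternative to the level-order solution above, not part of the original
# submission: the usual deque-based BFS processes exactly one layer per pass and
# avoids the sentinel/flag bookkeeping. Same TreeNode shape as above.
from collections import deque

def level_order(root):
    if root is None:
        return []
    result, queue = [], deque([root])
    while queue:
        level = []
        for _ in range(len(queue)):          # drain exactly the current layer
            node = queue.popleft()
            level.append(node.val)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        result.append(level)
    return result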
quociente e o resto de uma divisao subtraindo o divisor do dividendo tantas vezes quanto possivel\n\ndef dividir(dividendo, divisor):\n resto = dividendo\n if dividendo < divisor:\n quociente = 0\n else:\n resto -= divisor\n quociente, resto = dividir(resto, divisor)\n quociente += 1\n return quociente, resto\n\n# Funcao - calcula o MMC entre dois numeros testando se os numeros posteriores ao maior sao divisiveis por ambos\n\ndef mmc(numero1, numero2):\n if (numero1 > numero2):\n maior = numero1\n else:\n maior = numero2\n while True:\n if (maior % numero1 == 0 and maior % numero2 == 0):\n return maior\n else:\n maior += 1\n\n# Funcao - calcula o MDC entre dois numeros testando se ambos sao divisiveis pelos numeros anteriores ao menor\n\ndef mdc(numero1, numero2):\n if (numero1 < numero2):\n menor = numero1\n else:\n menor = numero2\n while True:\n if (numero1 % menor == 0 and numero2 % menor == 0):\n return menor\n else:\n menor -= 1\n\n# Funcao - verifica quantos divisores tem cada numero e exibe todos os primos(numeros que so tem dois divisores) ate um determinado limite\n\ndef primos(limite):\n primos = '2'\n for numero in range(3, limite + 1):\n totaldivisores = 0\n for divisor in range(1, numero + 1):\n if (numero % divisor == 0):\n totaldivisores += 1\n if (totaldivisores == 2):\n primos += ', ' + str(numero)\n return primos\n\n\n# Funcao - imprime o menu principal do programa com seis opcoes\n\ndef menu():\n print('=-=-=-= Calculadora =-=-=-='\n '\\nSelecione uma opção do menu:'\n '\\n [1] Multiplicar'\n '\\n [2] Dividir'\n '\\n [3] M.M.C.'\n '\\n [4] M.D.C.'\n '\\n [5] Primos'\n '\\n [6] Sair do programa')\n\n\n######## Programa principal ########\n\ndef main():\n menu()\n opcao = int(input('Qual sua opção? ').strip())\n while (opcao < 1 or opcao > 6):\n print('Opção inválida!')\n opcao = int(input('Qual sua opção? ').strip())\n if (opcao == 1):\n fator1 = int(input('Entre com o primeiro fator (número inteiro maior ou igual a zero): '))\n while (fator1 < 0):\n fator1 = int(input('Valor inválido! Por favor, entre com um número inteiro maior ou igual a zero: '))\n fator2 = int(input('Entre com o segundo fator (número inteiro maior ou igual a zero): '))\n while (fator2 < 0):\n fator2 = int(input('Valor inválido! Por favor, entre com um número maior ou igual a zero: '))\n produto = multiplicar(fator1, fator2)\n print('=-=' * 10)\n print('O produto de', fator1, 'e', fator2, 'vale', produto)\n elif (opcao == 2):\n dividendo = int(input('Entre com o dividendo (número inteiro maior ou igual a zero): '))\n while (dividendo < 0):\n dividendo = int(input('Valor inválido! Por favor, entre com um número maior ou igual a zero: '))\n divisor = int(input('Entre com o divisor (número inteiro maior que zero): '))\n while (divisor <= 0):\n divisor = int(input('Valor inválido! Por favor, entre com um número maior que zero: '))\n quociente, resto = dividir(dividendo, divisor)\n print('=-=' * 10)\n print('A divisão', str(dividendo) + '/' + str(divisor), 'tem quociente igual a', quociente, 'e resto igual a', resto)\n elif (opcao == 3):\n numero1 = int(input('Entre com um número inteiro maior que zero: '))\n while (numero1 <= 0):\n numero1 = int(input('Valor inválido! Por favor, entre com um número maior que zero: '))\n numero2 = int(input('Entre com outro número inteiro maior que zero: '))\n while (numero2 <= 0):\n numero2 = int(input('Valor inválido! Por favor, entre com um número maior que zero: '))\n mmcnumero = mmc(numero1, numero2)\n print('=-=' * 10)\n print('O M.M.C. 
entre', numero1, 'e', numero2, 'vale', mmcnumero)\n elif (opcao == 4):\n numero1 = int(input('Entre com um número inteiro maior que zero: '))\n while (numero1 <= 0):\n numero1 = int(input('Valor inválido! Por favor, entre com um número maior que zero: '))\n numero2 = int(input('Entre com outro número inteiro maior que zero: '))\n while (numero2 <= 0):\n numero2 = int(input('Valor inválido! Por favor, entre com um número maior que zero: '))\n mdcnumero = mdc(numero1, numero2)\n print('=-=' * 10)\n print('O M.D.C. entre', numero1, 'e', numero2, 'vale', mdcnumero)\n elif (opcao == 5):\n limite = int(input('Entre com o limite (número inteiro maior que 1): '))\n while (limite < 2):\n limite = int(input('Valor inválido! Por favor, entre com um número maior que 1: '))\n if (limite == 2):\n print('=-=' * 10)\n print('2 é o primeiro número primo.')\n else:\n primoslimite = primos(limite)\n print('=-=' * 10)\n print('Os números primos até', limite, 'são:', primoslimite + '.')\n print('=-=' * 10)\n if (opcao != 6):\n main()\n\n\n##### Program start\n\nmain()\n\n##### Program end\n\nprint('Fim do programa! Volte sempre!')\n","repo_name":"ewerton5/Python-Projects","sub_path":"calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"13462773512","text":"import sys\r\ninput=sys.stdin.readline\r\nsys.setrecursionlimit(10**6)\r\nfrom collections import defaultdict\r\n\r\nn,m = map(int,input().split())\r\nbead = defaultdict(list) \r\ncriteria = n // 2\r\nvis = [False] * n\r\nup_cnt = [0] * n\r\ndown_cnt = [0] * n\r\n\r\nfor _ in range(m):\r\n heavy,light = map(int,input().split())\r\n bead[light].append(heavy)\r\n bead[-heavy].append(-light)\r\n \r\ndef dfs(i):\r\n global cnt\r\n vis[i-1] = True\r\n for b in bead[i]:\r\n if not vis[b-1]:\r\n cnt += 1\r\n dfs(b)\r\n \r\ndef dfs2(i):\r\n global cnt\r\n vis[-i-1] = True\r\n for b in bead[i]:\r\n if not vis[-b-1]:\r\n cnt += 1\r\n dfs2(b)\r\n \r\nfor i in range(1,n+1):\r\n cnt = 0\r\n dfs(i)\r\n up_cnt[i-1] = cnt\r\n vis = [False] * n\r\n cnt = 0 \r\n dfs2(-i)\r\n down_cnt[i-1] = cnt\r\n vis = [False] * n \r\n \r\nans = 0 \r\nfor c in up_cnt:\r\n if c > criteria:\r\n ans += 1\r\nfor c in down_cnt:\r\n if c > criteria:\r\n ans += 1\r\nprint(ans)","repo_name":"yootal/CodingTest","sub_path":"백준/Gold/2617. 
구슬 찾기/구슬 찾기.py","file_name":"구슬 찾기.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23897748950","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom concurrent.futures import ThreadPoolExecutor\nimport logging\n\ncodenames = ['trusty', 'xenial', 'bionic', 'focal']\narchs = ['amd64', 'i386']\n\npattern = re.compile('([0-9.]+)-([0-9]+)ubuntu([0-9.]+)')\n\n\ndef get_proxy():\n return requests.get(\"http://127.0.0.1:5010/get/\").json()\n\n\ndef delete_proxy(proxy):\n requests.get(\"http://127.0.0.1:5010/delete/?proxy={}\".format(proxy))\n\n\ndef get_resp(url):\n proxy = get_proxy().get(\"proxy\")\n retry_count = 5\n while retry_count > 0:\n try:\n resp = requests.get(url,\n proxies={\n \"http\": f\"http://{proxy}\",\n \"https\": f\"http://{proxy}\",\n }, timeout=3)\n\n return resp\n except Exception:\n retry_count -= 1\n delete_proxy(proxy)\n return None\n\n\ndef always_retry(url):\n while True:\n resp = get_resp(url)\n if resp is None:\n logging.error('retry')\n else:\n return resp\n\n\ndef get_downloadable_file(url):\n url = 'https://launchpad.net' + url\n r = always_retry(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n link = soup.find('div', {'id': 'downloadable-files'}).find('a')\n print(link.text, link.get('href'))\n filename = link.text\n filelink = link.get('href')\n return filename, filelink\n\n\ndef download(codename, arch):\n version = {}\n\n try:\n for pkg in ['libc6', 'libc6-dbg']:\n print(codename, arch, pkg)\n url = f'https://launchpad.net/ubuntu/{codename}/{arch}/{pkg}'\n content = always_retry(url).text\n\n soup = BeautifulSoup(content, 'html.parser')\n table = soup.find(\n 'table', {'class': 'listing', 'id': 'publishing-summary'})\n tbody = table.find('tbody')\n\n all_link = tbody.find_all('a')\n for link in all_link:\n match = pattern.match(link.text)\n if match:\n print(link.text)\n filename, link = get_downloadable_file(\n link.get('href'))\n version[filename] = link\n except Exception as e:\n print(codename, arch, e)\n\n print(codename, arch, 'end')\n with open(f'list-{codename}-{arch}', 'w') as f:\n for k, v in version.items():\n f.write(f'{k} {v}\\n')\n\n\nwith ThreadPoolExecutor(max_workers=len(codenames) * len(archs)) as executor:\n for codename in codenames:\n for arch in archs:\n executor.submit(download, codename, arch)\n","repo_name":"zeyugao/glibc-launchpad","sub_path":"download_list.py","file_name":"download_list.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"38279842017","text":"import os\n\nimport torch\nimport numpy as np\nfrom models import RNN\nimport cv2\nimport matplotlib.pyplot as plt\nimport mediapipe as mp\ntorch.backends.cudnn.benchmark = True\n\ninput_size = 42\nnum_layers = 2\nhidden_size = 256\nnum_classes = 2\n\n\ndef calc_bounding_rect(image, landmarks):\n image_width, image_height = image.shape[1], image.shape[0]\n\n landmark_array = np.empty((0, 2), int)\n\n for _, landmark in enumerate(landmarks.landmark):\n landmark_x = min(int(landmark.x * image_width), image_width - 1)\n landmark_y = min(int(landmark.y * image_height), image_height - 1)\n\n landmark_point = [np.array((landmark_x, landmark_y))]\n\n landmark_array = np.append(landmark_array, landmark_point, axis=0)\n\n x, y, w, h = cv2.boundingRect(landmark_array)\n\n return w,h\n\ndef hands_coordinate_recording(image,multi_hand_landmarks):\n 
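# Normalize each landmark by the hand's bounding box and express it relative to the wrist (landmark 0), so the features are scale- and position-invariant.\n 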
landmark_point = []\n image_w,image_y = image.shape[1],image.shape[0]\n for hand_landmarks in multi_hand_landmarks:\n w,h = calc_bounding_rect(image,hand_landmarks)\n for point_index, landmark in enumerate(hand_landmarks.landmark):\n if point_index == 0:\n base_x, base_y = (landmark.x*image_w)/w, (landmark.y*image_y)/h\n landmark_point.append((landmark.x*image_w)/w - base_x)\n landmark_point.append((landmark.y*image_y)/h - base_y)\n return landmark_point\n\ndef main():\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(\"Device being used:\", device)\n\n with open('./dataloader/pajinsen_labels.txt', 'r') as f:\n class_names = f.readlines()\n f.close()\n # init model\n model = RNN(input_size,hidden_size,num_layers,num_classes,device).to(device)\n # model = R3D_model.R3DClassifier(num_classes=2, layer_sizes=(2, 2, 2, 2))\n checkpoint = torch.load('./train_models/train4_models/epoch-770.pth.tar',\n map_location=lambda storage, loc: storage)\n # checkpoint = torch.load('./train_models/hand_models/epoch-1999.pth.tar',\n # map_location=lambda storage, loc: storage)\n\n\n \"\"\"\n state_dict = model.state_dict()\n for k1, k2 in zip(state_dict.keys(), checkpoint.keys()):\n state_dict[k1] = checkpoint[k2]\n model.load_state_dict(state_dict)\n \"\"\"\n model.load_state_dict(checkpoint['state_dict']) # model parameters\n # optimizer.load_state_dict(checkpoint['opt_dict']) # optimizer parameters\n\n model.to(device)\n model.eval()\n\n # read video\n # video = \"./data/Pajinsen/normal/WIN_20220617_14_23_17_Pro.mp4\"\n # video = \"./data/Pajinsen/tremor/WIN_20220617_13_53_23_Pro.mp4\"\n # videos = './test/TremorCodeTest/test1' #normal,normal\n # videos = './test/TremorCodeTest/test2' #tremor,tremor\n # videos = './test/TremorCodeTest/test3' #tremor,tremor\n videos = './test/TremorCodeTest/test4' #tremor,normal\n # video = './test/TremorCodeTest/Tremor/Tremor1.mov'\n mp_hands = mp.solutions.hands\n hands = mp_hands.Hands(\n max_num_hands=1,\n min_detection_confidence=0.7,\n min_tracking_confidence=0.5,)\n result = []\n for video in os.listdir(videos):\n cap = cv2.VideoCapture(os.path.join(videos,video))\n retaining = True\n clip = [] # buffer of frames forming the temporal window\n video_classify = {} # counts how often each class is predicted for this video\n for i in range(len(class_names)):\n video_classify[class_names[i].split(' ')[-1].strip()] = 0\n while retaining:\n retaining, frame = cap.read()\n if not retaining and frame is None:\n continue\n image = cv2.resize(frame, (960, 540))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n image.flags.writeable = False\n results = hands.process(image) # detect whether the image contains a hand\n image.flags.writeable = True\n cap_height,cap_width = image.shape[0],image.shape[1]\n # results.multi_hand_landmarks: whether a hand was detected; it holds the keypoint coordinates and ids\n if results.multi_hand_landmarks is not None:\n # operate on a single hand\n print('hands detected, please continue')\n landmark_point = hands_coordinate_recording(image,results.multi_hand_landmarks)\n clip.append(landmark_point)\n if len(clip) == 32:\n inputs = np.array(clip).astype(np.float32)\n inputs = np.expand_dims(inputs, axis=0)\n inputs = torch.from_numpy(inputs)\n inputs = torch.autograd.Variable(inputs, requires_grad=False).to(device)\n with torch.no_grad():\n outputs = model.forward(inputs)\n\n # probs = torch.nn.Softmax(dim=1)(outputs)\n probs = torch.nn.Sigmoid()(outputs)\n label = torch.max(probs, 1)[1].detach().cpu().numpy()[0]\n print(probs[0][label])\n video_classify[class_names[label].split(' ')[-1].strip()] += 1\n cv2.putText(frame, class_names[label].split(' ')[-1].strip(), (20, 20),\n cv2.FONT_HERSHEY_SIMPLEX, 
0.6,\n (0, 0, 255), 1)\n cv2.putText(frame, \"prob: %.4f\" % probs[0][label], (20, 40),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6,\n (0, 0, 255), 1)\n clip.pop(0)\n\n\n cv2.imshow('result', frame)\n cv2.waitKey(30)\n result.append(max(video_classify, key=lambda k: video_classify[k]))\n print(result)\n if len(set(result)) == 1 and list(set(result))[0] == 'tremor':\n print('2期:身体双边手部颤抖')\n elif len(set(result)) == 1 and list(set(result))[0] == 'normal':\n print('0期:无症状')\n elif len(set(result)) == 2:\n print('1期:身体单边手部颤抖')\n cap.release()\n cv2.destroyAllWindows()\n\n\n\ndef model_predict(input):\n pass\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n","repo_name":"1390806607/pytorch_mediapipe_lstm","sub_path":"predict_hand.py","file_name":"predict_hand.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"13634134646","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport requests, json\nfrom mainsite import models \nimport random\n\n# Create your views here.\n\ndef index(request):\n mynames = [\"關艾\", \"C110134221\", \"あい\"]\n myname = random.choice(mynames)\n return render(request,\"index.html\",locals())\n\ndef nkustnews(request):\n data = models.NKUSTnews.objects.all()\n return render(request, \"nkustnews.html\", locals())\n\n\ndef all_data(request):\n url = \"https://data.tycg.gov.tw/opendata/datalist/datasetMeta/download?id=5ca2bfc7-9ace-4719-88ae-4034b9a5a55c&rid=a1b4714b-3b75-4ff8-a8f2-cc377e4eaa0f\"\n r = requests.get(url)\n data = json.loads(r.text)\n bicycle_data = data[\"retVal\"]\n for item in bicycle_data.values():\n new_record = models.HBicycleData(\n sna = item['sna'],\n sbi = int(item['sbi']),\n tot = int(item['tot']))\n new_record.save()\n # filter the records we want from the table\n data = models.HBicycleData.objects.filter()\n return render(request, \"filter.html\", locals())\n\ndef filtered_data(request):\n # delete all the old records first\n models.HBicycleData.objects.all().delete()\n # first load all the data into the database, mirroring the code in all_data()\n url = \"https://data.tycg.gov.tw/opendata/datalist/datasetMeta/download?id=5ca2bfc7-9ace-4719-88ae-4034b9a5a55c&rid=a1b4714b-3b75-4ff8-a8f2-cc377e4eaa0f\"\n r = requests.get(url)\n data = json.loads(r.text)\n bicycle_data = data[\"retVal\"]\n for item in bicycle_data.values():\n new_record = models.HBicycleData(\n sna = item['sna'],\n sbi = int(item['sbi']),\n tot = int(item['tot']))\n new_record.save()\n # filter the records we want from the table\n data = models.HBicycleData.objects.filter(sbi__gte=10)\n return render(request, \"filter.html\", locals())\n\ndef phonelist(request, id=-1):\n if id==-1:\n data = models.PhoneModel.objects.all()\n else:\n maker = models.PhoneMaker.objects.get(id=id) # use get to fetch a single record\n data = models.PhoneModel.objects.filter(maker=maker) # use filter to fetch multiple records\n return render(request, \"phonelist.html\", locals())\n\ndef chart(request):\n data = models.PhoneModel.objects.all()\n return render(request, \"chart.html\", locals())\n\ndef stock300list(request):\n data = models.StockInfo.objects.filter(price__gte=300).order_by('-price')\n numbers = len(data)\n return render(request, \"stocklist.html\", locals())\n\n\n\n\n","repo_name":"TYH-00/111-2_Python_Personal_Website","sub_path":"myweb/mainsite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"12069824202","text":"n_max = -float('inf')\nfor a1 in range(1000):\n for a2 in range(a1, 1000):\n cond 
= True\n for x in range(1000):\n if (((not(5 <= x <= 60)) or (15 <= x <= 30)) and (a1 <= x <= a2)):\n cond = False\n break\n if cond:\n n_max = max(n_max, abs(a1 - a2))\nprint(n_max + 1)\n","repo_name":"KurmaevAmir/Preparation_for_the_EGE","sub_path":"Task 15/solution_4599.py","file_name":"solution_4599.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"41964181561","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def deleteDuplicates(self, head):\n d1,d2 = ListNode(0),ListNode(0)\n d1.next = head\n cur2 = d2\n \n while d1.next:\n p1 = d1.next\n p2 = d1.next.next\n count = 0\n while p2 and p2.val == p1.val:\n count+=1\n p2 = p2.next\n if count > 0:\n d1.next = p2\n else:\n tmp = d1.next\n d1.next = d1.next.next\n cur2.next = tmp\n cur2 = cur2.next\n cur2.next = None\n return d2.next\n \n","repo_name":"RahatIbnRafiq/leetcodeProblems","sub_path":"Linked List Problems/82. Remove Duplicates from Sorted List II.py","file_name":"82. Remove Duplicates from Sorted List II.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"7972367841","text":"#!/usr/bin/python\nimport os\n\nclass Handler(object):\n\n def __init__(self):\n self.nextHandler = None\n\n def handle(self, request):\n if self.nextHandler == None:\n print(\"Número no consumido.\\n\")\n return\n self.nextHandler.handle(request)\n\n#*-------------------------------- PrimeHandler\n\nclass PrimeHandler(Handler):\n\n def handle(self, number):\n print(f\"PrimeHandler: verifica si el número {number} es primo\")\n if self.is_prime(number):\n print (f'Número {number} consumido por PrimeHandler\\n')\n else:\n print(\"PrimeHandler: pasa al siguiente actuador\")\n super(PrimeHandler, self).handle(number)\n\n def is_prime(self, num):\n if num < 2:\n return False\n for i in range(2, int(num ** 0.5) + 1):\n if num % i == 0:\n return False\n return True\n\n\n#*-------------------------------- ParHandler\n\nclass ParHandler(Handler):\n\n def handle(self, number):\n print(f\"ParHandler: verifica si el número {number} es par\")\n if self.is_par(number):\n print(f'Número {number} consumido por ParHandler\\n')\n else:\n #print(\"ParHandler: pasa al siguiente actuador\")\n super(ParHandler, self).handle(number)\n\n def is_par(self, num):\n if num % 2 == 0:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n\n os.system(\"clear\")\n#*---------------------------------------------------------------\n#* Initialize the handlers\n#*---------------------------------------------------------------\n prime_handler = PrimeHandler()\n par_handler = ParHandler()\n\n#*---- Now set up the call chain\n\n prime_handler.nextHandler = par_handler\n\n#*---- Send numbers down the chain of responsibility for processing\n for i in range(1, 100):\n prime_handler.handle(i)\n\n\n","repo_name":"cristianchivisky/UADER_IS2_CHIVISKY","sub_path":"src/TP3c/ejercicio1.py","file_name":"ejercicio1.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"29985259296","text":"import torch\n\na = torch.ones(1, 2, 3)\n\nb = torch.rand(4, 3)\n\nprint(b)\n\nc = torch.unsqueeze(b, 1)\n\nd = c.repeat(1, a.shape[1], 1)\n\nprint(a.shape, b.shape, c.shape, 
d.shape)\n\ne = torch.matmul(a.permute(1,0,2), d.permute(1,2,0))\n\n# expect 2, 6\n\ne = torch.squeeze(e)\n\nprint(e.shape)\n\nprint(e)\n\n","repo_name":"prince687028/cdn-test","sub_path":"models/test_dim.py","file_name":"test_dim.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13047130792","text":"import hashlib\nfrom datetime import datetime \nimport jwt\n\nfrom mongoDB import token as token_entity\n\nSECRET = \"7DbIZSLoIlGgSDEfBGs1\"\nALGORITHM = \"HS256\"\n\ndef get_token_value(user_id):\n value = user_id + str(datetime.now().timestamp())\n # encoding then sending to SHA512()\n result = hashlib.sha512(value.encode())\n result_hexadecimal = result.hexdigest()\n return result_hexadecimal\n\ndef encode_token_value(token_value):\n encoded_jwt = jwt.encode({\"value\": token_value}, SECRET, algorithm=ALGORITHM)\n return encoded_jwt\n\ndef decode_token(token):\n try:\n decoded_token = jwt.decode(token, SECRET, algorithms=[ALGORITHM])\n return decoded_token\n except Exception as e:\n print(\"An exception occurred ::\", e)\n \ndef store_token_value(token_value):\n try:\n token_entity.insert_one(\n {\n \"value\": token_value\n }\n )\n return True\n except Exception as e:\n print(\"An exception occurred ::\", e)\n return False\n\n\ndef token_value_is_valid(token_value):\n token_found = token_entity.find_one({'value': token_value})\n if token_found is None:\n return False\n else:\n return True \n\n\ndef delete_token_value(token_value):\n result = token_entity.delete_one({'value': token_value})\n return result\n","repo_name":"xmariia55x/GoSykel-Backend-Public","sub_path":"database_entities/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20881519070","text":"\"\"\"Move rental_num from user model to rent model\n\nRevision ID: b2b24cc4e012\nRevises: ac4be06a2e46\nCreate Date: 2023-08-30 19:37:36.424432\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'b2b24cc4e012'\ndown_revision = 'ac4be06a2e46'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('rent', schema=None) as batch_op:\n batch_op.add_column(sa.Column('rental_num', sa.Integer(), nullable=True))\n\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.drop_column('rental_num')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.add_column(sa.Column('rental_num', mysql.INTEGER(), autoincrement=False, nullable=True))\n\n with op.batch_alter_table('rent', schema=None) as batch_op:\n batch_op.drop_column('rental_num')\n\n # ### end Alembic commands ###\n","repo_name":"mathtkang/library-book-rental-service","sub_path":"migrations/versions/b2b24cc4e012_move_rental_num_from_user_model_to_rent_.py","file_name":"b2b24cc4e012_move_rental_num_from_user_model_to_rent_.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3821326624","text":"import os\nimport jsonpickle\n\nfrom os.path import join as _join\nfrom os.path import exists as _exists\n\nfrom osgeo import gdal\n\nfrom wepppy.all_your_base.geo.webclients import wmesque_retrieve\n\nfrom ...ron import Ron\nfrom ...base import NoDbBase, TriggerEvents\nfrom ...watershed import Watershed\n\nfrom wepppy.landcover.emapr import (\n OSUeMapR,\n OSUeMapR_Measures,\n OSUeMapR_Dataset\n)\n\n\ngdal.UseExceptions()\n\n_thisdir = os.path.dirname(__file__)\n_data_dir = _join(_thisdir, 'data')\n\n\n\nclass OSUeMapRNoDbLockedException(Exception):\n pass\n\n\nclass OSUeMapR_TS(NoDbBase):\n __name__ = 'OSUeMapR_TS'\n\n def __init__(self, wd, cfg_fn):\n super(OSUeMapR_TS, self).__init__(wd, cfg_fn)\n\n self.lock()\n\n # noinspection PyBroadException\n try:\n os.mkdir(self.emapr_dir)\n self.data = None\n self._emapr_start_year = None\n self._emapr_end_year = None\n self._emapr_mgr = None\n \n self.dump_and_unlock()\n\n except Exception:\n self.unlock('-f')\n raise\n\n #\n # Required for NoDbBase Subclass\n #\n\n # noinspection PyPep8Naming\n @staticmethod\n def getInstance(wd):\n with open(_join(wd, 'emapr_ts.nodb')) as fp:\n db = jsonpickle.decode(fp.read())\n assert isinstance(db, OSUeMapR_TS), db\n\n if _exists(_join(wd, 'READONLY')):\n db.wd = os.path.abspath(wd)\n return db\n\n if os.path.abspath(wd) != os.path.abspath(db.wd):\n db.wd = wd\n db.lock()\n db.dump_and_unlock()\n\n return db\n\n @property\n def _nodb(self):\n return _join(self.wd, 'emapr_ts.nodb')\n\n @property\n def _lock(self):\n return _join(self.wd, 'emapr_ts.nodb.lock')\n\n @property\n def emapr_end_year(self):\n return self._emapr_end_year\n\n @emapr_end_year.setter\n def emapr_end_year(self, value: int):\n self.lock()\n\n # noinspection PyBroadException\n try:\n self._emapr_end_year = value\n self.dump_and_unlock()\n\n except Exception:\n self.unlock('-f')\n raise\n \n @property\n def emapr_start_year(self):\n return self._emapr_start_year\n\n @emapr_start_year.setter\n def emapr_start_year(self, value: int):\n self.lock()\n\n # noinspection PyBroadException\n try:\n self._emapr_start_year = value\n self.dump_and_unlock()\n\n except Exception:\n self.unlock('-f')\n raise\n \n @property\n def emapr_dir(self):\n return _join(self.wd, 'emapr')\n\n def acquire_rasters(self, start_year=None, end_year=None):\n\n self.lock()\n\n # noinspection PyBroadException\n try:\n if start_year is not None:\n self._emapr_start_year = start_year\n else:\n start_year = self.emapr_start_year\n\n if end_year is not None:\n self._emapr_end_year = end_year\n else:\n end_year = self.emapr_end_year\n\n _map = Ron.getInstance(self.wd).map\n emapr_mgr = OSUeMapR(wd=self.emapr_dir, bbox=_map.extent)\n emapr_mgr.retrieve(list(range(start_year, end_year+1)))\n\n self._emapr_mgr = emapr_mgr\n self.dump_and_unlock()\n\n except Exception:\n self.unlock('-f')\n raise\n \n 
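# Event hook: automatic raster acquisition on watershed abstraction is currently disabled (see the commented-out trigger check below).\n 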
def on(self, evt):\n pass\n\n #if evt == TriggerEvents.WATERSHED_ABSTRACTION_COMPLETE:\n # self.acquire_rasters()\n\n\n def analyze(self):\n start_year = self.emapr_start_year\n end_year = self.emapr_end_year\n\n wd = self.wd\n\n subwta_fn = Watershed.getInstance(wd).subwta\n\n assert _exists(subwta_fn)\n\n emapr_mgr = self._emapr_mgr\n\n self.lock()\n try:\n\n data_ds = {}\n\n for year in range(start_year, end_year+1):\n for measure, statistic in OSUeMapR_Measures:\n key = measure, statistic, year\n\n emapr_ds = emapr_mgr.get_dataset(year=year, measure=measure, statistic=statistic)\n data_ds[key] = emapr_ds.spatial_aggregation(subwta_fn=subwta_fn)\n\n key0 = list(data_ds.keys())[0]\n\n data = {topaz_id: {} for topaz_id in data_ds[key0]}\n for topaz_id in data_ds[key0]:\n for year in range(start_year, end_year+1):\n data[topaz_id][year] = {}\n for measure, statistic in OSUeMapR_Measures:\n key = measure, statistic, year\n data[topaz_id][year][(measure, statistic)] = data_ds[key][topaz_id]\n\n self.data = data\n self.dump_and_unlock()\n\n except Exception:\n self.unlock('-f')\n raise\n\n","repo_name":"rogerlew/wepppy","sub_path":"wepppy/nodb/mods/emapr/emapr_ts.py","file_name":"emapr_ts.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"2142363123","text":"import scrapy\r\nimport os\r\nimport re\r\nimport json\r\nimport pandas as pd\r\nimport re\r\nimport numpy as np\r\nimport sqlalchemy\r\nimport time\r\nfrom datetime import date\r\nfrom datetime import datetime\r\n\r\n\r\nBASE_DIR = os.getcwd()\r\nDATA_DIR = os.path.join(BASE_DIR, 'url_dir')\r\nDB_PATH = os.path.join(BASE_DIR, 'hltvdb.db')\r\nCSV_PATH = os.path.join(BASE_DIR, 'matches.csv')\r\nPERFORMANCE_PATH = os.path.join(BASE_DIR, 'performance.csv')\r\nDUEL_PATH = os.path.join(BASE_DIR, 'duel_data.csv')\r\nERROR_PATH = os.path.join(BASE_DIR, 'error.csv')\r\n\r\ntry:\r\n \r\n matches_df = pd.read_csv(CSV_PATH, encoding='utf-8')\r\n performance_df = pd.read_csv(PERFORMANCE_PATH, encoding='utf-8')\r\n\r\n match_performance_ids = matches_df['77892'].astype(int)\r\n performance_ids = performance_df['performance_id'].astype(int)\r\n\r\n performance_urls = match_performance_ids[~match_performance_ids.isin(performance_ids)]\r\n urls = [f'https://www.hltv.org/stats/matches/performance/{url}/match' for url in performance_urls]\r\n mapstatsid_urls = [f'https://www.hltv.org/stats/matches/performance/mapstatsid/{url}/match' for url in performance_urls]\r\n\r\n error_urls = pd.read_csv(ERROR_PATH, encoding='utf-8')\r\n error_urls_list = list(error_urls['url'])\r\n\r\n performance_urls = urls + mapstatsid_urls\r\n\r\n performance_urls = [url for url in performance_urls if url not in error_urls_list]\r\n\r\n\r\nexcept:\r\n pass\r\n\r\n\r\nclass PerformanceSpider(scrapy.Spider):\r\n name = \"performance\"\r\n\r\n if 'performance_urls' in globals():\r\n start_urls = performance_urls\r\n else:\r\n start_urls = ['https://www.hltv.org/stats/matches/performance/77907/complexity-vs-fnatic']\r\n\r\n def parse(self, response):\r\n\r\n try:\r\n\r\n def get_player_info(resp):\r\n \r\n full_data = []\r\n\r\n for player_data in resp:\r\n\r\n data = player_data.split('=')[3]\r\n data = re.findall(r'{(.*?)}', data)\r\n data = [\"{\"+i+\"}\" for i in data]\r\n data = data[1:-1]\r\n data = [json.loads(stat) for stat in data]\r\n data = [stat['displayValue'] for stat in data]\r\n full_data.append(data)\r\n \r\n return full_data\r\n\r\n def 
get_performance_data(response):\r\n\r\n players_name = response.css('.player-nick::text').getall()\r\n players_name = [player.lower() for player in players_name]\r\n player_stats = get_player_info(response.css('.col .highlighted-player .facts .graph').getall())\r\n performance_df = pd.DataFrame()\r\n performance_df['nickname'] = players_name\r\n stats_cols = ['KPR', 'DPR', 'KAST', 'Impact', 'ADR', 'Rating']\r\n stats_df = pd.DataFrame(player_stats ,columns = stats_cols)\r\n performance_df[stats_cols] = stats_df\r\n \r\n return performance_df\r\n \r\n def get_performance_id(response):\r\n \r\n selector_string = response.xpath(\"//div[@class='small-padding stats-detailed-stats']\")[0].extract()\r\n\r\n #selector_string = response.xpath(\"//a[@class='col-box a-reset']\")[0].extract()\r\n performance_id = re.findall(r'\"(.*?)\"', selector_string)[1].split('/')[3]\r\n\r\n return performance_id\r\n \r\n def get_duel_data(response):\r\n \r\n t1_data = np.array([int(x) for x in response.css('.team1-player-score::text').getall()[0:25]])\r\n t2_data = np.array([int(x) for x in response.css('.team2-player-score::text').getall()[0:25]])\r\n t1_names = response.css('.team1 a::text').getall()[0:5]\r\n t2_names = response.css('.team2 a::text').getall()[0:5]\r\n duels = np.subtract(t2_data, t1_data)\r\n #match_id = get_match_id(response)\r\n \r\n x = 0\r\n duel_data = []\r\n\r\n for player in t2_names: \r\n for i in range(5):\r\n duel_data.append([player, t1_names[i], duels[i+x], 'team2'])\r\n duel_data.append([t1_names[i], player, -duels[i+x], 'team1'])\r\n x += 5\r\n \r\n return duel_data\r\n \r\n def get_team_data(response):\r\n \r\n #teams = response.css('.players-team-header span::text').getall()\r\n team1 = response.xpath(\"//th[contains(@class, 'team1-column')]//img[contains(@class, 'team-logo')]/@title\").extract()[-1]\r\n team2 = response.xpath(\"//th[contains(@class, 'team2-column')]//img[contains(@class, 'team-logo')]/@title\").extract()[-1]\r\n\r\n return [team1, team2]\r\n\r\n def match_result(response):\r\n \r\n match_result = response.css('.stats-match-map-result-score::text').get()\r\n match_type = response.css('.stats-match-map-result-mapname::text').get()\r\n team_names = response.css('.players-team-header span::text').getall()\r\n\r\n return match_result\r\n\r\n if response.status == 429:\r\n time.sleep(10)\r\n \r\n if response.status != 200:\r\n \r\n error_list = [response.url, response.status, datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")]\r\n\r\n error_data = pd.DataFrame(error_list).T\r\n error_data.columns = ['url', 'status', 'datetime']\r\n\r\n if 'error.csv' in os.listdir(BASE_DIR):\r\n\r\n error_data.to_csv(ERROR_PATH, mode='a', header=False, index=False)\r\n\r\n else:\r\n\r\n error_data.to_csv(ERROR_PATH, mode='a', header=True, index=False) \r\n \r\n if 'performance' in response.url:\r\n \r\n if 'mapstatsid' in response.url:\r\n mapstats = True\r\n else:\r\n mapstats = False\r\n\r\n performance_id = response.url.split('/')[-2]\r\n\r\n performance_data = get_performance_data(response)\r\n performance_data['performance_id'] = performance_id\r\n\r\n teams = get_team_data(response)\r\n duel_data = pd.DataFrame(get_duel_data(response))\r\n duel_cols = ['player1', 'player2', 'duel_result', 'team']\r\n duel_data['performance_id'] = performance_id\r\n duel_cols = ['player1', 'player2', 'duel_result', 'team', 'performance_id']\r\n duel_data.columns = duel_cols\r\n\r\n duel_data['team_name'] = np.where(duel_data['team'] == 'team1', teams[0], teams[1])\r\n duel_data['mapstatsid'] = 
mapstats\r\n\r\n if 'duel_data.csv' in os.listdir(BASE_DIR):\r\n\r\n duel_data.to_csv(DUEL_PATH, mode='a', header=False, index=False)\r\n\r\n else:\r\n\r\n duel_data.to_csv(DUEL_PATH, mode='a', header=True, index=False) \r\n\r\n if not performance_data.empty:\r\n\r\n performance_data['mapstatsid'] = mapstats\r\n if 'performance.csv' in os.listdir(BASE_DIR):\r\n \r\n performance_data.to_csv(PERFORMANCE_PATH, mode='a', header=False, index=False)\r\n\r\n else:\r\n\r\n performance_data.to_csv(PERFORMANCE_PATH, mode='a', header=True, index=False)\r\n\r\n else:\r\n print('empty df')\r\n \r\n elif 'stats' and 'matches' not in response.url:\r\n \r\n original_url = response.url.replace(response.url.split('/')[-1], '')\r\n next_url = response.css('.result-con .a-reset::attr(href)').get()\r\n stats_url = response.urljoin(next_url)\r\n \r\n yield scrapy.Request(stats_url, callback=self.parse)\r\n\r\n elif 'matches' in response.url:\r\n\r\n last_url = response.css('.small-padding a::attr(href)').get()\r\n perf_id = last_url.split('/')[-2]\r\n new_url = f'https://www.hltv.org/stats/matches/performance/{perf_id}/match'\r\n\r\n yield scrapy.Request(new_url, callback=self.parse) \r\n \r\n except:\r\n pass","repo_name":"andrewrol/hltvcrawler","sub_path":"performance_spider.py","file_name":"performance_spider.py","file_ext":"py","file_size_in_byte":8414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33550246298","text":"import math\nfrom time import sleep\nfrom statistics import median, mean\n\nimport requests\nfrom click import UsageError, command, option, argument, style\nfrom tabulate import tabulate\n\n\nSUPPORTED_HTTP_METHODS = ['GET', 'POST']\n\n\ndef percentile(data, percentile):\n size = len(data)\n return sorted(data)[int(math.ceil((size * percentile) / 100)) - 1]\n\ndef p90(data):\n return percentile(data, 90)\n\ndef parse_args(args):\n if not args or len(args) > 2:\n raise UsageError('Either specify or just ')\n try:\n method, url = args\n if method not in SUPPORTED_HTTP_METHODS:\n raise UsageError(f'Unsupported HTTP method: {method}')\n except ValueError:\n method = 'GET'\n url = args[0]\n return method.lower(), url\n\ndef println(status: int, elapsed: float, threshold: int):\n output = f'{elapsed:9.2f}'\n if threshold > 0:\n color = 'bright_green' if int(elapsed) <= threshold else 'bright_red'\n output = style(output, fg=color)\n status = style(str(status), fg='bright_black')\n millis = style('ms', fg='bright_black')\n print(f'({status}) {output} {millis}')\n\ndef display_statistics(data, title='Summary'):\n table = [['# Reqs', 'Median (ms)', 'Mean (ms)', 'Min (ms)', 'Max (ms)', 'P90 (ms)'],\n [len(data), median(data), mean(data), min(data), max(data), p90(data)]]\n print()\n print(f'{title}')\n print(tabulate(table, headers='firstrow', floatfmt='.2f', tablefmt='psql'))\n\n\n@command()\n@argument('args', nargs=-1, metavar=' ')\n@option('-c', '--count', default=0, type=int, help='Number of requests to run, defaults to infinite')\n@option('-t', '--threshold', default=0, type=int, help='Threshold in ms for marking a request as slow')\n@option('-p', '--persistent', is_flag=True, help='Use a persistent http connection for all requests')\n@option('-d', '--delay', default=0, type=int, help='Milliseconds to wait between requests')\n@option('-s', '--summary', is_flag=True, help='Output summary when done (or stopped)')\ndef cli(args, count, threshold, persistent, delay, summary):\n method, url = parse_args(args)\n data = []\n\n http = 
requests.Session() if persistent else requests\n\n try:\n func = getattr(http, method)\n index = count\n while True:\n r = func(url)\n elapsed = r.elapsed.total_seconds() * 1000\n println(r.status_code, elapsed, threshold)\n\n if summary:\n data.append(elapsed)\n\n if delay != 0 and index != 1:\n sleep(delay / 1000)\n\n if count == 0:\n continue\n index -= 1\n if index == 0:\n break\n except KeyboardInterrupt:\n pass\n finally:\n if hasattr(http, 'close'):\n http.close()\n\n if summary and data:\n display_statistics(data, title=url)\n\n\nif __name__ == '__main__':\n cli()\n","repo_name":"skivis/reqtime","sub_path":"reqtime.py","file_name":"reqtime.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3136702862","text":"#!/usr/bin/python3\n\nimport scapy.all as newScapy\n\n# request = newScapy.ARP()\n# request.pdst = '192.168.0.1/24'\n# broadcast = newScapy.Ether() \n \n# broadcast.dst = 'ff:ff:ff:ff:ff:ff'\n \n# request_broadcast = broadcast / request \n# clients = newScapy.srp(request_broadcast,timeout=5)[0] \n# for element in clients: \n# print(element[1].psrc + \" \" + element[1].hwsrc)\n\ntarget_ip = '192.168.1.1/24'\narp = newScapy.ARP(pdst= target_ip)\nether = newScapy.Ether(dst='ff:ff:ff:ff:ff:ff')\npacket = ether/arp\nresult = newScapy.srp(packet,timeout=3)[0]\nclients = []\n\nfor (sent,recieve) in result:\n clients.append({'ip':recieve.psrc,'mac':recieve.hwsrc})\n\nprint(\"Available devices in the network:\")\nprint(\"IP\" + \" \"*18+\"MAC\")\nfor client in clients:\n print(\"{:16} {}\".format(client['ip'], client['mac']))","repo_name":"HackerSpot2001/Voilent-Python-with-Python3","sub_path":"scan_network_with_scapy.py","file_name":"scan_network_with_scapy.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1648113218","text":"# /usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : wuyifei\n# Data : 10/23/18 11:39 AM\n# FileName: log.py\n\nimport logging.handlers\nimport os\nfrom django.conf import settings\n\nclass Logger(object):\n def __init__(self):\n self.log_file_path = settings.LOG_FILE_PATH\n self.backup_count = settings.LOG_BACKUP_COUNT\n self.max_bytes = settings.LOG_MAX_BYTES\n self.log_file = os.path.join(self.log_file_path,'tcsdb.log')\n\n if not os.path.exists(self.log_file_path):\n os.makedirs(self.log_file_path)\n # f = open(self.log_file,'w')\n # f.close()\n file_handler = logging.handlers.RotatingFileHandler(self.log_file, 'a',\n maxBytes=self.max_bytes,\n backupCount=self.backup_count,\n encoding='utf-8')\n # file_handler = logging.FileHandler(self.log_file, 'a', encoding='utf-8')\n file_handler.setFormatter(logging.Formatter(fmt=\"%(asctime)s - %(name)s - %(levelname)s: %(message)s\"))\n\n self.logger = logging.Logger('tcsdb', level=logging.INFO)\n self.logger.addHandler(file_handler)\n\n def info(self,msg):\n self.logger.info(msg)\n def error(self,msg):\n self.logger.error(msg)\n\n\nlogger = Logger()","repo_name":"wqqsukida/TCSDB","sub_path":"utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33341714808","text":"\"\"\"\nFunctions to deal with graphic operations.\nThe interface is inspired by the PICO API.\n\n\"\"\"\n\nfrom .globals import *\nfrom . import subr\nfrom . 
import synth\n\ndef sfx(n):\n \"\"\"Play a sound.\n\n Parameters\n ----------\n n : The sound to play\n\n Notes\n -----\n\n \"\"\"\n if n == -1:\n synth.stop_sound()\n else:\n synth.play_sound(n)\n\n\ndef music(n):\n \"\"\"Play a music.\n\n Parameters\n ----------\n n : The music to play\n\n Notes\n -----\n\n \"\"\"\n PYCO.music_playing = n\n if n == -1:\n synth.stop_sound()\n else:\n synth.play_sound(tuple([p for p in PYCO.music[n] if p >= 0]), True)\n\n\ndef map(pos, cels):\n \"\"\"Draws a portion of the map to the graphics buffer.\n\n Parameters\n ----------\n pos : The screen coordinates at which to place the upper left corner.\n cels : The position and dimension of the region of the map cells to draw.\n\n Notes\n -----\n The map is a grid of sprites from the sprite sheet, where each cell in the\n grid is assigned a sprite number. You can edit the map using the MINIISH map\n editor. You call the map() function to draw a region of the map (a subsection\n of the grid cells) onto the screen.\n\n You can use the map to draw large pictures by reusing sprite tiles in multiple\n cells. This is more memory efficient than drawing large images in pixels with\n the sprite editor, and easier to use than storing tables of sprite numbers in\n code.\n\n Any map cell set to sprite number 0 is not drawn, effectively making that cell\n transparent. You can use this along with using the transparent color for pixels\n in sprites to make regions of transparency in the image. A common technique is\n to layer multiple maps on top of one another, then animate the positions of\n these layers to produce effects such as parallax scrolling.\n\n Another use for maps is to design interactive levels or areas of a game world.\n When doing this, it is often necessary to determine which sprite is at a given\n location on the map, such as to determine whether a location next to the player\n is an obstruction. See mget().\n \"\"\"\n pos = (pos[0] - PYCO.camera[0], pos[1] - PYCO.camera[1])\n (celx, cely, celw, celh) = cels\n seq = []\n for i in range(celh):\n cx, cy = celx, cely + i\n sx, sy = pos[0], pos[1] + i * 8\n for j in range(celw):\n if 0 <= cx < 128 and 0 <= cy < 64:\n n = PYCO.map[cy][cx]\n if n > 0:\n r = (int(n % 16) * 8, int(n / 16) * 8, 8, 8)\n seq.append((PYCO.sprite_sheet, (sx, sy), r)) \n cx += 1\n sx += 8\n PYCO.screen.blits(seq)\n\n\ndef mget(cel):\n \"\"\"Gets the sprite number assigned to a cell on the map.\n \n Parameters\n ----------\n cel : The column, row coordinates of the cell.\n\n Notes\n -----\n The mget() function returns the sprite number assigned to a cell on the map.\n If a cell was modified by a call to mset(), mget() returns the updated value.\n\n When using the map to store level designs, a common technique is to keep track\n of the player's effective cell position on the map, then use mget() to look for\n adjacent level features such as obstructions. 
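For instance, a collision check might read fget(mget((cx, cy)), 0), assuming (purely for illustration) that flag 0 marks solid tiles. 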
This can be combined with sprite\n flags (read with fget()) to indicate which sprite tiles represent obstructions,\n so a general purpose test for obstructions only needs to read the flags.\n \"\"\"\n if 0 <= cel[0] < 128 and 0 <= cel[1] < 64:\n return PYCO.map[cel[1]][cel[0]]\n else:\n return 0\n\n\ndef mset(cel, snum):\n \"\"\"Sets a cell on the map to a new sprite number.\n\n Parameters\n ----------\n cel : The column, row coordinates of the cell.\n snum: The new sprite number to store.\n\n Notes\n -----\n The mset() function modifies the map data.\n\n A simple use of mset() is to place or remove objects on the map, such as\n a treasure that the player can pick up. This allows for the level designer\n to set the initial locations of objects.\n\n In a more sophisticated version of this technique, the program can scan the\n map for objects with mget(), store their locations in a table, then erase\n them from the map data and draw them separately. This may make the objects\n easier to animate or participate in physics simulation.\n\n Advanced techniques that use mset() include generating levels procedurally,\n or storing very large maps as compressed data and decompressing it into the\n map region as needed. In both cases, once the maps are written to memory,\n the game engine can use map() to draw the level to the screen.\n \"\"\"\n if 0 <= cel[0] < 128 and 0 <= cel[1] < 64:\n PYCO.map[cel[1]][cel[0]] = snum\n\n\ndef clip(rect = None):\n \"\"\"Sets the clipping region in the draw state.\n\n Parameters\n ----------\n rect : The coordinates of the clipping rectangle.\n\n Notes\n -----\n When the draw state has a clipping rectangle set, all drawing operations\n will not affect any pixels in the graphics buffer outside of this rectangle.\n This is useful for reserving parts of the screen.\n\n When called without arguments, the function resets the clipping region to be\n the entire screen.\n \"\"\"\n PYCO.screen.set_clip(rect)\n\n\ndef blit(img, pos = (0, 0), rect = None):\n \"\"\"Blits an image on the screen.\n \n Parameters\n ----------\n img : The image to blit.\n pos : The position on the screen.\n rect : The portion of the image to blit.\n \"\"\"\n rect = img.get_rect() if rect is None else rect\n PYCO.screen.blit(img, pos, rect)\n\n\ndef color(color = LIGHT_GRAY):\n \"\"\"Sets the draw color in the draw state.\n\n Parameters\n ----------\n color : The color number. Default is 6 (light gray).\n\n Notes\n -----\n Many graphics functions accept an optional color argument. When \n this argument is omitted, the current color of the draw state is \n used by default. The color() function sets this color.\n\n The color number corresponds to the MINIISH palette, a value between 0 and 15.\n\n When you provide an explicit color argument to a graphics function,\n MINIISH changes the draw color to that color.\n\n The previous color is returned when calling color(), allowing it to\n be saved and restored if needed.\n \"\"\"\n prev_color, PYCO.color = PYCO.color, color\n return prev_color\n\n\ndef camera(offset = (0, 0)):\n \"\"\"Sets the camera offset in the draw state.\n\n Parameters\n ----------\n offset : The offset, in pixels, to subtract from future draw coordinates. (default 0, 0)\n\n Notes\n -----\n Setting a camera offset causes all subsequent draw operations to have\n the offset subtracted from their x and y coordinates. Camera sets the\n origin point for draw functions, and by default it is (0,0).\n\n The way camera() works is somewhat unintuitive. 
For example, you might expect\n to use camera(64,64) to put the camera origin in the middle of the screen,\n but you'd actually use camera(-64,-64) to move the screen so that its middle is\n in front of the camera. This is probably because camera() sets a screen\n variable that is invisibly used by all draw functions, and in this case,\n the origin changed from (0,0) to (-64,-64).\n\n Camera offsets can be used to implement screen effects such as parallax\n scrolling (with a different offset per layer) or screen shake (a small\n random offset per frame).\n \"\"\"\n prev_camera, PYCO.camera = PYCO.camera, offset\n return prev_camera\n\n\ndef cls(color = BLACK):\n \"\"\"Clears the graphics buffer.\n\n Parameters\n ----------\n color : A color to use for the background. The default is 0 (black).\n\n Notes\n -----\n The cls() function clears the graphics buffer, effectively setting \n every pixel to the color 0. If the color argument is provided,\n that number is used instead.\n\n It is common (though not required) to call cls() at the beginning\n of the _draw() function as part of the game loop.\n \"\"\"\n PYCO.screen.fill(PALETTE[color])\n\n\ndef pget(pos):\n \"\"\"Gets the color value of a pixel on the screen.\n\n Parameters\n ----------\n pos : The position on the screen.\n\n Notes\n -----\n The pget() function reads the color value of a pixel from the\n graphics buffer.\n\n It will read from the buffer even if the buffer has not yet been\n copied to the display.\n\n It will return 0 (black) if given coordinates outside the range (0-127,0-127).\n \"\"\"\n if 0 <= pos[0] < 128 and 0 <= pos[1] < 128:\n col = PYCO.screen.get_at(pos)\n return subr.get_palette_exact(col)\n return BLACK\n\n\ndef pset(pos, color = None):\n \"\"\"Sets a pixel in the graphics buffer.\n\n Parameters\n ----------\n pos : The position in the graphics buffer.\n color : The color value to set. If unspecified,\n the color of the current draw state will be used.\n\n Notes\n -----\n The pset() function sets a pixel in the graphics buffer.\n\n This operation is affected by the draw state.\n \"\"\"\n PYCO.color = color if color is not None else PYCO.color\n if 0 <= pos[0] < 128 and 0 <= pos[1] < 128:\n PYCO.screen.set_at(pos, PALETTE[PYCO.color])\n\n\ndef rect(rect, color = None):\n \"\"\"Draws an empty rectangle shape.\n\n Parameters\n ----------\n rect : The position and dimensions on the screen.\n color : The color of the rectangle border. If omitted,\n the color from the draw state is used. \n\n Notes\n -----\n This draws a rectangle shape parallel to the screen borders.\n \"\"\"\n rect = (rect[0] - PYCO.camera[0], rect[1] - PYCO.camera[1], rect[2], rect[3])\n PYCO.color = color if color is not None else PYCO.color\n pg.draw.rect(PYCO.screen, PALETTE[PYCO.color], rect, 1)\n\n\ndef rectfill(rect, color = None):\n \"\"\"Draws a filled-in rectangle shape.\n\n Parameters\n ----------\n rect : The position and dimensions on the screen.\n color : The fill color of the rectangle. If omitted, \n the color from the draw state is used. 
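\n\n Example\n -------\n A minimal usage sketch (coordinates and color value are illustrative, not from the original docs):\n\n rectfill((10, 20, 30, 8), 8) # 30x8 filled box with its top-left at (10, 20), palette color 8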
\n\n Notes\n -----\n This draws a rectangle shape parallel to the screen borders.\n \"\"\"\n rect = (rect[0] - PYCO.camera[0], rect[1] - PYCO.camera[1], rect[2], rect[3])\n PYCO.color = color if color is not None else PYCO.color\n PYCO.screen.fill(PALETTE[PYCO.color], rect)\n\n\ndef sspr(n, pos, s):\n \"\"\"Draws a scaled sprite on the screen.\n\n Parameters\n ----------\n n : The sprite number.\n pos : The position on the screen.\n s : The sprite cell size (width, height) in pixels.\n\n Notes\n -----\n This operation is affected by the draw state.\n \"\"\"\n pos = (pos[0] - PYCO.camera[0], pos[1] - PYCO.camera[1])\n rect = (int(n % 16) * s[0], int(n / 16) * s[1]) + s\n scale = (int(16 * s[0]), int(16 * s[1]))\n PYCO.screen.blit(pg.transform.scale(PYCO.sprite_sheet, scale), pos, rect)\n\ndef spr(n, pos):\n \"\"\"Draws a sprite on the screen.\n\n Parameters\n ----------\n n : The sprite number.\n pos : The position on the screen.\n\n Notes\n -----\n This operation is affected by the draw state.\n \"\"\"\n pos = (pos[0] - PYCO.camera[0], pos[1] - PYCO.camera[1])\n rect = (int(n % 16) * 8, int(n / 16) * 8, 8, 8)\n PYCO.screen.blit(PYCO.sprite_sheet, pos, rect)\n\n\ndef sget(pos):\n \"\"\"Gets the color value of a pixel on the sprite sheet.\n\n Parameters\n ----------\n pos : The position on the sprite sheet.\n\n Notes\n -----\n This is similar to pget() except instead of the graphics buffer,\n it gets the color value from the sprite sheet. The sprite sheet\n is treated as a single canvas 128 pixels wide and 128 pixels high.\n\n It will return 0 (black) if given coordinates outside the range (0-127,0-127).\n \"\"\"\n if 0 <= pos[0] < 128 and 0 <= pos[1] < 128:\n col = PYCO.sprite_sheet.get_at(pos)\n return subr.get_palette_exact(col)\n return BLACK\n\n\ndef sset(pos, color = None):\n \"\"\"Sets the color value of a pixel on the sprite sheet.\n\n Parameters\n ----------\n pos : The position on the sprite sheet.\n color : The color value to set. If unspecified,\n the color of the current draw state will be used.\n\n Notes\n -----\n This is similar to pset() except instead of the graphics buffer, \n it sets the color value to the sprite sheet. The sprite sheet is\n treated as a single canvas 128 pixels wide and 128 pixels high.\n \"\"\"\n PYCO.color = color if color is not None else PYCO.color\n if 0 <= pos[0] < 128 and 0 <= pos[1] < 128:\n PYCO.sprite_sheet.set_at(pos, PALETTE[PYCO.color])\n\n\ndef fget(n, f = None):\n \"\"\"Gets the value of a flag of a sprite.\n\n Parameters\n ----------\n n : The sprite number.\n f : The flag index (0-7). If omitted, a bit field of all flags is returned.\n\n Notes\n -----\n Each sprite has eight flags that can be set in the sprite editor\n or by the fset() function. You can use these flags for any purpose.\n One possible purpose is to define \"layers\" of map tiles, which\n modifies the behavior of the map() function.\n\n Flags are numbered from 0 to 7, appearing left to right in the sprite editor.\n\n When fget() is called without a flag index, it returns a number that\n represents all of the flags. 
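For example, a return value of 3 means flags 0 and 1 are both set. 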
This is a bit field where flag 0 is the\n \"least significant\" bit: flag 0 (leftmost) has a value of 1, flag 1 has\n a value of 2, flag 2 has a value of 4, and so on, up to flag 7 with a\n value of 128.\n \"\"\"\n if f is None:\n return PYCO.sprite_flags[n]\n else:\n return (PYCO.sprite_flags[n] & (1 << f)) > 0\n\n\ndef fset(n, v, f = None):\n \"\"\"Sets the value of a flag of a sprite.\n\n Parameters\n ----------\n n : The sprite number.\n v : The value, either true or false if the flag index is specified, or\n the bit field of all flags if it is not.\n f : The flag index (0-7). If omitted, v is treated as a bit field of all flags.\n\n Notes\n -----\n Each sprite has eight flags that can be set in the sprite editor or by the\n fset() function. You can use these flags for any purpose. One possible\n purpose is to define \"layers\" of map tiles, which modifies the behavior of\n the map() function.\n\n To set a specific flag, pass the flag index as the third argument and\n either true or false as the value. Flags are numbered from 0 to 7, appearing\n left to right in the sprite editor.\n\n When fset() is called without a flag index, it accepts a number that\n represents all of the flags. This is a bit field where flag 0 is the \n \"least significant\" bit: flag 0 (leftmost) has a value of 1, flag 1 has\n a value of 2, flag 2 has a value of 4, and so on, up to flag 7 with a\n value of 128.\n \"\"\"\n if f is None:\n PYCO.sprite_flags[n] = v\n elif v:\n PYCO.sprite_flags[n] |= (1 << f)\n else:\n PYCO.sprite_flags[n] &= ~(1 << f)\n\n\ndef flush():\n \"\"\"Clears the keyboard buffer.\n \n Notes\n -----\n \"\"\"\n PYCO.keybuf = []\n pg.event.clear(eventtype = pg.KEYDOWN)\n\n\ndef input():\n \"\"\"Read a character in the keyboard buffer.\n\n Notes\n -----\n The input function pops the first character from the keyboard buffer\n or returns None if the buffer is empty.\n\n This function translates the character into a Python string, or returns a hint\n if it is not a printable character.\n\n Hints\n -----\n \"return\" : if enter/return key is pressed.\n \"backspace\" : if backspace key is pressed.\n \"\"\"\n if len(PYCO.keybuf) == 0:\n return None\n else:\n event, PYCO.keybuf = PYCO.keybuf[0], PYCO.keybuf[1:]\n if event.key == pg.K_RETURN:\n return \"return\"\n elif event.key == pg.K_BACKSPACE:\n return \"backspace\"\n elif event.key == pg.K_DELETE:\n return \"delete\"\n elif event.key == pg.K_ESCAPE:\n return \"escape\"\n elif event.key == pg.K_UP:\n return subr.extend_key(event, \"up\")\n elif event.key == pg.K_DOWN:\n return subr.extend_key(event, \"down\")\n elif event.key == pg.K_LEFT:\n return subr.extend_key(event, \"left\")\n elif event.key == pg.K_RIGHT:\n return subr.extend_key(event, \"right\")\n elif event.key == pg.K_TAB:\n return \" \"\n elif event.mod & pg.KMOD_CTRL:\n return \"control-\" + pg.key.name(event.key)\n else:\n return event.unicode\n\n\ndef print(text, pos = (0, 0), color = None):\n \"\"\"Prints a string of characters to the screen.\n\n Parameters\n ----------\n text : The Python string of characters to print.\n pos : The position of the upper left corner to start printing.\n color : The color to use for the text. 
If unspecified,\n the color of the current draw state will be used.\n\n Notes\n -----\n The print function writes a line of text to the screen using the MINIISH font.\n \n If only the text argument is supplied, print() uses the camera position and\n draw color from the current draw state to determine where to draw the text,\n and what color to use.\n \"\"\"\n if text == \"\":\n return\n PYCO.color = color if color is not None else PYCO.color\n render = subr.render_text(text, PALETTE[PYCO.color])\n pos = (pos[0] - PYCO.camera[0], pos[1] - PYCO.camera[1])\n PYCO.screen.blit(render, pos)\n\n\ndef mcur(n = 0):\n \"\"\"Selects the mouse cursor.\n\n Parameters\n ----------\n n : the number of the cursor. Default is 0.\n \n Notes\n -----\n 0 : the pointer\n 1 : the cross\n 2 : the finger\n\n \"\"\"\n PYCO.mouse_cursor = n\n\n\ndef mxy():\n \"\"\"Returns the mouse coordinates.\n \n Notes\n -----\n The coordinates are translated to the screen coordinates.\n \"\"\"\n return PYCO.mouse_pos\n\n\ndef mbtn(n = 0):\n \"\"\"Returns the mouse buttons being pressed at this moment.\n \n Notes\n -----\n Returns the status of button n (0, the left button, by default): True if pressed, False otherwise.\n \"\"\"\n return PYCO.mouse_btn[n]\n\n\ndef mmod():\n \"\"\"Returns the mouse modifiers at this moment.\n \"\"\"\n return PYCO.mouse_mod\n\ndef flip():\n \"\"\"Copies the graphics buffer to the screen, then synchronizes to\n the next frame at 30 frames per second.\n\n Notes\n -----\n Sketches that use the built-in game loop functions _update() and _draw() do\n not need to call flip(). It can, however, be used to synchronize with\n the 30-frames-per-second draw timer in carts that implement \n their own custom game loop.\n \"\"\"\n PYCO.display.blit(pg.transform.scale(PYCO.screen, DISPLAY_SIZE), (0, 0)) \n pg.display.flip()\n PYCO.clock.tick(30)\n","repo_name":"RomualdRousseau/Miniish","sub_path":"Software/pyco/pyco/pyco.py","file_name":"pyco.py","file_ext":"py","file_size_in_byte":18531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"29998467075","text":"\"\"\"users table\n\nRevision ID: b93450d6a23b\nRevises: a3d76adca091\nCreate Date: 2019-04-18 18:10:01.131488\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b93450d6a23b'\ndown_revision = 'a3d76adca091'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index('ix_post_timestamp', table_name='post')\n op.drop_table('post')\n op.add_column('user', sa.Column('company', sa.String(length=64), nullable=True))\n op.create_index(op.f('ix_user_company'), 'user', ['company'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_user_company'), table_name='user')\n op.drop_column('user', 'company')\n op.create_table('post',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('body', sa.VARCHAR(length=140), nullable=True),\n sa.Column('timestamp', sa.DATETIME(), nullable=True),\n sa.Column('user_id', sa.INTEGER(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index('ix_post_timestamp', 'post', ['timestamp'], unique=False)\n # ### end Alembic commands ###\n","repo_name":"dragod812/Kubernetes-Cluster-Scripts-Config","sub_path":"Deployment/Console/Console/migrations/versions/b93450d6a23b_users_table.py","file_name":"b93450d6a23b_users_table.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25613097257","text":"def solveWeight(arr,val):\r\n a1=[]\r\n a1.append(max(arr))\r\n arr.remove(max(arr))\r\n print(arr)\r\n for i in range(len(arr)-1):\r\n a1[i+1]=a1[i]+arr[i]\r\n\r\n if val not in a1:\r\n return 1,a1\r\n else:\r\n return 0,a1\r\n \r\n \r\n \r\n\r\n\r\nif __name__==\"__main__\":\r\n\r\n n= int(input())\r\n for _ in range(n):\r\n size,e_value= map(int,input().split())\r\n arr1=list(map(int,input().split()))\r\n result,array=solveWeight(arr1,e_value)\r\n if result:\r\n print(\"YES\")\r\n print(*array)\r\n else:\r\n print(\"NO\")\r\n","repo_name":"Adarsh-chaurasia/CodeForces","sub_path":"PhoenixAndGold.py","file_name":"PhoenixAndGold.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13491873937","text":"import allure\nimport pytest\n\nfrom pages.ui.elements_page import TextBoxPage, CheckBoxPage, RadioButtonPage, WebTablesPage, ButtonsPage, LinksPage\nfrom cfg import config\n\n\n@pytest.mark.ui\n@allure.suite('Test Elements block')\nclass TestTextBox:\n @allure.title('Filling text box fields')\n @allure.description\n def test_filling_text_boxes(self, driver):\n \"\"\"Filling all text fields\"\"\"\n\n text_box_page = TextBoxPage(driver)\n text_box_page.open(config.TEXT_BOX_URL)\n\n input_data = text_box_page.filling_fields()\n output_data = text_box_page.check_output_data()\n\n assert input_data == output_data, \"input and output data don't match\"\n\n@pytest.mark.ui\n@allure.suite('Test Elements block')\nclass TestCheckBox:\n @allure.title('Test check boxes')\n @allure.description\n def test_check_boxes(self, driver):\n \"\"\"Select random check boxes and assert that output titles match\"\"\"\n\n check_box_page = CheckBoxPage(driver)\n check_box_page.open(config.CHECK_BOX_URL)\n\n check_box_page.select_random_check_boxes()\n selected_titles = check_box_page.get_selected_check_boxes_titles()\n output_titles = check_box_page.get_output_check_boxes_titles()\n\n assert selected_titles == output_titles, \"Selected titles don't match with output titles\"\n\n@pytest.mark.ui\n@allure.suite('Test Elements block')\nclass TestRadioButtons:\n @allure.title('Click on yes radiobutton')\n @allure.description\n def test_yes_radio_button(self, driver):\n \"\"\"Check that 'Yes' radiobutton works\"\"\"\n\n rb_page = RadioButtonPage(driver)\n rb_page.open(config.RADIO_BUTTON_URL)\n output_result = rb_page.click_on_radiobutton('yes')\n\n assert output_result == 'Yes', \"Radio button 'Yes' was not selected\"\n\n @allure.title('Click on impressive radiobutton')\n @allure.description\n def test_impressive_radio_button(self, 
driver):\n        \"\"\"Check that 'Impressive' radiobutton works\"\"\"\n\n        rb_page = RadioButtonPage(driver)\n        rb_page.open(config.RADIO_BUTTON_URL)\n        output_result = rb_page.click_on_radiobutton('impressive')\n\n        assert output_result == 'Impressive', \"Radio button 'Impressive' was not selected\"\n\n    @allure.title('Click on no radiobutton')\n    @allure.description\n    def test_no_radio_button(self, driver):\n        \"\"\"Check that 'No' radiobutton works\"\"\"\n\n        rb_page = RadioButtonPage(driver)\n        rb_page.open(config.RADIO_BUTTON_URL)\n        output_result = rb_page.click_on_radiobutton('no')\n\n        assert output_result == 'No', \"Radio button 'No' was not selected\"\n\n@pytest.mark.ui\n@allure.suite('Test Elements block')\nclass TestWebTables:\n    @allure.title('Add new record in table')\n    @allure.description\n    def test_add_new_record(self, driver):\n        \"\"\"Add a new record and assert that it is in the table\"\"\"\n\n        web_table_page = WebTablesPage(driver)\n        web_table_page.open(config.WEB_TABLES_URL)\n\n        new_record_data = web_table_page.add_new_record()\n        table_data = web_table_page.get_table_data()\n\n        assert new_record_data in table_data, 'New record not added'\n\n    @allure.title('Search record')\n    @allure.description\n    def test_search_record(self, driver):\n        \"\"\"Search record in table by email\"\"\"\n\n        web_table_page = WebTablesPage(driver)\n        web_table_page.open(config.WEB_TABLES_URL)\n\n        email = web_table_page.add_new_record()[3]\n        search_result = web_table_page.search_record(email)\n\n        assert email in str(search_result), 'Record was not found in table'\n\n    @allure.title('Delete record from table')\n    @allure.description\n    def test_delete_record(self, driver):\n        \"\"\"Delete record from table\"\"\"\n\n        web_table_page = WebTablesPage(driver)\n        web_table_page.open(config.WEB_TABLES_URL)\n\n        record_for_delete = web_table_page.add_new_record()[3]\n        web_table_page.search_record(record_for_delete)\n        web_table_page.delete_record()\n        table_data = web_table_page.get_table_data()\n\n        assert record_for_delete not in str(table_data), f'Record with data {record_for_delete} was not deleted'\n\n    @allure.title('Edit record')\n    @allure.description\n    def test_edit_record(self, driver):\n        \"\"\"Edit a record and check that the fields have changed in the table\"\"\"\n\n        web_table_page = WebTablesPage(driver)\n        web_table_page.open(config.WEB_TABLES_URL)\n\n        record_for_edit = web_table_page.add_new_record()[3]\n        web_table_page.search_record(record_for_edit)\n        new_data = web_table_page.edit_record()\n        table_data = web_table_page.get_table_data()\n\n        assert new_data in table_data, 'Data was not changed'\n\n    @allure.title('Change rows count')\n    @allure.description\n    @pytest.mark.parametrize('input_rows_count', [5, 10, 20, 25, 50, 100])\n    def test_change_rows_count(self, driver, input_rows_count):\n        \"\"\"Select rows count from 5 to 100 \"\"\"\n\n        web_table_page = WebTablesPage(driver)\n        web_table_page.open(config.WEB_TABLES_URL)\n\n        web_table_page.select_rows_count(input_rows_count)\n        output_rows_count = web_table_page.check_rows_count()\n\n        assert input_rows_count == output_rows_count\n\n    @allure.title('Return back rows count')\n    @allure.description\n    @pytest.mark.parametrize('input_rows_count', [5, 10, 20, 25, 50, 100])\n    def test_return_back_rows_count(self, driver, input_rows_count):\n        \"\"\"Select a rows count from the list and then try to return it back\"\"\"\n\n        web_table_page = WebTablesPage(driver)\n        web_table_page.open(config.WEB_TABLES_URL)\n\n        web_table_page.select_rows_count(input_rows_count)\n        
web_table_page.select_rows_count(10)\n        output_rows_count = web_table_page.check_rows_count()\n\n        assert output_rows_count == 10, 'Rows count was not returned back'\n\n@pytest.mark.ui\n@allure.suite('Test Elements block')\nclass TestButtons:\n    click_type_and_expected_msg = [('double_click', 'You have done a double click'),\n                                   ('right_click', 'You have done a right click'),\n                                   ('click', 'You have done a dynamic click')]\n\n    @allure.title('Test various clicks')\n    @allure.description\n    @pytest.mark.parametrize('click_type, exp_output_message', click_type_and_expected_msg)\n    def test_various_clicks(self, driver, click_type, exp_output_message):\n        \"\"\"Testing doubleclick, right click and click buttons\"\"\"\n\n        buttons_page = ButtonsPage(driver)\n        buttons_page.open(config.BUTTONS_URL)\n\n        output_message = buttons_page.do_click(click_type)\n\n        assert output_message == exp_output_message, 'Wrong click type'\n\n@pytest.mark.ui\n@allure.suite('Test Elements block')\nclass TestLinks:\n    @allure.title('New tab link')\n    @allure.description\n    def test_new_tab_link(self, driver):\n        \"\"\"Click on link which should open in a new tab \"\"\"\n\n        links_page = LinksPage(driver)\n        links_page.open(config.LINKS_URL)\n        links_page.new_tab_link()\n        current_url = links_page.current_url()\n\n        assert current_url == 'https://demoqa.com/', \"Link was not opened in a new tab\"\n","repo_name":"aktushin/pytest-selenium-ui","sub_path":"tests/ui/test_elements_page.py","file_name":"test_elements_page.py","file_ext":"py","file_size_in_byte":7199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37869538881","text":"import seaborn as sns\n\n#Seaborn comes with datasets. We need to call them:\ntips = sns.load_dataset('tips')\nprint(tips.head())\n\n#DISPLOT - Distribution univariate plot (only 1 variable) (HISTOGRAM)\nsns.distplot(tips['total_bill']) #or:\nsns.histplot(tips['total_bill'])\nsns.distplot(tips['total_bill'],kde=False) #Take off the kde (the line) (kernel density estimation)\nsns.displot(tips[\"total_bill\"],kde=False,bins=30) #Change amount of bins\n\n#JOINTPLOT (use 2 variables). Need to pass x, y, dataset (x,y columns)\nsns.jointplot(x=\"total_bill\",y=\"tip\",data=tips) #Show a scatter plot\n\n#Kind (affects the view inside the plot). Parameters: scatter, reg, resid, kde, hex\nsns.jointplot(x=\"total_bill\",y=\"tip\", data=tips, kind=\"hex\") #hexagon view\nsns.jointplot(x=\"total_bill\",y=\"tip\", data=tips, kind=\"reg\") #regression\nsns.jointplot(x=\"total_bill\",y=\"tip\", data=tips, kind=\"kde\") #kernel density estimation\n\n# PAIRPLOT - Do every possible combination between numerical columns in all the data frame\n# Quick way to visualize data\n# When x,y is the same, it will plot an histogram\n# HUE - Pass a column name of a categorical variable. 
It will color the data points according to the hue column.\n# PALETTE - Can change the color palette\nsns.pairplot(tips, hue='sex',palette=\"coolwarm\")\n\n#RUGPLOT\n# Plot a dash for every single point in the distribution (from the column that we choose)\n# Like a flattened graph\nsns.rugplot(tips['total_bill'])\n\n#KDEPLOT\nsns.kdeplot(tips['total_bill']) # Plot only the KDE without the bins\nsns.kdeplot(x=tips['total_bill'],y=tips['tip']) # Plot the KDE OF 2 numerical variables","repo_name":"adriana-nm/udemy-python-ds","sub_path":"Seaborn/Distribution-Plots.py","file_name":"Distribution-Plots.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"21281201956","text":"import plistlib\nfrom struct import unpack, pack, calcsize\nimport os, binascii\n\nimport CFPListErrors as errors\nfrom CFTypes import *\n\nCFPropertyListFormatBinary = 1\nCFPropertyListFormatXML = 2\nCFPropertyListFormatAuto = 0\n\nclass CFPropertyList(object):\n    def __init__(self, fileName=None, format=CFPropertyListFormatAuto):\n        self.file = fileName\n        self.format = format\n        self.value = None\n    \n    def load(self, fileName=None, format=None):\n        fileName = fileName if fileName else self.file\n        format = format if format else self.format\n        \n        if format == CFPropertyListFormatBinary:\n            self.value = self.read_binary(fileName)\n        if format == CFPropertyListFormatAuto:\n            opened = False\n            try:\n                fp = open(fileName, 'rb')\n                opened = True\n            except TypeError:\n                fp = fileName\n            magic_number = fp.read(8)\n            if not magic_number:\n                raise IOError('Could not read %s' % fileName)\n            if opened: fp.close()\n            \n            filetype = magic_number[:6]\n            version = magic_number[-2:]\n            \n            if filetype == 'bplist':\n                if not version == '00':\n                    raise errors.PListError('Wrong file format version for ' +\n                        '%s. Expected 00, got %s' % (fileName, version))\n                self.value = self.read_binary(fileName)\n            else:\n                format = CFPropertyListFormatXML\n        if format == CFPropertyListFormatXML:\n            pass\n            # self.value = plistlib.readPlist(fileName)\n    \n    def read_binary(self, filename):\n        opened = False\n        try:\n            fp = open(filename, 'rb')\n            opened = True\n        except TypeError:\n            fp = filename\n\n        # first, we read the trailer: 32 bytes from the end\n        fp.seek(-32, os.SEEK_END)\n        buff = fp.read(32)\n        \n        (offset_size, object_ref_size, number_of_objects, top_object, table_offset) = unpack('>6xBB4xL4xL4xL', buff)\n        \n        # after that, get the offset table\n        fp.seek(table_offset, os.SEEK_SET)\n        coded_offset_table = fp.read(number_of_objects * offset_size)\n        if not len(coded_offset_table) == number_of_objects * offset_size:\n            raise CFFormatError('%s: Format error!' 
% filename)\n        \n        \n        # decode offset table\n        format_unpackers = [\n            lambda d: '',\n            lambda d: star_unpack('>B', d),\n            lambda d: star_unpack('>H', d),\n            lambda d: None,\n            lambda d: star_unpack('>L', d)\n        ]\n        self.offsets = format_unpackers[offset_size](coded_offset_table)\n        \n        self.unique_table = []\n        self.object_ref_size = object_ref_size\n        \n        top = self.read_binary_object_at(filename, fp, top_object)\n        if opened: fp.close()\n        return top\n    \n    def read_binary_object_at(self, filename, fp, pos):\n        '''\n        Read an object type byte at position pos, decode it and delegate to\n        the correct reader function\n        '''\n        position = self.offsets[pos]\n        fp.seek(position, os.SEEK_SET)\n        return self.read_binary_object(filename, fp)\n    \n    def read_binary_object(self, fname, fp):\n        '''\n        Read an object type byte, decode it and delegate to the correct reader\n        function\n        '''\n        # first: read the marker byte\n        buff = fp.read(1)\n        \n        object_length = star_unpack('>B', buff)\n        object_length = object_length[0] & 0xF\n        \n        buff = buff.encode('hex')\n        object_type = buff[0]\n        \n        if not object_type == '0' and object_length == 15:\n            object_length = self.read_binary_object(fname, fp)\n            object_length = object_length.value\n        \n        retval = None\n        if object_type == '0': # null, false, true, fillbyte\n            retval = self.read_binary_null_type(object_length)\n        if object_type == '1': # integer\n            retval = self.read_binary_int(fname,fp,object_length)\n        if object_type == '2': # real\n            retval = self.read_binary_real(fname,fp,object_length)\n        if object_type == '3': # date\n            retval = self.read_binary_date(fname,fp,object_length)\n        if object_type == '4': # data\n            retval = self.read_binary_data(fname,fp,object_length)\n        if object_type == '5': # byte string, usually utf8 encoded\n            retval = self.read_binary_string(fname,fp,object_length)\n        if object_type == '6': # unicode string (utf16be)\n            retval = self.read_binary_unicode_string(fname,fp,object_length)\n        if object_type == 'a': # array\n            retval = self.read_binary_array(fname,fp,object_length)\n        if object_type == 'd': # dictionary\n            retval = self.read_binary_dict(fname,fp,object_length)\n        \n        return retval\n\n    def read_binary_null_type(self, length):\n        '''\n        read a \"null\" type (i.e. null byte, marker byte, bool value)\n        '''\n        if length == 0: return 0 # null byte\n        elif length == 8: return CFBoolean(False)\n        elif length == 9: return CFBoolean(True)\n        elif length == 15: return 15 # fill type\n        raise CFFormatError(\"unknown null type: %s\" % length)\n\n    def read_binary_int(self, fname,fp,length):\n        '''\n        read a binary int value\n        '''\n        if length > 3:\n            raise CFFormatError('Integer greater than 8 bytes: %s' % length)\n        \n        nbytes = 1 << length\n        val = None\n        buff = fp.read(nbytes)\n        \n        if length == 0:\n            val = unpack('>B', buff)\n            val = val[0]\n        elif length == 1:\n            val = unpack('>H', buff)\n            val = val[0]\n        elif length == 2:\n            val = unpack('>L', buff)\n            val = val[0]\n        elif length == 3:\n            (hiword,loword) = unpack('>LL', buff)\n            if not (hiword & 0x80000000) == 0:\n                # 8 byte integers are always signed, and are negative when bit\n                # 63 is set. 
Decoding into either a Fixnum or Bignum is tricky,\n                # however, because the size of a Fixnum varies among systems,\n                # and Ruby doesn't consider the number to be negative, and\n                # won't sign extend.\n                val = -(2**63 - ((hiword & 0x7fffffff) << 32 | loword))\n            else:\n                val = hiword << 32 | loword\n        \n        return CFInteger(val)\n\n    def read_binary_real(self, fname, fp, length):\n        '''\n        read a binary real value\n        '''\n        if length > 3:\n            raise CFFormatError('Real greater than 8 bytes: %s' % length)\n        \n        nbytes = 1 << length\n        val = None\n        buff = fp.read(nbytes)\n        \n        if length == 0 or length == 1: # 1 or 2 byte float? must be an error\n            raise CFFormatError('got %s byte float, must be an error!' % (length + 1))\n        if length == 2:\n            val = unpack('f',buff[::-1])\n            val = val[0]\n        if length == 3:\n            val = unpack('d',buff[::-1])\n            val = val[0]\n        \n        return CFReal(val)\n\n    def read_binary_date(self, fname, fp, length):\n        '''\n        read a binary date value\n        '''\n        if length > 3:\n            raise CFFormatError('Date greater than 8 bytes: %s' % length)\n        \n        nbytes = 1 << length\n        val = None\n        buff = fp.read(nbytes)\n        \n        if length == 0 or length == 1: # 1 or 2 byte float? must be an error\n            raise CFFormatError('got %s byte CFDate, must be an error!' % (length + 1))\n        if length == 2:\n            val = unpack('f',buff[::-1])\n            val = val[0]\n        if length == 3:\n            val = unpack('d',buff[::-1])\n            val = val[0]\n        \n        return CFDate(val, CFDate.TIMESTAMP_APPLE)\n\n\n    def read_binary_data(self, fname,fp,length):\n        '''\n        read a binary data value\n        '''\n        buff = ''\n        if length > 0:\n            buff = fp.read(length)\n        return CFData(buff, CFData.DATA_RAW)\n\n    def read_binary_string(self, fname,fp,length):\n        '''\n        read a binary string value\n        '''\n        buff = ''\n        if length > 0:\n            buff = fp.read(length)\n        \n        return CFString(buff)\n\n    def read_binary_unicode_string(self, fname,fp,length):\n        '''\n        Read a unicode string value, coded as UTF-16BE\n        '''\n        # The problem is: we get the length of the string IN CHARACTERS;\n        # since a char in UTF-16 can be 16 or 32 bit long, we don't really know\n        # how long the string is in bytes\n        \n        buff = fp.read(2*length)\n        buff = unicode(buff, 'utf-16be')\n        buff = buff.encode('utf-8')\n        \n        return CFString(buff)\n\n    def read_binary_array(self, fname,fp,length):\n        '''\n        read a binary array value, including contained objects\n        '''\n        array = []\n        \n        # first: read object refs\n        if not length == 0:\n            buff = fp.read(length * self.object_ref_size)\n            if self.object_ref_size == 1:\n                objects = star_unpack('>B', buff)\n            else:\n                objects = star_unpack('>H', buff)\n        \n        # and now read the objects\n        for i in range(length):\n            obj = self.read_binary_object_at(fname, fp, objects[i])\n            array.append(obj)\n        \n        return CFArray(array)\n\n    def read_binary_dict(self, fname,fp,length):\n        '''\n        read a dictionary value, including contained objects\n        '''\n        dic = {}\n        \n        if not length == 0:\n            # first read the key refs\n            buff = fp.read(length * self.object_ref_size)\n            if self.object_ref_size == 1:\n                keys = star_unpack('>B', buff)\n            else:\n                keys = star_unpack('>H', buff)\n            \n            # then the object refs\n            buff = fp.read(length * self.object_ref_size)\n            if self.object_ref_size == 1:\n                objects = star_unpack('>B', buff)\n            else:\n                objects = star_unpack('>H', buff)\n            \n            # and finally the keys and objects\n            for i in range(length):\n                key = self.read_binary_object_at(fname, fp, keys[i])\n                obj = self.read_binary_object_at(fname, fp, objects[i])\n                dic[key] = obj\n        \n        return CFDictionary(dic)\n\n\ndef native_types(obj):\n    if obj is None: return None\n    \n    if isinstance(obj, CFDate) or isinstance(obj, 
CFString) or isinstance(obj, CFInteger) or isinstance(obj, CFReal) or isinstance(obj, CFBoolean):\n        return obj.value\n    elif isinstance(obj, CFData):\n        return obj.decoded_value\n    elif isinstance(obj, CFArray):\n        return [native_types(v) for v in obj.value]\n    elif isinstance(obj, CFDictionary):\n        hsh = {}\n        for (k, v) in obj.value.items():\n            hsh[k.value] = native_types(v)\n        return hsh\n    \n\ndef unpack_helper(fmt, data):\n    size = calcsize(fmt)\n    return unpack(fmt, data[:size]), data[size:]\n\ndef star_unpack(fmt, data):\n    out = []\n    while data:\n        (b,), data = unpack_helper(fmt, data)\n        out.append(b)\n    return out\n","repo_name":"jordanbtucker/dpapick","sub_path":"depends/CFPropertyList/CFPropertyList/CFPropertyList.py","file_name":"CFPropertyList.py","file_ext":"py","file_size_in_byte":11376,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"54"} +{"seq_id":"36981110019","text":"import collections\nfrom PEManager import *\n\n\nclass Chunk(object):\n\n    def __init__(self, pe_manager, size=0x1000):\n        \"\"\"\n        creator of memory chunk that be allocated.\n\n        Args:\n            pe_manager(PEManager) : target PEManager to append chunk.\n            size(int) : size of chunk.\n        \"\"\"\n        if not isinstance(pe_manager, PEManager):\n            raise TypeError('pe_manager should be of type: PEManager')\n        data = bytearray(size)\n        section = pe_manager.create_new_data_section(data, \".zigzi\")\n        self.pe_manager = pe_manager\n        self.offset = section.PointerToRawData\n        self.offset_end = section.SizeOfRawData\n        self.section_rva = section.VirtualAddress\n        self.section_va = pe_manager.get_abs_va_from_rva(self.section_rva)\n        self.size = size\n\n    def __len__(self):\n        return self.size\n\n    def __getitem__(self, i):\n        if type(i) is slice:\n            start = i.start + self.offset\n            stop = i.stop + self.offset\n            step = i.step\n            if step is not None:\n                print(\"NOT SUPPORTED STEP\")\n        else:\n            start = self.offset + i\n            stop = self.offset + i + 1\n\n        if start >= self.size + self.offset\\\n                or start < self.offset:\n            raise IndexError(\n                \"Indexing is out of range Min:0 ~ Max:{} but argument:{}\"\n                .format(self.size, start - self.offset)\n            )\n        if stop >= self.size + self.offset \\\n                or stop < self.offset:\n            raise IndexError(\n                \"Indexing is out of range Min:0 ~ Max:{} but argument:{}\"\n                .format(self.size, stop - self.offset)\n            )\n\n        return self.pe_manager.get_bytes_at_offset(start, stop)\n\n    def __delitem__(self, i):\n        pass\n\n    def __setitem__(self, i, v):\n        if type(i) is slice:\n            start = i.start + self.offset\n            stop = i.stop\n            step = i.step\n            if step is not None:\n                print(\"NOT SUPPORTED STEP\")\n                exit()\n        else:\n            start = i + self.offset\n\n        if start >= self.size + self.offset \\\n                or start < self.offset:\n            raise IndexError(\n                \"Indexing is out of range Max:{} but argument:{}\"\n                .format(self.size, start - self.offset)\n            )\n        self.pe_manager.PE.set_bytes_at_offset(start, v)\n        # struct.pack('\n    root.bind(\"<Button-1>\", lambda event:\n                            mousePressedWrapper(event, canvas, data))\n    root.bind(\"<Key>\", lambda event:\n                            keyPressedWrapper(event, canvas, data))\n    timerFiredWrapper(canvas, data)\n    # and launch the app\n    root.mainloop()  # blocks until window is closed\n    print(\"bye!\")\n\nrun(1920, 1080)","repo_name":"mgq79/Real-Code-","sub_path":"mcd/mcdonalds.py","file_name":"mcdonalds.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30791411950","text":"##########################################################################\n## Simulator.py v0.2\n##\n## 
Implements two versions of a multi-level sampler:\n##\n## 1) Traditional 3 step process\n## 2) Streaming process using hashing\n##\n## Original Code written by H. Andrew Schwartz\n## for SBU's Big Data Analytics Course\n## Spring 2020\n##\n## Student Name: Sai Pramod Kudapa\n## Student ID: 112686280\n\n##Data Science Imports:\nimport numpy as np\nimport mmh3\nfrom random import random\nfrom random import shuffle\n\n##IO, Process Imports:\nimport sys\nfrom pprint import pprint\n\n\n##########################################################################\n##########################################################################\n# Task 1.A Typical non-streaming multi-level sampler\nimport numpy as np\nimport mmh3\nimport datetime\n\n# helper method to compute gcd to simplify fraction\ndef gcd(a, b):\n while b:\n a, b = b, a % b\n return a\n\n\n# helper methods to determine bucket size and no of buckets for a percentage\n# a is bucket size and b is no of buckets\ndef buckets(p):\n a = float(p * 100)\n b = 100.0\n while a.is_integer() is False:\n a *= 10\n b *= 10\n g = int(gcd(a, b))\n if b > 100:\n return int(a/g), int(b/g)\n return int(a), int(b)\n\n\ndef typicalSampler(filename, percent=.01, sample_col=0):\n csv_file1 = open(filename, \"r\")\n csv_file2 = open(filename, \"r\")\n uuids = set()\n for row in csv_file1:\n attrs = row.split(',')\n user_id = attrs[2]\n uuids.add(user_id)\n\n rand_uuids_size = int(float(uuids.__len__()) * percent)\n random_no = np.random.randint(0, (uuids.__len__() - rand_uuids_size))\n random_sample_uuids = list(uuids)[random_no: random_no + rand_uuids_size]\n\n mean, m2, c = 0.0, 0.0, 0\n for row in csv_file2:\n attrs = row.split(',')\n user_id = attrs[sample_col]\n if user_id in random_sample_uuids:\n amount = float(attrs[3])\n c += 1\n delta = amount - mean\n mean += delta/c\n m2 += delta * (amount - mean)\n\n variance = m2/c\n sd = np.sqrt([variance])[0]\n return mean, sd\n\n\n##########################################################################\n##########################################################################\n# Task 1.B Streaming multi-level sampler\ndef streamSampler(csv_file, percent=.01, sample_col=0):\n mean, m2, c = 0.0, 0.0, 0\n a, b = buckets(percent)\n for row in csv_file:\n attrs = row.split(',')\n user_id = attrs[sample_col]\n if mmh3.hash(user_id) % b < a:\n amount = float(attrs[3])\n c += 1\n diff = amount - mean\n mean += diff / c\n m2 += diff * (amount - mean)\n variance = m2/c\n sd = np.sqrt([variance])[0]\n return mean, sd\n\n\n##########################################################################\n##########################################################################\n# Task 1.C Timing\nfiles = ['transactions_small.csv', 'transactions_medium.csv', 'transactions_large.csv']\npercents = [.02, .005]\n\nif __name__ == \"__main__\":\n\n for perc in percents:\n print(\"\\nPercentage: %.4f\\n==================\" % perc)\n for f in files:\n print(\"\\nFile: \", f)\n t1 = datetime.datetime.now()\n typicalSamplerResult = typicalSampler(f, perc, 2)\n t2 = datetime.datetime.now()\n print(\" Typical Sampler: \", typicalSamplerResult)\n print(\" Time elapsed for Typical Sampler: \", (t2-t1).total_seconds()*1000)\n fstream = open(f, \"r\")\n t3 = datetime.datetime.now()\n streamSamplerResult = streamSampler(fstream, perc, 2)\n t4 = datetime.datetime.now()\n print(\" Stream Sampler: \", streamSamplerResult)\n print(\" Time elapsed for Stream Sampler: \", 
(t4-t3).total_seconds()*1000)\n\n","repo_name":"saipramodkudapa/big-data-projects","sub_path":"streaming_sampler.py","file_name":"streaming_sampler.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42276329227","text":"from bs4 import BeautifulSoup\nimport urllib\nfrom time import sleep\n\n\nbaseURL = \"http://library.duke.edu\"\ngalleryURLs = [(\"/digitalcollections/brennanjohn/?page=\",2), (\"/digitalcollections/paverjohn/?page=\",17), (\"/digitalcollections/rcmaxwellco/?page=\",471), (\"/digitalcollections/oaaaslidelibrary/?page=\", 283), (\"/digitalcollections/oaaaarchives/?page=\", 809)]\n\nitr = 0\nurlCount = 0\nfor collection in galleryURLs:\n\turlCount += collection[1]\n\nurlCount = str(urlCount)\n\nf = open('all_image_urls', 'w')\n\nfor collection in galleryURLs:\n\tfor i in range(0,collection[1]):\n\t\thtml_doc = urllib.urlopen(baseURL+collection[0]+str(i+1)).read()\n\t\tsoup = BeautifulSoup(html_doc, 'html.parser')\n\t\tresultsGrid = soup.find(id=\"resultsGrid\")\n\n\t\twhile resultsGrid is None:\n\t\t\tprint(\"Error loading page (\" + baseURL+collection[0]+str(i+1) + \"), trying again in 1 second...\")\n\t\t\tsleep(1.0)\n\t\t\thtml_doc = urllib.urlopen(baseURL+collection[0]+str(i+1)).read()\n\t\t\tsoup = BeautifulSoup(html_doc, 'html.parser')\n\t\t\tresultsGrid = soup.find(id=\"resultsGrid\")\n\n\t\tfor link in resultsGrid.find_all('a'):\n\t\t\tf.write(baseURL + link.get('href') + \"\\n\")\n\t\t\n\t\titr += 1\n\t\tprint(\"Processed \" + str(itr) + \" of \" + urlCount)\n\nf.close()\n\nprint(\"done!\")","repo_name":"dansakamoto/outdoor-ads-face-detect","sub_path":"python/getallimgurls.py","file_name":"getallimgurls.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71956622880","text":"import collections\nimport bisect\n\nStudent = collections.namedtuple('Student', ('name', 'gpa'))\n\ndef comp_gpa(student):\n    # negate since bisect only works on ascending lists\n    return (-student.gpa, student.name)\n\ndef search_student(students, target, comp_gpa):\n    i = bisect.bisect_left([comp_gpa(s) for s in students], comp_gpa(target))\n    print(i)\n    return 0 <= i < len(students) and students[i] == target\n\ndef main():\n    names = ['Joe', 'Wendy', 'Bob', 'Josh', 'John', 'Bill', 'Mary']\n    gpas = [4.0 , 4.0, 3.7, 3.5, 3.4, 3.2, 3.0]\n    students = [ Student(name,gpa) for name, gpa in zip(names,gpas)]\n    \n    target1 = Student('Wendy', 4.0)\n    target2 = Student('Darian', 3.2)\n    print('Found Wendy:')\n    print(search_student(students, target1, comp_gpa))\n    print('Found Darian:')\n    print(search_student(students, target2, comp_gpa))\n    \n    \n    \nif __name__ == '__main__':\n    main()\n","repo_name":"yerhu01/python-epi","sub_path":"problems/searching/compare_gpa.py","file_name":"compare_gpa.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28737580679","text":"import socketserver\n\n\nclass MyServer(socketserver.BaseRequestHandler):\n    def handle(self):\n        print(\"Server started\")\n        conn = self.request\n        print(self.client_address)\n        while True:\n            data = conn.recv(1024)\n            if not data:break\n            print(str(data,'utf8'))\n            inp = input('>>')\n            conn.sendall(inp.encode('utf-8'))\n        conn.close()\n\n\nif __name__ == '__main__':\n    server = socketserver.ThreadingTCPServer(('127.0.0.1',8000),MyServer)\n    
server.serve_forever()","repo_name":"825644691/pythonLearning","sub_path":"day27/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15523359243","text":"# -*- coding: utf-8 -*-\n# Reference: http://qiita.com/juntaki/items/9a13a3d2217ca223cf03\nfrom LoadImageDataForKeras import LoadImageDataForKeras\nimport numpy as np\nfrom PIL import Image\n\n#import matplotlib.pyplot as plt\n\nfrom keras.utils import np_utils\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.models import model_from_json\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D\nfrom keras.optimizers import Adam\nimport glob\n\n\n\n# A function that shuffles the data and splits it into training and test sets,\n# even when the files are read in order\n# It is included in the scikit-learn package\nfrom sklearn.cross_validation import train_test_split\n\nif __name__ == '__main__':\n    # Instantiate the class for loading images\n    loadingObj = LoadImageDataForKeras(2)\n    # The 2 is the number of classes; here it is a binary classification problem\n\n    # get_image_array_for_keras takes a file name and a label\n    # For a binary problem, give np.array([[1, 0]], dtype=np.uint8) for label 0\n    # and np.array([[0, 1]], dtype=np.uint8) for label 1\n\n    # For a 3-class problem it would be one of\n    # np.array([[1, 0, 0]]), np.array([[0, 1, 0]]), np.array([[0, 0, 1]])\n\n    # Load the apple images ringo0~ringo9.jpg\n    for i in range(10):\n        loadingObj.get_image_array_for_keras('ringo'+str(i)+'.jpg', np.array([[1, 0]], dtype=np.uint8))\n\n    # Load the orange images mikan0~mikan9.jpg\n    for i in range(10):\n        loadingObj.get_image_array_for_keras('mikan'+str(i)+'.jpg', np.array([[0, 1]], dtype=np.uint8))\n\n    # This should print (number of samples, 3, 50, 50)\n    print(loadingObj.get_stacking_image_array().shape) \n    # Print the whole stacked array\n    # print(loadingObj.get_stacking_image_array())\n\n    # This should print (number of samples, number of classes)\n    print(loadingObj.get_stacking_label_array().shape) \n    # Print the whole stacked array\n    # print(loadingObj.get_stacking_label_array())\n\n    # Shuffle the loaded data and split it into training and test sets\n    # See: http://stackoverflow.com/questions/3674409/numpy-how-to-split-partition-a-dataset-array-into-training-and-test-datasets\n    data_train, data_test, labels_train, labels_test = \\\n        train_test_split(loadingObj.get_stacking_image_array(),\n                         loadingObj.get_stacking_label_array(),\n                         test_size=0.10, # 90% training data, 10% test data\n                         random_state=10)\n    print(data_train.shape)\n    print(data_test.shape)\n    print(labels_train.shape)\n    print(labels_test.shape)\n    \n\n    # Define the neural network\n    # Convolution and max-pooling layers stacked somewhat arbitrarily\n    # For the MaxPooling2D options, see\n    # https://stackoverflow.com/questions/39815518/keras-maxpooling2d-layer-gives-valueerror\n    model = Sequential()\n    model.add(Convolution2D(96, 3, 3, border_mode=\"same\", activation=\"relu\" ,input_shape=(3, 50, 50) ))\n    model.add(Convolution2D(96, 3, 3, border_mode=\"same\", activation=\"relu\"))\n    model.add(MaxPooling2D(pool_size=(2,2), dim_ordering=\"th\"))\n    model.add(Convolution2D(96, 3, 3, border_mode=\"same\", activation=\"relu\"))\n    model.add(Convolution2D(96, 3, 3, border_mode=\"same\", activation=\"relu\"))\n    model.add(MaxPooling2D(pool_size=(2,2), dim_ordering=\"th\"))\n    model.add(Convolution2D(96, 3, 3, border_mode=\"same\", activation=\"relu\"))\n    model.add(Convolution2D(96, 3, 3, border_mode=\"same\", activation=\"relu\"))\n    model.add(MaxPooling2D(pool_size=(2,2), dim_ordering=\"th\"))\n    model.add(Dropout(0.5))\n    \n    model.add(Flatten())\n    model.add(Dense(512))\n    model.add(Activation(\"relu\"))\n    model.add(Dense(10))\n    
model.add(Activation(\"relu\"))\n model.add(Dense(2))\n model.add(Activation(\"sigmoid\"))\n #model.summary()\n model.compile(loss='binary_crossentropy', optimizer=\"adadelta\", metrics=['accuracy'])\n\n # 学習の実行\n # 20エポック回す\n hist = model.fit(data_train, labels_train, nb_epoch=20, batch_size=32, validation_data=(data_test, labels_test))\n\n # 結果をmatplotlibでplot\n # http://qiita.com/TypeNULL/items/4e4d7de11ab4361d6085\n #loss = hist.history['loss']\n #val_loss = hist.history['val_loss']\n\n #nb_epoch = len(loss)\n #plt.plot(range(nb_epoch), loss, marker='.', label='loss')\n #plt.plot(range(nb_epoch), val_loss, marker='.', label='val_loss')\n #plt.legend(loc='best', fontsize=10)\n #plt.grid()\n #plt.xlabel('epoch')\n #plt.ylabel('loss')\n #plt.show()\n\n\n # 学習したネットワークを保存し,再度読み込み,使えることを確認する\n # 参照: http://m0t0k1ch1st0ry.com/blog/2016/07/17/keras/\n\n # 学習したネットワークをJSONで保存\n model_json_str = model.to_json()\n # 'ringo_or_mikan_model.json'というファイルにネットワーク(モデル)を保存\n open('ringo_or_mikan_model.json', 'w').write(model_json_str)\n # 'ringo_or_mikan_weights.hdf5'というファイル名のhdf5形式で学習結果(重み)を保存\n model.save_weights('ringo_or_mikan_weights.hdf5');\n\n # ただしhdf5で保存したものをkeras-jsで読むためには,\n # json形式にエンコードする必要があるらしい\n # それに必要なプログラムはkeras-jsに含まれている\n # http://nonbiri-tereka.hatenablog.com/entry/2016/10/17/073541\n\n # 再度読み込み\n # Pythonはhdf5形式もそのまま読める\n # まずネットワークを保存したファイルを開いてネットワークを読み込み\n learned_model = model_from_json(open('ringo_or_mikan.json').read())\n # そのネットワークに学習した重みを読み込む\n learned_model.load_weights('ringo_or_mikan_weights.hdf5')\n \n learned_model.summary()\n # 使えるようにコンパイルする\n learned_model.compile(loss='binary_crossentropy', optimizer=\"adadelta\", metrics=['accuracy'])\n \n # ネットワークの評価をする\n score = learned_model.evaluate(data_test, labels_test, verbose=0)\n print('Test loss : ', score[0])\n print('Test accuracy : ', score[1])\n\n # ネットワークを使う\n # 学習に使用しなかったリンゴ画像を読み込む\n ringo_x = np.array(Image.open('ringo_x.jpg').resize((50, 50)))\n ringo_x = ringo_x.transpose(2, 0, 1)\n stacking_image_array = np.empty((0, 3, 50, 50), dtype=np.uint8)\n stacking_image_array = np.append(stacking_image_array,\\\n np.array([ringo_x]),\\\n axis=0)\n print(stacking_image_array.shape)\n # ネットワークによる分類推測を行う\n result = learned_model.predict(stacking_image_array, \n batch_size=1, verbose=1)\n print(result)\n # どのクラスに属するのかの確率を配列で出してくれる\n # predict_class関数を使うと,一番確率が高いクラスをそのまま出力してくれる\n\n # 学習に使用しなかったミカン画像を読み込み,同じように\n mikan_x = np.array(Image.open('mikan_x.jpg').resize((50, 50)))\n mikan_x = mikan_x.transpose(2, 0, 1)\n stacking_image_array = np.empty((0, 3, 50, 50), dtype=np.uint8)\n stacking_image_array = np.append(stacking_image_array,\\\n np.array([mikan_x]),\\\n axis=0)\n print(stacking_image_array.shape)\n result = learned_model.predict_proba(stacking_image_array, \n batch_size=1, verbose=1)\n # 2分類問題だと,predict関数とpredict_proba関数の出力は同じ?\n print(result)\n \n","repo_name":"daichi-a/RTImageDetectWithCamera","sub_path":"KerasLearningMain.py","file_name":"KerasLearningMain.py","file_ext":"py","file_size_in_byte":8051,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34597151187","text":"import typing\n\nimport requests\nfrom azure.mgmt.compute.models import OperatingSystemTypes\nfrom msrestazure.azure_exceptions import CloudError\nfrom requests.utils import is_valid_cidr\n\nfrom cloudshell.cp.azure import exceptions\nfrom cloudshell.cp.azure.actions.network import NetworkActions\nfrom cloudshell.cp.azure.utils.tags import get_default_tags_count\n\n\nclass 
ValidationActions(NetworkActions):\n MAX_VM_DISK_SIZE_GB = 1023\n MAX_TAGS_NUMBER = 15\n\n def register_azure_providers(self):\n \"\"\"Register Azure Providers.\"\"\"\n self._logger.info(\"Registering subscription with Azure providers...\")\n for provider in (\n \"Microsoft.Authorization\",\n \"Microsoft.Storage\",\n \"Microsoft.Network\",\n \"Microsoft.Compute\",\n ):\n self._logger.info(\n f\"Registering subscription with a {provider} resource provider\"\n )\n self._azure_client.register_provider(provider)\n\n def validate_azure_region(self, region: str):\n \"\"\"Validate Azure Region.\"\"\"\n self._logger.info(\"Validating Azure region...\")\n\n if not region:\n raise Exception(\"Region attribute can not be empty\")\n\n available_regions = [\n available_region.name\n for available_region in self._azure_client.get_available_regions()\n ]\n self._logger.debug(f\"Available Azure regions: {available_regions}\")\n\n if region not in available_regions:\n raise Exception(f'Region \"{region}\" is not a valid Azure Geo-location')\n\n def validate_azure_mgmt_resource_group(self, mgmt_resource_group_name: str):\n \"\"\"Validate Management Resource Group.\"\"\"\n self._logger.info(\n f\"Validating MGMT resource group {mgmt_resource_group_name}...\"\n )\n\n try:\n self._azure_client.get_resource_group(mgmt_resource_group_name)\n except CloudError:\n error_msg = (\n f\"Failed to find management resource group '{mgmt_resource_group_name}'\"\n )\n self._logger.exception(error_msg)\n raise Exception(error_msg)\n\n def validate_azure_sandbox_network(\n self, mgmt_resource_group_name: str, sandbox_vnet_name: str\n ):\n \"\"\"Validate Azure sandbox vNET.\"\"\"\n self._logger.info(\n \"Verifying that sandbox vNet exists under the MGMT resource group...\"\n )\n self.get_sandbox_virtual_network(\n resource_group_name=mgmt_resource_group_name,\n sandbox_vnet_name=sandbox_vnet_name,\n )\n\n def validate_azure_mgmt_network(\n self, mgmt_resource_group_name: str, mgmt_vnet_name: str\n ):\n \"\"\"Validate Azure management vNET.\"\"\"\n self._logger.info(\n \"Verifying that management vNet exists under the MGMT resource group...\"\n )\n self.get_mgmt_virtual_network(\n resource_group_name=mgmt_resource_group_name,\n mgmt_vnet_name=mgmt_vnet_name,\n )\n\n def validate_azure_vm_size(self, vm_size: str, region: str):\n \"\"\"Validate 'VM Size' attribute.\"\"\"\n self._logger.info(f\"Validating VM size {vm_size}\")\n if vm_size:\n available_vm_sizes = [\n vm_size.name\n for vm_size in self._azure_client.get_virtual_machine_sizes_by_region(\n region\n )\n ]\n\n self._logger.debug(f\"Available VM sizes: {available_vm_sizes}\")\n\n if vm_size not in available_vm_sizes:\n raise Exception(f\"VM Size {vm_size} is not valid\")\n\n def validate_custom_tags(self, custom_tags: typing.Dict):\n \"\"\"Validate resource 'Custom tags' attribute.\"\"\"\n self._logger.info(\"Validating 'Custom Tags' attribute\")\n allowed_tags_number = self.MAX_TAGS_NUMBER - get_default_tags_count()\n\n if len(custom_tags) > allowed_tags_number:\n raise Exception(\n f\"The number of Azure custom tags must be no more than \"\n f\"{allowed_tags_number}. 
Present number of custom tags: \"\n f\"{len(custom_tags)}\"\n )\n\n def validate_tags(self, tags: typing.Dict):\n \"\"\"Validate resource and deployment path 'Custom tags' attributes.\"\"\"\n self._logger.info(\"Validating 'Custom Tags' attribute\")\n default_tags_count = get_default_tags_count()\n\n if len(tags) > self.MAX_TAGS_NUMBER:\n raise Exception(\n f\"The total number of Azure custom tags must be no more than \"\n f\"{self.MAX_TAGS_NUMBER - default_tags_count}. \"\n f\"Present number of custom tags: {len(tags) - default_tags_count}\"\n )\n\n def validate_azure_additional_networks(self, mgmt_networks: typing.List[str]):\n \"\"\"Validate 'Additional Mgmt Networks' attribute.\"\"\"\n self._logger.info(\"Validating Deploy App 'Additional Mgmt Networks' attribute\")\n for cidr in mgmt_networks:\n if not is_valid_cidr(cidr):\n msg = (\n f\"CIDR {cidr} under the 'Additional Mgmt Networks' attribute \"\n f\"is not in the valid format\"\n )\n self._logger.exception(msg)\n raise Exception(msg)\n\n def validate_deploy_app_resource_group(self, deploy_app, cs_api):\n \"\"\"Validate Deploy App Resource Group.\"\"\"\n self._logger.info(\"Validating Deploy App Resource group...\")\n\n if not deploy_app.resource_group_name:\n return\n\n try:\n self._azure_client.get_resource_group(deploy_app.resource_group_name)\n except CloudError:\n error_msg = (\n f\"Failed to find Deploy App \"\n f\"Resource group '{deploy_app.resource_group_name}'\"\n )\n self._logger.exception(error_msg)\n raise Exception(error_msg)\n\n if deploy_app.resource_group_name.lower() in (\n reservation.Id.lower()\n for reservation in cs_api.GetCurrentReservations().Reservations\n ):\n error_msg = (\n f\"Invalid Deploy App \"\n f\"Resource group '{deploy_app.resource_group_name}'. It cannot \"\n f\"be a resource group created by another CloudShell reservation.\"\n )\n self._logger.exception(error_msg)\n raise Exception(error_msg)\n\n def validate_deploy_app_add_public_ip(self, deploy_app, connect_subnets):\n \"\"\"Validate 'Add Public IP' attribute.\"\"\"\n self._logger.info(\"Validating Deploy App 'Add Public IP' attribute\")\n all_subnets_are_private = (\n all(not subnet.is_public() for subnet in connect_subnets)\n if connect_subnets\n else False\n )\n\n if all_subnets_are_private and deploy_app.add_public_ip:\n raise Exception(\n \"Cannot deploy App with Public IP when connected \"\n \"only to private subnets\"\n )\n\n def validate_deploy_app_script_file(self, deploy_app):\n \"\"\"Validate 'Extension Script file' attribute.\"\"\"\n self._logger.info(\"Validating Deploy App Extension Script File\")\n\n if not deploy_app.extension_script_file:\n return\n\n error_msg = (\n f\"Unable to retrieve VM Extension Script File: \"\n f\"{deploy_app.extension_script_file}\"\n )\n\n try:\n response = requests.head(deploy_app.extension_script_file, verify=False)\n response.raise_for_status()\n except Exception:\n self._logger.exception(error_msg)\n raise Exception(error_msg)\n\n def validate_deploy_app_script_extension(self, deploy_app, image_os):\n \"\"\"Validate 'Extension Script file' attribute script extension.\"\"\"\n self._logger.info(\"Validating Deploy App Extension Script\")\n\n if not deploy_app.extension_script_file:\n return\n\n if image_os == OperatingSystemTypes.windows:\n if not deploy_app.extension_script_file.endswith(\"ps1\"):\n raise Exception(\n \"Invalid format for the PowerShell script. 
\"\n \"It must have a 'ps1' extension\"\n )\n else:\n if not deploy_app.extension_script_configurations:\n raise Exception(\n \"Linux Custom Script must have a command to execute in \"\n \"'Extension Script Configurations' attribute\"\n )\n\n def validate_deploy_app_disk_size(self, deploy_app):\n \"\"\"Validate 'Disk Size' attribute.\"\"\"\n self._logger.info(\"Validating Deploy App VM Disk size\")\n\n if not deploy_app.disk_size:\n return\n\n try:\n disk_size_num = int(deploy_app.disk_size)\n except ValueError:\n error_msg = f\"Invalid Virtual Machine Disk size '{deploy_app.disk_size}'\"\n self._logger.exception(error_msg)\n raise Exception(error_msg)\n\n if disk_size_num > self.MAX_VM_DISK_SIZE_GB:\n raise Exception(\n f\"Virtual Machine Disk size cannot be larger than \"\n f\"{self.MAX_VM_DISK_SIZE_GB} GB\"\n )\n\n def validate_vm_size(self, deploy_app_vm_size: str, cloud_provider_vm_size: str):\n \"\"\"Validate 'VM Size' attribute.\"\"\"\n self._logger.info(\"Validating VM size\")\n return any([deploy_app_vm_size, cloud_provider_vm_size])\n\n def validate_key_vault(self, key_vault_name: str):\n \"\"\"Validate Azure Region.\"\"\"\n self._logger.info(\"Validating Azure Key Vault...\")\n\n try:\n self._azure_client.get_key_vault_secret(\n key_vault_name=key_vault_name,\n secret_name=\"KeyVaultValidation\",\n )\n except exceptions.InvalidAttrException:\n raise Exception(f\"Key Vault '{key_vault_name}' doesn't exist.\")\n except exceptions.AzurePermissionsException:\n raise\n except exceptions.ResourceNotFoundException:\n pass\n except Exception as err:\n self._logger.exception(err)\n","repo_name":"QualiSystems/cloudshell-cp-azure","sub_path":"cloudshell/cp/azure/actions/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":10107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30190616460","text":"fname1 = \"AcOH_MP2.txt\"\nfname2 = \"result_Ac_MP2.txt\"\n\nf = open(fname2, \"w\")\nf.write(\"dihed MP2 kcal/mol\\n\")\n\na = -180\nfor fx in open(fname1, \"r\"):\n y = fx.split(\"|\")\n for i in range(len(y)):\n if \"MP2=\" in y[i]:\n x = y[i].split(\"=\")\n kcal = float(x[1]) * 627.5095\n f.write(str(a) + \" \" + str(x[1]) + \" \" + str(kcal) + \"\\n\")\n a += 10\n","repo_name":"ducksirloin/lab","sub_path":"gau_result_MP2.py","file_name":"gau_result_MP2.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15176430006","text":"\n#For Rocof data after wavelet\n\nimport keras\nimport np_utils\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import InputLayer, Input\nfrom tensorflow.keras.layers import Reshape, MaxPooling2D\nfrom tensorflow.keras.layers import Conv2D, Dense, Flatten, Dropout\nfrom tensorflow.keras.callbacks import TensorBoard,ModelCheckpoint\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras import optimizers,regularizers\n\nfrom sklearn.metrics import confusion_matrix, classification_report, accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.utils import shuffle\nimport pywt\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport pickle\nimport timeit\nimport datetime\nimport skopt\nfrom skopt import gp_minimize, forest_minimize\nfrom 
skopt.space import Real, Categorical, Integer\nfrom skopt.plots import plot_convergence\nfrom skopt.plots import plot_objective, plot_evaluations\nfrom skopt.utils import use_named_args\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import accuracy_score\n\n# import pyarrow.parquet as pq\nimport timeit\nimport pickle\n\nstart = timeit.default_timer()\n\nsampling_rate = 1800\ndef removePlanned(X,y):\n \"\"\"\n THIS FUNCTION REMOVES THE PLANNED EVENTS FROM THE EVENT DATASET\n \"\"\"\n \n X_new=[]\n y_new=[]\n for i in range(len(y)):\n #print(i)\n \n if y[i]==0:\n y_new.append(0)\n X_new.append(X[i,:,:,:])\n \n elif y[i]==1:\n y_new.append(1)\n X_new.append(X[i,:,:,:])\n \n \n elif y[i]==2:\n y_new.append(2)\n X_new.append(X[i,:,:,:])\n \n elif y[i]==3:\n y_new.append(3)\n X_new.append(X[i,:,:,:])\n \n\n return np.array(X_new), np.array(y_new)\n\n\ndef separatePMUs(X,y):\n \n \"\"\"\n This function separates features and their corresponding labels for each PMU\n to make more events \n \"\"\"\n \n num_case=X.shape[0]\n num_pmu=X.shape[1]\n num_sample=X.shape[2]\n X=X.reshape(num_case*num_pmu,num_sample)\n y2=[]\n for i in range(len(y)):\n if y[i]==0:\n for j in range(num_pmu):\n y2.append(0)\n \n elif y[i]==1:\n for j in range(num_pmu):\n y2.append(1)\n \n elif y[i]==2:\n for j in range(num_pmu):\n y2.append(2)\n \n elif y[i]==3:\n for j in range(num_pmu):\n y2.append(3)\n elif y[i]==4:\n for j in range(num_pmu):\n y2.append(4) \n elif y[i]==5:\n for j in range(num_pmu):\n y2.append(5) \n return X,np.array(y2)\n\n\ndef rd2(k):\n path1 = '../pickleset/'\n\n list = ['rocof','v_grad','i_grad', 'vp_a_diff_grad', 'ip_a_diff_grad','f_grad']\n \n p1 = open(path1 +'X_S'+str(k)+'_'+str(list[0])+'_6.pickle',\"rb\")\n pk1 = pickle.load(p1)\n # len(list)\n # for i in range(1,len(list)):\n # p2 = open(path1 +'X_S'+str(k)+'_'+str(list[i])+'_6.pickle',\"rb\")\n # pk2 = pickle.load(p2) \n \n # pk1=np.concatenate((pk1, pk2), axis=3)\n \n fps=60\n start_crop=int(fps*60*4.95)\n stop_crop=int(fps*60*5.05)\n\n pk1=pk1[:,:,start_crop:stop_crop,:]\n \n p3 = open(path1 + 'y_S'+str(k)+'_rocof_6.pickle',\"rb\")\n pk3 = pickle.load(p3) \n \n path2 = 'index/'\n tr=np.load(path2 +'tr_' +str(k)+'.npy')\n val=np.load(path2 +'val_' +str(k)+'.npy')\n tr=tr.tolist() \n val=val.tolist() \n # b = a[c]\n \n pk1,pk3=removePlanned(pk1,pk3)\n X_train = pk1[tr]\n y_train = pk3[tr]\n X_val = pk1[val]\n y_val = pk3[val]\n \n print(X_train.shape) \n print(X_val.shape) \n print(y_train.shape) \n print(y_val.shape) \n \n\n return X_train, X_val, y_train, y_val \n \ndef Cwt(X_train): \n wavename = 'morl'\n pca_2 = PCA(n_components=100)\n # totalscal = 11\n # fc = pywt.central_frequency(wavename)\n # cparam = 2 * fc * totalscal\n # print(data.shape)\n scales = np.arange(1, 101)\n # scales = cparam / np.arange(totalscal, 1, -1)\n # data = X_train\n # t = range(sampling_rate)\n wavelet = []\n for i in range(0,X_train.shape[0]):\n data = X_train[i].flatten()\n \n # print('data.shape',data.shape) \n # print('t.shape',len(t)) \n [coeff1, freqs1] = pywt.cwt(data, scales, wavename)\n \n \n\n wavelet.append(pca_2.fit_transform(coeff1))\n wavelet = np.array(wavelet)\n print(wavelet.shape)\n return wavelet\n # print('frequencies.shape',frequencies.shape)\n # print('cwtmatr.shape',cwtmatr.shape)\n # print('cwtmatr[0].shape',cwtmatr[0].shape) \n\ndef data_pack():\n X_train, 
X_val, y_train, y_val = rd2(1)\n    X_train, y_train = separatePMUs (X_train, y_train)\n    X_val, y_val = separatePMUs (X_val, y_val)\n    \n    X_train =Cwt(X_train)\n    X_val =Cwt(X_val)\n    num= X_train.shape[0]\n    scaler = MinMaxScaler(feature_range=(0, 1))\n    p1 = np.concatenate((X_train,X_val))\n    # scaler.fit(p1)\n    print(np.amax(p1))\n    # p2 = scaler.transform(p1)\n    # print(np.amax(p2))\n\n    \n    X_train = p1[:num]\n    X_val = p1[num:]\n    \n    X_train = X_train[:,:,:,np.newaxis]\n    X_val = X_val[:,:,:,np.newaxis]  \n\n    print('X_train.shape',X_train.shape) \n    print('y_train.shape',y_train.shape) \n    print('X_val.shape',X_val.shape) \n    print('y_val.shape',y_val.shape) \n    return X_train,y_train, X_val, y_val\n    \n\n\n# def load_data():    \ndef recover(df):\n    df = pd.DataFrame(df)\n    df.columns = ['L1','L2','L3','L4']\n    dd = np.zeros(df.shape[0])\n    dd = dd.astype(\"int8\") \n    rr = df[df['L2']>0].index.tolist()\n    dd[rr] = 1\n    \n    rr = df[df['L3']>0].index.tolist()\n    dd[rr] = 2 \n\n\n    rr = df[df['L4']>0].index.tolist()\n    dd[rr] = 3 \n    \n    df['rev'] = dd\n    return df['rev'].values \n    \ndef fake(): \n    s_x = 10\n    s_y = 3600\n    channel_num = 1\n    c_num =4\n    tr_num = 20\n    test_num = 5\n    X_train = np.random.random((tr_num, s_x, s_y,channel_num))\n    y_train = keras.utils.to_categorical(np.random.randint(c_num, size=(tr_num, 1)), num_classes=c_num)\n    # y_train = (np.random.randint(c_num, size=(100, 1)), num_classes=c_num)\n    X_test = np.random.random((test_num, s_x, s_y,channel_num))\n    y_test = keras.utils.to_categorical(np.random.randint(c_num, size=(test_num, 1)), num_classes=c_num)\n    y_test = recover(y_test)\n    y_train = recover(y_train)\n    print(X_test.shape)\n    print(y_test.shape)\n    \n    \n    return X_train,y_train,X_test, y_test\nbest_accuracy = 0.0 \n\ndef main():\n\n    # use data_pack() to obtain the train/validation split (read_data was undefined)\n    X_train,y_train,X_test,y_test = data_pack()\n    validation_set = (X_test,y_test)\n    ##Constants\n\n    EPOCHS=2\n    BATCH_SIZE=16\n    \n    \n\n    #number of classes\n    num_classes=len(np.unique(y_train))\n\n    #print(X.shape[0:])\n    #hyperparameters\n    learning_rate=0.0035\n    num_conv_filters=4\n\n    num_dense_layers=1\n    num_dense_nodes=100\n    drop_out_rate_input=0.9\n    drop_out_rate_hidden=0.2\n\n    \n    channel_num = X_train.shape[3]\n    s_x = X_train.shape[1]\n    s_y = X_train.shape[2]\n    \n    all_accuracy=[] #list for reporting all accuracy\n\n\n\n    model = Sequential()\n\n    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(s_x, s_y, channel_num)))\n    model.add(Conv2D(32, (3, 3), activation='relu'))\n    model.add(MaxPooling2D(pool_size=(3, 3)))\n    model.add(Dropout(0.25))\n\n    model.add(Conv2D(64, (3, 3), activation='relu'))\n    model.add(Conv2D(64, (3, 3), activation='relu'))\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(0.25))\n\n    model.add(Flatten())\n    model.add(Dense(100, activation='relu'))\n    model.add(Dropout(0.5))\n    model.add(Dense(4, activation='softmax'))\n\n\n\n\n    optimizer = Adam(lr=0.001)\n    model.compile(optimizer=optimizer,\n                  loss='sparse_categorical_crossentropy',\n                  metrics=['accuracy'])\n\n    # model.summary()\n    model.fit(X_train, y_train, batch_size=32, epochs=2)\n\n\n    y_pred=model.predict_classes(X_test) \n\n\n\n\n    # print(y_test.shape)\n    print(y_pred.shape)\n\n    matrix=confusion_matrix(y_test, y_pred)\n    print(matrix)\n    class_report=classification_report(y_test, y_pred)\n    print(class_report)\n\n##prediction on test set\n\n\n\n    \n    \ndef main2():\n    X_train, y_train, X_test, y_test= data_pack()\n    # data_pack()\n\n    validation_set = (X_test, y_test)\n    num_classes=len(np.unique(y_train))\n\n\n    \n    ck = '2016cnn-'\n    dim_learning_rate = Real(low=1e-4, high=1e-2, 
prior='log-uniform',\n name='learning_rate')\n dim_num_conv_filters = Integer(low=4, high=32, name='num_conv_filters')\n dim_size_conv_filters=Integer(low=4, high=20, name='size_conv_filters')\n dim_num_dense_layers = Integer(low=1, high=4, name='num_dense_layers')\n dim_num_dense_nodes = Integer(low=10, high=80, name='num_dense_nodes')\n dim_drop_out_input = Real(low=0.4, high=0.9, name='drop_out_rate_input')\n dim_drop_out_hidden = Real(low=0.2, high=0.7, name='drop_out_rate_hidden')\n\n dimensions = [dim_learning_rate,\n dim_num_conv_filters,\n dim_size_conv_filters,\n dim_num_dense_layers,\n dim_num_dense_nodes,\n dim_drop_out_input,\n dim_drop_out_hidden]\n\n \n def log_dir_name( learning_rate,num_conv_filters,\n size_conv_filters, num_dense_layers,\n num_dense_nodes,\n drop_out_rate_input, drop_out_rate_hidden):\n\n # The dir-name for the TensorBoard log-dir.\n # s = \"./19_logs/lr_{0:.0e}_conv_filters_{1}_conv_filter_size_{2}_dense_layers_{3}_nodes_{4}_dropout_input_{5}_dropout_hidden_{6}_activation_{7}/\"\n s = \"./19_logs/lr_{0:.0e}_conv_filters_{1}_conv_filter_size_{2}_dense_layers_{3}_nodes_{4}_dropout_input_{5}_dropout_hidden_{6}/\"\n # Insert all the hyper-parameters in the dir-name.\n log_dir = s.format(learning_rate,\n num_conv_filters,\n size_conv_filters,\n num_dense_layers,\n num_dense_nodes,\n drop_out_rate_input,\n drop_out_rate_hidden\n )\n\n return log_dir\n \n \n \n def create_model(\n \n num_conv_filters,\n size_conv_filters,\n learning_rate,\n num_dense_layers,\n num_dense_nodes,\n drop_out_rate_input,\n drop_out_rate_hidden\n ):\n\n\n\n channel_num = X_train.shape[3]\n s_x = X_train.shape[1]\n s_y = X_train.shape[2]\n \n all_accuracy=[] #list for reporting all accuracy\n\n\n\n model = Sequential()\n \n model.add(Conv2D(num_conv_filters, (3, size_conv_filters), activation='relu', input_shape=(s_x, s_y, channel_num)))\n # model.add(Conv2D(32, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(drop_out_rate_input))\n\n # model.add(Conv2D(64, (3, 3), activation='relu'))\n # model.add(Conv2D(64, (3, 3), activation='relu'))\n # model.add(MaxPooling2D(pool_size=(2, 2)))\n # model.add(Dropout(0.25))\n \n model.add(Flatten())\n for i in range(num_dense_layers):\n # Name of the layer. 
This is not really necessary\n # because Keras should give them unique names.\n name = 'layer_dense_{0}'.format(i+1)\n\n model.add(Dense(num_dense_nodes,\n activation='relu',\n name=name))\n model.add(Dropout(drop_out_rate_hidden))\n\n # Last fully-connected / dense layer with softmax-activation\n # for use in classification.\n model.add(Dense(num_classes, activation='softmax'))\n\n # Use the Adam method for training the network.\n # We want to find the best learning-rate for the Adam method.\n optimizer = Adam(lr=learning_rate)\n\n # In Keras we need to compile the model so it can be trained.\n model.compile(optimizer=optimizer,\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.summary()\n \n return model\n\n \n \n path_best_model = ck + 'best_model.hdf5'\n best_accuracy = 0.0\n\n @use_named_args(dimensions=dimensions)\n def fitness(learning_rate,num_conv_filters,\n size_conv_filters, num_dense_layers,\n num_dense_nodes, drop_out_rate_input,\n drop_out_rate_hidden):\n \n # Print the hyper-parameters.\n print('*************************** NEW ITERATION IS STARTED****************************')\n print('learning rate: {0:.1e}'.format(learning_rate))\n print('num_conv_filters:', num_conv_filters)\n print('size_filter:', size_conv_filters)\n print('num_dense_layers:', num_dense_layers)\n print('num_dense_nodes:', num_dense_nodes)\n print('drop_out_rate_input:', drop_out_rate_input)\n print('drop_out_rate_hidden:', drop_out_rate_hidden)\n # print('activation:', activation)\n print()\n \n # Create the neural network with these hyper-parameters.\n model = create_model(learning_rate=learning_rate,\n num_conv_filters=num_conv_filters,\n size_conv_filters=size_conv_filters,\n num_dense_layers=num_dense_layers,\n num_dense_nodes=num_dense_nodes,\n drop_out_rate_input=drop_out_rate_input,\n drop_out_rate_hidden=drop_out_rate_hidden\n )\n\n # Dir-name for the TensorBoard log-files.\n log_dir = log_dir_name(learning_rate,num_conv_filters,\n size_conv_filters, num_dense_layers,\n num_dense_nodes,drop_out_rate_input,\n drop_out_rate_hidden) \n \n callback_log = TensorBoard(\n log_dir=log_dir,\n histogram_freq=0,\n write_graph=True,\n write_grads=False,\n write_images=False) \n \n file_name=ck+ \"best_model_weights.hdf5\"\n\n mcp_save = ModelCheckpoint(file_name, save_best_only=True, monitor='val_accuracy', mode='max')\n EPOCHS=2\n BATCH_SIZE=16 \n history = model.fit(x=X_train,\n y=y_train,\n epochs=EPOCHS,\n batch_size=BATCH_SIZE,\n verbose=2,\n validation_data=validation_set,\n callbacks=[callback_log,mcp_save])\n\n # Get the classification accuracy on the validation-set\n # after the last training-epoch.\n #accuracy = history.history['val_accuracy'][-1] #last epoch accuracy in each call\n accuracy = max(history.history['val_accuracy']) #best accuracy among all epochs in each call\n print()\n print(\"Accuracy: {0:.2%}\".format(accuracy))\n print()\n \n #y_pred_percentage=model.predict(X_test)\n # X_test,y_test \n y_pred=model.predict_classes(X_test) \n matrix=confusion_matrix(y_test, y_pred)\n print(matrix)\n class_report=classification_report(y_test, y_pred)\n print(class_report)\n\n # Save the model if it improves on the best-found performance.\n # We use the global keyword so we update the variable outside\n # of this function.\n global best_accuracy\n # If the classification accuracy of the saved model is improved ...\n if accuracy > best_accuracy:\n # Save the new model to harddisk.\n \n model.load_weights(ck + \"best_model_weights.hdf5\")# save best model's weights\n 
model.save(ck + \"best_model_so_far.h5\") # save the entire model configurartion and weights\n print('**** THE BEST ACCURACY SO FAR IS ACHIEVED AND THE MODEL IS SAVED **** \\n')\n \n # save the hyperparameters\n pickle_out = open(ck + \"hp.pickle\",\"wb\")\n pickle.dump([learning_rate, num_conv_filters,\n size_conv_filters,num_dense_layers,num_dense_nodes,drop_out_rate_input,\n drop_out_rate_hidden], pickle_out, protocol=2)\n pickle_out.close() \n\n # Update the classification accuracy.\n best_accuracy = accuracy\n \n\n # Delete the Keras model with these hyper-parameters from memory.\n del model\n \n # Clear the Keras session, otherwise it will keep adding new\n # models to the same TensorFlow graph each time we create\n # a model with a different set of hyper-parameters.\n K.clear_session()\n \n # NOTE: Scikit-optimize does minimization so it tries to\n # find a set of hyper-parameters with the LOWEST fitness-value.\n # Because we are interested in the HIGHEST classification\n # accuracy, we need to negate this number so it can be minimized.\n return -accuracy \n \n ###Run the Hyper-Parameter Optimization####\n search_result = gp_minimize(func=fitness,\n dimensions=dimensions,\n acq_func='EI', # Expected Improvement.\n n_calls=10)\n\n stop = timeit.default_timer()\n #running time\n print('Overall Time:(mins)', (stop - start)/60)\n\n plot_convergence(search_result)\n plt.savefig(ck+'-plot1.png', format='png')\n\nif __name__ == '__main__': \n\n s1 = timeit.default_timer()\n main2()\n # rd2(1)\n # data_pack()\n s2 = timeit.default_timer()\n #running time\n print('Time(mins): ', (s2 - s1)/60 )\n \n \n","repo_name":"nyc1893/Python-Learning","sub_path":"LICENSE.md/classification/train/BO_CNN1.py","file_name":"BO_CNN1.py","file_ext":"py","file_size_in_byte":18499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36149937618","text":"import os\nimport sys\nfrom itertools import chain, combinations\nimport numpy as np\nbase_path = '/sc/arion/scratch/liy42/covid19_DECEASED_INDICATOR/'\n\n\nlist_of_method = ['EI', 'demographics',\n 'labs', 'medications',\n 'vitals', 'concatenated','comorbidities',\n # 'medications_binary', 'EI_med_binary', 'concatenated_med_binary'\n # 'EI_svdImpute', 'EI_svdImpute_rank_5', 'EI_svdImpute_rank_20',\n # 'concatenated_svdImpute', 'concatenated_svdImpute_rank_5', 'concatenated_svdImpute_rank_20',\n # 'labs_svdImpute', 'labs_svdImpute_rank_5', 'labs_svdImpute_rank_20'\n ]\n\noutcome_list = ['DECEASED_INDICATOR']\n\ncalling_script = str(sys.argv[-1])\n\nbase_command = 'python {} --path {}'\n\ndef powerset(iterable):\n \"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))\n\nlist_of_data = [\n 'demographics',\n 'labs', 'medications',\n 'vitals','comorbidities',\n 'EI', 'concatenated']\n\n# feature_power_set = powerset(list_of_data)\n#\n# for s in feature_power_set:\n# # print(s, len(s))\n# if len(s) > 1 and len(s) < len(list_of_data):\n# feat = ''\n# for sub in s:\n# feat = feat + '+' + sub\n# list_of_method.append(feat[1:])\n# rdim = np.array(range(10))+1\n# tcca_list = []\n# for r in rdim:\n# k = 'tcca{}'.format(r)\n# tcca_list.append(k)\n # dict_of_method['tcca{}'.format(r)] = 'EI_TensorCCA({})'.format(r)\n# list_of_data = list_of_data + tcca_list\n\nfor outcome in outcome_list:\n for m in list_of_data:\n if m == 'EI' or m == 'concatenated':\n dir_name = base_path+outcome+'_'+m\n elif calling_script 
== 'ensemble.py':\n dir_name = base_path +outcome+ '_EI/' + m\n else:\n continue\n lsf_fn = 'covid19_{}_{}.lsf'.format(outcome, m)\n script = open(lsf_fn, 'w')\n script.write(\n '#!/bin/bash\\n#BSUB -J train_all_base\\n#BSUB -P acc_pandeg01a\\n#BSUB -q premium\\n#BSUB -n 4\\n#BSUB -W 10:00\\n#BSUB -o train_all_base.stdout\\n#BSUB -eo train_all_base.stderr\\n#BSUB -R rusage[mem=10000]\\n')\n script.write('module purge\\nmodule load java\\nmodule load groovy\\nmodule load selfsched\\n')\n print(dir_name)\n cmd = base_command.format(calling_script, dir_name)\n print(cmd)\n script.write(cmd)\n script.close()\n os.system('bsub < {}'.format(lsf_fn))\n os.remove(lsf_fn)\n\n","repo_name":"huhrichard/ei_2","sub_path":"minerva_scripts/submit_all_covid19.py","file_name":"submit_all_covid19.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27815061382","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 18 16:56:51 2020\r\n\r\n@author: tom\r\n\"\"\"\r\n\r\nimport os\r\n\r\npath = 'C:/Users/'+os.getlogin()+'/Google Drive/University/Dissertation'\r\ndatapath = 'C:/Users/'+os.getlogin()+'/Dissertation Data'\r\n\r\nos.chdir(path+'/Code')\r\n\r\njava_path = \"C:/Program Files/Java/jdk-13.0.2/bin/java.exe\"\r\nos.environ['JAVAHOME'] = java_path\r\n\r\nfrom nltk.tag import StanfordPOSTagger\r\nfrom nltk.corpus.reader.plaintext import PlaintextCorpusReader\r\nfrom nltk.tokenize import WhitespaceTokenizer\r\n\r\nfrom generators import sent_gen, tup_to_str\r\nfrom sner import POSClient\r\n\r\nstanford_dir = datapath+\"/stanford-postagger-full-2020-08-06\"\r\nmodelfile = stanford_dir+\"/models/english-bidirectional-distsim.tagger\"\r\njarfile=stanford_dir+\"/stanford-postagger.jar\"\r\n\r\n#tagger=StanfordPOSTagger(model_filename=modelfile, path_to_jar=jarfile)\r\n#tagger.java_options='-mx15360m'\r\n\r\n\r\ntagger = POSClient(host='localhost', port=9198)\r\n\r\nsamp = PlaintextCorpusReader(datapath+'/Corpora/wiki/simple_20200601/','simple_sample.txt',\r\n word_tokenizer = WhitespaceTokenizer()\r\n )\r\n\r\n\r\noutput = open(datapath+'/Corpora/wiki/simple_20200601/simple_sample_tagged_3w.txt', 'w', encoding='utf-8')\r\n\r\ni = 0\r\nfor sent in sent_gen(samp, asstr = True):\r\n sent = [ tup_to_str(tup) for tup in tagger.tag(sent)]\r\n output.write(bytes(' '.join(sent), 'utf-8').decode('utf-8')+' \\n')\r\n i = i + 1\r\n if (i % 100000 == 0):\r\n print('Processed ' + str(i) + ' sentences')\r\n \r\noutput.close()\r\nprint('Processing complete!')","repo_name":"Oddtwang/MWEs","sub_path":"Project/0 Corpora/Simple Sample Tagged Text.py","file_name":"Simple Sample Tagged Text.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"4347057530","text":"from flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for\n)\nfrom flaskr import app\nfrom werkzeug.exceptions import abort\n\nfrom flaskr.auth import require_login\n\nfrom flaskr.lib.database import request_session\nfrom flaskr.lib.models.models import RoomModel\nfrom flaskr.lib.repository import room_repository\nfrom flaskr.lib.user_session import session_user\n\nbp = Blueprint('poker', __name__)\n\n\n@app.route('/')\n@require_login()\ndef index():\n rooms = room_repository.get_rooms()\n\n return render_template('poker/index.html', rooms=rooms)\n\n\n@app.route('/store')\n@require_login()\ndef store():\n return 
render_template('poker/store.html')\n\n\n@bp.route('/create', methods=('GET', 'POST'))\n@require_login()\ndef create():\n if request.method == 'POST':\n room_name = request.form['roomName']\n password = request.form.get(\"password\", \"\")\n\n error = None\n if not room_name:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = request_session()\n room = RoomModel(room_name, session_user(), password)\n db.add(room)\n db.commit()\n return redirect(url_for('index'))\n\n return render_template('poker/create.html')\n\n\ndef get_room(room_id, check_author=False):\n room = room_repository.get_room(room_id)\n\n if room is None:\n abort(404, \"Room id {0} doesn't exist.\".format(room_id))\n if check_author and room.author != session_user():\n abort(401, \"Room id {0} doesn't belong to you.\".format(room_id))\n\n return room\n\n\n@bp.route('/<int:room_id>/join', methods=('GET', 'POST'))\n@require_login()\ndef join(room_id):\n room = get_room(room_id)\n return render_template('poker/room.html', room=room)\n\n\n@bp.route('/<int:room_id>/roomSettings', methods=('GET',))\n@require_login()\ndef room_settings(room_id):\n room = get_room(room_id)\n return render_template('poker/room_settings.html', room=room)\n\n\n@bp.route('/<int:room_id>/game', methods=('GET',))\n@require_login()\ndef game(room_id):\n room = get_room(room_id)\n\n return render_template('poker/game.html', room=room)\n","repo_name":"Dtenwolde/web-poker","sub_path":"flaskr/poker.py","file_name":"poker.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22119306426","text":"#!/usr/bin/env python3\n\n\"\"\"This code was adapted from a Real Python tutorial on Python socket programming.\nIt is available on Github (TLT).\"\"\"\n\nimport selectors\nimport sys\nimport time\nimport socket\nimport threading\nimport traceback\n\nfrom typing import List\n\n\nfrom sock_message import *\n\n\nclass SockServer:\n \"\"\"This class is the socket server that provides a communication link between select VMs running\n as part of a test iteration. It is designed to handle multiple connections from the client software.\n It should be instantiated from the testbed. The event loop runs in a thread.\"\"\"\n def __init__(self, my_host: str, my_port: int, context: str = None):\n self._host: str = my_host\n self._port: int = my_port\n self._context: str = context\n self._listen_sock = None\n self._sel: selectors.BaseSelector = selectors.DefaultSelector()\n\n self.sock_objects: List[SockMessage] = []\n\n def setup_listen_socket(self):\n \"\"\"This method sets up the listening socket. For each connection, the listening socket will be\n cloned by socket.accept().\"\"\"\n self._listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Avoid bind() exception: OSError: [Errno 48] Address already in use.\n self._listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n self._listen_sock.bind((self._host, self._port))\n self._listen_sock.listen()\n self._listen_sock.setblocking(False)\n print(\"Listening on\", (self._host, self._port))\n\n # Register the socket with selectors API.\n self._sel.register(self._listen_sock, selectors.EVENT_READ, data=None)\n\n # Start the event loop in a thread.\n threading.Thread(target=self.event_loop, daemon=True).start()\n\n def accept_wrapper(self, sock):\n \"\"\"This method is called from the event loop and establishes a connection in answer to a request\n for a connection. 
It keeps a list of SockMessage instance references so messages can be sent to\n clients with a specific iteration and context.\"\"\"\n conn, addr = sock.accept() # Clones the listening socket for the server end on the connection.\n print(\"Accepted connection from\", addr)\n conn.setblocking(False)\n \"\"\"Keep in mind that the SockMessage instance that is created here is on the server side of the\n connection! The client connection has its own instance of SockMessage. Note setting of the\n server_instance flag.\"\"\"\n sock_message = SockMessage(self._sel, sock=conn, addr=addr, server_instance=True)\n events = selectors.EVENT_READ | selectors.EVENT_WRITE\n self._sel.register(conn, events, data=sock_message)\n self.sock_objects.append(sock_message)\n\n def close(self):\n \"\"\"We make the assumption that the client on the other end is going to close itself up when through.\"\"\"\n self._sel.close()\n\n def send_message(self, action: str, iteration: int, context: str, message: str):\n \"\"\"When the server needs to send a message to a client, we need to find which client to send it\n to based on the iteration number and the client context.\"\"\"\n for sock_object in self.sock_objects: # Our list of SockMessage client connections.\n if sock_object.iteration == iteration and sock_object.context == context:\n sock_object.message_out = create_message(action=action, value=message, iteration=iteration)\n break\n\n def event_loop(self):\n \"\"\"This is the event loop for monitoring socket connections. We are using select() which returns a list\n of socket connections that are ready for I/O. key.fileobj is the socket. key.data is a reference to\n SockMessage. If key.data is None, then this is the listening socket and so we call accept_wrapper()\n otherwise, we call sock_obj.process_events() passing in the communication type mask.\"\"\"\n try:\n while True:\n events = self._sel.select(timeout=None)\n for key, mask in events:\n if key.data is None:\n self.accept_wrapper(key.fileobj)\n else:\n sock_object: SockMessage = key.data\n try:\n sock_object.process_events(mask)\n except Exception:\n print(\"Server: error: exception for\",\n f\"{sock_object.addr}:\\n{traceback.format_exc()}\")\n sock_object.close()\n time.sleep(1) # just pausing for a second to keep from breaking the speed limit ;)\n except KeyboardInterrupt:\n print(\"Caught keyboard interrupt, exiting...\")\n finally:\n self._sel.close()\n\n\ndef main():\n \"\"\"This function is for commandline testing only. 
As part of the testbed the SockServer class will be\n instantiated by Testbed and the event loop will run in a thread.\"\"\"\n if len(sys.argv) != 3:\n print(\"usage:\", sys.argv[0], \"<host> <port>\")\n sys.exit(1)\n\n host, port = sys.argv[1], int(sys.argv[2])\n server = SockServer(host, port)\n server.setup_listen_socket()\n time.sleep(20)\n\n # Just a sample message for testing.\n server.send_message(action=SOCK_COMMAND, iteration=5,\n context=SOCK_CONTEXT_ATTACK,\n message=\"nmap -v -A scanme.nmap.org\")\n\n time.sleep(90)\n server.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"leetuckert10/SockiToMe","sub_path":"sock_server.py","file_name":"sock_server.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16994490722","text":"\"\"\"\nTest cases for the wiutils.summarizing.create_dwc_occurrence function.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom wiutils.darwincore import create_dwc_occurrence\n\n\n@pytest.fixture(scope=\"function\")\ndef images():\n return pd.DataFrame(\n {\n \"project_id\": [\"AAA001\", \"AAA001\", \"AAA001\", \"AAA001\", \"AAA001\"],\n \"deployment_id\": [\"001\", \"001\", \"002\", \"002\", \"002\"],\n \"image_id\": [\"bc6534f0\", \"003cb8eb\", \"a19bbd16\", \"740e09f5\", \"e09axa3q\"],\n \"location\": [\n \"gs://bucket/deployment/001/bc6534f0.jpg\",\n \"gs://bucket/deployment/001/003cb8eb.jpg\",\n \"gs://bucket/deployment/002/a19bbd16.jpg\",\n \"gs://bucket/deployment/002/740e09f5.jpg\",\n \"gs://bucket/deployment/002/e09axa3q.jpg\",\n ],\n \"class\": [\"Mammalia\", \"Mammalia\", np.nan, \"Aves\", \"Mammalia\"],\n \"order\": [\"Carnivora\", \"Carnivora\", np.nan, \"Passeriformes\", \"Rodentia\"],\n \"family\": [\"Felidae\", \"Felidae\", np.nan, \"Tinamidae\", \"Cuniculidae\"],\n \"genus\": [\"Panthera\", \"Panthera\", np.nan, np.nan, \"Cuniculus\"],\n \"species\": [\"onca\", \"onca\", np.nan, np.nan, \"paca nelsoni\"],\n \"timestamp\": [\n \"2020-12-13 09:04:50\",\n \"2020-12-13 10:02:12\",\n \"2021-01-12 09:59:02\",\n \"2020-11-17 01:12:57\",\n \"2021-01-12 09:59:03\",\n ],\n \"number_of_objects\": [1, 1, 0, 1, 2],\n }\n )\n\n\n@pytest.fixture(scope=\"function\")\ndef deployments():\n return pd.DataFrame(\n {\n \"placename\": [\"90210\", \"90211\"],\n \"deployment_id\": [\"001\", \"002\"],\n \"project_id\": [\"AAA001\", \"AAA001\"],\n \"recorded_by\": [\"Joe Bloggs\", \"Joe Bricks\"],\n }\n )\n\n\n@pytest.fixture(scope=\"function\")\ndef projects():\n return pd.DataFrame(\n {\n \"project_id\": [\"AAA001\"],\n \"project_admin_organization\": [\"Instituto Humboldt\"],\n }\n )\n\n\n@pytest.fixture(scope=\"function\")\ndef constants():\n return {\n \"organismQuantityType\": \"individual(s)\",\n }\n\n\n@pytest.fixture(scope=\"function\")\ndef mapping():\n return {\n \"deployment_id\": \"eventID\",\n \"placename\": \"parentEventID\",\n \"recorded_by\": \"recordedBy\",\n \"number_of_objects\": \"organismQuantity\",\n \"project_admin_organization\": \"institutionCode\",\n \"family\": \"family\",\n \"genus\": \"genus\",\n }\n\n\n@pytest.fixture(scope=\"function\")\ndef order():\n return [\n \"eventID\",\n \"parentEventID\",\n \"eventDate\",\n \"eventTime\",\n \"recordedBy\",\n \"organismQuantity\",\n \"organismQuantityType\",\n \"associatedMedia\",\n \"institutionCode\",\n \"scientificName\",\n \"family\",\n \"genus\",\n \"specificEpithet\",\n \"infraspecificEpithet\",\n \"taxonRank\",\n ]\n\n\ndef test_defaults(deployments, images, 
projects, mocker, constants, mapping, order):\n mocker.patch(\"wiutils._dwc.occurrence.constants\", constants)\n mocker.patch(\"wiutils._dwc.occurrence.mapping\", mapping)\n mocker.patch(\"wiutils._dwc.occurrence.order\", order)\n gcs_base_url = \"https://console.cloud.google.com/storage/browser/\"\n result = create_dwc_occurrence(images, deployments, projects)\n expected = pd.DataFrame(\n {\n \"eventID\": [\"001\", \"001\", \"002\", \"002\"],\n \"parentEventID\": [\"90210\", \"90210\", \"90211\", \"90211\"],\n \"eventDate\": [\"2020-12-13\", \"2020-12-13\", \"2020-11-17\", \"2021-01-12\"],\n \"eventTime\": [\"09:04:50\", \"10:02:12\", \"01:12:57\", \"09:59:03\"],\n \"recordedBy\": [\"Joe Bloggs\", \"Joe Bloggs\", \"Joe Bricks\", \"Joe Bricks\"],\n \"organismQuantity\": [1, 1, 1, 2],\n \"organismQuantityType\": [\n \"individual(s)\",\n \"individual(s)\",\n \"individual(s)\",\n \"individual(s)\",\n ],\n \"associatedMedia\": [\n gcs_base_url + \"bucket/deployment/001/bc6534f0.jpg\",\n gcs_base_url + \"bucket/deployment/001/003cb8eb.jpg\",\n gcs_base_url + \"bucket/deployment/002/740e09f5.jpg\",\n gcs_base_url + \"bucket/deployment/002/e09axa3q.jpg\",\n ],\n \"institutionCode\": [\n \"Instituto Humboldt\",\n \"Instituto Humboldt\",\n \"Instituto Humboldt\",\n \"Instituto Humboldt\",\n ],\n \"scientificName\": [\n \"Panthera onca\",\n \"Panthera onca\",\n \"Tinamidae\",\n \"Cuniculus paca nelsoni\",\n ],\n \"family\": [\"Felidae\", \"Felidae\", \"Tinamidae\", \"Cuniculidae\"],\n \"genus\": [\"Panthera\", \"Panthera\", np.nan, \"Cuniculus\"],\n \"specificEpithet\": [\"onca\", \"onca\", np.nan, \"paca\"],\n \"infraspecificEpithet\": [np.nan, np.nan, np.nan, \"nelsoni\"],\n \"taxonRank\": [\"species\", \"species\", \"family\", \"subspecies\"],\n }\n )\n pd.testing.assert_frame_equal(result, expected)\n\n\ndef test_remove_duplicate_kws(\n deployments, images, projects, mocker, constants, mapping, order\n):\n mocker.patch(\"wiutils._dwc.occurrence.constants\", constants)\n mocker.patch(\"wiutils._dwc.occurrence.mapping\", mapping)\n mocker.patch(\"wiutils._dwc.occurrence.order\", order)\n gcs_base_url = \"https://console.cloud.google.com/storage/browser/\"\n result = create_dwc_occurrence(\n images,\n deployments,\n projects,\n remove_duplicate_kws=dict(interval=60, unit=\"minutes\"),\n )\n expected = pd.DataFrame(\n {\n \"eventID\": [\"001\", \"002\", \"002\"],\n \"parentEventID\": [\"90210\", \"90211\", \"90211\"],\n \"eventDate\": [\"2020-12-13\", \"2020-11-17\", \"2021-01-12\"],\n \"eventTime\": [\"09:04:50\", \"01:12:57\", \"09:59:03\"],\n \"recordedBy\": [\"Joe Bloggs\", \"Joe Bricks\", \"Joe Bricks\"],\n \"organismQuantity\": [1, 1, 2],\n \"organismQuantityType\": [\"individual(s)\", \"individual(s)\", \"individual(s)\"],\n \"associatedMedia\": [\n gcs_base_url\n + \"bucket/deployment/001/bc6534f0.jpg\"\n + \"|\"\n + gcs_base_url\n + \"bucket/deployment/001/003cb8eb.jpg\",\n gcs_base_url + \"bucket/deployment/002/740e09f5.jpg\",\n gcs_base_url + \"bucket/deployment/002/e09axa3q.jpg\",\n ],\n \"institutionCode\": [\n \"Instituto Humboldt\",\n \"Instituto Humboldt\",\n \"Instituto Humboldt\",\n ],\n \"scientificName\": [\"Panthera onca\", \"Tinamidae\", \"Cuniculus paca nelsoni\"],\n \"family\": [\"Felidae\", \"Tinamidae\", \"Cuniculidae\"],\n \"genus\": [\"Panthera\", np.nan, \"Cuniculus\"],\n \"specificEpithet\": [\"onca\", np.nan, \"paca\"],\n \"infraspecificEpithet\": [np.nan, np.nan, \"nelsoni\"],\n \"taxonRank\": [\"species\", \"family\", \"subspecies\"],\n }\n )\n 
pd.testing.assert_frame_equal(result, expected)\n\n\ndef test_intact_input(images, deployments, projects):\n images_original = images.copy()\n deployments_original = deployments.copy()\n create_dwc_occurrence(images, deployments, projects)\n pd.testing.assert_frame_equal(images_original, images)\n pd.testing.assert_frame_equal(deployments_original, deployments)\n","repo_name":"PEM-Humboldt/wiutils","sub_path":"tests/darwincore/test_create_dwc_occurrence.py","file_name":"test_create_dwc_occurrence.py","file_ext":"py","file_size_in_byte":7421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28379791855","text":"from pymongo import MongoClient\nfrom dataclasses import dataclass, asdict\n\ndb_name = \"airflow_slacker\"\ndag_run_collection = \"dag_runs\"\n\ndb = MongoClient()[db_name]\n\n\n@dataclass\nclass DAGRun:\n dag_id: str\n dag_run_url: str\n status: str\n start_time: str\n notified_status: str = \"\"\n slack_id: str = \"\"\n\n\nclass DuplicateResourceError(Exception):\n pass\n\n\ndef create_dag_run(dag_id, dag_run_url, status, start):\n col = db[dag_run_collection]\n dag_run = DAGRun(dag_id, dag_run_url, status, start)\n if not col.find_one({\"dag_run_url\": dag_run_url}):\n col.insert_one(asdict(dag_run))\n else:\n raise DuplicateResourceError(\"Duplicate dag run id.\")\n\n\ndef update_dag_run_status(dag_run_url, status):\n col = db[dag_run_collection]\n col.update_one({\"dag_run_url\": dag_run_url}, {\"$set\": {\"status\": status}})\n\n\ndef update_dag_run_slack(dag_run_url, slack_id, slack_status):\n col = db[dag_run_collection]\n col.update_one(\n {\"dag_run_url\": dag_run_url},\n {\"$set\": {\"slack_id\": slack_id, \"notified_status\": slack_status}},\n )\n\n\ndef get_running_dags():\n col = db[dag_run_collection]\n return [\n dag_run\n for dag_run in col.find()\n if dag_run[\"notified_status\"] not in [\"success\", \"failed\"]\n ]\n","repo_name":"kbeauregard/slackflow","sub_path":"slackflow/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"20830029908","text":"import numpy as np\nfrom scipy import misc\nfrom imp import reload\nfrom labfuns import *\nimport random\n\n\n# ## Bayes classifier functions to implement\n# \n# The lab descriptions state what each function should do.\n\n\n# NOTE: you do not need to handle the W argument for this part!\n# in: labels - N vector of class labels\n# out: prior - C x 1 vector of class priors\ndef computePrior(labels, W=None):\n Npts = labels.shape[0]\n if W is None:\n W = np.ones((Npts,1))/Npts\n else:\n assert(W.shape[0] == Npts)\n classes = np.unique(labels)\n Nclasses = np.size(classes)\n\n prior = np.zeros((Nclasses,1))\n\n for jdx, c in enumerate(classes):\n idx = np.where(labels == c)[0]\n prior[jdx] = np.sum(W[idx]) / np.sum(W[:])\n\n return prior\n\n# NOTE: you do not need to handle the W argument for this part!\n# in: X - N x d matrix of N data points\n# labels - N vector of class labels\n# out: mu - C x d matrix of class means (mu[i] - class i mean)\n# sigma - C x d x d matrix of class covariances (sigma[i] - class i sigma)\ndef mlParams(X, labels, W=None):\n assert(X.shape[0]==labels.shape[0])\n Npts,Ndims = np.shape(X)\n classes = np.unique(labels) # Returns the sorted unique elements of an array\n Nclasses = np.size(classes)\n\n if W is None:\n W = np.ones((Npts,1))/float(Npts)\n\n mu = np.zeros((Nclasses,Ndims))\n sigma = 
np.zeros((Nclasses,Ndims,Ndims))\n\n for jdx, c in enumerate(classes):\n idx = np.where(labels == c)[0] # Vector of length C of indices for a given label class c\n xlc = X[idx,:] * W[idx] # Matrix C x d with samples in the class c\n mu[jdx] = np.sum(xlc, axis=0)/np.sum(W[idx]) # Compute mean\n \n for jdx, c in enumerate(classes):\n idx = np.where(labels == c)[0] # Vector of length C of indices for a given label class c\n xlc = X[idx, :] # Matrix C x d with samples in the class c\n diff = xlc - mu[jdx] # Matrix C x d with diffs between x - µ\n diff = np.square(diff) * W[idx]\n mean = np.sum(diff, axis=0) / np.sum(W[idx])\n sigma[jdx] = np.diag(mean) # Use diagonal matrix for Naive Bayes Classifier\n\n return mu, sigma\n\n# in: X - N x d matrix of N data points\n# prior - C x 1 matrix of class priors\n# mu - C x d matrix of class means (mu[i] - class i mean)\n# sigma - C x d x d matrix of class covariances (sigma[i] - class i sigma)\n# out: h - N vector of class predictions for test points\ndef classifyBayes(X, prior, mu, sigma):\n\n Npts = X.shape[0]\n Nclasses,Ndims = np.shape(mu)\n logProb = np.zeros((Nclasses, Npts))\n\n for jdx in range(Nclasses):\n diff = X - mu[jdx] # Matrix C x d with diffs between x - µ\n lnSigma = - np.log(np.linalg.det(sigma[jdx])) / 2 # N vector\n lnPrior = np.log(prior[jdx]) # N vector\n for i in range(Npts):\n logProb[jdx][i] = lnSigma - np.inner(diff[i]/np.diag(sigma[jdx]), diff[i]) / 2 + lnPrior\n \n # one possible way of finding max a-posteriori once\n # you have computed the log posterior\n h = np.argmax(logProb,axis=0)\n return h\n\n\n# The implemented functions can now be summarized into the `BayesClassifier` class, which we will use later to test the classifier, no need to add anything else here:\n\n\n# NOTE: no need to touch this\nclass BayesClassifier(object):\n def __init__(self):\n self.trained = False\n\n def trainClassifier(self, X, labels, W=None):\n rtn = BayesClassifier()\n rtn.prior = computePrior(labels, W)\n rtn.mu, rtn.sigma = mlParams(X, labels, W)\n rtn.trained = True\n return rtn\n\n def classify(self, X):\n return classifyBayes(X, self.prior, self.mu, self.sigma)\n\n\n# ## Test the Maximum Likelihood estimates\n# \n# Call `genBlobs` and `plotGaussian` to verify your estimates.\n\n\n# X, labels = genBlobs(centers=5)\n# mu, sigma = mlParams(X,labels)\n# plotGaussian(X,labels,mu,sigma)\n\n\n# Call the `testClassifier` and `plotBoundary` functions for this part.\n\n# testClassifier(BayesClassifier(), dataset='iris', split=0.7)\n\n# testClassifier(BayesClassifier(), dataset='vowel', split=0.7)\n\n# plotBoundary(BayesClassifier(), dataset='vowel',split=0.7)\n\n\n# ## Boosting functions to implement\n# \n# The lab descriptions state what each function should do.\n\n\n# in: base_classifier - a classifier of the type that we will boost, e.g. 
BayesClassifier\n# X - N x d matrix of N data points\n# labels - N vector of class labels\n# T - number of boosting iterations\n# out: classifiers - (maximum) length T Python list of trained classifiers\n# alphas - (maximum) length T Python list of vote weights\ndef trainBoost(base_classifier, X, labels, T=10):\n # these will come in handy later on\n Npts,Ndims = np.shape(X)\n\n classifiers = [] # append new classifiers to this list\n alphas = [] # append the vote weight of the classifiers to this list\n\n # The weights for the first iteration\n wCur = np.ones((Npts,1))/float(Npts)\n\n for i_iter in range(0, T):\n # a new classifier can be trained like this, given the current weights\n classifiers.append(base_classifier.trainClassifier(X, labels, wCur))\n\n # do classification for each point\n vote = classifiers[-1].classify(X)\n\n classes = np.unique(labels)\n\n # Compute error by classes to simplify operations\n eps = 0 \n for jdx in classes:\n idx = np.where(vote == jdx)[0]\n eps += np.sum(np.transpose(wCur[idx]) * (1 - (jdx == labels[idx])))\n\n alpha = (np.log(1 - eps) - np.log(eps)) / 2 # Compute new alpha\n alphas.append(alpha) # you will need to append the new alpha\n\n # Update weights\n wOld = wCur\n for i in range(Npts):\n wCur[i] = wOld[i] * np.exp(alpha * (-1)**(vote[i]==labels[i]))\n wCur = wCur / np.sum(wCur)\n \n return classifiers, alphas\n\n# in: X - N x d matrix of N data points\n# classifiers - (maximum) length T Python list of trained classifiers as above\n# alphas - (maximum) length T Python list of vote weights\n# Nclasses - the number of different classes\n# out: yPred - N vector of class predictions for test points\ndef classifyBoost(X, classifiers, alphas, Nclasses):\n Npts = X.shape[0]\n Ncomps = len(classifiers)\n\n # if we only have one classifier, we may just classify directly\n if Ncomps == 1:\n return classifiers[0].classify(X)\n else:\n votes = np.zeros((Npts,Nclasses))\n\n for i in range(Ncomps):\n classified = classifiers[i].classify(X)\n for j in range(Npts):\n votes[j][classified[j]] += alphas[i]\n\n # one way to compute yPred after accumulating the votes\n return np.argmax(votes,axis=1)\n\n\n# The implemented functions can now be summarized into another classifier, the `BoostClassifier` class. This class enables boosting different types of classifiers by initializing it with the `base_classifier` argument. 
No need to add anything here.\n\n\n# NOTE: no need to touch this\nclass BoostClassifier(object):\n def __init__(self, base_classifier, T=10):\n self.base_classifier = base_classifier\n self.T = T\n self.trained = False\n\n def trainClassifier(self, X, labels):\n rtn = BoostClassifier(self.base_classifier, self.T)\n rtn.nbr_classes = np.size(np.unique(labels))\n rtn.classifiers, rtn.alphas = trainBoost(self.base_classifier, X, labels, self.T)\n rtn.trained = True\n return rtn\n\n def classify(self, X):\n return classifyBoost(X, self.classifiers, self.alphas, self.nbr_classes)\n\n\n# ## Run some experiments\n# \n# Call the `testClassifier` and `plotBoundary` functions for this part.\n\n\n#testClassifier(BoostClassifier(BayesClassifier(), T=10), dataset='iris',split=0.7)\n\n#testClassifier(BoostClassifier(BayesClassifier(), T=10), dataset='vowel',split=0.7)\n\n#plotBoundary(BoostClassifier(BayesClassifier()), dataset='vowel',split=0.7)\n\n\n# Now repeat the steps with a decision tree classifier.\n\n\n# testClassifier(DecisionTreeClassifier(), dataset='iris', split=0.7)\n\n# testClassifier(BoostClassifier(DecisionTreeClassifier(), T=10), dataset='iris',split=0.7)\n\n# testClassifier(DecisionTreeClassifier(), dataset='vowel',split=0.7)\n\n# testClassifier(BoostClassifier(DecisionTreeClassifier(), T=10), dataset='vowel',split=0.7)\n\n#plotBoundary(DecisionTreeClassifier(), dataset='iris',split=0.7)\n\n#plotBoundary(BoostClassifier(DecisionTreeClassifier(), T=10), dataset='iris',split=0.7)\n\n\n# ## Bonus: Visualize faces classified using boosted decision trees\n# \n# Note that this part of the assignment is completely voluntary! First, let's check how a boosted decision tree classifier performs on the olivetti data. Note that we need to reduce the dimension a bit using PCA, as the original dimension of the image vectors is `64 x 64 = 4096` elements.\n\n# testClassifier(BayesClassifier(), dataset='olivetti',split=0.7, dim=20)\n\n# testClassifier(BoostClassifier(DecisionTreeClassifier(), T=10), dataset='olivetti',split=0.7, dim=20)\n\n\n# You should get an accuracy around 70%. If you wish, you can compare this with using pure decision trees or a boosted bayes classifier. 
Not too bad, now let's try and classify a face as belonging to one of 40 persons!\n\n\n# X,y,pcadim = fetchDataset('olivetti') # fetch the olivetti data\n# xTr,yTr,xTe,yTe,trIdx,teIdx = trteSplitEven(X,y,0.7) # split into training and testing\n# pca = decomposition.PCA(n_components=20) # use PCA to reduce the dimension to 20\n# pca.fit(xTr) # use training data to fit the transform\n# xTrpca = pca.transform(xTr) # apply on training data\n# xTepca = pca.transform(xTe) # apply on test data\n# # use our pre-defined decision tree classifier together with the implemented\n# # boosting to classify data points in the training data\n# classifier = BayesClassifier().trainClassifier(xTrpca, yTr)\n# yPr = classifier.classify(xTepca)\n# # choose a test point to visualize\n# testind = random.randint(0, xTe.shape[0]-1)\n# # visualize the test point together with the training points used to train\n# # the class that the test point was classified to belong to\n# visualizeOlivettiVectors(xTr[yTr == yPr[testind],:], xTe[testind,:])\n\n","repo_name":"SimoneStefani/kth-dd2421","sub_path":"bayes-classifier/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10249,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"3843368551","text":"import requests\nimport streamlit as st\nfrom streamlit_extras.let_it_rain import rain\nfrom streamlit_lottie import st_lottie\nfrom streamlit_extras.add_vertical_space import add_vertical_space\n\ndef load_lottieurl(url: str):\n r = requests.get(url)\n if r.status_code != 200:\n return None\n return r.json()\n\ndef main():\n ani, wel = st.columns(2)\n with ani:\n st_lottie(load_lottieurl('https://assets3.lottiefiles.com/packages/lf20_xnbikipz.json'), quality='high', key='lottie', height=400)\n with wel:\n st.header(f'Welcome back, **{st.session_state[\"username\"]}!**')\n st.session_state['role'] = st.session_state['config']['accounts'].get(st.session_state['username'], 'reviewer')\n st.subheader(f'Your role: **{st.session_state[\"role\"]}**')\n st.session_state['permissions'] = st.session_state['config']['permissions'][st.session_state[\"role\"]]\n for p in st.session_state['permissions']:\n st.checkbox(p, True, p)\n rain(\n emoji = '🎁',\n font_size = 30,\n falling_speed = 10,\n animation_length = 'infinite',\n )\n add_vertical_space(5)\n st.markdown(\n '''\n---\n

Backend: AITeam

\n

Copyright © 1994 - 2023 MISA JSC

\n ''',\n unsafe_allow_html=True\n )\n","repo_name":"ChienLady/StreamlitToolDB","sub_path":"app/components/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15557880081","text":"import time\nimport uuid\nimport consts\nimport random\nimport logging\nimport requests\nimport threading\nfrom util import json_log\nfrom dto.order import ClientOrderDto, OrderDto\n\nlogger = logging.getLogger(__name__)\n\n\nclass Client:\n def __init__(self, i):\n self.name = f'Client-{i}'\n\n def generate_order(self):\n # request menu to food ordering service\n response = requests.get(f'http://{consts.FO_HOST}:{consts.FO_PORT}/menu')\n data = response.json()\n\n # pick some random restaurants\n rests = random.sample(data['restaurants_data'], random.randint(1, int(data['restaurants'])))\n\n # build a order for each of the restaurants\n client_order = ClientOrderDto(client_id=uuid.uuid4().hex[0:4], orders=[])\n\n for i, rest in enumerate(rests):\n single_order = OrderDto(restaurant_id=rest['restaurant_id'], items=[], max_wait=0, priority=random.randint(1, 5), time_start=time.time())\n for j in range(random.randint(1, 5)):\n food = random.choice(rest['menu'])\n single_order.items.append(food['id'])\n if single_order.max_wait < food['preparation-time']:\n single_order.max_wait = food['preparation-time']\n single_order.max_wait *= 1.6\n client_order.orders.append(single_order)\n client_order.created_time = time.time()\n\n # send order to food ordering service\n response = requests.post(f'http://{consts.FO_HOST}:{consts.FO_PORT}/order', json=client_order.dict())\n json_ = response.json()\n\n logger.info(f'\\n{self.name} New order: {json_log(client_order.dict())}\\nFO response: {json_log(json_)}')\n\n # client wait x time for each suborder from order\n for i, data in enumerate(json_['orders']):\n threading.Thread(target=self.client_wait_single_order, args=(json_, data), daemon=True).start()\n\n def client_wait_single_order(self, fo_response, single_order):\n start_waiting = time.time()\n logger.info(f'{self.name} order: \"{fo_response[\"order_id\"]}\" | suborder: {single_order[\"order_id\"]} | waiting time: {single_order[\"estimated_waiting_time\"]}')\n time.sleep(single_order['estimated_waiting_time'])\n is_ready = False\n while not is_ready:\n # find out if order is done\n logger.warning(f'{self.name} order: \"{fo_response[\"order_id\"]}\" | suborder: {single_order[\"order_id\"]} | request to dining-hall: {single_order[\"restaurant_address\"]}')\n response = requests.get(f'http://{consts.DH_HOST}:{single_order[\"restaurant_address\"]}/v2/order/{fo_response[\"order_id\"]}')\n json_ = response.json()\n logger.warning(f'{self.name} order: \"{fo_response[\"order_id\"]}\" | suborder: {single_order[\"order_id\"]} | dining-hall: {single_order[\"restaurant_address\"]} | \\nresponse: {json_log(json_)}')\n # if order is not ready wait for more time\n if json_['is_ready']:\n prep_time = int(time.time() - start_waiting)\n logger.debug(f'{self.name} order: \"{fo_response[\"order_id\"]}\" | suborder: {single_order[\"order_id\"]} | dining-hall: {single_order[\"restaurant_address\"]} | max_wait: {single_order[\"max_wait\"]} | prep_time: {prep_time} DONE!')\n\n # calculate rating stars\n stars = self.rating_stars(single_order[\"max_wait\"], prep_time)\n logger.info(f'{self.name} order: \"{fo_response[\"order_id\"]}\" | suborder: {single_order[\"order_id\"]} | dining-hall: 
{single_order[\"restaurant_address\"]} | STARS: {stars}')\n req = {\"order_id\": fo_response[\"order_id\"], \"stars\": stars, \"dh_address\": single_order[\"restaurant_address\"]}\n res = requests.post(f'http://{consts.FO_HOST}:{consts.FO_PORT}/rating', json=req)\n logger.info(f'{self.name} order: \"{fo_response[\"order_id\"]}\" | suborder: {single_order[\"order_id\"]} | new RATING: {res.json()[\"updated_rating\"]}')\n is_ready = True\n threading.Thread(target=self.generate_order, daemon=True, name=f'{self.name}').start()\n\n else:\n logger.debug(f'{self.name} order: \"{fo_response[\"order_id\"]}\" | suborder: {single_order[\"order_id\"]} | dining-hall: {single_order[\"restaurant_address\"]} NOT DONE!')\n time.sleep(json_['estimated_waiting_time'])\n\n @staticmethod\n def rating_stars(max_wait, total):\n stars = 0\n if max_wait >= total:\n stars = 5\n elif max_wait * 1.1 >= total:\n stars = 4\n elif max_wait * 1.2 >= total:\n stars = 3\n elif max_wait * 1.3 >= total:\n stars = 2\n elif max_wait * 1.4 >= total:\n stars = 1\n\n return stars\n","repo_name":"nichitaa/multi-threaded-restaurant-simulation","sub_path":"client-service/domain/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34035611065","text":"from makedataset import makeDataset\nfrom model import UNet\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport config\nimport torch.nn as nn\nimport pickle\nimport os\nimport matplotlib.pyplot as plt\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom Losses import DiceLoss, GeneralizedDiceLoss\nfrom torchvision import transforms\n\n\nclass DiceScore(nn.Module):\n def __init__(self, weight=None, size_average=True):\n super().__init__()\n self.normalization = nn.Softmax(dim=1)\n\n def forward(self, inputs, targets, smooth=1):\n inputs = self.normalization(inputs)\n\n targets = targets[:, 1:2, ...]\n inputs = torch.where(inputs[:, 1:2, ...] > 0.5, 1.0, 0.0)\n\n inputs = inputs.reshape(-1)\n targets = targets.reshape(-1)\n\n intersection = (inputs * targets).sum()\n dice = (2. 
* intersection + smooth) / (inputs.sum() + targets.sum() + smooth)\n\n return dice\n\n\n# define Transform\ntr = transforms.Compose([\n transforms.RandomCrop(256)\n])\n\n# make dataLoader\ntrainds = makeDataset(kind='train', location='data_npy')\nvalidds = makeDataset(kind='valid', location='data_npy')\n\ntrainLoader = DataLoader(trainds, batch_size=config.BATCH_SIZE, shuffle=True,\n pin_memory=config.PIN_MEMORY)\nvalidLoader = DataLoader(validds, batch_size=config.BATCH_SIZE, shuffle=False,\n pin_memory=config.PIN_MEMORY)\n\nparams = [0.0001]\nos.makedirs('final_result', exist_ok=True)\nfor (lr_) in params:\n # Define Model################################################################################################\n unet = UNet(64, 5, use_xavier=True, use_batchNorm=True, dropout=0.5, retain_size=True, nbCls=2)\n\n devices = 'cpu'\n device_num = 0\n if torch.cuda.is_available():\n devices = 'gpu'\n device_num = torch.cuda.device_count()\n if device_num > 1:\n unet = torch.nn.DataParallel(unet)\n unet.to(config.DEVICE)\n #############################################################################################################\n\n # Define History, optimizer, schedular, loss function########################################################\n history = {'train_loss': [], 'valid_loss': [], 'dice_valid_score': []}\n num_train = int(len(trainds) // config.BATCH_SIZE)\n writer = SummaryWriter(log_dir='./runs/Train')\n opt = torch.optim.NAdam(unet.parameters(), lr=lr_)\n schedular = ReduceLROnPlateau(opt, 'min', patience=5, factor=0.25, verbose=True)\n dicelossfunc = GeneralizedDiceLoss(normalization='softmax')\n diceScore = DiceScore()\n #############################################################################################################\n\n # main train#################################################################################################\n pbar = tqdm(range(config.N_EPOCHS), leave=False, position=0)\n for e in pbar:\n unet.train()\n totalloss = 0\n totalvalidloss = 0\n totalvaliddice = 0\n\n trainstep = 0\n validstep = 0\n\n inner_pbar = tqdm(range(num_train), leave=False, position=1)\n data_iter = iter(trainLoader)\n for i in inner_pbar:\n (x, y) = next(data_iter)\n (x, y) = (x.to(config.DEVICE), y.to(config.DEVICE))\n\n pred = unet(x)\n loss = dicelossfunc(pred, y)\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n totalloss += loss\n trainstep += 1\n inner_pbar.set_postfix({'Train_loss': \"{:.4f}\".format(loss)})\n\n with torch.no_grad():\n unet.eval()\n for (x, y) in validLoader:\n (x, y) = (x.to(config.DEVICE), y.to(config.DEVICE))\n\n pred = unet(x)\n validloss = dicelossfunc(pred.clone(), y.clone())\n totalvalidloss += validloss\n\n validScore = diceScore(pred, y)\n\n totalvaliddice += validScore\n validstep += 1\n\n avgloss = (totalloss / trainstep).cpu().detach().numpy()\n avgvalidloss = (totalvalidloss / validstep).cpu().detach().numpy()\n avgvaliddice = (totalvaliddice / validstep).cpu().detach().numpy()\n\n schedular.step(avgvalidloss)\n\n history['train_loss'].append(avgloss)\n history['valid_loss'].append(avgvalidloss)\n history['dice_valid_score'].append(avgvaliddice)\n\n writer.add_scalar('train_loss', avgloss, e)\n writer.add_scalar('validation_loss', avgvalidloss, e)\n writer.add_scalar('validation_dice', avgvaliddice, e)\n\n writer.add_scalars('loss', {'Train': avgloss, 'Valid': avgvalidloss}, e)\n\n pbar.set_postfix({'Train_avg_loss': '{:.4f}'.format(avgloss),\n 'Valid_avg_loss': '{:.4f}'.format(avgvalidloss),\n 'Valid_avg_dice': 
'{:.4f}%'.format(100 * avgvaliddice)})\n\n torch.save(unet.state_dict(), './final_result/unet_{}.pt'.format(e + 1))\n with open('./final_result/history_{}.pkl'.format(e + 1), 'wb') as f:\n pickle.dump(history, f)\n\n writer.flush()\n writer.close()\n\n print('Saving model...\\n\\n')\n torch.save(unet.state_dict(), './final_result/UNet.pt')\n\n print('Saving figure...\\n\\n')\n plt.style.use('ggplot')\n plt.figure(figsize=(15, 10))\n plt.plot(history['train_loss'], label='Train_Dice_Loss')\n plt.plot(history['valid_loss'], label='Validation_Dice_Loss')\n plt.title('Training Dice Score on Dataset')\n plt.xlabel('Number of Epoch')\n plt.ylabel('Dice Loss')\n plt.legend(loc='lower left')\n plt.savefig('./final_result/train_result.png')\n\n print('Saving History...\\n\\n')\n with open('./final_result/history.pkl', 'wb') as f:\n pickle.dump(history, f)\n\nprint('***************End of System***************')\n","repo_name":"sillsill777/KiTS19-Challenge-using-U-net","sub_path":"main_train.py","file_name":"main_train.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"16792404406","text":"\"\"\"importing and registering model in django admin\"\"\"\n# blog/admin.py\nfrom django.contrib import admin\n#from models import Comment\nfrom . import models\nfrom .models import Comment\n\nclass ExistingCommentInline(admin.TabularInline):\n \"\"\"creating inline for comments\"\"\"\n extra = 0\n model = Comment\n #fields = [\"name\", \"email\", \"text\", \"approved\"]\n readonly_fields = [\"name\", \"email\", \"text\"]\n\n #def exsisting(self, request):\n #return False\n def has_add_permission(self, request, obj=None):\n return False\n\nclass NewCommentInline(admin.TabularInline):\n \"\"\"creating inline for comments\"\"\"\n extra = 0\n model = Comment\n fields = [\"name\", \"email\", \"text\", \"approved\"]\n #readonly_fields = [\"name\", \"email\", \"text\"]\n def has_add_permission(self, request, obj=None):\n\n return True\n\n# Register the `Post` model\nclass PostAdmin(admin.ModelAdmin):\n \"\"\"customising post model view\"\"\"\n list_display = ('title', 'author', 'created', 'updated')\n #defining how to search on the page\n search_fields = ('title', 'author__username', 'author__first_name', 'author__last_name')\n #we can now filter on the basis of status all, draft or published\n list_filter = (\n 'status',\n 'topics',\n ) #admin.RelatedOnlyFieldListFilter\n prepopulated_fields = {'slug': ('title',)}\n inlines = [\n ExistingCommentInline,\n NewCommentInline\n ]\n\n #def author(self, obj):#for list_display\n #return obj.author\n\n #def title(self, obj):#for list_display\n #return obj.title\nclass CommentAdmin(admin.ModelAdmin):\n \"\"\"customising comment model view\"\"\"\n list_display = (\n 'name',\n 'text',\n 'created',\n 'updated',\n 'approved',\n )\n #defining how to search on the page\n search_fields = (\n 'text',\n )\n #we can now filter on the basis of approved or not\n list_filter = (\n 'approved',\n )\n\nclass TopicAdmin(admin.ModelAdmin):\n list_display = (\n 'name',\n 'slug',\n )\n prepopulated_fields = {'slug': ('name',)}\n\nclass ContactAdmin(admin.ModelAdmin):\n list_display = (\n 'email',\n 'last_name',\n 'first_name',\n 'submitted'\n )\n # Make these fields read-only in the admin\n readonly_fields = (\n 'first_name',\n 'last_name',\n 'email',\n 'message',\n 'submitted'\n )\n\nclass Photo_ContestAdmin(admin.ModelAdmin):\n list_display = (\n 'email',\n 'last_name',\n 'first_name',\n 
'photo_submitted'\n )\n # Make these fields read-only in the admin\n readonly_fields = (\n 'first_name',\n 'last_name',\n 'email',\n 'image',\n 'photo_submitted'\n )\n list_filter = (\n 'photo_submitted',\n )\n\n# Register the `Post` model\nadmin.site.register(models.Post, PostAdmin)\n# Register the `Comment` model\nadmin.site.register(models.Comment, CommentAdmin)\nadmin.site.register(models.Topic, TopicAdmin)\nadmin.site.register(models.Contact, ContactAdmin)\nadmin.site.register(models.Photo_Contest, Photo_ContestAdmin)\n","repo_name":"Shivani-Y/beni_fan_page","sub_path":"blog/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7752804600","text":"from wand.image import Image\n\n\ndef pseudo(name: str) -> str:\n \"\"\"\n input : name, a str\n output : pseudo_name, a pseudonymized string of name\"\"\"\n pseudo_name = \"\"\n for x in name.split():\n pseudo_name += \" \" + x[0]\n return pseudo_name\n\n\ndef pseudo_geo(geo: str) -> str:\n \"\"\"\n input : geo, a str\n output : the pseudonymized version of geo, here an empty string\"\"\"\n return \"\"\n\n\ndef response_to_str(obj) -> str:\n \"\"\"\n input : obj, a pdf response structure\n output : the string contained in response structure\"\"\"\n str = \"\"\n for i, x in enumerate(obj):\n str += x[1] + \" \"\n return str\n\n\ndef pdf_to_image(f) -> list:\n \"\"\"\n input : f, a pdf file\n output : list of paths to jpeg files representing pages in pdf file\"\"\"\n L = []\n with Image(filename=f, resolution=200) as source:\n for i, image in enumerate(source.sequence):\n newfilename = f.removesuffix(\".pdf\") + str(i + 1) + \".jpeg\"\n Image(image).save(filename=newfilename)\n L.append(newfilename)\n return L\n\n\ndef coordinates_from_points(position: list) -> list:\n \"\"\"\n input : position in easyocr format, list\n output : list of 4 coordinates for a rectangle\"\"\"\n return [\n position[0][0] / 2.8,\n position[0][1] / 2.8,\n position[2][0] / 2.8,\n position[2][1] / 2.8,\n ]\n","repo_name":"luclemot/pseudo_pdf_tool","sub_path":"api/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13892503640","text":"command = input()\nproducts = {}\nwhile command != \"statistics\":\n key,value = command.split(\": \")\n if key in products:\n products[key] += int(value)\n else:\n products[key] = int(value)\n\n command = input()\n\nprint(\"Products in stock:\")\nfor (product, quantity) in products.items():\n print(f\"- {product}: {quantity}\")\n\nprint(f\"Total Products: {len(products.keys())}\\nTotal Quantity: {sum(products.values())}\")","repo_name":"DianVK/softuni_python_fundamentals","sub_path":"Dictionaries - Lab/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"11317684186","text":"class Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n n = len(matrix)\n m = len(matrix[0])\n left = 0\n right = n*m - 1\n\n while left <= right:\n mid = (left + right)//2\n x = matrix[mid//m][mid%m]\n\n if x == target:\n return True\n elif x < target:\n left = mid + 1\n else:\n right = mid - 1\n \n return False\n","repo_name":"vivian-dai/Leetcode-Solutions","sub_path":"Daily Challenge/2023/08 - 
August/2023-08-06/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"14544382162","text":"from django.test import TestCase\nfrom .models import Message, User\nfrom core.models import Professional\nfrom services.models import Job, Proposal\nfrom django.utils import timezone\n\n\nTODAY = timezone.now()\ntimedelta = timezone.timedelta\nclass TestChat(TestCase):\n\n def setUp(self):\n self.client = User(\n email='teste4@teste.com',\n full_name='Tom Cruise',\n cellphone='31988776455',\n password='senha',\n )\n self.client.save()\n self.user = User(\n email='teste5@teste.com',\n full_name='Tom Jobim',\n cellphone='31988776615',\n password='senha',\n )\n self.user.save()\n self.professional = Professional(\n user=self.user,\n skills=['CI', 'AE', 'EM'],\n occupation='CI',\n coren='10.002'\n )\n self.professional.save()\n self.proposal = Proposal(\n client=self.client,\n professional=self.professional,\n city='Curitiba',\n state='PR',\n professional_type='AE',\n service_type='AC',\n start_datetime=TODAY + timedelta(days=1),\n end_datetime=TODAY + timedelta(days=3),\n value=300.00,\n description='Lorem Ipsum dolores'\n )\n self.proposal.save()\n self.job = Job(\n proposal=self.proposal,\n client=self.client,\n professional=self.professional,\n value=300,\n start_datetime=TODAY + timedelta(days=1)\n )\n self.job.save()\n\n def test_send_message(self):\n message = Message(\n sender=self.client,\n receiver=self.user,\n content='Hello! How are you?',\n job=self.job,\n )\n message.save()\n self.assertEqual(\n self.client.messages_sent.all()[0],\n self.user.received_messages.all()[0]\n )","repo_name":"guilhermewebdev/acacia-api","sub_path":"chat/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"46109710967","text":"from typing import List as ListType, Union, Optional, Tuple\nfrom dataclasses import dataclass\n\nclass Elem:\n pass\n\n# =================================================================================================\n# SVG element\n# =================================================================================================\n\n@dataclass\nclass SVG(Elem):\n pass\n\n# =================================================================================================\n# Text element\n# =================================================================================================\n\n@dataclass\nclass Text(Elem):\n content : str\n\n# =================================================================================================\n# BCP14 element\n# =================================================================================================\n\n@dataclass\nclass BCP14(Elem):\n \"\"\"\n RFC 7991 Section 2.9\n \"\"\"\n content : Text\n\n# =================================================================================================\n# EM element\n# =================================================================================================\n\nEmContent = Union[Text, BCP14, \"CRef\", \"IRef\", \"RelRef\", \"Strong\", \"Sub\", \"Sup\", \"TT\", \"XRef\"]\n\n@dataclass\nclass EM(Elem):\n \"\"\"\n RFC 7991 Section 2.22\n \"\"\"\n content : ListType[EmContent]\n\n# =================================================================================================\n# {C, X, I, E, Rel}Ref elements\n# 
=================================================================================================\n\n@dataclass\nclass RelRef(Elem):\n \"\"\"\n RFC 7991 Section 2.44\n \"\"\"\n content : Text\n displayFormat : Optional[str]\n relative : Optional[str]\n section : str\n target : str\n\n@dataclass\nclass ERef(Elem):\n \"\"\"\n RFC 7991 Section 2.24\n \"\"\"\n content : Optional[Text]\n target : str\n\n@dataclass\nclass IRef(Elem):\n \"\"\"\n RFC 7991 Section 2.27\n \"\"\"\n item : str\n primary : Optional[bool]\n subitem : Optional[str]\n\n@dataclass\nclass XRef(Elem):\n \"\"\"\n RFC 7991 Section 2.66\n \"\"\"\n content : Optional[Text]\n format : Optional[str]\n pageno : Optional[bool]\n target : str\n\n\nCRefContent = Union[Text, EM, ERef, RelRef, \"Strong\", \"Sub\", \"Sup\", \"TT\", XRef]\n\n@dataclass\nclass CRef(Elem):\n \"\"\"\n RFC 7991 Section 2.16\n \"\"\"\n content : ListType[CRefContent]\n anchor : Optional[str]\n display : Optional[bool]\n source : Optional[str]\n\n# =================================================================================================\n# Sub and Sup elements\n# =================================================================================================\n\nStrongContent = Union[Text, BCP14, CRef, EM, ERef, IRef, RelRef, \"Sub\", \"Sup\", \"TT\", XRef]\n\n@dataclass\nclass Strong(Elem):\n \"\"\"\n RFC 7991 Section 2.50\n \"\"\"\n content : ListType[StrongContent]\n\n\nTTContent = Union[Text, BCP14, CRef, EM, ERef, IRef, RelRef, Strong, \"Sub\", \"Sup\", XRef]\n\n@dataclass\nclass TT(Elem):\n \"\"\"\n RFC 7991 Section 2.62\n \"\"\"\n content : ListType[TTContent]\n\n\nSubContent = Union[Text, BCP14, CRef, EM, ERef, IRef, RelRef, Strong, TT, XRef]\n\n@dataclass\nclass Sub(Elem):\n \"\"\"\n RFC 7991 Section 2.51\n \"\"\"\n content : ListType[SubContent]\n\n\nSupContent = Union[Text, BCP14, CRef, EM, ERef, IRef, RelRef, Strong, TT, XRef]\n\n@dataclass\nclass Sup(Elem):\n \"\"\"\n RFC 7991 Section 2.52\n \"\"\"\n content : ListType[SupContent]\n\n\n# =================================================================================================\n# SpanX element\n# =================================================================================================\n\n@dataclass\nclass SpanX(Elem):\n \"\"\"\n RFC 7991 Section 3.7\n \"\"\"\n content : Text\n style : Optional[str]\n xmlSpace : Optional[str]\n\n\n# =================================================================================================\n# T elements\n# =================================================================================================\n\n@dataclass\nclass List(Elem):\n \"\"\"\n RFC 7991 Section 3.4\n \"\"\"\n content : ListType[\"T\"]\n counter : Optional[str]\n hangIndent : Optional[str]\n style : Optional[str]\n\n@dataclass\nclass VSpace(Elem):\n \"\"\"\n RFC 7991 Section 3.10\n \"\"\"\n blankLines : Optional[str]\n\n\nTContent = Union[Text, BCP14, CRef, EM, ERef, IRef, List, RelRef, SpanX, Strong, Sub, Sup, TT, VSpace, XRef, 'T']\n\n@dataclass\nclass T(Elem):\n \"\"\"\n RFC 7991 Section 2.53\n \"\"\"\n content : ListType[TContent]\n anchor : Optional[str]\n hangText : Optional[str]\n keepWithNext : Optional[bool]\n keepWithPrevious : Optional[bool]\n\n# =================================================================================================\n# Artwork element\n# =================================================================================================\n\n@dataclass\nclass Artwork(Elem):\n \"\"\"\n RFC 7991 Section 2.5\n \"\"\"\n content : Union[Text, 
ListType[SVG]]\n align : Optional[str]\n alt : Optional[str]\n anchor : Optional[str]\n height : Optional[str]\n name : Optional[str]\n src : Optional[str]\n type : Optional[str]\n width : Optional[str]\n xmlSpace : Optional[str]\n\n# =================================================================================================\n# Pre and Postamble elements\n# =================================================================================================\n\nPostambleContent = Union[Text, BCP14, CRef, EM, ERef, IRef, SpanX, Strong, Sub, Sup, TT, XRef]\n\n@dataclass\nclass Postamble(Elem):\n \"\"\"\n RFC 7991 Section 3.5\n \"\"\"\n content : ListType[PostambleContent]\n\n\nPreambleContent = Union[Text, BCP14, CRef, EM, ERef, IRef, SpanX, Strong, Sub, Sup, TT, XRef]\n\n@dataclass\nclass Preamble(Elem):\n \"\"\"\n RFC 7991 Section 3.6\n \"\"\"\n content : ListType[PreambleContent]\n\n\n# =================================================================================================\n# Name element\n# =================================================================================================\n\nNameContent = Union[Text, CRef, ERef, RelRef, TT, XRef]\n\n@dataclass\nclass Name(Elem):\n \"\"\"\n RFC 7991 Section 2.32\n \"\"\"\n content : ListType[NameContent]\n\n# =================================================================================================\n# SourceCode element\n# =================================================================================================\n\n@dataclass\nclass SourceCode(Elem):\n \"\"\"\n RFC 7991 Section 2.48\n \"\"\"\n content : Text\n anchor : Optional[str]\n name : Optional[str]\n src : Optional[str]\n type : Optional[str]\n\n# =================================================================================================\n# Figure element\n# =================================================================================================\n\n@dataclass\nclass Figure(Elem):\n \"\"\"\n RFC 7991 Section 2.25\n \"\"\"\n name : Optional[Name]\n irefs : Optional[ListType[IRef]]\n preamble : Optional[Preamble]\n content : ListType[Union[Artwork, SourceCode]]\n postamble : Optional[Postamble]\n align : Optional[str]\n alt : Optional[str]\n anchor : Optional[str]\n height : Optional[str]\n src : Optional[str]\n suppressTitle : Optional[bool]\n title : Optional[str]\n width : Optional[str]\n\n# =================================================================================================\n# OL elements\n# =================================================================================================\n\nLIContent = Union[Artwork, \"DL\", Figure, \"OL\", SourceCode, T, \"UL\", \n Text, BCP14, CRef, EM, ERef, IRef, RelRef, Strong, Sub, Sup, TT, XRef]\n\n@dataclass\nclass LI(Elem):\n \"\"\"\n RFC 7991 Section 2.29\n \"\"\"\n content : ListType[LIContent]\n anchor : Optional[str]\n\n@dataclass\nclass UL(Elem):\n \"\"\"\n RFC 7991 Section 2.63\n \"\"\"\n content : ListType[LI]\n anchor : Optional[str]\n empty : Optional[bool]\n spacing : Optional[str]\n\n@dataclass\nclass OL(Elem):\n \"\"\"\n RFC 7991 Section 2.34\n \"\"\"\n content : ListType[LI]\n anchor : Optional[str]\n group : Optional[str]\n spacing : Optional[str]\n start : Optional[str]\n type : Optional[str]\n\n# =================================================================================================\n# DL elements\n# =================================================================================================\n\nDDContent = Union[Artwork, \"DL\", Figure, OL, SourceCode, T, 
UL,\n Text, BCP14, CRef, EM, ERef, IRef, RelRef, Strong, Sub, Sup, TT, XRef]\n\n@dataclass\nclass DD(Elem):\n \"\"\"\n RFC 7991 Section 2.18\n \"\"\"\n content : ListType[DDContent]\n anchor : Optional[str]\n\n\nDTContent = Union[Text, BCP14, CRef, EM, ERef, IRef, RelRef, Strong, Sub, Sup, TT, XRef]\n\n@dataclass\nclass DT(Elem):\n \"\"\"\n RFC 7991 Section 2.21\n \"\"\"\n content : ListType[DTContent]\n anchor : Optional[str]\n\n\n@dataclass\nclass DL(Elem):\n \"\"\"\n RFC 7991 Section 2.20\n \"\"\"\n content : ListType[Tuple[DT, DD]]\n anchor : Optional[str]\n hanging : Optional[bool]\n spacing : Optional[str]\n\n# =================================================================================================\n# TextTable elements\n# =================================================================================================\n\n@dataclass\nclass TTCol(Elem):\n \"\"\"\n RFC 7991 Section 3.9\n \"\"\"\n content : ListType[Union[Text, CRef, ERef, IRef, XRef]]\n align : Optional[str]\n width : Optional[str]\n\n\nCContent = Union[Text, BCP14, CRef, EM, ERef, IRef, SpanX, Strong, Sub, Sup, TT, XRef]\n\n@dataclass\nclass C(Elem):\n \"\"\"\n RFC 7991 Section 3.1\n \"\"\"\n content : ListType[CContent]\n\n\n@dataclass\nclass TextTable(Elem):\n \"\"\"\n RFC 7991 Section 3.8\n \"\"\"\n name : Optional[Name]\n preamble : Optional[Preamble]\n ttcols : ListType[TTCol]\n cs : Optional[ListType[C]]\n postamble : Optional[Postamble]\n align : Optional[str]\n anchor : Optional[str]\n style : Optional[str]\n suppressTitle : Optional[bool]\n title : Optional[str]\n\n# =================================================================================================\n# TR elements\n# =================================================================================================\n\n@dataclass\nclass BR(Elem):\n \"\"\"\n RFC 7991 Section 2.12\n \"\"\"\n\nTHContent = Union[Artwork, DL, Figure, OL, SourceCode, T, UL,\n Text, BCP14, BR, CRef, EM, ERef, IRef, RelRef, Strong, Sub, Sup, TT, XRef]\n\n@dataclass\nclass TH(Elem):\n \"\"\"\n RFC 7991 Section 2.58\n \"\"\"\n content : ListType[THContent]\n align : Optional[str]\n anchor : Optional[str]\n colspan : Optional[str]\n rowspan : Optional[str]\n\n\nTDContent = Union[Artwork, DL, Figure, OL, SourceCode, T, UL,\n Text, BCP14, BR, CRef, EM, ERef, IRef, RelRef, Strong, Sub, Sup, TT, XRef]\n\n@dataclass\nclass TD(Elem):\n \"\"\"\n RFC 7991 Section 2.56\n \"\"\"\n content : ListType[TDContent]\n align : Optional[str]\n anchor : Optional[str]\n colspan : Optional[str]\n rowspan : Optional[str]\n\n\n@dataclass\nclass TR(Elem):\n \"\"\"\n RFC 7991 Section 2.61\n \"\"\"\n content : ListType[Union[TD, TH]]\n anchor : Optional[str]\n\n# =================================================================================================\n# Table elements\n# =================================================================================================\n\n@dataclass\nclass TBody(Elem):\n \"\"\"\n RFC 7991 Section 2.55\n \"\"\"\n content : ListType[TR]\n anchor : Optional[str]\n\n@dataclass\nclass TFoot(Elem):\n \"\"\"\n RFC 7991 Section 2.57\n \"\"\"\n content : ListType[TR]\n anchor : Optional[str]\n\n@dataclass\nclass THead(Elem):\n \"\"\"\n RFC 7991 Section 2.59\n \"\"\"\n content : ListType[TR]\n anchor : Optional[str]\n\n@dataclass\nclass Table(Elem):\n \"\"\"\n RFC 7991 Section 2.54\n \"\"\"\n name : Optional[Name]\n irefs : Optional[ListType[IRef]]\n thead : Optional[THead]\n tbodies : ListType[TBody]\n tfoot : Optional[TFoot]\n anchor : Optional[str]\n\n# 
=================================================================================================\n# Section elements\n# =================================================================================================\n\n@dataclass\nclass Aside(Elem):\n \"\"\"\n RFC 7991 Section 2.6\n \"\"\"\n content : ListType[Union[Artwork, DL, Figure, IRef, List, OL, T, Table, UL]]\n anchor : Optional[str]\n\n\nBlockQuoteContent = Union[Artwork, DL, Figure, OL, SourceCode, T, UL,\n Text, BCP14, CRef, EM, ERef, IRef, RelRef, Strong, Sub, Sup, TT, XRef]\n\n@dataclass\nclass BlockQuote(Elem):\n \"\"\"\n RFC 7991 Section 2.10\n \"\"\"\n content : ListType[BlockQuoteContent]\n anchor : Optional[str]\n cite : Optional[str]\n quotedFrom : Optional[str]\n\n\n@dataclass\nclass Section(Elem):\n \"\"\"\n RFC 7991 Section 2.46\n \"\"\"\n name : Optional[Name]\n content : ListType[Union[Artwork, Aside, BlockQuote, DL, Figure, IRef, OL, SourceCode, T, Table, TextTable, UL, List]]\n sections : Optional[ListType[\"Section\"]]\n anchor : Optional[str]\n numbered : Optional[bool]\n removeInRFC : Optional[bool]\n title : Optional[str]\n toc : Optional[str]\n\n# =================================================================================================\n# Middle element\n# =================================================================================================\n\n@dataclass\nclass Middle(Elem):\n \"\"\"\n RFC 7991 Section 2.31\n \"\"\"\n content : ListType[Section]\n\n# =================================================================================================\n# Postal elements\n# =================================================================================================\n\n@dataclass\nclass Street(Elem):\n \"\"\"\n RFC 7991 Section 2.49\n \"\"\"\n content : Text\n ascii : Optional[str]\n\n@dataclass\nclass Region(Elem):\n \"\"\"\n RFC 7991 Section 2.43\n \"\"\"\n content : Text\n ascii : Optional[str]\n\n@dataclass\nclass PostalLine(Elem):\n \"\"\"\n RFC 7991 Section 2.38\n \"\"\"\n content : Text\n ascii : Optional[str]\n\n@dataclass\nclass City(Elem):\n \"\"\"\n RFC 7991 Section 2.13\n \"\"\"\n content : Text\n ascii : Optional[str]\n\n@dataclass\nclass Code(Elem):\n \"\"\"\n RFC 7991 Section 2.14\n \"\"\"\n content : Text\n ascii : Optional[str]\n\n@dataclass\nclass Country(Elem):\n \"\"\"\n RFC 7991 Section 2.15\n \"\"\"\n content : Text\n ascii : Optional[str]\n\n\nPostalContent = Union[City, Code, Country, Region, Street, PostalLine]\n\n@dataclass\nclass Postal(Elem):\n \"\"\"\n RFC 7991 Section 2.37\n \"\"\"\n content : ListType[PostalContent]\n\n\n# =================================================================================================\n# Address elements\n# =================================================================================================\n\n@dataclass\nclass Email(Elem):\n \"\"\"\n RFC 7991 Section 2.23\n \"\"\"\n content : Text\n ascii : Optional[str]\n\n@dataclass\nclass Phone(Elem):\n \"\"\"\n RFC 7991 Section 2.36\n \"\"\"\n content : Text\n\n@dataclass\nclass URI(Elem):\n \"\"\"\n RFC 7991 Section 2.64\n \"\"\"\n content : Text\n\n@dataclass\nclass Facsimile(Elem):\n \"\"\"\n RFC 7991 Section 3.2\n \"\"\"\n content : Text\n\n@dataclass\nclass Address(Elem):\n \"\"\"\n RFC 7991 Section 2.2\n \"\"\"\n postal : Optional[Postal]\n phone : Optional[Phone]\n facsimile : Optional[Facsimile]\n email : Optional[Email]\n uri : Optional[URI]\n\n# =================================================================================================\n# Author elements\n# 
=================================================================================================\n\n@dataclass\nclass Organization(Elem):\n \"\"\"\n RFC 7991 Section 2.35\n \"\"\"\n content : Optional[Text]\n abbrev : Optional[str]\n ascii : Optional[str]\n\n@dataclass\nclass Author(Elem):\n \"\"\"\n RFC 7991 Section 2.7\n \"\"\"\n org : Optional[Organization]\n address : Optional[Address]\n asciiFullname : Optional[str]\n asciiInitials : Optional[str]\n asciiSurname : Optional[str]\n fullname : Optional[str]\n initials : Optional[str]\n role : Optional[str]\n surname : Optional[str]\n\n# =================================================================================================\n# Front elements\n# =================================================================================================\n\n@dataclass\nclass SeriesInfo(Elem):\n \"\"\"\n RFC 7991 Section 2.47\n \"\"\"\n asciiName : Optional[str]\n asciiValue : Optional[str]\n name : str\n status : Optional[str]\n stream : Optional[str]\n value : str\n\n@dataclass\nclass Title(Elem):\n \"\"\"\n RFC 7991 Section 2.60\n \"\"\"\n content : Text\n abbrev : Optional[str]\n ascii : Optional[str]\n\n@dataclass\nclass Date(Elem):\n \"\"\"\n RFC 7991 Section 2.17\n \"\"\"\n day : Optional[str]\n month : Optional[str]\n year : Optional[str]\n\n@dataclass\nclass Area(Elem):\n \"\"\"\n RFC 7991 Section 2.4\n \"\"\"\n content : Text\n\n@dataclass\nclass Workgroup(Elem):\n \"\"\"\n RFC 7991 Section 2.65\n \"\"\"\n content : Text\n\n@dataclass\nclass Keyword(Elem):\n \"\"\"\n RFC 7991 Section 2.28\n \"\"\"\n content : Text\n\n@dataclass\nclass Abstract(Elem):\n \"\"\"\n RFC 7991 Section 2.1\n \"\"\"\n content : ListType[Union[DL, OL, T, UL]]\n anchor : Optional[str]\n\n@dataclass\nclass Note(Elem):\n \"\"\"\n RFC 7991 Section 2.33\n \"\"\"\n name : Optional[Name]\n content : ListType[Union[DL, OL, T, UL]]\n removeInRFC : Optional[bool]\n title : Optional[str]\n\n@dataclass\nclass Boilerplate(Elem):\n \"\"\"\n RFC 7991 Section 2.11\n \"\"\"\n content : ListType[Section]\n\n@dataclass\nclass Front(Elem):\n \"\"\"\n RFC 7991 Section 2.26\n \"\"\"\n title : Title\n seriesInfo : Optional[ListType[SeriesInfo]]\n authors : ListType[Author]\n date : Optional[Date]\n areas : Optional[ListType[Area]]\n workgroups : Optional[ListType[Workgroup]]\n keywords : Optional[ListType[Keyword]]\n abstract : Optional[Abstract]\n notes : Optional[ListType[Note]]\n boilerplate : Optional[Boilerplate]\n\n# =================================================================================================\n# References and ReferenceGroup elements\n# =================================================================================================\n\n@dataclass\nclass Format(Elem):\n \"\"\"\n RFC 7991 Section 3.3\n \"\"\"\n octets : Optional[str]\n target : Optional[str]\n type : str\n\nAnnotationContent = Union[Text, BCP14, CRef, EM, ERef, IRef, RelRef, SpanX, Strong, Sub, Sup, TT, XRef]\n\n@dataclass\nclass Annotation(Elem):\n \"\"\"\n RFC 7991 Section 2.3\n \"\"\"\n content : ListType[AnnotationContent]\n\n\nRefContentContent = Union[Text, BCP14, EM, Strong, Sub, Sup, TT]\n\n@dataclass\nclass RefContent(Elem):\n \"\"\"\n RFC 7991 Section 2.39\n \"\"\"\n content : ListType[RefContentContent]\n\n\n@dataclass\nclass Reference(Elem):\n \"\"\"\n RFC 7991 Section 2.40\n \"\"\"\n front : Front\n content : ListType[Union[Annotation, Format, RefContent, SeriesInfo]]\n anchor : str\n quoteTitle : Optional[bool]\n target : Optional[str]\n\n@dataclass\nclass 
ReferenceGroup(Elem):\n \"\"\"\n RFC 7991 Section 2.41\n \"\"\"\n content : ListType[Reference]\n anchor : str\n\n@dataclass\nclass References(Elem):\n \"\"\"\n RFC 7991 Section 2.42\n \"\"\"\n name : Optional[Name]\n content : ListType[Union[Reference, ReferenceGroup]]\n anchor : Optional[str]\n title : Optional[str]\n\n# =================================================================================================\n# Back elements\n# =================================================================================================\n\n@dataclass\nclass DisplayReference(Elem):\n \"\"\"\n RFC 7991 Section 2.19\n \"\"\"\n target : str\n to : str\n\n@dataclass\nclass Back(Elem):\n \"\"\"\n RFC 7991 Section 2.8\n \"\"\"\n displayrefs : Optional[ListType[DisplayReference]]\n refs : Optional[ListType[References]]\n sections : Optional[ListType[Section]]\n\n# =================================================================================================\n# Link element\n# =================================================================================================\n\n@dataclass\nclass Link(Elem):\n \"\"\"\n RFC 7991 Section 2.30\n \"\"\"\n href : str\n rel : Optional[str]\n\n# =================================================================================================\n# RFC\n# =================================================================================================\n\n@dataclass\nclass RFC(Elem):\n \"\"\"\n RFC 7991 Section 2.45\n \"\"\"\n links : Optional[ListType[Link]]\n front : Front\n middle : Middle\n back : Optional[Back]\n category : Optional[str]\n consensus : Optional[bool]\n docName : Optional[str]\n indexInclude : Optional[bool]\n ipr : Optional[str]\n iprExtract : Optional[str]\n number : Optional[str]\n obsoletes : Optional[str]\n prepTime : Optional[str]\n seriesNo : Optional[str]\n sortRefs : Optional[bool]\n submissionType : Optional[str]\n symRefs : Optional[bool]\n tocDepth : Optional[str]\n tocInclude : Optional[bool]\n updates : Optional[str]\n version : Optional[str]\n\n","repo_name":"glasgow-ipl/ips-protodesc-code","sub_path":"npt/rfc.py","file_name":"rfc.py","file_ext":"py","file_size_in_byte":22171,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"54"} +{"seq_id":"8938338837","text":"# -*- coding: utf-8 -*-\nimport json\nimport urllib3 # Third Party Library\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\nfrom django.core.management import call_command\nfrom foundation_tenant.models.bizmula.documenttype import DocumentType\nfrom foundation_tenant.models.bizmula.question import Question\nfrom foundation_tenant.models.bizmula.workspace import Workspace\nfrom foundation_tenant.models.bizmula.document import Document\nfrom foundation_tenant.models.bizmula.module import Module\nfrom foundation_tenant.models.bizmula.slide import Slide\nfrom foundation_tenant.models.bizmula.questionanswer import QuestionAnswer\nfrom foundation_tenant.models.base.me import Me\nfrom foundation_public.utils import resolve_full_url_with_subdmain\nfrom foundation_tenant.utils import int_or_none\nfrom smegurus import constants\n\n\nfrom django.template.loader import render_to_string # HTML to TXT\nfrom django.core.mail import EmailMultiAlternatives # EMAILER\n\n\nclass SendEmailViewMixin(object):\n def send_pending_document_review_notification(self, schema_name, document):\n \"\"\"\n 
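# The dataclasses in rfc.py above form a typed AST mirroring RFC 7991's XML
# vocabulary; string forward references such as "Sub" and "Sup" break the
# mutual recursion between inline elements. A minimal self-contained sketch of
# the same pattern (these simplified Text/T/Section stand-ins are illustrative
# only, not the module's real definitions):
from dataclasses import dataclass
from typing import List, Optional, Union

@dataclass
class Text:
    content: str

@dataclass
class T:
    # A paragraph holding plain text or nested inline elements; the forward
    # reference "T" lets the type refer to itself before it is fully defined.
    content: List[Union[Text, "T"]]

@dataclass
class Section:
    name: str
    content: List[T]
    sections: Optional[List["Section"]] = None

doc = Section(name="Introduction",
              content=[T(content=[Text("Hello, RFC world.")])])
print(doc.content[0].content[0].content)  # Hello, RFC world.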
Function will send a \"Pending Document Review\" email to the Documents\n assigned Advisor.\n \"\"\"\n # Iterate through all owners of this document and generate the contact\n # list for all the Advisors for each Entrepreneur.\n contact_list = []\n for me in document.workspace.mes.all():\n if me.managed_by:\n # If this User profile has an assigned manager then add this person\n # to the email else just email the administrator.\n contact_list.append(me.managed_by.owner.email)\n\n if len(contact_list) == 0:\n admins = User.objects.filter(groups__id=constants.ORGANIZATION_ADMIN_GROUP_ID)\n for admin in admins.all():\n # Attach the email to the contact_list.\n contact_list.append(admin.email)\n\n # Generate the data.\n url = resolve_full_url_with_subdmain(\n schema_name,\n 'tenant_review_detail',\n [document.id,]\n )\n web_view_extra_url = resolve_full_url_with_subdmain(\n schema_name,\n 'foundation_email_pending_document',\n [document.id,]\n )\n subject = \"Pending Document Review\"\n param = {\n 'document': document,\n 'url': url,\n 'web_view_url': web_view_extra_url,\n }\n\n # Plug-in the data into our templates and render the data.\n text_content = render_to_string('tenant_review/pending_doc_review.txt', param)\n html_content = render_to_string('tenant_review/pending_doc_review.html', param)\n\n # Generate our address.\n from_email = settings.DEFAULT_FROM_EMAIL\n to = contact_list\n\n # Send the email.\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n\n\nclass Command(SendEmailViewMixin, BaseCommand):\n help = _('Make pending document')\n\n def add_arguments(self, parser):\n parser.add_argument('id', nargs='+')\n\n def handle(self, *args, **options):\n \"\"\"\n Function will get the inputted tenant name and doc_id and\n set the database to the tenant schema and begin processing\n for the particular document.\n \"\"\"\n schema_name = options['id'][0]\n doc_id = int_or_none(options['id'][1])\n\n # the tenant metadata is stored.\n from django.db import connection\n\n # Connection will set it back to our tenant.\n connection.set_schema(schema_name, True) # Switch to Tenant.\n\n try:\n doc = Document.objects.get(id=doc_id)\n except Document.DoesNotExist:\n raise CommandError(_('Cannot find a document.'))\n except Exception as e:\n raise CommandError(_('Unknown error occured.'))\n\n # Take our document and submit the answers to Docxpresso.\n self.begin_processing(schema_name, doc)\n\n def begin_processing(self, schema_name, document):\n # Send a notification email to the assigned Advisor.\n self.send_pending_document_review_notification(schema_name, document)\n\n # Return a success message to the console.\n self.stdout.write(\n self.style.SUCCESS(_('Finished setting Document #%s to pending.') % str(document.id))\n )\n","repo_name":"smegurus/smegurus-django","sub_path":"tenant_workspace/management/commands/send_doc_pending_review_email.py","file_name":"send_doc_pending_review_email.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34535746628","text":"import base64\nimport json\nimport re\nfrom urllib.parse import urlsplit\nfrom uuid import uuid4\nimport logging\n\nimport azure.functions as func\n\nalphabet = \"23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz\"\n\n# The code and most of the comments are taken from https://github.com/django/django/blob/main/django/core/validators.py#L65.\n# To not 
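# A minimal standalone sketch of the text+HTML multipart pattern that
# send_pending_document_review_notification() uses above. It assumes a
# configured Django environment; the addresses and bodies are hypothetical.
from django.core.mail import EmailMultiAlternatives

msg = EmailMultiAlternatives(
    subject="Pending Document Review",
    body="Plain-text fallback body.",       # shown by clients without HTML support
    from_email="noreply@example.com",
    to=["advisor@example.com"],
)
msg.attach_alternative("<p>HTML body</p>", "text/html")
msg.send()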
install Django as dependency it was extracted.\n# It does NOT contain the IPv6 checks.\ndef is_valid_url(value):\n # Unicode letters range (must not be a raw string).\n ul = \"\\u00a1-\\uffff\"\n # IP patterns\n ipv4_re = (\n r\"(?:25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}\"\n )\n ipv6_re = r\"\\[[0-9a-f:.]+\\]\" # (simple regex, validated later)\n # Host patterns\n hostname_re = (\n r\"[a-z\" + ul + r\"0-9](?:[a-z\" + ul + r\"0-9-]{0,61}[a-z\" + ul + r\"0-9])?\"\n )\n # Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1\n domain_re = r\"(?:\\.(?!-)[a-z\" + ul + r\"0-9-]{1,63}(? 253:\n return False\n\n return True\n\n\ndef create_short_code():\n number = uuid4().int\n output = \"\"\n while number:\n if len(output) == 5:\n return output\n number, digit = divmod(number, len(alphabet))\n output += alphabet[digit]\n return output\n\n\ndef response_unauthorized():\n return func.HttpResponse(json.dumps({\"error\": \"Not authorized\"}), status_code=401)\n\n\ndef main(req: func.HttpRequest, entry: str, result: func.Out[str]) -> func.HttpResponse:\n if not \"x-ms-client-principal\" in req.headers:\n return response_unauthorized()\n\n # https://docs.microsoft.com/en-us/azure/static-web-apps/user-information?tabs=javascript\n principal = json.loads(\n base64.b64decode(req.headers[\"x-ms-client-principal\"]).decode(\"ascii\")\n )\n\n if not \"authenticated\" in principal[\"userRoles\"]:\n return response_unauthorized()\n\n # entries contains an URL if it is already existing in the table storage\n # see the functions.json \"filter\" and \"take\" properties of the binding\n found_entry = json.loads(entry)\n if len(found_entry) == 1:\n return func.HttpResponse(\n json.dumps({\"id\": found_entry[0][\"RowKey\"]}), status_code=201\n )\n\n try:\n body = req.get_json()\n except ValueError:\n return func.HttpResponse(\"No body received\", status_code=400)\n\n if not \"url\" in body:\n return func.HttpResponse(\"Invalid body received\", status_code=400)\n\n url = body[\"url\"]\n\n if not is_valid_url(url):\n return func.HttpResponse(\"Invalid URL received\", status_code=400)\n\n code = create_short_code()\n data = {\"URL\": url, \"PartitionKey\": \"urls\", \"RowKey\": code}\n result.set(json.dumps(data))\n\n return func.HttpResponse(json.dumps({\"id\": code}), status_code=201)\n","repo_name":"dArignac/shorty","sub_path":"functions/SetURL/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22099970835","text":"from collections import defaultdict\n\n\nclass Constraints:\n def __init__(self, employees_db, employees, shifts):\n self.employees_db = employees_db\n self.employees = employees\n self.shifts = shifts\n\n # Hard constraint 1: primary ppo protected\n def hc1_primary_pto_protected(self):\n c = 0\n # iterate thru employee, shifts and check each shift against PPTO\n for i in range(len(self.shifts)):\n if (\n self.shifts[i][1]\n in self.employees_db[self.employees[i][0]][\"PTO\"][\"Primary\"]\n ):\n if self.shifts[i][0] != \"OFF\":\n c += 1\n return c\n\n # Hard constraint 2: all shifts filled\n def hc2_all_shifts_filled(self):\n c = 0\n ###\n return c\n\n # Hard constraint 3: all roles valid\n def hc3_valid_roles(self):\n \"\"\"check if employees are qualified for shift role and return # invalid shifts\"\"\"\n c = 0\n for i in range(len(self.shifts)):\n if (\n self.shifts[i][0]\n not in 
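# Self-contained demo of the divmod base-conversion behind create_short_code()
# above. The alphabet matches the module's (0/O/1/l/I are omitted to avoid
# visual ambiguity); digits come out least-significant first, as in the
# original, and the code is capped at five characters.
ALPHABET = "23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"

def encode(number: int, length: int = 5) -> str:
    output = ""
    while number and len(output) < length:
        # divmod peels off one base-58-style digit per iteration
        number, digit = divmod(number, len(ALPHABET))
        output += ALPHABET[digit]
    return output

print(encode(123456789))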
self.employees_db[self.employees[i][0]][\"roles\"]\n ):\n c += 1\n return c\n\n # Soft constraint 1: No double backing\n def sc1_no_double_back(self):\n \"\"\"check if 3->1 shifts occur and return # violations\"\"\"\n c = 0\n # for i in range(len(self.shifts)):\n # if self.shifts[i] == 3 and self.shifts[i+1] == 1 and self.employees[i][0] == self.employees[i+1][0]:\n # c += 1\n return c\n\n # Soft constraint 2: Max 4 weekend days/schedule\n def sc2_max_4_weekends(self):\n \"\"\"check if employees have >4 weekend shifts\"\"\"\n c = 0\n weekend_shifts = defaultdict(int)\n\n for employee, shift in list(zip(self.employees, self.shifts)):\n if (employee[1] == 0 or employee[1] == 7) and (shift != 0): # weekend shift\n weekend_shifts[employee[0]] += 1\n for employee in weekend_shifts.keys():\n if weekend_shifts[employee] > 4:\n c += 1\n\n return c\n\n # Soft constraint 3: Minimal split weekends\n def sc3_min_split_weekends(self):\n c = 0\n # for day in weekend_days:\n # for i in range(len(employees)):\n return c\n\n # Soft constraint 4: No 7+ day stretches\n def sc4_no_7_days(self):\n \"\"\"Check that no employee works a 7 day stretch\"\"\"\n c = 0\n consecutive_count = 0\n current_employee = self.employees[0][0]\n for i in range(len(self.employees)):\n if self.employees[i][0] == current_employee:\n if self.shifts[i] != 0:\n consecutive_count += 1\n else:\n consecutive_count = 0\n else:\n current_employee = self.employees[i][0]\n consecutive_count = 1\n if consecutive_count == 7:\n c += 1\n return c\n\n # Soft constraint 5: Secondary schedule requests\n def sc5_secondary_sched_reqs(self):\n c = 0\n # iterate thru employee, shifts and check each shift against PPTO\n # for i in range(len(self.shifts)):\n # if self.shifts[i][1] in self.employees_db[self.employees[i][0]]['requests']['Off']:\n # c += 1\n return c\n\n # Soft constraint 6: Shift distribution equal between team members\n def sc6_shift_dist_equal(self):\n \"Check that employees have an even distribution of shifts\"\n # calculate mean # of shifts per person\n shift_count = 0\n for shift in self.shifts:\n if shift != 0: # is not a day off\n shift_count += 1\n else:\n pass\n mean_shifts = shift_count / len(self.employees_db.keys())\n\n # count individual shift totals\n individual_shifts = [0] * len(\n self.employees_db.keys()\n ) # array of 0 shifts for each employee\n for i in range(len(self.employees_db.keys())):\n for n in range(28 * i, 28 * (i + 1)):\n if self.shifts[n] != 0:\n individual_shifts[i] += 1\n\n # average squared distance of each person's shift count from the mean\n sq_dists = sum([(n - mean_shifts) ** 2 for n in individual_shifts])\n mean_sq_dist = sum(sq_dists) / len(sq_dists)\n\n return mean_sq_dist\n\n # Soft constraint 7: Maximize contiguous morning vs evening stretches\n def sc7_max_contiguous_stretches(self):\n c = 0\n ###\n return c\n\n # Soft constraint 8: Minimize PM -> Swing and Swing -> AM transitions\n def sc8_min_pm_swing_am(self):\n c = 0\n ###\n return c\n\n # Soft constraint 9: Minimal split weekdays off\n def sc9_min_split_weekdays_off(self):\n c = 0\n ###\n return c\n\n def hard_constraint_cost(self, constraints=\"all\"):\n \"\"\"select and apply hard constraints then sum penalty scores\"\"\"\n all_constraints = [\n self.hc1_primary_pto_protected,\n self.hc2_all_shifts_filled,\n self.hc3_valid_roles,\n ]\n if constraints == \"all\":\n constraints = [i + 1 for i in range(len(all_constraints))]\n c = 0\n violations = defaultdict(int) # log individual violation types\n\n for hc_number in constraints:\n temp_c = 
0\n temp_c += all_constraints[hc_number - 1]()\n c += temp_c\n violations[hc_number] += temp_c\n return (c, violations)\n\n def soft_constraint_cost(self, constraints=\"all\"):\n \"\"\"select and apply hard constraints then sum penalty scores\"\"\"\n all_constraints = [\n self.sc1_no_double_back,\n self.sc2_max_4_weekends,\n self.sc3_min_split_weekends,\n self.sc4_no_7_days,\n self.sc5_secondary_sched_reqs,\n self.sc7_max_contiguous_stretches,\n self.sc8_min_pm_swing_am,\n self.sc9_min_split_weekdays_off,\n ]\n if constraints == \"all\":\n constraints = [i + 1 for i in range(len(all_constraints))]\n c = 0\n violations = defaultdict(int) # log individual violation types\n\n for sc_number in constraints:\n temp_c = 0\n temp_c += all_constraints[sc_number - 1]()\n c += temp_c\n violations[sc_number] += temp_c\n\n return (c, violations)\n\n def total_cost(\n self, hard_constraints, soft_constraints\n ): # expects constraints e.g. [[1, 2], [2,3]]\n \"\"\"Calculate penalty cost of a schedule\"\"\"\n # Set weights, select constraints and calculate hard and soft costs\n hard_weight = 1\n hard_cost = self.hard_constraint_cost(hard_constraints)[\n 0\n ] # (total, {constraint:total})[0]\n\n soft_weight = 1\n soft_cost = self.soft_constraint_cost(soft_constraints)[\n 0\n ] # (total, {constraint:total})[0]\n\n # Return total weighted cost function\n return hard_weight * hard_cost + soft_weight * soft_cost\n","repo_name":"kelseyfglenn/scheduler","sub_path":"src/constraints.py","file_name":"constraints.py","file_ext":"py","file_size_in_byte":7072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22064761360","text":"from datetime import date\n\nfrom django.contrib import messages\nfrom django.db.models import DecimalField, F, Sum, Value\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.urls import reverse\nfrom django.utils.dateparse import parse_date\nfrom django.utils.text import slugify\nfrom django.views.decorators.http import require_http_methods\n\nfrom . 
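# Note: in sc6_shift_dist_equal() above, `sq_dists = sum([...])` already
# collapses the squared distances to a single scalar, so the following
# `sum(sq_dists) / len(sq_dists)` would raise TypeError. A corrected
# standalone sketch of the intended mean-squared-distance computation:
def mean_sq_dist(individual_shifts):
    mean_shifts = sum(individual_shifts) / len(individual_shifts)
    # keep the per-employee squared distances as a list...
    sq_dists = [(n - mean_shifts) ** 2 for n in individual_shifts]
    # ...and only then average them
    return sum(sq_dists) / len(sq_dists)

print(mean_sq_dist([10, 12, 8, 10]))  # 2.0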
import forms, models, utils\n\n\ndef transactions(request, *args, **kwargs):\n \"\"\"Show all current transactions.\"\"\"\n # Get the current_month, either from the request kwargs, or use the today's month\n if request.GET.get('month'):\n current_month = get_object_or_404(\n models.Month.objects.all(),\n slug=request.GET.get('month')\n )\n else:\n current_month = models.Month.objects.get_or_create(\n year=date.today().year,\n month=date.today().month,\n name=date.today().strftime('%B, %Y'),\n )[0]\n expense_transactions = models.ExpenseTransaction.objects.filter(month=current_month).select_related('category')\n earning_transactions = models.EarningTransaction.objects.filter(month=current_month).select_related('category')\n expense_transaction_titles = models.ExpenseTransaction.objects.order_by(\n 'title'\n ).values_list(\"title\", flat=True).distinct('title')\n earning_transaction_titles = models.EarningTransaction.objects.order_by(\n 'title'\n ).values_list(\"title\", flat=True).distinct('title')\n\n context = {\n 'expense_transactions': expense_transactions,\n 'earning_transactions': earning_transactions,\n 'expense_form': forms.ExpenseTransactionForm(),\n 'earning_form': forms.EarningTransactionForm(),\n 'current_month': current_month,\n 'months': models.Month.objects.all(),\n 'expense_transaction_choices': expense_transaction_titles,\n 'earning_transaction_choices': earning_transaction_titles,\n 'expense_transaction_constant': models.Category.TYPE_EXPENSE,\n 'earning_transaction_constant': models.Category.TYPE_EARNING,\n }\n if request.method == 'POST':\n # Is this for an expense, or an earning?\n if request.POST.get('form_type') == 'expense':\n # Use the ExpenseTransactionForm for expense POSTs\n form = forms.ExpenseTransactionForm(request.POST)\n elif request.POST.get('form_type') == 'earning':\n # Use the EarningTransactionForm for earning POSTs\n form = forms.EarningTransactionForm(request.POST)\n\n # If the form is valid, then save the form\n if form.is_valid():\n form.save()\n else:\n # The form is not valid, so return the invalid form to the template\n context['expense_form'] = form\n return render(request, 'occurrence/transactions.html', context)\n\n\n@require_http_methods([\"GET\"])\ndef totals(request, *args, **kwargs):\n \"\"\"Show totals (for transactions) by category.\"\"\"\n month_slug = request.GET.get('month')\n # If the user did not provide a month_slug, then redirect to the totals\n # for the current month\n if not month_slug:\n current_month = models.Month.objects.get_or_create(\n year=date.today().year,\n month=date.today().month,\n name=date.today().strftime('%B, %Y'),\n slug=slugify(date.today().strftime('%B, %Y')),\n )[0]\n return redirect('{}?month={}'.format(reverse('totals'), current_month.slug))\n\n month = get_object_or_404(models.Month.objects.all(), slug=month_slug)\n\n # Get the expense totals for this month\n expense_categories, expense_total = utils.get_transactions_regular_totals(\n month,\n type_cat=models.Category.TYPE_EXPENSE,\n )\n # Get the earning totals for this month\n earning_categories, earning_total = utils.get_transactions_regular_totals(\n month,\n type_cat=models.Category.TYPE_EARNING,\n )\n\n # Get the MonthlyStatistic for this Month\n monthly_statistics = models.MonthlyStatistic.objects.filter(month=month)\n\n context = {\n 'expense_categories': expense_categories,\n 'expense_total': expense_total,\n 'earning_categories': earning_categories,\n 'earning_total': earning_total,\n 'total': earning_total - expense_total,\n 
'monthly_statistics': monthly_statistics,\n 'months': models.Month.objects.all(),\n 'active_month': month,\n }\n return render(request, 'occurrence/totals.html', context)\n\n\n@require_http_methods([\"GET\"])\ndef running_total_categories(request):\n \"\"\"The view for Categories that have a running total, rather than the regular total.\"\"\"\n categories = models.Category.objects.filter(\n total_type=models.Category.TOTAL_TYPE_RUNNING\n ).annotate(\n total=Sum(\n F('expensetransaction__amount') * Value('-1'),\n output_field=DecimalField()\n ),\n )\n # For each Category, attach a queryset of ExpenseTransactions that have a\n # running_total_amount fiels\n for category in categories:\n category.expense_transactions = utils.get_expensetransactions_running_totals(category)\n\n context = {\n 'categories': categories\n }\n return render(request, 'occurrence/running_totals.html', context)\n\n\n@require_http_methods([\"GET\", \"POST\"])\ndef edit_transaction(request, type_cat, id):\n \"\"\"Edit a transaction.\"\"\"\n # Attempt to find the transaction, based on the type_cat\n if type_cat == models.Category.TYPE_EXPENSE:\n transaction = get_object_or_404(\n models.ExpenseTransaction.objects.all(),\n pk=id\n )\n elif type_cat == models.Category.TYPE_EARNING:\n transaction = get_object_or_404(\n models.EarningTransaction.objects.all(),\n pk=id\n )\n else:\n raise Http404('Category type not recognized')\n\n if request.method == \"POST\":\n if type_cat == models.Category.TYPE_EXPENSE:\n form = forms.ExpenseTransactionForm(request.POST, instance=transaction)\n elif type_cat == models.Category.TYPE_EARNING:\n form = forms.EarningTransactionForm(request.POST, instance=transaction)\n # If the form is valid, save the object\n if form.is_valid():\n form.save()\n # Redirect the user to the transactions view for the month that this\n # transaction is in\n return redirect('{}?month={}'.format(reverse('transactions'), transaction.month.slug))\n else:\n if type_cat == models.Category.TYPE_EXPENSE:\n form = forms.ExpenseTransactionForm(instance=transaction)\n elif type_cat == models.Category.TYPE_EARNING:\n form = forms.EarningTransactionForm(instance=transaction)\n context = {'form': form, 'transaction': transaction, 'type_cat': type_cat}\n return render(request, 'occurrence/edit_transaction.html', context)\n\n\n@require_http_methods([\"GET\", \"POST\"])\ndef copy_transactions(request):\n \"\"\"Copy transactions to a new date.\"\"\"\n # Get the parameters from the request.\n if request.method == \"GET\":\n request_data = request.GET\n elif request.method == \"POST\":\n request_data = request.POST\n transaction_type = request_data.get(\"transaction_type\", \"\")\n request_transaction_ids = request_data.getlist(\"selected_transactions\", [])\n\n selected_transaction_ids = []\n try:\n for id in request_transaction_ids:\n selected_transaction_ids.append(int(id))\n except (ValueError, TypeError):\n context = {'errors': [\"The selected transaction ids must be integers.\"]}\n return render(request, 'occurrence/copy_transactions.html', context)\n\n if transaction_type == models.Category.TYPE_EXPENSE:\n transactions = models.ExpenseTransaction.objects.filter(id__in=selected_transaction_ids)\n elif transaction_type == models.Category.TYPE_EARNING:\n transactions = models.EarningTransaction.objects.filter(id__in=selected_transaction_ids)\n else:\n error = (\n f\"You must choose a valid transaction_type (either '{models.Category.TYPE_EXPENSE}' \"\n f\"or '{models.Category.TYPE_EARNING}').\"\n )\n context = {'errors': 
[error]}\n return render(request, 'occurrence/copy_transactions.html', context)\n\n if len(request_transaction_ids) != transactions.count():\n error = \"One or more of the selected transactions does not exist.\"\n context = {'errors': [error]}\n return render(request, 'occurrence/copy_transactions.html', context)\n\n if request.method == \"GET\":\n # For GET requests, render a page with the relevant transaction data.\n context = {'transactions': transactions, 'transaction_type': transaction_type, 'errors': []}\n return render(request, 'occurrence/copy_transactions.html', context)\n else:\n # For POST requests, create new transactions, based on the chosen transactions' data.\n new_date = request.POST.get(\"date\")\n if new_date:\n new_date_obj = parse_date(new_date)\n if not new_date_obj:\n error = f\"You must choose a date in the appropriate format. '{new_date}' is not valid.\"\n context = {'errors': [error]}\n return render(request, 'occurrence/copy_transactions.html', context)\n\n # Determine the Month for the selected date.\n month = utils.get_or_create_month_for_date_obj(new_date_obj)\n\n # Create new transactions, based on the chosen transactions' data.\n if transaction_type == models.Category.TYPE_EXPENSE:\n TransactionModel = models.ExpenseTransaction\n elif transaction_type == models.Category.TYPE_EARNING:\n TransactionModel = models.EarningTransaction\n new_transactions = []\n for transaction in transactions:\n new_transactions.append(\n TransactionModel(\n category=transaction.category,\n title=transaction.title,\n slug=utils.create_unique_slug_for_transaction(transaction),\n date=new_date_obj,\n amount=transaction.amount,\n month=month,\n description=transaction.description,\n )\n )\n num_transactions_created = TransactionModel.objects.bulk_create(new_transactions)\n\n messages.success(request, f\"{len(num_transactions_created)} transaction(s) copied.\")\n return redirect('transactions')\n","repo_name":"dchukhin/skameika","sub_path":"occurrence/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36508369351","text":"import json\nimport os\nimport pathlib\n\nimport pandas\nfrom sklearn.ensemble import StackingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import make_scorer\n\nfrom classification.defaults import make_default_pipeline, make_default_gridsearch_parameters, \\\n make_classifier_specific_gridsearch_parameters\nfrom classification.default_classifiers import classifiers\nfrom classification.utils import load_dataset, get_classifier_by_name, nested_cross_validation, \\\n clf_short_name, \\\n class_f1_scorer, unpandas_parameters, report_classifier_performance, get_best_classifier\nfrom file_anchor import root_dir\nfrom helpers.Logger import Logger\n\noutput_path = root_dir() + 'classification/cross_project/output/' + pathlib.Path(__file__).stem + '/'\nos.makedirs(output_path, exist_ok=True)\nlog = Logger(log_file=output_path + 'log.txt')\n\n\ndef main():\n with open(root_dir() + 'classification/cross_project/output/project_selection/project_combos.json', 'r') as fd:\n project_combos = json.load(fd)\n\n for i, projects_split in enumerate(project_combos):\n log.s('at ' + str(i) + ' ' + str(projects_split))\n projects = projects_split['projects']\n eval_project_split(projects)\n\n\ndef eval_project_split(projects):\n train_docs, train_target, target_names = load_dataset(projects_list=projects, 
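# The copy_transactions view above validates dates with
# django.utils.dateparse.parse_date, which returns None (rather than raising)
# on input that does not match ISO format -- hence the explicit falsiness
# check. A quick standalone illustration (dateparse needs no Django settings):
from django.utils.dateparse import parse_date

print(parse_date("2023-07-14"))  # 2023-07-14
print(parse_date("14/07/2023"))  # None -- wrong format, no exception raised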
pick='ProjectsNotInList')\n validation_docs, validation_target, target_names = load_dataset(projects_list=projects, pick='ProjectsFromList')\n\n classifiers_df = pandas.DataFrame()\n\n for clf_name in classifiers:\n gs_params = make_default_gridsearch_parameters()\n gs_params.update(make_classifier_specific_gridsearch_parameters(clf_name))\n pipeline = make_default_pipeline(get_classifier_by_name(clf_name))\n\n ncv_results = nested_cross_validation(train_docs, train_target, target_names, None, pipeline, gs_params, scoring='f1_macro')\n best_clf, params = get_best_classifier(ncv_results, 'F1 macro average', make_default_pipeline)\n\n pipeline.fit(train_docs, train_target)\n y_predicted = pipeline.predict(validation_docs)\n\n test_set_prediction_df = pandas.DataFrame({'doc': validation_docs, 'target': validation_target, 'prediction': y_predicted})\n test_set_prediction_df.to_csv(output_path + 'predictions_' + clf_name + '_'.join(projects) + '.csv.zip', compression='zip')\n\n report = report_classifier_performance(validation_target, y_predicted, target_names, 0, params, clf_name)\n classifiers_df = classifiers_df.append(pandas.DataFrame(report))\n\n classifiers_df.to_csv(output_path + 'performance' + '_'.join(projects) + '.csv')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AmadeusBugProject/fault_type_prediction","sub_path":"classification/cross_project/cross_project_evaluation_LRC_SVC_RFC.py","file_name":"cross_project_evaluation_LRC_SVC_RFC.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"21815188482","text":"#!/usr/bin/env python3\nimport os\nimport shutil\n\npath = \"/Volumes/5TB/tumbler\"\n\nprint(\"Starting file move\\n\")\n\nprint(\"Getting list of folders...\")\ndirectories = os.listdir(path)\ndirectories.pop(0)\nprint(\"Done getting list of folders\\n\\n\")\n\ncount = len(directories)\ntotal = count\n\nfor dir in directories:\n source = path + \"/\" + dir + \"/media\"\n print(source)\n destination = path + \"/\" + dir\n print(destination)\n\n print(\"Getting list of files...\")\n try:\n files = os.listdir(source)\n except FileNotFoundError:\n print(\"Media folder doesn't exist\")\n continue\n\n files.pop(0)\n print(\"Done getting list of files\\n\")\n\n filecount = len(files)\n filetotal = filecount\n\n for file in files:\n filesource = source + \"/\" + file\n dest = shutil.move(filesource, destination)\n\n filecount = filecount - 1\n print(\"Moved file [\" + str(filecount) + \"/\" +\n str(filetotal) + \"] \" + filesource + \" -> \" + dest)\n\n print(\"Finished moving \" + str(filetotal) + \" files from \" + source)\n count = count - 1\n print(\"Completed \" + str(count) + \" of \" + str(total) + \" folders\\n\\n\")\n\nprint(\"Finished file move\")\n","repo_name":"iguillen/Scripts","sub_path":"move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"45106575002","text":"#Advent of Code 2016 Day 14\n\nimport hashlib\nimport time\n\ndef createMd5(salt, number):\n return hashlib.md5((salt+str(number)).encode()).hexdigest()\n\ndef check(md5, salt, number):\n\n found = False\n valid = False\n check = \"\"\n\n for letter in range(0, len(md5)-2):\n if md5[letter] == md5[letter+1] == md5[letter+2]:\n found = True\n check = md5[letter]\n break\n\n if found:\n for x in range(number+1, number + 1 + 1000):\n test = createMd5(salt, x)\n\n if check*5 in 
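# move.py above calls directories.pop(0) / files.pop(0) to skip a hidden entry
# such as .DS_Store, but os.listdir order is arbitrary, so the dropped entry
# may be real data. A safer standalone filter (the path here is hypothetical):
import os

visible = sorted(name for name in os.listdir("/tmp")
                 if not name.startswith("."))  # skip dotfiles explicitly
print(visible)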
test:\n valid = True\n break\n \n return valid\n\ndef stretch(md5, amount):\n\n if md5 in stretched:\n return stretched[md5]\n \n new = md5\n\n for x in range(0, amount):\n new = createMd5(new, \"\")\n\n stretched[md5] = new\n\n return new\n\ndef check_stretch(md5, salt, number):\n\n found = False\n valid = False\n check = \"\"\n\n for letter in range(0, len(md5)-2):\n if md5[letter] == md5[letter+1] == md5[letter+2]:\n found = True\n check = md5[letter]\n break\n\n if found:\n for x in range(number+1, number + 1 + 1000):\n\n test = createMd5(salt, x)\n test = stretch(test, 2016)\n\n if check*5 in test:\n valid = True\n break\n \n return valid\n\ndef part1(input):\n\n i = 0\n counter = 0\n\n while counter < 65:\n test = createMd5(input, i)\n \n if check(test, input, i):\n counter += 1\n \n if counter == 64:\n break\n\n i += 1\n\n print(\"Part 1: The index that creates the 64th key is {}\".format(i))\n\ndef part2(input):\n\n i = 0\n counter = 0\n \n while counter < 65:\n \n test = createMd5(input, i)\n test = stretch(test, 2016)\n \n if check_stretch(test, input, i):\n counter += 1\n \n if counter == 64:\n break\n\n i += 1\n\n print(\"Part 2: The index that creates the 64th key is {}\".format(i))\n\nprint()\n\ninput = \"yjdafjpo\"\nstretched = {}\n\nstart1 = time.perf_counter()\npart1(input)\nend1 = time.perf_counter()\n\nstart2 = time.perf_counter()\npart2(input)\nend2 = time.perf_counter()\n\nprint()\nprint(\"Spent {:>7.2f} seconds on Part 1\".format(end1-start1))\nprint(\"Spent {:>7.2f} seconds on Part 2\".format(end2-start2))","repo_name":"leiacf/AoC2016","sub_path":"src/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33467483132","text":"#!/bin/env python\n\nimport psycopg2 as pg\nimport csv, os\n\nTABLES_DEFINITION = '''\nCREATE TABLE IF NOT EXISTS country (\n id SERIAL PRIMARY KEY,\n name VARCHAR (100) NOT NULL UNIQUE,\n code VARCHAR (5) NOT NULL UNIQUE\n);\n\nCREATE TABLE IF NOT EXISTS \"state\" (\n id SERIAL PRIMARY KEY,\n name VARCHAR (100) NOT NULL,\n country_id INTEGER NOT NULL REFERENCES country (id),\n UNIQUE (country_id, name)\n);\n\nCREATE TABLE IF NOT EXISTS city (\n id SERIAL PRIMARY KEY,\n name VARCHAR (100) NOT NULL,\n state_id INTEGER NOT NULL REFERENCES \"state\" (id),\n UNIQUE (state_id, name)\n); \n'''\n\ncountries = []\nstates = []\n\ndef searchForCountry (name, code) :\n for country in countries :\n if len(country) >= 3 and country[1] == name and country[2] == code :\n return country\n \n return None\n\ndef searchForState (name, countryId) :\n for state in states :\n if len(state) >= 3 and state[1] == name and state[2] == countryId :\n return state\n \n return None\n\ndef insertCountry (connection, cursor, name, code):\n query = 'INSERT INTO country (name, code) VALUES (%s, %s) ON CONFLICT DO NOTHING RETURNING id'\n \n country = searchForCountry(name, code)\n \n if country is not None :\n return country[0]\n \n cursor.execute(query, (name, code))\n connection.commit()\n country = cursor.fetchone()\n \n if country is not None :\n countries.append((country[0], name, code))\n return country[0]\n \n cursor.execute('SELECT id FROM country WHERE name = %s AND code = %s', (name, code))\n country = cursor.fetchone()\n \n if country is not None :\n countries.append((country[0], name, code))\n return country[0]\n \ndef insertState (connection, cursor, name, countryId) :\n query = 'INSERT INTO \"state\" (name, country_id) VALUES (%s, %s) ON 
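# Standalone sketch of the 2016-round key stretching that stretch() memoises
# in the AoC solution above, using the puzzle's example salt "abc" at index 0:
import hashlib

def md5_hex(s: str) -> str:
    return hashlib.md5(s.encode()).hexdigest()

digest = md5_hex("abc" + str(0))
for _ in range(2016):       # re-hash the hex digest 2016 additional times
    digest = md5_hex(digest)
print(digest)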
CONFLICT DO NOTHING RETURNING id'\n \n state = searchForState(name, countryId)\n \n if state is not None :\n return state[0]\n \n cursor.execute(query, (name, countryId))\n connection.commit()\n state = cursor.fetchone()\n \n if state is not None :\n states.append((state[0], name, countryId))\n return state[0]\n \n cursor.execute('SELECT id FROM \"state\" WHERE name = %s AND country_id = %s', (name, countryId))\n state = cursor.fetchone()\n \n if state is not None :\n states.append((state[0], name, countryId))\n return state[0]\n\ndef insertCity (connection, cursor, name, stateId) :\n query = 'INSERT INTO city (name, state_id) VALUES (%s, %s) ON CONFLICT DO NOTHING RETURNING id'\n \n cursor.execute(query, (name, stateId))\n connection.commit()\n \n city = cursor.fetchone()\n \n if city is not None : return city[0]\n \n return None\n\ndef fromCsvToDatabase (csvFilename, connection) : \n cursor = connection.cursor()\n \n with open(csvFilename, 'r', newline=\"\") as csvFile:\n reader = csv.DictReader(csvFile)\n \n for row in reader :\n countryId = insertCountry(connection, cursor, row['country'], row['iso2'])\n stateId = insertState(connection, cursor, row['admin_name'], countryId)\n cityId = insertCity(connection, cursor, row['city'], stateId)\n \n if cityId is None :\n print('[DUPLICATE] :', row['city'], '->', row['admin_name'], '->', row['country'])\n \n cursor.close()\n \ndef prepareDatabase (connection) :\n queries = filter(lambda query: len(query) > 0, map(lambda query: query.strip(), TABLES_DEFINITION.split(';')))\n cursor = connection.cursor()\n \n for query in queries :\n cursor.execute(query)\n connection.commit()\n \n cursor.close()\n\ndef getDbConfig () : \n return {\n 'database': os.getenv('PG_DB', 'postgres'),\n 'user': os.getenv('PG_USER', 'postgres'),\n 'password': os.getenv('PG_PASSWORD', ''),\n 'host': os.getenv('PG_HOST', 'localhost'),\n 'port': int(os.getenv('PG_PORT', '5432')),\n }\n\ndef main () :\n dbConf = getDbConfig()\n dbConnection = pg.connect(**dbConf)\n \n prepareDatabase(dbConnection)\n fromCsvToDatabase('./worldcities.csv', dbConnection)\n \n dbConnection.close()\n \nif __name__ == '__main__' :\n main()\n","repo_name":"Abderrahman-byte/digital-school-platform","sub_path":"worldcities/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73850644322","text":"from django.urls import path\nfrom rest_framework import routers\n\nfrom . 
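# The insert helpers in populate.py above rely on a PostgreSQL behaviour:
# INSERT ... ON CONFLICT DO NOTHING RETURNING id yields *no* row when the
# conflict fires, so a follow-up SELECT is needed to recover the existing id.
# A hedged sketch of the same upsert-then-select flow; the connection
# parameters are hypothetical and the country table is assumed to exist as in
# TABLES_DEFINITION.
import psycopg2 as pg

conn = pg.connect(dbname="postgres", user="postgres")
cur = conn.cursor()
cur.execute(
    "INSERT INTO country (name, code) VALUES (%s, %s) "
    "ON CONFLICT DO NOTHING RETURNING id", ("France", "FR"))
row = cur.fetchone()
if row is None:  # conflict path: the row already existed, so fetch its id
    cur.execute("SELECT id FROM country WHERE name = %s AND code = %s",
                ("France", "FR"))
    row = cur.fetchone()
conn.commit()
print(row[0])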
import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r\"users\", views.UserViewSet)\nrouter.register(r\"groups\", views.GroupViewSet)\n\nurlpatterns = [\n path(\"\", views.home, name=\"home\"),\n path(\"login_user/\", views.login_user, name=\"login\"),\n path(\"logout_user/\", views.logout_user, name=\"logout\"),\n path(\"register_user/\", views.register_user, name=\"register\"),\n]\n","repo_name":"bal-sm/bless_server","sub_path":"duser/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26911019633","text":"#!/usr/bin/env python3\n\n# Program to count up numbers and identify primes\nimport RPi.GPIO as GPIO\nfrom binaryledcounter import BinaryLEDCounter\nfrom sys import argv\nfrom random import randrange\nfrom primecounter import countprime\nfrom threading import Thread, Lock\n\n# Mode that pin numberings will be in\npinmode = GPIO.BOARD\n# List of output pins\n# The i'th element of this array represents the 2^i bit of our display\noutputpins = [7,8,10,11,12,13,15,16,18,19,21,22]\n# Pin which is connected to the button, with a PULL UP resistor\nbuttonpin = 3\n# Speed at which the primer should count (numbers/second)\nspeed = 8\n\n# Set up the GPIO pins and binary counter\nGPIO.setmode(pinmode)\nGPIO.setwarnings(False)\nc = BinaryLEDCounter(outputpins)\nGPIO.setup(buttonpin,GPIO.IN)\n\n# Find out where we should start\nif len(argv)==1:\n start = c.getvalue()\nelif (argv[1]=='random')or(argv[1]=='rand'):\n start = randrange(0,c.maxvalue()+1)\nelse:\n start = int(argv[1])\nc.setvalue(start)\nprint(\"Start: {}\".format(start))\n\n# Lock to keep threads from counting at the same time and global value\ncounterlock = Lock()\nvalue = start\n\n# Function for thread that waits on button press\ndef buttonwait():\n global value\n global c\n global speed\n while True:\n GPIO.wait_for_edge(buttonpin,GPIO.FALLING)\n with counterlock:\n y = countprime(value+1,c,1.0/speed)\n print('{s} {d} {e}'.format(s=value,d='.'*(y-value-1),e=y))\n value = y\n\n# Function for thread that waits on enter key\ndef keyboardwait():\n global value\n global c\n global speed\n while True:\n input()\n with counterlock:\n y = countprime(value+1,c,1.0/speed)\n print('{s} {d} {e}'.format(s=value,d='.'*(y-value-1),e=y))\n value = y\n\n# Start all input threads, then wait to join\nthreads = [Thread(target=buttonwait), Thread(target=keyboardwait)]\nfor t in threads:\n t.daemon=True\n t.start()\ntry:\n for t in threads:\n t.join()\nexcept KeyboardInterrupt:\n print()\n\n","repo_name":"ianmtaylor1/piprimer","sub_path":"piprimer.py","file_name":"piprimer.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6470990138","text":"import pygame\nimport sys\n\nTILESIZE = 64\nWIDTH = TILESIZE * 16\nHEIGHT = TILESIZE * 16\nRED = (255, 0, 0)\n\n# ----------------------------------------------------------------\n# class Game\n# ----------------------------------------------------------------\n\nclass Game:\n def __init__(self):\n pygame.init()\n pygame.display.set_caption('My World')\n self.display_surface = pygame.display.set_mode((WIDTH, HEIGHT))\n self.display_surface.fill(RED)\n\n def handle_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.myquit()\n return True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.myquit()\n return True\n\n 
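# Note: the urls.py above instantiates a DefaultRouter and registers two
# viewsets, but urlpatterns never includes router.urls, so those API endpoints
# are unreachable. A hedged fragment of the usual wiring, meant to sit at the
# end of that module (the "api/" prefix is a hypothetical choice):
from django.urls import include, path

urlpatterns += [
    path("api/", include(router.urls)),
]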
def myquit(self):\n pygame.quit()\n sys.exit()\n\n# ==================================================================\n# ==================================================================\n\ndef setup():\n mygame = Game()\n\n while True:\n mygame.handle_events()\n # ----------------------------------\n pygame.display.flip()\n pygame.display.update()\n\n\nif __name__ == \"__main__\":\n setup()\n\n","repo_name":"poly451/Tutorials","sub_path":"simple_rpg/code_for_video_01.py","file_name":"code_for_video_01.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"2531920855","text":"# %% [code]\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import preprocessing\nfrom sklearn.metrics import roc_auc_score\n\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import StratifiedKFold\n\nimport xgboost as xgb\n\n\n# %% [code]\ntrain_df = pd.read_csv('/kaggle/input/santander-customer-transaction-prediction/train.csv')\ntest_df = pd.read_csv('/kaggle/input/santander-customer-transaction-prediction/test.csv')\n\n\ntrain_df.drop(['ID_code'],axis=1,inplace=True)\n\ntest_id_list = test_df.ID_code\ntest_df.drop(['ID_code'],axis=1,inplace=True)\n\n\n# %% [code]\nX = train_df.drop('target',axis=1)\ny = train_df.target\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, stratify = y, random_state=42)\n\nparam_dict = {\n 'max_depth': range(1,10),\n 'gamma': np.arange(0,0.5,0.05),\n 'lambda': np.geomspace(1, 5, num=10),\n 'min_child_weight': [1, 5, 10],\n 'subsample': [0.6, 0.8, 1.0],\n 'colsample_bytree': [0.5,0.6, 0.8, 1.0]\n}\nxgb = xgb.XGBClassifier(tree_method='gpu_hist', eval_metric= 'auc')\n\nrscv = RandomizedSearchCV(xgb, param_dict,n_iter=800, scoring = 'roc_auc', n_jobs = -1, verbose = 1 , cv= StratifiedKFold(4).split(X_train,y_train))\nrscv.fit(X_train,y_train)\n\nprint(\"Best Paramaeter = \", rscv.best_estimator_ )\n\n# %% [code]\nprint(\"train accuracy score = \", accuracy_score(y_train,rscv.predict(X_train)))\nprint(\"test accuracy score = \", accuracy_score(y_test,rscv.predict(X_test)))\nprint(\"ROC_AUC score = \", roc_auc_score(y_test,rscv.predict_proba(X_test)[:,1] ))\n\n\n# submitting output\noutput_submission = pd.DataFrame(zip(test_id_list,rscv.predict_proba(test_df)[:,1]), columns = ['ID_code','target'])\noutput_submission.to_csv('/kaggle/working/output_submission.csv',index=False)","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/santander-customer-transaction-prediction/LekshmanNatarajan/intermediate-ml-xgboost-w-gpu-hyperparam-search.py","file_name":"intermediate-ml-xgboost-w-gpu-hyperparam-search.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"36439793116","text":"import frappe\nfrom datetime import date\nfrom frappe import get_print\nfrom frappe.model.document import Document\nfrom frappe.utils import getdate, today, add_to_date\nfrom accounting.accounting.doctype.cart.cart import Cart\nfrom accounting.accounting.doctype.item.item import Item\nfrom accounting.accounting.doctype.party.party import Party\nfrom accounting.accounting.doctype.account.account import Account\nfrom accounting.accounting.doctype.sales_invoice.sales_invoice import SalesInvoice\n\n\nclass SalesOrder(Document):\n def 
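# In the Kaggle notebook above, `xgb = xgb.XGBClassifier(...)` rebinds the
# imported module name to an estimator instance, so any later `xgb.` attribute
# lookup hits the classifier rather than the library. A safer standalone
# sketch that avoids shadowing the module:
from xgboost import XGBClassifier

clf = XGBClassifier(tree_method="hist", eval_metric="auc")
print(clf)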
validate(self):\n if Party.get_type(self.customer) != \"Customer\":\n frappe.throw(\"Please select a valid Customer.\")\n if getdate(self.payment_due_date) < date.today():\n frappe.throw(\n \"Payment Due Date should not be earlier than today's date.\")\n if Account.get_parent(self.debit_to) != \"Accounts Receivable\":\n frappe.throw(\n \"Debit account parent should be of type Accounts Receivable.\")\n if not self.validate_asset_account():\n frappe.throw(\n \"Asset account parent should be of type Stock Assets or Fixed Assets.\")\n Item.are_items_available(self.items)\n self.posting_date = today()\n\n def validate_asset_account(self) -> bool:\n parent_account = Account.get_parent(self.asset_account)\n return parent_account == \"Stock Assets\" or parent_account == \"Fixed Assets\"\n\n @staticmethod\n def create(customer: str, debit_to_account: str = \"Debtors\", asset_account: str = \"Stock In Hand\") -> object:\n \"\"\"Create and Submit Sales Order.\"\"\"\n cart = frappe.get_doc(\"Cart\", customer)\n\n total_qty = 0\n total_amount = 0\n for cart_item in cart.items:\n total_qty += cart_item.qty\n total_amount += cart_item.amount\n\n sales_odr = frappe.new_doc(\"Sales Order\")\n sales_odr.customer = customer\n sales_odr.posting_date = today()\n sales_odr.payment_due_date = add_to_date(today(), days=15)\n sales_odr.items = cart.items\n sales_odr.total_qty = total_qty\n sales_odr.total_amount = total_amount\n sales_odr.debit_to = debit_to_account\n sales_odr.asset_account = asset_account\n sales_odr.flags.ignore_permissions = True\n sales_odr.submit()\n return SalesInvoice.generate_invoice(sales_odr.name)\n\n\n@frappe.whitelist(allow_guest=False)\ndef create_order() -> None:\n \"\"\"A helper function to call SalesOrder.create().\"\"\"\n customer = frappe.session.user\n sales_inv = SalesOrder.create(customer)\n Cart.empty(customer)\n if sales_inv:\n frappe.local.response.filecontent = get_print(\n sales_inv.doctype, sales_inv.name, doc=sales_inv, print_format=\"Sales Invoice\", as_pdf=True\n )\n frappe.local.response.filename = sales_inv.name + \".pdf\"\n frappe.local.response.type = \"pdf\"\n","repo_name":"s-aga-r/accounting","sub_path":"accounting/accounting/doctype/sales_order/sales_order.py","file_name":"sales_order.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"42765613327","text":"import matplotlib\nmatplotlib.use(\"Agg\")\nfrom matplotlib import pyplot as plt\nimport os\nimport seaborn\nfrom collections import OrderedDict\nimport numpy as np\nimport argparse\nimport re\n\ndef parse():\n parser = argparse.ArgumentParser(description=\"Plot from log file.\")\n parser.add_argument('--max_x',\n help=\"maximum x value on the plot\",\n required=True, type=int)\n parser.add_argument('--max_y',\n help=\"maximum y value on the plot\",\n required=False, default='auto')\n parser.add_argument('--min_y',\n help=\"minimum y value on the plot\",\n required=False, default='auto')\n parser.add_argument('--keys',\n help=\"the metric keys to scrub from the logs and plot\",\n required=True, nargs='+', type=str)\n parser.add_argument('--title',\n help=\"title of the plot\",\n required=True, type=str)\n parser.add_argument('--source',\n help=\"source log text file\",\n required=True, type=str)\n parser.add_argument('--dest',\n help=\"write the plot to this file\",\n required=False, default=\"plot.png\", type=str)\n return parser.parse_args()\n\n \ndef scrub(path, keys):\n history = OrderedDict()\n 
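# Hedged fragment showing how SalesOrder.create() above might be exercised
# from a bench console; it assumes a running Frappe site with the accounting
# app installed, and the customer id and account names are hypothetical.
invoice = SalesOrder.create(
    customer="customer@example.com",
    debit_to_account="Debtors",
    asset_account="Stock In Hand",
)
print(invoice.name)  # the generated Sales Invoice name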
history_summary = OrderedDict()\n for key in keys:\n history[key] = OrderedDict()\n history_summary[key] = []\n with open(path, 'rt') as f:\n last_epoch, epoch = None, 0\n for line in f:\n # Split on runs of colons and spaces.\n split_line = re.split('[: ]+', line)\n if last_epoch is None:\n last_epoch = split_line[1]\n if split_line[1] != last_epoch:\n epoch += 1\n last_epoch = split_line[1]\n for key in keys:\n if epoch not in history[key]:\n history[key][epoch] = []\n if key in split_line:\n val_idx = len(split_line)-split_line[::-1].index(key)\n history[key][epoch].append(float(split_line[val_idx]))\n \n # Average keys over each epoch.\n for key in keys:\n for epoch in history[key]:\n if history[key][epoch]:\n history_summary[key].append( np.mean(history[key][epoch]) )\n \n return history_summary\n\n\nif __name__=='__main__':\n # Get all arguments\n args = parse()\n \n # Read log file\n history = scrub(args.source, args.keys)\n \n # TEMP\n # Print best score\n key_train, key_val = args.keys\n if np.all(np.greater_equal(history[key_val], 0)):\n idx = np.argmax(history[key_val])\n elif np.all(np.less_equal(history[key_val], 0)):\n idx = np.argmin(history[key_val])\n else:\n raise ValueError\n print(\"Best validation {}: {}\"\n \"\".format(key_val, history[key_val][idx]))\n print(\"-- training {}: {}\".format(key_train, history[key_train][idx]))\n print(\"-- epoch {} of {}\".format(idx+1, len(history[key_train])))\n # /TEMP\n \n # Color generator for the plots\n def gen_colors(num_colors):\n for c in seaborn.color_palette('hls', n_colors=num_colors):\n yield c\n \n # Plot\n fig, ax = plt.subplots(nrows=1, ncols=1)\n color_generator = gen_colors(num_colors=len(args.keys))\n for i, key in enumerate(args.keys):\n if args.min_y=='auto':\n min_y = min([min(history[key]) for key in history.keys()])*1.1\n else:\n min_y = float(args.min_y)\n if args.max_y=='auto':\n max_y = max([max(history[key]) for key in history.keys()])*1.1\n else:\n max_y = float(args.max_y)\n title = args.title\n ax.set_title(title)\n ax.set_xlabel(\"number of epochs\")\n ax.axis([0, args.max_x, min_y, max_y])\n ax.plot(history[key][:args.max_x],\n color=next(color_generator), label=key)\n ax.yaxis.set_ticks(np.arange(min_y, max_y, (max_y-min_y)/20.))\n \n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n fig.subplots_adjust(top=1.5)\n fig.savefig(args.dest, bbox_inches='tight')\n \n","repo_name":"veugene/lits","sub_path":"scripts/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
{"seq_id":"13930776962","text":"from flask_app import app\nfrom flask import render_template,redirect,request,session,flash\nfrom flask_app.models.faction import Faction\nfrom flask_app.models.user import User\n\n\n\n\n@app.route('/faction/new')\ndef new_faction():\n return render_template('add_faction.html')\n\n@app.route('/faction/create', methods=['POST'])\ndef create_faction():\n data = {\n 'name': request.form['name'],\n 'level': request.form['level'],\n 'date_created': request.form['date_created']\n }\n\n new_faction_id = Faction.create_new_faction(data)\n\n\n return redirect('/')\n\n\n@app.route('/faction/<int:faction_id>')\ndef 
show_one_faction(faction_id):\n query_data = {\n 'faction_id': faction_id\n }\n\n one_faction = Faction.get_faction_with_users(query_data)\n\n return render_template('show_one_faction.html',one_faction = one_faction)\n\n","repo_name":"tjones811/Python","sub_path":"flask_mysql_redo/user_cr/flask_app/controllers/faction_control.py","file_name":"faction_control.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"36474252020","text":"# Backtracking Algorithm\n# Backtracking is a general algorithm for finding solutions to some computational problems, notably constraint satisfaction problems, that incrementally builds candidates to the solutions, and abandons a candidate as soon as it determines that the candidate cannot possibly be completed to a valid solution.\n\n# Permutations Problem\n# Generate all the permutations of the numbers in the list\ndef permute(nums):\n\n\tresult = []\n\tif (len(nums) == 1):\n\t\treturn [nums[:]]\n\n\tfor i in range(len(nums)):\n\t\tn = nums.pop(0)\n\t\tperms = permute(nums)\n\n\t\tfor perm in perms:\n\t\t\tperm.append(n)\n\n\t\tresult.extend(perms)\n\t\tnums.append(n)\n\n\treturn result\n\n# Helping Function to swap string\ndef swap_string(string,pos1,pos2):\n\tstring = list(string)\n\tstring[pos1],string[pos2] = string[pos2],string[pos1]\n\tresult = \"\".join(string)\n\treturn result\n\n# Same Permutation Problem But with slight different approach\ndef permutation(string):\n\tif len(string) == 1:\n\t\treturn [string]\n\tresult = []\n\tdef backtrack(str,left,right):\n\t\tif left == right:\n\t\t\tresult.append(str)\n\t\t\treturn\n\t\tfor i in range(left,right+1):\n\t\t\t# First swapping based on left index and ith position\n\t\t\tstr = swap_string(str,left,i)\n\t\t\t# backtracking to find the solution\n\t\t\tbacktrack(str,left+1,right)\n\t\t\t# Returning the string to its original arrangement before the next iteration\n\t\t\tstr = swap_string(str,left,i)\n\n\tbacktrack(string,0,len(string)-1)\n\treturn result\n\nif __name__ == '__main__':\n print(permute([1,2,3]))\n # print(permute([1,1,2]))\n print(swap_string(\"Mat\",1,2))\n print(permutation(\"Mat\"))","repo_name":"Mati02K/DSA","sub_path":"python/Backtracking.py","file_name":"Backtracking.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"}
{"seq_id":"7876318640","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @FileName :107. Binary Tree Level Order Traversal II.py\n# @Time :1/13/22\n# @Author :Eason Tang\nfrom typing import List, Optional\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:\n if not root:\n return []\n\n import collections\n q = collections.deque([(root, 1)])\n ans = []\n\n current_depth = 1\n current_node_list = []\n\n while q:\n node, depth = q.popleft()\n if not node:\n continue\n\n if depth > current_depth:\n ans.append(current_node_list)\n current_node_list = []\n current_depth = depth\n\n current_node_list.append(node.val)\n\n q.append((node.left, depth + 1))\n q.append((node.right, depth + 1))\n\n ans.append(current_node_list)\n return ans[::-1]\n","repo_name":"tangyisheng2/leetcode-note","sub_path":"code/107. Binary Tree Level Order Traversal II.py","file_name":"107. 
Binary Tree Level Order Traversal II.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"40193389533","text":"import re\n\n\nPATTERN = r\"\\([0-9 +*]+\\)\"\n\nSUM_PATTERN = r\"[0-9]+ \\+ [0-9]+\"\n\ndef get_input():\n with open(\"input.txt\", \"r\") as file:\n return file.read()\n\n\ndef resolve(expression):\n elements = expression.split(' ')\n while len(elements) > 1:\n n1 = elements[0]\n n2 = elements[2]\n op = elements[1]\n elements = elements[3:]\n result = eval(n1+' '+op+' '+n2)\n elements.insert(0, str(result))\n return elements[0]\n\n\ndef execute(expression):\n while True:\n match_obj = re.search(PATTERN, expression)\n if not match_obj:\n break\n match = match_obj.group(0)\n executed_match = resolve(match[1:-1])\n expression = expression.replace(match, executed_match)\n return int(resolve(expression))\n\n\ndef execute_second(expression):\n repeat = True\n while repeat:\n repeat = False\n while True:\n match_obj = re.search(SUM_PATTERN, expression)\n if not match_obj:\n break\n repeat = True\n match = match_obj.group(0)\n executed_match = resolve(match)\n expression = expression.replace(match, executed_match)\n match_obj = re.search(PATTERN, expression)\n if match_obj:\n match = match_obj.group(0)\n executed_match = resolve(match[1:-1])\n expression = expression.replace(match, executed_match)\n repeat = True\n return int(resolve(expression))\n\n\ndef quiz1(data):\n result = 0\n for d in data:\n result += execute(d)\n print(result)\n\ndef quiz2(data):\n result = 0\n for d in data:\n res = execute_second(d)\n result += res\n print(result)\n\nif __name__ == \"__main__\":\n data = get_input().splitlines()\n quiz1(data)\n quiz2(data)","repo_name":"Kavuti/advent-of-code-2020","sub_path":"18/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"8625916732","text":"import os\n\nfrom PyQt5.QtCore import QThread, pyqtSignal\nfrom PyQt5.QtGui import QTextCursor\n\nfrom py_tools import tools_common, tools_file\n\n# Before display: data longer than this length is truncated.\n# This is the global oversize length; oversize_length is the per-widget one.\nfrom py_tools.tools_common import BufferQueue\n\nshow_text_length = 10000\noversize_length_min = 1000\noversize_length_max = 1280000\n\n# After display: content in the text widget longer than this value is deleted.\nshow_text_delete_length = 4096000\n# Length of data actually shown after a truncate or delete operation.\nshow_text_remain_length = 2000\nerror_msg_prefix = 'error: tools_text: '\n\n\ndef show_msg_to_text_dict(msg_queue=None, tk_text_dict=None):\n \"\"\"Show queued messages in the text widgets.\"\"\"\n # str_convert=True means convert on read\n # display_way=True means display everything in one go\n try:\n msg_showed = False\n all_msg_dict = {}\n while not msg_queue.empty():\n msg_get_list = msg_queue.get()\n gui_id = msg_get_list[0]\n msg_get = msg_get_list[1]\n\n if gui_id in tk_text_dict.keys():\n tk_text = tk_text_dict[gui_id].show_text\n path = tk_text_dict[gui_id].txt_path\n show_time = tk_text_dict[gui_id].show_time\n str_convert = tk_text_dict[gui_id].str_convert\n display_way = tk_text_dict[gui_id].display_way\n display_on = tk_text_dict[gui_id].display_on\n oversize_save_txt = tk_text_dict[gui_id].oversize_save\n oversize_length = tk_text_dict[gui_id].oversize_length\n\n if gui_id not in all_msg_dict.keys():\n all_msg_dict[gui_id] = \"\"\n\n if display_on:\n if str_convert:\n # Convert first, then display.\n if isinstance(msg_get, list) and len(msg_get) == 2:\n # The data conversion step was moved here.\n msg_to_text = convert_to_str(msg_get[0], msg_get[1]) + '\\n'\n if 
show_time:\n msg_to_text = tools_common.get_date_time_ms() + '\\n' + msg_to_text\n if display_way:\n # Accumulate first, then display everything at the end.\n all_msg_dict[gui_id] += msg_to_text\n else:\n # Display once per get.\n show_text_action(tk_text, msg_to_text, path,\n oversize_save_txt, oversize_length)\n else:\n # Converted at write time, so display directly here.\n if show_time:\n msg_get[0] = tools_common.get_date_time_ms() + '\\n' + msg_get[0] + msg_get[1]\n if display_way:\n # Accumulate first, then display everything at the end.\n all_msg_dict[gui_id] += msg_get[0] + '\\n'\n else:\n # Display once per get.\n show_text_action(tk_text, msg_get[0] + '\\n', path,\n oversize_save_txt, oversize_length)\n msg_showed = True\n if all_msg_dict:\n for gui_id_temp, all_msg in all_msg_dict.items():\n if all_msg:\n path = tk_text_dict[gui_id_temp].txt_path\n oversize_save_txt = tk_text_dict[gui_id_temp].oversize_save\n oversize_length = tk_text_dict[gui_id_temp].oversize_length\n show_text_action(tk_text_dict[gui_id_temp].show_text, all_msg, path,\n oversize_save_txt, oversize_length)\n if msg_showed:\n for gui_id_temp in all_msg_dict.keys():\n tk_text = tk_text_dict[gui_id_temp].show_text\n tk_text.moveCursor(QTextCursor.End)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix + 'show_msg_to_text_dict')\n print(error_msg)\n\n\ndef show_text_action(tk_text=None, msg_to_text='', path=\"\",\n oversize_save_txt=True, oversize_length=show_text_length):\n try:\n length = len(msg_to_text)\n actual_show_text_length = int(oversize_length) + 2 * int(show_text_remain_length)\n if 0 < length <= actual_show_text_length:\n insert_to_tk_text(tk_text, msg_to_text)\n elif length > actual_show_text_length:\n now_time = tools_common.get_date_time_ms()\n if oversize_save_txt:\n txt_name = os.path.join(path, now_time + '.txt')\n tools_file.save_to_txt(txt_name, msg_to_text)\n msg = msg_to_text[0: show_text_remain_length] + '...\\n......\\n...' + \\\n msg_to_text[length - show_text_remain_length:] + \\\n 'Data too large (' + str(length) + '); only part is shown. The full data is saved at:\\n' + \\\n txt_name + '\\nPlease check!\\n\\n'\n else:\n msg = msg_to_text[0: show_text_remain_length] + \\\n '...\\n......\\n...' 
+ \\\n msg_to_text[length - show_text_remain_length:] + \\\n 'Data too large (' + str(length) + '); only part is shown.\\n\\n'\n insert_to_tk_text(tk_text, msg)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix + 'show_text_action')\n print(error_msg)\n\n\ndef insert_to_tk_text(tk_text=None, msg=''):\n \"\"\"Append data to tk_text.\"\"\"\n try:\n tk_text.append(msg.strip() + '\\n')\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix + 'insert_to_tk_text')\n print(error_msg)\n\n\ndef convert_to_str(content=None, additional_msg=''):\n \"\"\"Convert data to str.\"\"\"\n msg_to_text = ''\n try:\n additional_msg = additional_msg.strip()\n if additional_msg:\n msg_to_text += additional_msg + '\\n'\n msg_to_text += convert_type_to_str(content)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix + 'convert_to_str')\n msg_to_text += error_msg\n print(error_msg)\n return msg_to_text\n\n\ndef convert_type_to_str(content=None):\n \"\"\"Convert other data types to str.\"\"\"\n msg_to_text = \"\"\n if isinstance(content, str):\n msg_to_text += content + '\\n'\n elif isinstance(content, list):\n msg_to_text += '['\n for data in content:\n msg_to_text += convert_type_to_str(data).strip() + ', '\n msg_to_text = msg_to_text.strip().strip(',')\n msg_to_text += ']\\n'\n elif isinstance(content, dict):\n msg_to_text += '{'\n for key, value in content.items():\n msg_to_text += convert_type_to_str(key).strip() + ': '\n msg_to_text += convert_type_to_str(value).strip() + ', '\n msg_to_text = msg_to_text.strip().strip(',')\n msg_to_text += '}\\n'\n elif isinstance(content, tuple):\n msg_to_text += '('\n for arg in content:\n msg_to_text += convert_type_to_str(arg).strip() + ', '\n msg_to_text = msg_to_text.strip().strip(',')\n msg_to_text += ')\\n'\n else:\n msg_to_text += str(content) + '\\n'\n return msg_to_text\n\n\ndef show_everything_to_text(msg_queue=None, content=None, additional_msg='',\n str_convert=True, gui_id=\"default\"):\n \"\"\"Display information of any type.\"\"\"\n # Decide how to write/display based on the msg type; everything is eventually broken down to str.\n # Handles str, int, float and error->tuple types first.\n # The main work here is parsing the type of the input variable to display.\n try:\n # Second approach: call str() directly; faster to convert but slower to display.\n # if len(additional_msg) > 0:\n # msg_to_text += additional_msg + '\\n'\n # msg_to_text += str(content)\n if str_convert:\n if msg_queue:\n # Changed here: write straight to the buffer and leave conversion to the display side.\n msg_queue.write([gui_id, [content, additional_msg]])\n else:\n # First approach: convert now; splitting out the display is faster this way.\n msg_to_text = convert_to_str(content, additional_msg)\n if msg_queue:\n msg_queue.write([gui_id, [msg_to_text, '']])\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix + 'show_everything_to_text')\n print(error_msg)\n\n\ndef config_oversize_length(dst_value=None, src_value=None):\n if dst_value and \\\n str(dst_value).isdigit() and \\\n oversize_length_min <= int(dst_value) <= oversize_length_max and \\\n int(dst_value) != int(src_value):\n return True\n return False\n\n\nclass LocalShowText(QThread):\n \"\"\"Handles all display and txt generation.\"\"\"\n signal = pyqtSignal(dict)\n\n def __init__(self):\n \"\"\"Initialize.\"\"\"\n super().__init__()\n # The queue below is globally unique and shared by the threads, which write data into it.\n # The main thread reads the data for display and persistence.\n # msg_queue buffers both display and txt content; whatever is displayed is written to txt as str.\n self.global_msg_queue = BufferQueue()\n # Main window\n self.sub_top = None\n # Display widgets\n # {'gui_id': {'show_text': show_text, 'result_txt': None}}\n self.show_text_dict = {}\n\n # If the displayed content exceeds 1000 lines, trim it back down.\n self.max_show_count = [0, 1000, 500]\n\n # Oversize threshold length\n self.oversize_length = show_text_length\n\n # Display call count\n self.show_count = 0\n\n def 
add_show_text(self, gui_id='', tk_text=None, show_time=True, txt_path=\"\", oversize_save=True):\n \"\"\"Add a show_text widget.\"\"\"\n try:\n if gui_id in self.show_text_dict.keys():\n self.show_text_dict[gui_id].show_text = tk_text\n self.show_text_dict[gui_id].show_time = show_time\n else:\n self.show_text_dict[gui_id] = \\\n DisplaySetting(tk_text, show_time, txt_path, True, True, True, oversize_save)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix + \"add_show_text\")\n self.local_show_everything_to_text(error_msg)\n\n def delete_show_text(self, gui_id=None):\n \"\"\"Remove a show_text widget.\"\"\"\n try:\n if gui_id in self.show_text_dict.keys():\n self.show_text_dict.pop(gui_id)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix + \"delete_show_text\")\n self.local_show_everything_to_text(error_msg)\n\n def run(self):\n \"\"\"Listen continuously and display whenever there is content.\"\"\"\n count = 0\n while True:\n show_msg_to_text_dict(self.global_msg_queue,\n self.show_text_dict)\n QThread.usleep(500)\n\n count += 1\n if count >= 5000:\n self.response_of_check_show_text()\n count = 0\n\n def show_msg_outside(self):\n \"\"\"Display content from outside the thread.\"\"\"\n try:\n show_msg_to_text_dict(self.global_msg_queue, self.show_text_dict)\n self.show_count += 1\n if self.show_count >= 5000:\n self.response_of_check_show_text()\n self.show_count = 0\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix + \"show_msg_outside\")\n self.local_show_everything_to_text(error_msg)\n\n def response_of_check_show_text(self):\n \"\"\"Check whether the show_text content exceeds the limit.\"\"\"\n # Whether it can be used this way still needs verification.\n try:\n # {'gui_id': {'show_text': show_text, 'result_txt': None}}\n for value in self.show_text_dict.values():\n show_text = value.show_text\n if show_text:\n text = show_text.toPlainText()\n length = len(text)\n if length >= int(show_text_delete_length + show_text_remain_length):\n show_text.clear()\n show_text.document().clear()\n show_text.append(text[length - show_text_remain_length:])\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix + 'response_of_check_show_text')\n self.local_show_everything_to_text(error_msg)\n\n def local_show_everything_to_text(self, content=None, additional_msg='', gui_id=\"default\"):\n \"\"\"Local adapter for the display function.\"\"\"\n # Displaying data and persisting data are handled separately.\n # Write the data.\n try:\n if gui_id in self.show_text_dict.keys():\n show_everything_to_text(self.global_msg_queue, content, additional_msg,\n self.show_text_dict[gui_id].str_convert, gui_id)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'local_show_everything_to_text: show_everything_to_text')\n print(error_msg)\n\n def set_save_display_show_time(self, gui_id='', value=None):\n error_msg = ''\n try:\n if gui_id in self.show_text_dict.keys():\n error_msg = self.show_text_dict[gui_id].set_save_display_show_time(value)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'local_show_everything_to_text: set_save_display_show_time')\n if error_msg:\n self.local_show_everything_to_text(error_msg)\n\n def set_save_display_oversize_length(self, gui_id='', value=None):\n try:\n error_msg = ''\n if value:\n if gui_id in self.show_text_dict.keys():\n error_msg = self.show_text_dict[gui_id].set_save_display_oversize_length(int(value))\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'local_show_everything_to_text: set_save_display_oversize_length')\n if error_msg:\n 
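# forward setter errors to the shared display widget so failures stay visible\n 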
self.local_show_everything_to_text(error_msg)\n\n def set_save_display_str_convert(self, gui_id='', value=None):\n try:\n error_msg = ''\n if gui_id in self.show_text_dict.keys():\n error_msg = self.show_text_dict[gui_id].set_save_display_str_convert(value)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'local_show_everything_to_text: set_save_display_str_convert')\n if error_msg:\n self.local_show_everything_to_text(error_msg)\n\n def set_save_display_display_way(self, gui_id='', value=None):\n try:\n error_msg = ''\n if gui_id in self.show_text_dict.keys():\n error_msg = self.show_text_dict[gui_id].set_save_display_display_way(value)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'local_show_everything_to_text: set_save_display_display_way')\n if error_msg:\n self.local_show_everything_to_text(error_msg)\n\n def set_save_display_display_on(self, gui_id='', value=None):\n try:\n error_msg = ''\n if gui_id in self.show_text_dict.keys():\n error_msg = self.show_text_dict[gui_id].set_save_display_display_on(value)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'local_show_everything_to_text: set_save_display_display_on')\n if error_msg:\n self.local_show_everything_to_text(error_msg)\n\n def set_save_display_oversize_save(self, gui_id='', value=None):\n try:\n error_msg = ''\n if gui_id in self.show_text_dict.keys():\n error_msg = self.show_text_dict[gui_id].set_save_display_oversize_save(value)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'local_show_everything_to_text: set_save_display_oversize_save')\n if error_msg:\n self.local_show_everything_to_text(error_msg)\n\n def get_show_time(self, gui_id=''):\n \"\"\"Return show_time for gui_id.\"\"\"\n result = None\n if gui_id in self.show_text_dict.keys():\n result = self.show_text_dict[gui_id].show_time\n return result\n\n def get_str_convert(self, gui_id=''):\n \"\"\"Return str_convert for gui_id.\"\"\"\n result = None\n if gui_id in self.show_text_dict.keys():\n result = self.show_text_dict[gui_id].str_convert\n return result\n\n def get_display_on(self, gui_id=''):\n \"\"\"Return display_on for gui_id.\"\"\"\n result = None\n if gui_id in self.show_text_dict.keys():\n result = self.show_text_dict[gui_id].display_on\n return result\n\n def get_display_way(self, gui_id=''):\n \"\"\"Return display_way for gui_id.\"\"\"\n result = None\n if gui_id in self.show_text_dict.keys():\n result = self.show_text_dict[gui_id].display_way\n return result\n\n def get_oversize_save(self, gui_id=''):\n \"\"\"Return oversize_save for gui_id.\"\"\"\n result = None\n if gui_id in self.show_text_dict.keys():\n result = self.show_text_dict[gui_id].oversize_save\n return result\n\n def get_oversize_length(self, gui_id=''):\n \"\"\"Return oversize_length for gui_id.\"\"\"\n result = None\n if gui_id in self.show_text_dict.keys():\n result = self.show_text_dict[gui_id].oversize_length\n return result\n\n\nclass DisplaySetting:\n \"\"\"Display settings.\"\"\"\n\n def __init__(self, tk_text=None, show_time=True, txt_path=\"\", str_convert=True,\n display_on=True, display_way=True, oversize_save=True):\n \"\"\"Initialize.\"\"\"\n self.show_text = tk_text\n self.show_time = show_time\n self.txt_path = txt_path\n self.str_convert = str_convert\n self.display_on = display_on\n self.display_way = display_way\n self.oversize_save = oversize_save\n self.oversize_length = show_text_length\n\n def set_save_display_show_time(self, value=None):\n error_msg = ''\n try:\n self.show_time = value\n except Exception as e:\n error_msg 
= tools_common.get_error_msg(e.args, error_msg_prefix +\n 'DisplaySetting: set_save_display_show_time')\n return error_msg\n\n def set_save_display_oversize_length(self, value=None):\n error_msg = \"\"\n try:\n self.oversize_length = int(value)\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'DisplaySetting: set_save_display_oversize_length')\n return error_msg\n\n def set_save_display_str_convert(self, value=None):\n error_msg = ''\n try:\n self.str_convert = value\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'DisplaySetting: set_save_display_str_convert')\n return error_msg\n\n def set_save_display_display_way(self, value=None):\n error_msg = ''\n try:\n self.display_way = value\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'DisplaySetting: set_save_display_display_way')\n return error_msg\n\n def set_save_display_display_on(self, value=None):\n error_msg = ''\n try:\n self.display_on = value\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'DisplaySetting: set_save_display_display_on')\n return error_msg\n\n def set_save_display_oversize_save(self, value=None):\n error_msg = ''\n try:\n self.oversize_save = value\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix +\n 'DisplaySetting: set_save_display_oversize_save')\n return error_msg\n\n\nclass LocalShowTextDirect:\n show_info = 'show_info'\n\n def __init__(self, show_text=None, result_path=''):\n \"\"\"Initialize.\"\"\"\n self.show_text = show_text\n self.result_path = result_path\n # Whether oversized content is saved to a txt file\n self.oversize_save = True\n # Oversize threshold length\n self.oversize_length = 1000\n # Number of display calls so far\n self.show_count = 0\n # Clear the widget after this many calls\n self.clear_count = 10\n\n def show_text_action(self, key='', data=None):\n \"\"\"Dispatch display actions.\"\"\"\n if key == self.show_info:\n self.show_data_to_text_direct(data)\n\n def show_data_to_text(self, msg=''):\n \"\"\"Append data directly to the text widget.\"\"\"\n try:\n self.show_text.append(msg)\n self.show_text.moveCursor(QTextCursor.End)\n except Exception as e:\n print(tools_common.get_error_msg(e.args, error_msg_prefix + 'show_data_to_text'))\n\n def show_data_to_text_direct(self, show_msg=''):\n \"\"\"Display data, truncating oversized messages.\"\"\"\n try:\n length = len(show_msg)\n txt_name = ''\n if length > self.oversize_length:\n if self.oversize_save:\n now_time = tools_common.get_date_time_ms()\n txt_name = os.path.join(self.result_path, now_time + '.txt')\n msg = show_msg[0:self.oversize_length] + '...\\n......\\nData too large; only part is shown. The full data is saved at:\\n' + txt_name + '\\nPlease check!\\n'\n else:\n msg = show_msg[0:self.oversize_length] + '...\\n......\\nData too large; only part is shown\\n'\n else:\n msg = show_msg + '\\n'\n self.show_data_to_text(msg)\n\n if txt_name:\n tools_file.save_to_txt(txt_name, show_msg)\n\n self.show_count += 1\n if self.show_count >= self.clear_count:\n self.show_count = 0\n self.show_text.clear()\n except Exception as e:\n error_msg = tools_common.get_error_msg(e.args, error_msg_prefix + 'show_data_to_text_direct')\n self.show_data_to_text(error_msg)\n","repo_name":"AlvinsFish/UiExample","sub_path":"py_tools/tools_text.py","file_name":"tools_text.py","file_ext":"py","file_size_in_byte":23178,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"}
{"seq_id":"72618279202","text":"# coding=utf-8\nimport os,os.path,re\n\npath='X:\\\\图片\\\\云'\n\ndef deal(p):\n for root,dirs,files in os.walk(path):\n # os.walk yields three values: 1. parent dir 2. all directory names (without path) 3. all file names\n \"\"\" \n for 
eachdir in dirs:\n print('root:'+root)\n print(\"dir:\"+eachdir)\n \"\"\"\n count=0\n for file in files:\n #print('parent dir: '+root)\n #print('file name: '+file)\n #print(root,os.sep,file)\n print(\"absolute path: \" + os.path.join(root,file))\n m=p.findall(os.path.join(root,file))\n #print(m)\n if len(m)==2:\n count+=1\n newname=str(m[0])\n print('new:'+str(newname))\n if os.path.exists(os.path.join(root,newname)):\n os.remove(os.path.join(root,file))\n else:\n os.rename(os.path.join(root,file),os.path.join(root,newname))\n else:\n pass\n return count\n \ndef main():\n #p4=re.compile('.*(?=\\.jpg\\.jpg\\.jpg\\.jpg)')\n # p3=re.compile('.*(?=\\.jpg\\.jpg\\.jpg)')\n p2=re.compile('.*(?=\\.jpg\\.jpg)')\n # print('4:'+str(deal(p4)))\n # print('3:'+str(deal(p3)))\n print('2:'+str(deal(p2)))\n\nif __name__=='__main__':\n main()\n","repo_name":"cnafan/pythonCodeHotchpotch","sub_path":"文件重命名.py","file_name":"文件重命名.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"43115237940","text":"# Author: Andrew Hamara\n\n# Solution for leetcode problem 1207. Unique Number of Occurrences\n\nfrom typing import List\n\n\nclass Solution:\n def uniqueOccurrences(self, arr:List[int]) -> bool:\n m = {}\n for x in arr:\n if x in m:\n m[x] += 1\n else:\n m[x] = 1\n l = list(m.values())\n return len(l) == len(set(l))\n","repo_name":"andrewhamara/Python-LC","sub_path":"1207.py","file_name":"1207.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"29842820981","text":"import gradio as gr\nimport cv2\nimport argparse\nimport sys\nimport numpy as np\nimport torch\nfrom pathlib import Path\n\nimport options as option\nfrom models import create_model\nsys.path.insert(0, \"../../\")\nimport utils as util\n\n# options\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-opt\", type=str, default='options/bokeh/test/refusion.yml', help=\"Path to options YAML file.\")\nopt = option.parse(parser.parse_args().opt, is_train=False)\n\nopt = option.dict_to_nonedict(opt)\n\n# load pretrained model by default\nmodel = create_model(opt)\ndevice = model.device\n\nsde = util.IRSDE(max_sigma=opt[\"sde\"][\"max_sigma\"], T=opt[\"sde\"][\"T\"], schedule=opt[\"sde\"][\"schedule\"], eps=opt[\"sde\"][\"eps\"], device=device)\nsde.set_model(model.model)\n\ndef deraining(image):\n image = image[:, :, [2, 1, 0]] / 255.\n\n src_lens = torch.tensor(float(18))\n tgt_lens = torch.tensor(float(160))\n disparity = torch.tensor(float(35))\n\n image = torch.tensor(image).float().cuda()\n image = torch.permute(image, (2, 0, 1))\n\n latent_LQ, hidden = model.encode(torch.unsqueeze(image, 0))\n noisy_state = sde.noise_state(latent_LQ)\n\n model.feed_data(noisy_state, latent_LQ, src_lens=src_lens, tgt_lens=tgt_lens, disparity=disparity, GT=None)\n model.test(sde, hidden=hidden, save_states=False)\n visuals = model.get_current_visuals(need_GT=False)\n output = util.tensor2img(visuals[\"Output\"].squeeze())\n return output\n\ninterface = gr.Interface(fn=deraining, inputs=\"image\", outputs=\"image\", title=\"Image Deraining using IR-SDE\")\ninterface.launch(share=True)\n\n","repo_name":"Algolzw/image-restoration-sde","sub_path":"codes/config/latent-bokeh/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":353,"dataset":"github-code","pt":"54"}
{"seq_id":"32062730096","text":"import tkinter as 
tk\nboard3=[]\nclass Buttons():\n global board3\n def __init__(self,i,a,outputframe,board2,label):\n self= tk.Button(outputframe, bd = 0,width=5,height=2,justify=tk.RIGHT,command=lambda:click(self,self.cget('text')),text=board2[label],font=(\"time\",10,\"bold\"))\n self.grid(column=a,row=i,padx=2,pady=2)\n board3.append(self) # goes through and makes a button object and if text starts as number disable user from clicking it \n if self.cget(\"text\") != \"*\":\n self[\"state\"] = tk.DISABLED\n def click(self,number):\n try:\n if int(number) > 8:#onclick number + 1 if 9 sets back to *\n self.config(text=\"*\")\n else :\n self.config(text=(int(self.cget(\"text\"))+1))\n except:\n self.config(text=1)\n \n def boards():\n return board3 #restuns board\n\n\n \n","repo_name":"natebrant/Python","sub_path":"sudoku2/sudoclass.py","file_name":"sudoclass.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71587254881","text":"\"\"\"\nScript that reads from raw MovieLens-1M data and dumps into a pickle\nfile the following:\n\n* A heterogeneous graph with categorical features.\n* A list with all the movie titles. The movie titles correspond to\n the movie nodes in the heterogeneous graph.\n\nThis script exemplifies how to prepare tabular data with textual\nfeatures. Since DGL graphs do not store variable-length features, we\ninstead put variable-length features into a more suitable container\n(e.g. torchtext to handle list of texts)\n\"\"\"\n\nimport argparse\nimport os\nimport pickle\nimport re\n\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as ssp\nimport torch\nimport torchtext\nfrom builder import PandasGraphBuilder\nfrom data_utils import *\n\nimport dgl\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"directory\", type=str)\n parser.add_argument(\"out_directory\", type=str)\n args = parser.parse_args()\n directory = args.directory\n out_directory = args.out_directory\n os.makedirs(out_directory, exist_ok=True)\n\n ## Build heterogeneous graph\n\n # Load data\n users = []\n with open(os.path.join(directory, \"users.dat\"), encoding=\"latin1\") as f:\n for l in f:\n id_, gender, age, occupation, zip_ = l.strip().split(\"::\")\n users.append(\n {\n \"user_id\": int(id_),\n \"gender\": gender,\n \"age\": age,\n \"occupation\": occupation,\n \"zip\": zip_,\n }\n )\n users = pd.DataFrame(users).astype(\"category\")\n\n movies = []\n with open(os.path.join(directory, \"movies.dat\"), encoding=\"latin1\") as f:\n for l in f:\n id_, title, genres = l.strip().split(\"::\")\n genres_set = set(genres.split(\"|\"))\n\n # extract year\n assert re.match(r\".*\\([0-9]{4}\\)$\", title)\n year = title[-5:-1]\n title = title[:-6].strip()\n\n data = {\"movie_id\": int(id_), \"title\": title, \"year\": year}\n for g in genres_set:\n data[g] = True\n movies.append(data)\n movies = pd.DataFrame(movies).astype({\"year\": \"category\"})\n\n ratings = []\n with open(os.path.join(directory, \"ratings.dat\"), encoding=\"latin1\") as f:\n for l in f:\n user_id, movie_id, rating, timestamp = [\n int(_) for _ in l.split(\"::\")\n ]\n ratings.append(\n {\n \"user_id\": user_id,\n \"movie_id\": movie_id,\n \"rating\": rating,\n \"timestamp\": timestamp,\n }\n )\n ratings = pd.DataFrame(ratings)\n\n # Filter the users and items that never appear in the rating table.\n distinct_users_in_ratings = ratings[\"user_id\"].unique()\n distinct_movies_in_ratings = 
ratings[\"movie_id\"].unique()\n users = users[users[\"user_id\"].isin(distinct_users_in_ratings)]\n movies = movies[movies[\"movie_id\"].isin(distinct_movies_in_ratings)]\n\n # Group the movie features into genres (a vector), year (a category), title (a string)\n genre_columns = movies.columns.drop([\"movie_id\", \"title\", \"year\"])\n movies[genre_columns] = movies[genre_columns].fillna(False).astype(\"bool\")\n movies_categorical = movies.drop(\"title\", axis=1)\n\n # Build graph\n graph_builder = PandasGraphBuilder()\n graph_builder.add_entities(users, \"user_id\", \"user\")\n graph_builder.add_entities(movies_categorical, \"movie_id\", \"movie\")\n graph_builder.add_binary_relations(\n ratings, \"user_id\", \"movie_id\", \"watched\"\n )\n graph_builder.add_binary_relations(\n ratings, \"movie_id\", \"user_id\", \"watched-by\"\n )\n\n g = graph_builder.build()\n\n # Assign features.\n # Note that variable-sized features such as texts or images are handled elsewhere.\n for data_type in [\"gender\", \"age\", \"occupation\", \"zip\"]:\n g.nodes[\"user\"].data[data_type] = torch.LongTensor(\n np.array(users[data_type].cat.codes.values)\n )\n\n g.nodes[\"movie\"].data[\"year\"] = torch.LongTensor(\n np.array(movies[\"year\"].cat.codes.values)\n )\n g.nodes[\"movie\"].data[\"genre\"] = torch.FloatTensor(\n np.array(movies[genre_columns].values)\n )\n\n for edge_type in [\"watched\", \"watched-by\"]:\n for data_type in [\"rating\", \"timestamp\"]:\n g.edges[edge_type].data[data_type] = torch.LongTensor(\n np.array(ratings[data_type].values)\n )\n\n # Train-validation-test split\n # This is a little bit tricky as we want to select the last interaction for test, and the\n # second-to-last interaction for validation.\n train_indices, val_indices, test_indices = train_test_split_by_time(\n ratings, \"timestamp\", \"user_id\"\n )\n\n # Build the graph with training interactions only.\n train_g = build_train_graph(\n g, train_indices, \"user\", \"movie\", \"watched\", \"watched-by\"\n )\n assert train_g.out_degrees(etype=\"watched\").min() > 0\n\n # Build the user-item sparse matrix for validation and test set.\n val_matrix, test_matrix = build_val_test_matrix(\n g, val_indices, test_indices, \"user\", \"movie\", \"watched\"\n )\n\n ## Build title set\n\n movie_textual_dataset = {\"title\": movies[\"title\"].values}\n\n # The model should build their own vocabulary and process the texts. 
Here is one example\n # of using torchtext to pad and numericalize a batch of strings.\n # field = torchtext.data.Field(include_lengths=True, lower=True, batch_first=True)\n # examples = [torchtext.data.Example.fromlist([t], [('title', title_field)]) for t in texts]\n # titleset = torchtext.data.Dataset(examples, [('title', title_field)])\n # field.build_vocab(titleset.title, vectors='fasttext.simple.300d')\n # token_ids, lengths = field.process([examples[0].title, examples[1].title])\n\n ## Dump the graph and the datasets\n\n dgl.save_graphs(os.path.join(out_directory, \"train_g.bin\"), train_g)\n\n dataset = {\n \"val-matrix\": val_matrix,\n \"test-matrix\": test_matrix,\n \"item-texts\": movie_textual_dataset,\n \"item-images\": None,\n \"user-type\": \"user\",\n \"item-type\": \"movie\",\n \"user-to-item-type\": \"watched\",\n \"item-to-user-type\": \"watched-by\",\n \"timestamp-edge-column\": \"timestamp\",\n }\n\n with open(os.path.join(out_directory, \"data.pkl\"), \"wb\") as f:\n pickle.dump(dataset, f)\n","repo_name":"dmlc/dgl","sub_path":"examples/pytorch/pinsage/process_movielens1m.py","file_name":"process_movielens1m.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"29796536908","text":"import wx\nimport os\nimport cv2\nimport numpy as np\n\nclass Transforms(wx.Panel):\n def __init__(self,parent):\n wx.Panel.__init__(self, parent)\n \n self.path = ''\n\n self.mode = 0\n\n self.imgDict = {}\n\n self.mainbox = wx.BoxSizer(wx.HORIZONTAL) \n self.SetSizer(self.mainbox)\n\n self.rSizer = wx.GridBagSizer(5, 5)\n self.mainbox.Add(self.rSizer)\n\n sb = wx.StaticBox(self, label=\"4.Transforms\")\n \n boxsizer = wx.StaticBoxSizer(sb, wx.VERTICAL)\n\n def Resize(event):\n self.Resize()\n\n btn1 = wx.Button(self, label=\"4.1 Resize \")\n btn1.Bind(wx.EVT_BUTTON, Resize)\n boxsizer.Add(btn1, flag=wx.LEFT|wx.TOP, border=5)\n\n def Translation(event):\n self.Translation()\n\n btn2 = wx.Button(self, label=\"4.2 Translation \")\n btn2.Bind(wx.EVT_BUTTON, Translation)\n boxsizer.Add(btn2, flag=wx.LEFT|wx.TOP, border=5)\n\n def Angle(event):\n self.Angle()\n\n corner_detection = wx.Button(self, label=\"4.3 Rotation, Scaling \")\n corner_detection.Bind(wx.EVT_BUTTON, Angle)\n boxsizer.Add(corner_detection, flag=wx.LEFT|wx.TOP, border=5)\n\n def Shearing(event):\n self.Shearing()\n\n corner_detection = wx.Button(self, label=\"4.4 Shearing \")\n corner_detection.Bind(wx.EVT_BUTTON, Shearing)\n boxsizer.Add(corner_detection, flag=wx.LEFT|wx.TOP, border=5)\n\n self.rSizer.Add(boxsizer, pos=(0, 0), span=(1, 5),\n flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=5)\n \n def Resize(self):\n img = cv2.imread('./Dataset_OpenCvDl_Hw1/Q4_Image/SQUARE-01.png')\n image = cv2.resize(img, (256, 256))\n\n cv2.imshow('Resize', image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n def Translation(self):\n img = cv2.imread('./Dataset_OpenCvDl_Hw1/Q4_Image/SQUARE-01.png')\n img = cv2.resize(img, (256, 256))\n\n M = np.float32([[1, 0, 0],[0, 1, 60]])\n\n img = cv2.warpAffine(img, M, (400, 300))\n\n cv2.imshow('Translation', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows() \n\n def Angle(self):\n img = cv2.imread('./Dataset_OpenCvDl_Hw1/Q4_Image/SQUARE-01.png')\n img = cv2.resize(img, (256, 256))\n\n H = np.float32([[1, 0, 0],[0, 1, 60]])\n img = cv2.warpAffine(img, H, (400, 300))\n\n M = cv2.getRotationMatrix2D((128, 188), 10, 0.5) \n img = cv2.warpAffine(img, M, (400, 300))\n cv2.imshow('Angle', img)\n 
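# waitKey(0) blocks until a key press so the preview window stays open\n 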
cv2.waitKey(0)\n cv2.destroyAllWindows() \n\n def Shearing(self):\n img = cv2.imread('./Dataset_OpenCvDl_Hw1/Q4_Image/SQUARE-01.png')\n img = cv2.resize(img, (256, 256))\n\n H = np.float32([[1, 0, 0],[0, 1, 60]])\n img = cv2.warpAffine(img, H, (400, 300))\n\n M = cv2.getRotationMatrix2D((128, 188), 10, 0.5) \n img = cv2.warpAffine(img, M, (400, 300))\n \n W = np.float32([[1, 0.5, 0],\n [0, 1 , 0],\n [0, 0 , 1]])\n img = cv2.warpPerspective(img, W, (400, 300))\n cv2.imshow('Shearing', img)\n\n cv2.waitKey(0)\n cv2.destroyAllWindows() ","repo_name":"a12345645/CVDL","sub_path":"Introduction to Image Processing, Computer Vision and Deep Learning/hw1/Transforms.py","file_name":"Transforms.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"33337851905","text":"import logging\nimport json\nimport sys\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\nfrom excerpt_gen import ExcerptGen\nfrom base.config_loader import ConfigLoader\n\n\nclass ExcerptServer(ConfigLoader):\n def run(self, **kwargs):\n # allow command line arguments to overwrite config\n for k, v in kwargs.items():\n setattr(self, k, v)\n eg = ExcerptGen(accelerator=self.accelerator,\n model_name=self.model_name)\n\n class RequestHandler(BaseHTTPRequestHandler):\n def _set_response(self):\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n\n def do_POST(self):\n # refuse to receive non-json content\n ctype = self.headers['Content-Type']\n if ctype != 'application/json':\n logging.info(f'Got Content-Type={ctype}')\n self.send_response(400)\n self.end_headers()\n return\n content_length = int(self.headers['Content-Length'])\n body = self.rfile.read(content_length)\n logging.info(f'POST request,\\nPath: {self.path}')\n if self.path == '/get_excerpts':\n response = self._get_excerpts(body)\n elif self.path == '/get_excerpts_from_docs':\n response = self._get_excerpts_from_docs(body)\n else:\n logging.info(f'Got path={self.path}')\n self.send_response(400)\n self.end_headers()\n return\n self._set_response()\n self.wfile.write(json.dumps(response).encode('utf-8'))\n\n def _get_excerpts(self, body):\n message = json.loads(body)\n question = message.get('question', '')\n url = message.get(\n 'url', 'https://en.wikipedia.org/wiki/COVID-19_pandemic')\n if question:\n response = \\\n eg.get_excerpts(question, url=url)\n else:\n response = {'error': 'No question provided'}\n return response\n\n def _get_excerpts_from_docs(self, body):\n message = json.loads(body)\n question = message.get('question', '')\n docs = message.get('docs', [])\n if question and docs:\n response = eg.get_excerpts_from_docs(question, docs)\n else:\n response = {\n 'error': (f'No question (len={len(question)}) or '\n f'docs (len={len(docs)}) provided')\n }\n return response\n\n httpd = HTTPServer((self.host, self.port), RequestHandler)\n logging.info(f'Listening on {self.host}:{self.port}')\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n logging.info('KeyboardInterrupt')\n finally:\n httpd.server_close()\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n s = ExcerptServer()\n s.run(**dict(v.split('=') for v in 
sys.argv[1:]))\n","repo_name":"joy13975/covidprof_submission","sub_path":"src/excerpt_server.py","file_name":"excerpt_server.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"12217263228","text":"from flask import Flask, escape, url_for, request\nfrom tools.redis_tool import get_redis_conn\nfrom datetime import datetime\n\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n print(request.url)\n redis_conn = get_redis_conn()\n redis_conn.set(\"now\", str(datetime.now()))\n return redis_conn.get(\"now\")\n\n\n@app.route(\"/login\")\ndef login():\n return \"login\"\n\n\n@app.route(\"/user/<username>\")\ndef profile(username):\n return \"{}'s profile\".format(escape(username))\n\n\nwith app.test_request_context():\n print(url_for(\"index\"))\n print(url_for(\"login\"))\n print(url_for(\"login\", next=\"/\"))\n print(url_for(\"profile\", username=\"John Doe\"))\n","repo_name":"xth9363/ff1","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"23506469551","text":"import math\n\nfrom django import template\n\nregister = template.Library()\n\n@register.filter()\ndef all_submissions(num_players, submissions):\n return len(submissions) == (num_players - 1)\n\n@register.simple_tag()\ndef calculate_turn(**kwargs):\n num_rounds = kwargs['r']\n num_players = kwargs['pl']\n if isinstance(num_players, str):\n num_players = int(num_players)\n if isinstance(num_rounds, str):\n num_rounds = int(num_rounds)\n turn = num_rounds % num_players\n return num_players if turn == 0 else turn\n\n@register.simple_tag()\ndef calculate_round(**kwargs):\n num_rounds = kwargs['r']\n current_round = kwargs['cr']\n if isinstance(num_rounds, str):\n num_rounds = int(num_rounds)\n if isinstance(current_round, str):\n current_round = int(current_round)\n \n return math.ceil(current_round / num_rounds)","repo_name":"stefdworschak/trivia-game","sub_path":"cah/templatetags/cah_tags.py","file_name":"cah_tags.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"32577297436","text":"import pandas as pd\nimport csv\nimport datetime\nimport time\n\n\n# task_1\n'''\nWrite a program that opens the given CSV file and prints its last 10 lines one by one. \nIf the file has 10 or fewer lines, print only the first line.\n'''\n\ndef check_file(path):\n df = pd.read_csv(path)\n if len(df) >= 10:\n print(df.tail(10))\n else:\n print(df.head(1))\n\ncheck_file('prices22.csv')\n\n\n# task_2\n'''\nCreate a text file File_txt.txt. Write your group and full name as the first line. \nThe second line is the even numbers from 0 to 100. 
The numbers must be separated by «;».\n'''\nwith open('File_txt.txt', 'w') as text_file:\n text_file.write('Ushanov Nikita, PUOR22-2m \\n')\n\nwith open('File_txt.txt', 'a') as text_file:\n arr = \"; \".join(map(str, list(range(0, 101, 2))))\n text_file.write(arr)\n \n\n# task_3\n'''\nWrite a function read_last(lines, file) that opens the given file and prints\nits last `lines` lines one by one (just in case, check that a positive integer was given).\n'''\ndef read_last(lines, file):\n if lines > 0:\n with open(file, encoding='utf-8') as text:\n file_lines = text.readlines()[-lines:]\n for line in file_lines:\n print(line.strip())\n else:\n print('The \"lines\" parameter must be greater than 0!')\n \n\n# task_4\n'''\nImplement a function longest_words(file) that returns the word of maximum length (or a list of words if there are several).\n'''\ndef longest_words(file):\n with open(file, encoding='utf-8') as text:\n words = text.read().split()\n max_length = len(max(words, key=len))\n sought_words = [word for word in words if len(word) == max_length]\n if len(sought_words) == 1:\n return sought_words[0]\n else:\n return sought_words\n \n \n# task_5\n'''\nCreate a csv file «rows_300.csv» with the following columns:\n– No. – row number (from 1 to 300);\n– Second – the current second on your PC;\n– Microsecond – the current microsecond on the clock.\nOn every loop iteration, artificially pause the script for 0.01 seconds.\n'''\nwith open('rows_300.csv', 'w', encoding='utf-8', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['No.', 'Second', 'Microsecond'])\n for line in range(1, 301):\n writer.writerow([line, datetime.datetime.now().second, datetime.datetime.now().microsecond])\n time.sleep(0.01)\n","repo_name":"NikitaUshanov/python_19_11_22","sub_path":"seminar_1.py","file_name":"seminar_1.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"74006111202","text":"from ase.io import read, write\n\natoms = read('POSCAR')\nselected = [119, 168, 49, 50, 51, 52, 53, 54]\nremove = []\nfor i in range(len(atoms)):\n if i+1 not in selected:\n remove.append(i)\n\ndel atoms[remove]\nwrite('out.vasp', atoms, vasp5=True, direct=True)\n","repo_name":"esemble/simpy","sub_path":"tools/ase/extract_atoms.py","file_name":"extract_atoms.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"54"}
{"seq_id":"25486497794","text":"import re\nplanets = input()\nplanets = re.findall(r'[A-Za-z]+',planets)\nidx = planets.index('Sun')\nhot,cool = '',''\nif idx != 0 : hot += ' ' + planets[idx-1]\nif idx != len(planets)-1 : hot += ' ' + planets[idx+1]\nmx = max(idx,len(planets)-1-idx)\nif idx == mx : cool += ' ' + planets[0]\nif len(planets)-1-idx == mx : cool += ' ' + planets[len(planets)-1]\nprint('Hot:' + hot)\nprint('Cool:' + cool)\n#Mercury Venus Earth Mars Sun Jupiter Saturn Uranus Neptune","repo_name":"apkmew/Code","sub_path":"CPE KU Year 1/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
{"seq_id":"15518799632","text":"from flask import Blueprint, make_response, jsonify\nfrom utils.extension import admin, mysql\nfrom os import listdir, mkdir\nfrom os.path import exists, join\nimport pandas as pd\nimport cv2 as cv\n\ninit_db = 
Blueprint('init_db', __name__)\n\nORIGINAL_PATH = join('images', 'original')\nRESIZED_PATH = join('images', 'resized')\nPROJECT_NAME = \"AM69123_\"\nRESIZED_WIDTH = 500\n\n\ndef is_image_in_db(image_id):\n cursor = mysql.connection.cursor()\n image = cursor.execute(\"\"\"\n SELECT * \n FROM images \n WHERE image_id= %(image_id)s \"\"\",\n {\n 'image_id': image_id\n })\n cursor.close()\n\n if image > 0:\n return True\n return False\n\n\ndef is_document_in_db(document_cote):\n cursor = mysql.connection.cursor()\n document = cursor.execute(\"\"\"\n SELECT * \n FROM documents \n WHERE document_cote= %(document_cote)s \"\"\",\n {\n 'document_cote': document_cote\n })\n cursor.close()\n\n if document > 0:\n return True\n return False\n\n\ndef add_document_to_db(cote, name, date):\n cursor = mysql.connection.cursor()\n cursor.execute(\"\"\"\n INSERT INTO documents(document_cote, document_name, document_date) \n VALUES (%(cote)s, %(name)s, %(date)s) \"\"\",\n {\n 'cote': cote,\n 'name': name,\n 'date': date,\n })\n mysql.connection.commit()\n cursor.close()\n print(f'''Document {cote} successfully added to DB''')\n\n\ndef add_image_to_db(image_id, document_cote):\n cursor = mysql.connection.cursor()\n cursor.execute(\"\"\"\n INSERT INTO images(image_id, document_cote) \n VALUES (%(image_id)s, %(document_cote)s) \"\"\",\n {\n 'image_id': image_id,\n 'document_cote': document_cote,\n })\n mysql.connection.commit()\n cursor.close()\n print(f'''Image {image_id} successfully added to DB''')\n\n\ndef minify(original_path, cote, image_name):\n img = cv.imread(join(original_path, cote, image_name))\n height = int(img.shape[0] * RESIZED_WIDTH / img.shape[1])\n resized = cv.resize(img, (RESIZED_WIDTH, height), interpolation=cv.INTER_CUBIC)\n cv.imwrite(join(RESIZED_PATH, cote, image_name), resized)\n print(f'''Image {image_name} successfully minified''')\n\n\ndef add_images_to_db(cote):\n if not exists(join(ORIGINAL_PATH, cote)):\n print(f'''XXX --- Directory does not exists for {cote} --- XXX''')\n return\n\n path = join(RESIZED_PATH, cote)\n if not exists(path):\n mkdir(path)\n for f in listdir(join(ORIGINAL_PATH, cote)):\n if f[-4:] == '.jpg':\n if not exists(join(RESIZED_PATH, cote, f)):\n minify(ORIGINAL_PATH, cote, f)\n if not is_image_in_db(f[:-4]):\n add_image_to_db(f[:-4], cote)\n\n\ndef read_excel_data(excel_file):\n data = pd.read_excel(excel_file,\n sheet_name='Feuil1')\n df = pd.DataFrame(data, columns=['Cote', 'Intitulé', 'dates'])\n print(df)\n\n for i in range(df.shape[0]):\n if not is_document_in_db(PROJECT_NAME + data.at[i, 'Cote']):\n print(f'''Adding {PROJECT_NAME + data.at[i, 'Cote']} to DB''')\n add_document_to_db(PROJECT_NAME + data.at[i, 'Cote'],\n data.at[i, 'Intitulé'], data.at[i, 'dates'])\n add_images_to_db(PROJECT_NAME + data.at[i, 'Cote'])\n\n\n@init_db.route('/initdb')\n@admin\ndef initdb():\n read_excel_data('images/liste_documents.xlsx')\n return make_response(jsonify({'success': 'init db'}), 200)\n","repo_name":"alexisreis/annotation-app","sub_path":"server/api/init_database.py","file_name":"init_database.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30933731778","text":"import h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom constants import barcodes_dict, barcode_list, data_path\n\n\ndef plot_means(means):\n plt.clf()\n for i in range(4):\n plt.plot(means[i], label='barcode {}'.format(i+1))\n plt.legend(loc='upper left')\n plt.show()\n\n\ndef 
make_signal(means, length):\n signal = []\n N = len(means)\n segment_len = length//(N-1)\n for i in range(N-1):\n signal.append(means[i])\n for j in range(1, segment_len):\n signal.append(means[i] + (means[i+1] - means[i])*(j/segment_len))\n return signal\n\n\nf = h5py.File(data_path+'kmer_model.hdf5', 'r')\nN_classes = len(barcode_list)\nK = 6\nkmers = {}\nsquiggle_length = 400\n\nfor x in list(f['model']):\n kmers[x[0].decode('utf-8')] = x[1]\n\nmean_signals = []\nfor key in barcode_list:\n signal = np.array([])\n barcode = barcodes_dict[key]\n for i in range(0, len(barcode)-K):\n signal = np.append(signal, kmers[barcode[i:i + K]])\n mean_signals.append(make_signal(signal, squiggle_length))\n\nfor i in range(N_classes):\n plt.plot(mean_signals[i], label='barcode {}'.format(i))\nplt.legend(loc='upper left')\nplt.show()\n\n","repo_name":"Gogis0/barcluster","sub_path":"barcode_models.py","file_name":"barcode_models.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1585395868","text":"# 나이트의 이동\nimport sys\nfrom collections import deque\ninput=sys.stdin.readline\n\ndx=[-2,-2,-1,-1,1,1,2,2]\ndy=[1,-1,2,-2,2,-2,1,-1]\n\ndef bfs(x,y):\n q=deque()\n q.append((x,y))\n\n while len(q):\n if g[tx][ty] != 0:\n return g[tx][ty]\n x,y=q.popleft()\n\n for i in range(8):\n nx=x+dx[i]\n ny=y+dy[i]\n\n #범위 벗어남\n if nx<0 or ny<0 or nx>=size or ny>=size:\n continue\n if g[nx][ny]==0:\n g[nx][ny]=g[x][y]+1\n q.append((nx,ny))\n return g[tx][ty]\n\nn=int(input())\nfor i in range(n):\n size=int(input())\n g=[[0]*size for i in range(size)]\n cx,cy=map(int,input().split())\n tx,ty=map(int,input().split())\n if cx==tx and cy==ty:\n print(0)\n else:\n print(bfs(cx,cy))","repo_name":"Mindlestick/CodingTest","sub_path":"5.DFS&BFS/BOJ7562.py","file_name":"BOJ7562.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17288948815","text":"# Clasificación de imagenes\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# Descargar las imagenes de la base de datos de perros vs gatos y almacenarla en el directorio /tmp\n_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'\npath_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)\nPATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')\n\n'''\nEl directorio que se genera tiene la siguiente estructura\ncats_and_dogs_filtered\n|__ train\n |______ cats: [cat.0.jpg, cat.1.jpg, cat.2.jpg ....]\n |______ dogs: [dog.0.jpg, dog.1.jpg, dog.2.jpg ...]\n|__ validation\n |______ cats: [cat.2000.jpg, cat.2001.jpg, cat.2002.jpg ....]\n |______ dogs: [dog.2000.jpg, dog.2001.jpg, dog.2002.jpg ...]\n\n'''\n\n# Una vez que se extraen los datos, se asignan las variables para los grupos de \n# entrenamiento y validación\ntrain_dir = os.path.join(PATH, 'train')\nvalidation_dir = os.path.join(PATH, 'validation')\n\n# Directorios de validación\ntrain_cats_dir = os.path.join(train_dir, 'cats') # imágenes de 
gatos\ntrain_dogs_dir = os.path.join(train_dir, 'dogs') # imágenes de perros\n# Directorios de entrenamiento\nvalidation_cats_dir = os.path.join(validation_dir, 'cats') # imágenes de gatos\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs') # imágenes de perros\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# Analizar los datos que se obtuvieron\n\n# Ver el número total de imágenes de entrenamiento\nnum_cats_tr = len(os.listdir(train_cats_dir))\nnum_dogs_tr = len(os.listdir(train_dogs_dir))\n\n# Ver el número total de imágenes de validación\nnum_cats_val = len(os.listdir(validation_cats_dir))\nnum_dogs_val = len(os.listdir(validation_dogs_dir))\n\n# Total de imágenes\ntotal_train = num_cats_tr + num_dogs_tr\ntotal_val = num_cats_val + num_dogs_val\n\n# Se colocan algunas variables para su uso posterior en el programa\nbatch_size = 128\nepochs = 50\nIMG_HEIGHT = 150\nIMG_WIDTH = 150\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# Preparación de los datos\n\n# La clase proporcionada por tf.keras ImageDataGenerator puede leer imágenes y pre-procesarlas\n# para convertirlas en tensores, también configura los generadores que convierten las imágenes\n# en grupos de tensores (batches of tensors)\ntrain_image_generator = ImageDataGenerator(rescale=1./255) # Generador para los datos de entrenamiento\nvalidation_image_generator = ImageDataGenerator(rescale=1./255) # Generador para los datos de validación\n\n# Se cargan las imágenes, se rescalan y redimensionan\ntrain_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,\n directory=train_dir,\n shuffle=True,\n target_size=(IMG_HEIGHT, IMG_WIDTH),\n class_mode='binary')\n\nval_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,\n directory=validation_dir,\n target_size=(IMG_HEIGHT, IMG_WIDTH),\n class_mode='binary')\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# Visualizar las imágenes\n\n# La función next regresa un lote de imágenes del set de datos\n# regresa valores de la forma (x_train, y_train), donde:\n# x_train son los características de entrenamiento (imágenes)\n# y_train son las etiquetas {Estas se descartan, no se asignan a variable alguna}\nsample_training_images, _ = next(train_data_gen)\n\n# Esta función graficará las imágenes en forma de una rejilla 1 x 5\ndef plotImages(images_arr):\n fig, axes = plt.subplots(1, 5, figsize=(20,20))\n axes = axes.flatten()\n for img, ax in zip( images_arr, axes):\n ax.imshow(img)\n ax.axis('off')\n plt.tight_layout()\n plt.show()\n\nplotImages(sample_training_images[:5])\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# Crear el modelo\n\n# Aquí se genera un modelo que consiste en tres bloques de convolución \n# con las capas máximas pool en cada uno de ellos.\n# Hay una capa completamente conectada con 512 unidades que son activadas\n# por una FA del tipo relu (Rectified Linear Unit, y = max(0, x))\n# La salida del modelo es una clasificación binaria mediante la FA sigmoide\nmodel = Sequential([\n Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),\n MaxPooling2D(),\n Conv2D(32, 3, padding='same', activation='relu'),\n MaxPooling2D(),\n Conv2D(64, 3, padding='same', activation='relu'),\n MaxPooling2D(),\n Flatten(),\n Dense(512, activation='relu'),\n Dense(1, activation='sigmoid')\n])\n\n# Compilar el modelo\nmodel.compile(optimizer='adam',\n loss='binary_crossentropy',\n 
metrics=['accuracy'])\n\n# Observar el resumen del modelo generado \nmodel.summary()\n\n# Entrenar el modelo\n'''\nhistory = model.fit_generator(\n train_data_gen,\n steps_per_epoch=total_train // batch_size,\n epochs=epochs,\n validation_data=val_data_gen,\n validation_steps=total_val // batch_size\n)\n\n# Visualizar los resultados del entrenamiento\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n'''\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# Eliminar el overfitting por medio del aumento de los datos de entrenamiento\n# lo que se hace es, en primer lugar, evitar que la red sea entrenada\n# o que vea la misma imagen dos veces durante el entrenamiento\n# por lo que se tiene que hacer ligeras modificaciones a las imágenes que\n# se tienen de entrenamiento\n\n# Aplicar una rotación horizontal\nimage_gen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)\ntrain_data_gen = image_gen.flow_from_directory(batch_size=batch_size,\n directory=train_dir,\n shuffle=True,\n target_size=(IMG_HEIGHT, IMG_WIDTH))\n\n# Tomar una muestra de los ejemplos de entrenamiento y repetirla 5 veces\n# para que se observe el resultado sobre la misma imagen\naugmented_images = [train_data_gen[0][0][0] for i in range(5)]\n\n# Visualizar las nuevas imágenes\nplotImages(augmented_images)\n\n# Rotar la imagen de manera aleatoria\nimage_gen = ImageDataGenerator(rescale=1./255, rotation_range=45)\n\ntrain_data_gen = image_gen.flow_from_directory(batch_size=batch_size,\n directory=train_dir,\n shuffle=True,\n target_size=(IMG_HEIGHT, IMG_WIDTH))\n\naugmented_images = [train_data_gen[0][0][0] for i in range(5)]\n\nplotImages(augmented_images)\n\n\n# Ahora hacer zoom\nimage_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.5)\n\ntrain_data_gen = image_gen.flow_from_directory(batch_size=batch_size,\n directory=train_dir,\n shuffle=True,\n target_size=(IMG_HEIGHT, IMG_WIDTH))\n\naugmented_images = [train_data_gen[0][0][0] for i in range(5)]\n\nplotImages(augmented_images)\n\n# Juntar todos estos grupos de datos\nimage_gen_train = ImageDataGenerator(\n rescale=1./255,\n rotation_range=45,\n width_shift_range=.15,\n height_shift_range=.15,\n horizontal_flip=True,\n zoom_range=0.5\n )\n\ntrain_data_gen = image_gen_train.flow_from_directory(batch_size=batch_size,\n directory=train_dir,\n shuffle=True,\n target_size=(IMG_HEIGHT, IMG_WIDTH),\n class_mode='binary')\n\n# Visualizar una sola imagen con todas las transformaciones\naugmented_images = [train_data_gen[0][0][0] for i in range(5)]\nplotImages(augmented_images)\n\n# Ahora para la validación\nimage_gen_val = ImageDataGenerator(rescale=1./255)\n\nval_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size,\n directory=validation_dir,\n target_size=(IMG_HEIGHT, IMG_WIDTH),\n class_mode='binary')\n\n# Otra técnica para reducir el overfitting es la introducción de dropouts a la red\n# Es una forma de regularización que fuerza a los 
pesos neuronales a tomar\n# solo valores pequeños, lo que hace la distribución de los pesos más regulares\n# y la red puede reducir el overfitting en entrenamientos con muestras pequeñas\n\n# Crear una nueva red con Dropouts\n# esto hace, de manera aleatoria, que el 20% de las neuronas se vayan a 0\nmodel_new = Sequential([\n Conv2D(16, 3, padding='same', activation='relu', \n input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),\n MaxPooling2D(),\n Dropout(0.2),\n Conv2D(32, 3, padding='same', activation='relu'),\n MaxPooling2D(),\n Conv2D(64, 3, padding='same', activation='relu'),\n MaxPooling2D(),\n Dropout(0.2),\n Flatten(),\n Dense(512, activation='relu'),\n Dense(1, activation='sigmoid')\n])\n\n# Compilar el nuevo modelo\nmodel_new.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n# Ver el resumen del modelo\nmodel_new.summary()\n\n# Entrenar el modelo\nhistory = model_new.fit_generator(\n train_data_gen,\n steps_per_epoch=total_train // batch_size,\n epochs=epochs,\n validation_data=val_data_gen,\n validation_steps=total_val // batch_size\n)\n\n# Visualizar el nuevo modelo\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n\n\n\n\n'''\npredictions = model.predict(test_images)\n# Se puede observar la primera predicción\npredictions[0]\n\n# Se observa cual es el que tiene el valor máximo, que representaría el que tiene la mayor\n# probabilidad de que sea la etiqueta\nnp.argmax(predictions[0])\n'''\n","repo_name":"NightRoadIx/SistemasVisionArtif","sub_path":"exempel037.py","file_name":"exempel037.py","file_ext":"py","file_size_in_byte":11876,"program_lang":"python","lang":"es","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"30933777508","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport h5py\nimport my_dtw\nimport seaborn as sn\nfrom itertools import product\nfrom constants import data_path\nfrom my_scoring import ScoringScheme\nfrom visualization import make_alignment_figure\nfrom util import *\n\n\ndtw = my_dtw.LocalDTW(scoring_scheme=ScoringScheme())\ndtw_global = my_dtw.GlobalDTW()\nbc1 = '1'\nbc2 = '1'\nf = h5py.File(data_path+'barcoded_reads.hdf5', 'r')\ntrain_sz = 100\nprefix_size = 1000\nREPS = 10\nmean = None\n\nbarcodes1 = np.random.choice(list(f['barcode0'+bc1].keys())[train_sz:], 10)\nbarcodes2 = np.random.choice(list(f['barcode0'+bc2].keys())[train_sz:], 10)\nscore_trails = []\n\nfor (a, b) in product(barcodes1, barcodes1):\n if a == b: continue\n bar1, bar2 = np.array(f['barcode0'+bc1][a]), np.array(f['barcode0'+bc2][b])\n start1 = f['barcode0'+bc1][a].attrs['start'] - len(bar1)\n end1 = f['barcode0'+bc1][a].attrs['end'] - len(bar1)\n start2 = f['barcode0'+bc2][b].attrs['start'] - len(bar2)\n end2 = f['barcode0'+bc2][b].attrs['end'] - len(bar2)\n #bar1 = np.array(mean_barcodes.mean_signals[0])\n #bar2 = np.array(mean_barcodes.mean_signals[1])\n #bar1 = z_normalize(trim_blank(bar1, 
300).astype(float))[400:400+prefix_size]\n #bar2 = z_normalize(trim_blank(bar2).astype(float))[400:400+prefix_size]\n bar1 = z_normalize(trim_blank(bar1).astype(float))\n bar2 = z_normalize(trim_blank(bar2).astype(float))\n #bar1 = bar1[cut:cut+prefix_size]\n #bar2 = bar2[cut:cut+prefix_size]\n bar1 = bar1[:prefix_size]\n bar2 = bar2[:prefix_size]\n d = dtw.align(bar1, bar2)\n path = dtw.get_path(dtw.A)\n sig1 = bar1[path[1][0]:path[1][-1]]\n sig2 = bar2[path[0][0]:path[0][-1]]\n score_trail = [d]\n for scale, shift in ls_normalize(bar1, bar2, REPS):\n d = dtw.align(bar1, bar2)\n score_trail.append(d)\n path = dtw.get_path(dtw.A)\n sig1, sig2 = dtw.get_aligned(bar1, bar2, path)\n bar1 = scale*bar1 + shift\n print(a, b, 'dist:', d, 'scores:', f['barcode0'+bc1][a].attrs['score'], f['barcode0'+bc2][b].attrs['score'])\n #fig = make_alignment_figure(sig1, sig2, path, dtw.A)\n #plt.title('Score={}'.format(d))\n #fig.show()\n score_trails.append(score_trail)\n if len(score_trails) == 100:\n plt.plot(score_trails)\n plt.xlabel('Iteration')\n plt.ylabel('LDTW Score')\n plt.show()\n break\n\n","repo_name":"Gogis0/barcluster","sub_path":"local_dtw_test.py","file_name":"local_dtw_test.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70821982242","text":"import os, sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport pandas as pd\nfrom pandas import DataFrame\nimport seaborn as sns\nimport tensorflow as tf\nfrom scipy import linalg\nfrom common.hw_utils import *\nfrom Memristor_model.Charge_Trap_Memristor import *\nfrom XBAR_model.XBAR_calculation import XBAR_ARRAY\n\ndata_path = '../MNIST_SW/data/'\n\n#### 1. load data ####\n# load train_data\n\ntrain_data = load_data_csv(data_path + 'train_and_test_data/1010_v2.csv')\nt_train, x_train = tf.one_hot(train_data[:, 0], depth = 10), train_data[:, 1:]/255\n\n# load test_data\ntest_data = load_data_csv(data_path + 'train_and_test_data/1010t_v2.csv')\nt_test, x_test = tf.one_hot(test_data[:, 0], depth = 10), test_data[:, 1:]/255\n\n#### 2. load Weight and Bias ####\nw = load_data_csv(data_path + 'WandB/10x10weight.csv')\nb = load_data_csv(data_path + 'WandB/10x10bias.csv')\nweight_mat = np.vstack((w, b))\n\n#### 3. Memristor Device ####\n# load CTM\nCTM = CTM()\nCTM.load()\n# get fitted Conductance\nG_fit = conduct(V = 10, G_max=CTM.G_max_device, G_min = CTM.G_min_device)\nG_max = np.max(G_fit)\nG_min = np.min(G_fit)\n\n#### 4. Weight Mapping ####\n# mapping weight\nG_mapped_weight = weight_mapping(weight_mat = w, G_mat=G_fit)\nG_mapped_weight_ideal = G_mapped_weight.copy()\n# mapping bias\nG_mapped_bias = weight_mapping(weight_mat = b, G_mat=G_fit)\nG_mapped_bias_ideal = G_mapped_bias.copy()\n\n\n#### 5. Mapping with Device ####\nn_pulse = 34\nn_states = n_pulse\n# Mapping weight with G_step\nG_mapped_weight = weight_G_step_mapping(G_mapped = G_mapped_weight, G_fit = G_fit, n_states = n_states, G_min = G_min, G_max = G_max)\n# Mapping bias with G_step\nG_mapped_bias = weight_G_step_mapping(G_mapped = G_mapped_bias, G_fit = G_fit, n_states = n_states, G_min = G_min, G_max = G_max)\n\n#### 6. Get G Index for Programming ####\n# weight\nG_index = get_G_index(G_mapped=G_mapped_weight, G_fit = G_fit)\n# bias\nG_bias_index = get_G_index(G_mapped=G_mapped_bias, G_fit = G_fit)\n\n#### 7. 
Voltages and Line Resistance ####\n'''\n WL1 = Left Side of the Word line\n WL2 = Right Side of the Word line\n BL1 = Top of the Bit line\n BL2 = Bottom of the Bit line\n'''\nV_APP_WL1 = 10 # WL input voltage\nV_APP_BL2 = 0\n\n# initial Resistance -> All HRS cells\nR_device_init = (1/G_min) * np.ones((weight_mat.shape))\nXBAR = XBAR_ARRAY(Device_R=R_device_init.copy())\nXBAR.R_min, XBAR.R_max = (1/G_max), (1/G_min)\n\n#### 8. Programming with pulse number ####\n\n#Fixme\n# ########################################################################\n# V_device_lst= []\n# for coll in range(10,110,10):\n# G_index = np.random.randint(0,34, (128,coll))\n# G_bias_index = np.random.randint(0, 34, (1, coll))\n# R_device = (1/G_max) *np.ones((129,coll))\n# ########################################################################\n\n### G_ideal Programming ###\nr_factor = 1e-8\nXBAR.V_WL, XBAR.R_S_WL = r_factor * (1/G_min), r_factor * (1/G_min)\nXBAR.programming( V_APP_WL = V_APP_WL1, G_index = G_index, G_bias_index = G_bias_index )\nG_ideal = 1 / XBAR.Device_R\n\n### G Programming ###\nr_factor = 1e-4 # r_factor = R_Line / R_LRS\nXBAR.Device_R = R_device_init.copy()\nXBAR.V_WL, XBAR.R_S_WL = r_factor * (1/G_min), r_factor * (1/G_min)\nXBAR.programming( V_APP_WL = V_APP_WL1, G_index = G_index, G_bias_index = G_bias_index )\nG_1e_4 = 1 / XBAR.Device_R\n\n# GGG=1/(XBAR.Device_R[:weight_mat.shape[0]-1])\n# plt.scatter(w,GGG,s=1.5)\n# plt.ylim(0,G_max+0.5*G_max)\n# plt.xlabel('weight')\n# plt.ylabel('conductance')\n# ax = plt.gca()\n# ax.spines['right'].set_color('none')\n# ax.spines['top'].set_color('none')\n# ax.xaxis.set_ticks_position('bottom')\n# ax.spines['bottom'].set_position(('data',0))\n# ax.yaxis.set_ticks_position('left')\n# ax.spines['left'].set_position(('data',0))\n# plt.show()\n\n# plt.plot(G_ideal, G_ideal, '-o',color = 'r')\n# plt.plot(G_ideal, G_1e_4, 'o',color = 'b')\n# plt.show()\n\nplt.plot(G_ideal * 1e+12,G_ideal* 1e+12,'-o',ms=2.5,color='r',label='G ideal')\nplt.plot(G_ideal* 1e+12,G_1e_4* 1e+12,'o',ms=2.5,color='b',label='G read')\n#plt.plot(G_ideal,G_read_comp_node_10M,'o',ms=2.5,color='m',label='G read')\nplt.ylabel('G pgm',fontsize=23)\nplt.xlabel('G ideal',fontsize=23)\nplt.title('G ideal vs G pgm',fontsize=25)\nplt.show()\n\n\n\n\n# V_device_lst.append(XBAR.V_device)\n# sns.heatmap(R_device)\n# plt.show()\n# V_d = np.zeros((10,129,100))\n# for i in range(len(V_device_lst)):\n# for j in range(len(V_device_lst[i])):\n# for k in range(len(V_device_lst[i][j])):\n# V_d[i][j][k] = V_device_lst[i][j][k]\n#\n# # i = 0\n# # for cols in range(10,110,10):\n# # plt.plot(V_d[i][-1][:cols],'o')\n# # i+=1\n# # plt.show()\n#\n# cnt = 10\n# for j in range(10):\n# for i in range(len(V_d[j][-1][:cnt])):\n# plt.plot(V_d[j][i][:cnt])\n# cnt+=10\n# plt.show()\nprint(1)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"janghoan/line_simul","sub_path":"tests/pgm_uncompensated.py","file_name":"pgm_uncompensated.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3321901220","text":"\"\"\"Analysis widget for the Moving Wire Control application.\"\"\"\n\nimport os as _os\nimport sys as _sys\nimport numpy as _np\nimport time as _time\nimport pandas as _pd\nimport imaids as _imaids\nimport traceback as _traceback\n\nfrom qtpy.QtWidgets import (\n QWidget as _QWidget,\n QMessageBox as _QMessageBox,\n QApplication as _QApplication,\n QVBoxLayout as _QVBoxLayout,\n )\nfrom qtpy.QtCore import Qt as _Qt\nfrom 
qtpy.QtWidgets import QFileDialog as _QFileDialog\nimport qtpy.uic as _uic\n\nfrom idanalysis.gui.utils import (\n get_ui_file as _get_ui_file,\n sleep as _sleep,\n update_db_name_list as _update_db_name_list,\n pandas_load_db_measurements as _pandas_load_db_measurements,\n pandas_load_db_maps as _pandas_load_db_maps,\n json_to_array as _json_to_array\n )\n\nimport matplotlib\nmatplotlib.use('Qt5Agg')\n\nfrom matplotlib.backends.backend_qt5agg import (\n FigureCanvasQTAgg as _FigureCanvas)\nfrom matplotlib.backends.backend_qt5agg import (\n NavigationToolbar2QT as _NavigationToolbar)\nfrom matplotlib.figure import Figure\n\n\nclass MplCanvas(_FigureCanvas):\n\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n fig = matplotlib.figure.Figure(figsize=(width, height), dpi=dpi)\n self.axes = fig.add_subplot(111)\n super(MplCanvas, self).__init__(fig)\n\n\nclass AnalysisWidget(_QWidget):\n \"\"\"Analysis widget class for the ID Analysis application.\"\"\"\n\n def __init__(self, parent=None):\n \"\"\"Set up the ui.\"\"\"\n super().__init__(parent)\n\n # setup the ui\n uifile = _get_ui_file(self)\n self.ui = _uic.loadUi(uifile, self)\n\n self.connect_signal_slots()\n # self.set_pyplot()\n self.set_plot_flag = True\n\n self.data = None\n\n @property\n def directory(self):\n \"\"\"Return the default directory.\"\"\"\n return _QApplication.instance().directory\n\n def connect_signal_slots(self):\n \"\"\"Create signal/slot connections.\"\"\"\n self.ui.cmb_plot.currentIndexChanged.connect(self.plot)\n self.ui.pbt_analyse.clicked.connect(self.run_analysis)\n self.ui.pbt_spectra_fieldmap.clicked.connect(self.save_spectra)\n self.ui.pbt_multipoles.clicked.connect(self.multipoles)\n self.ui.tbt_filedialog.clicked.connect(self.file_dialog)\n self.ui.cmb_id.currentIndexChanged.connect(self.change_id)\n \n def change_id(self):\n if self.ui.cmb_id.currentText() == 'PAPU50':\n self.ui.sb_periods.setValue(18)\n self.ui.dsb_period_length.setValue(50)\n self.ui.dsb_gap.setValue(24)\n self.ui.chb_angle.setChecked(False)\n self.ui.chb_crosstalk.setChecked(False)\n \n elif self.ui.cmb_id.currentText() == 'Delta525':\n self.ui.sb_periods.setValue(21)\n self.ui.dsb_period_length.setValue(52.5)\n self.ui.dsb_gap.setValue(13.6)\n self.ui.chb_angle.setChecked(True)\n self.ui.chb_crosstalk.setChecked(True)\n \n\n def set_pyplot(self):\n \"\"\"Configures plot widget\"\"\"\n self.canvas = MplCanvas(self, width=5, height=8, dpi=100)\n _toolbar = _NavigationToolbar(self.canvas, self)\n \n _layout = _QVBoxLayout()\n _layout.addWidget(self.canvas)\n _layout.addWidget(_toolbar)\n \n self.wg_plot.setLayout(_layout)\n\n def file_dialog(self):\n \"\"\"Opens file dialog to select the fieldmap.\"\"\"\n try:\n self.filename, _ = _QFileDialog.getOpenFileName(self,\"Fieldmap file\",\n \"\",\"All Files (*);;Python Files (*.py)\")\n self.ui.cmb_filename.setCurrentText(self.filename)\n except Exception:\n _traceback.print_exc(file=_sys.stdout)\n\n def run_analysis(self):\n \"\"\"Runs analysis on fieldmap.\"\"\"\n try:\n # Loads the file\n filename = self.ui.cmb_filename.currentText()\n nr_periods = self.ui.sb_periods.value()\n period_length = self.ui.dsb_period_length.value()\n gap = self.ui.dsb_gap.value()\n \n self.data = _imaids.insertiondevice.InsertionDeviceData(\n filename=filename, nr_periods=nr_periods,\n period_length=period_length, gap=gap)\n\n if self.ui.chb_angle.isChecked():\n self.data.correct_angles()\n if self.ui.chb_crosstalk.isChecked():\n self.data.correct_cross_talk()\n\n # Parameters for calculus:\n \n 
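# energy: beam energy (presumably in GeV); rkstep: Runge-Kutta step (presumably mm)\n            # passed to calc_trajectory; skip_poles: end poles excluded from the phase-error fit.\n            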
energy = 3\n rkstep = 0.5\n skip_poles = 4\n\n # Calculations\n self.data.b = self.data.get_field(x=0, y=0, z=self.data.pz)\n \n self.data.roll_off = self.data.calc_roll_off_amplitude(\n self.data.pz, self.data.px)\n self.data.ib, self.data.iib = self.data.calc_field_integrals(\n z_list=self.data.pz)\n \n self.data.integs = _np.array(\n [self.data.calc_field_integrals(self.data.pz, x=xp, y=0) \n for xp in self.data.px])\n \n self.data.traj = self.data.calc_trajectory(\n energy, [0, 0, self.data.pz[0], 0, 0, 1],\n self.data.pz[-1], rkstep)\n self.data.bxamp, self.data.byamp, _, _ = (\n self.data.calc_field_amplitude())\n self.data.kh, self.data.kv = self.data.calc_deflection_parameter(\n self.data.bxamp, self.data.byamp)\n zpe, pe, perms = self.data.calc_phase_error(\n energy, self.data.traj, self.data.bxamp, self.data.byamp,\n skip_poles=skip_poles)\n self.data.pe = pe*180/_np.pi\n self.data.perms = perms*180/_np.pi\n self.data.zpe = zpe\n\n self.ui.le_I1x.setText('{:.2f}'.format(self.data.ib[:, 0][-1]))\n self.ui.le_I1y.setText('{:.2f}'.format(self.data.ib[:, 1][-1]))\n self.ui.le_I2x.setText('{:.2f}'.format(self.data.iib[:, 0][-1]))\n self.ui.le_I2y.setText('{:.2f}'.format(self.data.iib[:, 1][-1]))\n self.ui.le_perms.setText('{:.2f}'.format(self.data.perms))\n if self.data.bxamp > self.data.byamp:\n self.ui.le_bamp.setText('{:.2f}'.format(self.data.bxamp))\n else:\n self.ui.le_bamp.setText('{:.2f}'.format(self.data.byamp))\n self.ui.le_kh.setText('{:.2f}'.format(self.data.kh))\n self.ui.le_kv.setText('{:.2f}'.format(self.data.kv))\n\n self.plot()\n except Exception:\n _traceback.print_exc(file=_sys.stdout)\n\n def plot(self):\n \"\"\"Plots measurement data.\"\"\"\n try:\n\n if self.set_plot_flag:\n self.set_pyplot()\n self.set_plot_flag = False\n\n self.canvas.axes.cla()\n\n if self.ui.cmb_plot.currentText() == 'BxByBz':\n z = self.data.pz\n self.canvas.axes.plot(z, self.data.b[:, 0], label='Bx')\n self.canvas.axes.plot(z, self.data.b[:, 1], label='By')\n self.canvas.axes.plot(z, self.data.b[:, 2], label='Bz')\n self.canvas.axes.set_xlabel('z [mm]')\n self.canvas.axes.set_ylabel('Field [T]')\n self.canvas.axes.legend()\n self.canvas.axes.grid(1)\n\n elif self.ui.cmb_plot.currentText() == 'Bx':\n z = self.data.pz\n self.canvas.axes.plot(z, self.data.b[:, 0], label='Bx')\n self.canvas.axes.set_xlabel('z [mm]')\n self.canvas.axes.set_ylabel('Field [T]')\n self.canvas.axes.legend()\n self.canvas.axes.grid(1)\n\n elif self.ui.cmb_plot.currentText() == 'By':\n z = self.data.pz\n self.canvas.axes.plot(z, self.data.b[:, 1], label='By')\n self.canvas.axes.set_xlabel('z [mm]')\n self.canvas.axes.set_ylabel('Field [T]')\n self.canvas.axes.legend()\n self.canvas.axes.grid(1)\n\n elif self.ui.cmb_plot.currentText() == 'Bz':\n z = self.data.pz\n self.canvas.axes.plot(z, self.data.b[:, 2], label='Bz')\n self.canvas.axes.set_xlabel('z [mm]')\n self.canvas.axes.set_ylabel('Field [T]')\n self.canvas.axes.legend()\n self.canvas.axes.grid(1)\n\n elif self.ui.cmb_plot.currentText() == 'Roll-off':\n x = self.data.px\n self.canvas.axes.plot(x, 100*self.data.roll_off[1],\n label='Roll-off')\n self.canvas.axes.set_xlabel('x [mm]')\n self.canvas.axes.set_ylabel('Roll-off [%]')\n self.canvas.axes.legend()\n self.canvas.axes.grid(1)\n\n elif self.ui.cmb_plot.currentText() == 'Trajectory X':\n z = _np.linspace(self.data.pz[0], self.data.pz[-1],\n len(self.data.traj[:, 0]))\n self.canvas.axes.plot(z, self.data.traj[:, 0],\n label='X trajectory')\n self.canvas.axes.set_xlabel('z [mm]')\n self.canvas.axes.set_ylabel('x 
[mm]')\n                self.canvas.axes.legend()\n                self.canvas.axes.grid(1)\n\n            elif self.ui.cmb_plot.currentText() == 'Trajectory Y':\n                z = _np.linspace(self.data.pz[0], self.data.pz[-1],\n                                 len(self.data.traj[:, 0]))\n                self.canvas.axes.plot(z, self.data.traj[:, 1],\n                                      label='Y trajectory')\n                self.canvas.axes.set_xlabel('z [mm]')\n                self.canvas.axes.set_ylabel('y [mm]')\n                self.canvas.axes.legend()\n                self.canvas.axes.grid(1)\n            \n            elif self.ui.cmb_plot.currentText() == 'Phase Error':\n                poles = list(range(1, len(self.data.pe)+1))\n                self.canvas.axes.plot(poles, self.data.pe, '-o')\n                \n                self.canvas.axes.grid(1)\n                self.canvas.axes.set_xlabel('Pole Number')\n                self.canvas.axes.set_ylabel(r'Phase Error ($\\mathbf{\\phi}$) [°]')\n                self.canvas.axes.axhline(0, color='k', linestyle='--')\n\n            elif self.ui.cmb_plot.currentText() == 'Phase Error vs z':\n                poles = list(range(1, len(self.data.pe)+1))\n                self.canvas.axes.plot(self.data.zpe, self.data.pe, '-o')\n                \n                self.canvas.axes.grid(1)\n                self.canvas.axes.set_xlabel('z [mm]')\n                self.canvas.axes.set_ylabel(r'Phase Error ($\\mathbf{\\phi}$) [°]')\n                self.canvas.axes.axhline(0, color='k', linestyle='--')\n            \n            elif self.ui.cmb_plot.currentText() == 'I1x vs x':\n                x = self.data.px\n                self.canvas.axes.plot(x, self.data.integs[:, 0, -1, 0],\n                                      label='I1x')\n                self.canvas.axes.set_xlabel('x [mm]')\n                self.canvas.axes.set_ylabel('First Field Integral [G.cm]')\n                self.canvas.axes.legend()\n                self.canvas.axes.grid(1)\n            \n            elif self.ui.cmb_plot.currentText() == 'I1y vs x':\n                x = self.data.px\n                self.canvas.axes.plot(x, self.data.integs[:, 0, -1, 1],\n                                      label='I1y')\n                self.canvas.axes.set_xlabel('x [mm]')\n                self.canvas.axes.set_ylabel('First Field Integral [G.cm]')\n                self.canvas.axes.legend()\n                self.canvas.axes.grid(1)\n            \n            elif self.ui.cmb_plot.currentText() == 'I2x vs x':\n                x = self.data.px\n                self.canvas.axes.plot(x, self.data.integs[:, 1, -1, 0],\n                                      label='I2x')\n                self.canvas.axes.set_xlabel('x [mm]')\n                self.canvas.axes.set_ylabel('Second Field Integral [kG.cm2]')\n                self.canvas.axes.legend()\n                self.canvas.axes.grid(1)\n            \n            elif self.ui.cmb_plot.currentText() == 'I2y vs x':\n                x = self.data.px\n                self.canvas.axes.plot(x, self.data.integs[:, 1, -1, 1],\n                                      label='I2y')\n                self.canvas.axes.set_xlabel('x [mm]')\n                self.canvas.axes.set_ylabel('Second Field Integral [kG.cm2]')\n                self.canvas.axes.legend()\n                self.canvas.axes.grid(1)\n\n            self.canvas.figure.tight_layout()\n            self.canvas.draw()\n\n        except Exception:\n            _traceback.print_exc(file=_sys.stdout)\n\n    def multipoles(self):\n        \"\"\"Prints multipoles (from higher to lower harmonics).\"\"\"\n        try:\n            an, bn = self.data.calc_integral_multipole_coef(self.data.pz,\n                                                            self.data.px)\n\n            _msg = 'Normal:\\n{}\\n\\nSkew:\\n{}'.format(bn, an)\n            print(_msg)\n            _QMessageBox.information(self, 'Multipoles', _msg, _QMessageBox.Ok)\n        except Exception:\n            print(_traceback.print_exc(file=_sys.stdout))\n            return False\n\n    def save_spectra(self):\n        try:\n            filename = self.filename.replace('.dat', '.txt')\n            self.data.save_fieldmap_spectra(filename, self.data.px,\n                                            self.data.py, self.data.pz)\n            _msg = \"Spectra fieldmap saved successfully.\"\n            _QMessageBox.information(self, 'Save Spectra', _msg,\n                                     _QMessageBox.Ok)\n        except Exception:\n            _msg = \"Spectra fieldmap could not be saved.\"\n            _QMessageBox.warning(self, 'Save Spectra', _msg,\n                                 
_QMessageBox.Ok)\n","repo_name":"lnls-ima/insertion-devices","sub_path":"idanalysis/gui/analysiswidget.py","file_name":"analysiswidget.py","file_ext":"py","file_size_in_byte":13852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37778068250","text":"import bpy\nfrom mathutils import Vector\nfrom mathutils.geometry import normal # takes 3 or more! :)\nfrom bpy.props import (BoolProperty, FloatVectorProperty, StringProperty, FloatProperty)\n\nfrom sverchok.node_tree import SverchCustomTreeNode\nfrom sverchok.data_structure import (\n dataCorrect, node_id, updateNode, Vector_generate, Matrix_generate)\n\nfrom sverchok.ui.bgl_callback_3dview import callback_disable, callback_enable\nfrom sverchok.utils.context_managers import sv_preferences\nfrom sverchok.utils.sv_idx_viewer28_draw import draw_indices_2D, draw_indices_2D_wbg\n\n\ndef calc_median(vlist):\n a = Vector((0, 0, 0))\n for v in vlist:\n a += v\n return a / len(vlist)\n\n\nclass SvIDXViewer28(SverchCustomTreeNode, bpy.types.Node):\n\n '''Display the index information of geometry and topology.\n In: Vertices, Edges, Faces, Matrixes (for offset), text (for vertices)\n Params: Color for vertices, edges, faces, backface On/Off, background poly On/Off\n '''\n bl_idname = 'SvIDXViewer28'\n bl_label = 'Viewer Index+'\n bl_icon = 'INFO'\n sv_icon = 'SV_INDEX_VIEWER'\n\n def get_scale(self):\n try:\n with sv_preferences() as prefs:\n scale = prefs.index_viewer_scale\n except:\n scale = 1.0\n return scale\n\n def make_color_prop(name, col):\n return FloatVectorProperty(\n name=name, description='', size=4, min=0.0, max=1.0,\n default=col, subtype='COLOR', update=updateNode)\n\n n_id: StringProperty(default='', options={'SKIP_SAVE'})\n\n activate: BoolProperty(\n name='Show', description='Activate node?',\n default=True,\n update=updateNode)\n\n draw_bg: BoolProperty(\n name='draw_bg', description='draw background poly?',\n default=False, update=updateNode)\n\n draw_obj_idx: BoolProperty(\n name='draw_obj_idx', description='draw object index beside part index',\n default=False, update=updateNode)\n\n draw_bface: BoolProperty(\n name='draw_bface', description='draw backfacing indices?',\n default=True, update=updateNode)\n\n display_vert_index: BoolProperty(\n name=\"Vertices\", description=\"Display vertex indices\",\n default=True, update=updateNode)\n display_edge_index: BoolProperty(\n name=\"Edges\", description=\"Display edge indices\", update=updateNode)\n display_face_index: BoolProperty(\n name=\"Faces\", description=\"Display face indices\", update=updateNode)\n \n \n text_scale: FloatProperty(\n name='text_scale', description='', min=0.1,\n default=1.0, update=updateNode)\n\n bg_edges_col: make_color_prop(\"bg_edges\", (.2, .2, .2, 1.0))\n bg_faces_col: make_color_prop(\"bg_faces\", (.2, .2, .2, 1.0))\n bg_verts_col: make_color_prop(\"bg_verts\", (.2, .2, .2, 1.0))\n numid_edges_col: make_color_prop(\"numid_edges\", (1.0, 1.0, 0.1, 1.0))\n numid_faces_col: make_color_prop(\"numid_faces\", (1.0, .8, .8, 1.0))\n numid_verts_col: make_color_prop(\"numid_verts\", (0.6, 1, 0.3, 1.0))\n\n def sv_init(self, context):\n inew = self.inputs.new\n inew('SvVerticesSocket', 'verts')\n inew('SvStringsSocket', 'edges')\n inew('SvStringsSocket', 'faces')\n inew('SvMatrixSocket', 'matrix')\n inew('SvStringsSocket', 'text')\n\n\n def draw_buttons(self, context, layout):\n view_icon = 'RESTRICT_VIEW_' + ('OFF' if self.activate else 'ON')\n\n column_all = layout.column()\n\n row = 
column_all.row(align=True)\n split = row.split()\n r = split.column()\n r.prop(self, \"activate\", text=\"Show\", toggle=True, icon=view_icon)\n row.prop(self, \"draw_bg\", text=\"BG\", toggle=True)\n row.prop(self, \"draw_bface\", text=\"\", icon='GHOST_ENABLED', toggle=True)\n\n col = column_all.column(align=True)\n for item, item_icon in zip(['vert', 'edge', 'face'], ['VERTEXSEL', 'EDGESEL', 'FACESEL']):\n row = col.row(align=True)\n row.prop(self, f\"display_{item}_index\", toggle=True, icon=item_icon, text='')\n row.prop(self, f\"numid_{item}s_col\", text=\"\")\n if self.draw_bg:\n row.prop(self, f\"bg_{item}s_col\", text=\"\")\n\n def get_settings_dict(self):\n '''Produce a dict of settings for the callback'''\n # A copy is needed, we can't have reference to the\n # node in a callback, it will crash blender on undo\n return {\n 'bg_edges_col': self.bg_edges_col[:],\n 'bg_faces_col': self.bg_faces_col[:],\n 'bg_verts_col': self.bg_verts_col[:],\n 'numid_edges_col': self.numid_edges_col[:],\n 'numid_faces_col': self.numid_faces_col[:],\n 'numid_verts_col': self.numid_verts_col[:],\n 'display_vert_index': self.display_vert_index,\n 'display_edge_index': self.display_edge_index,\n 'display_face_index': self.display_face_index,\n 'draw_bface': self.draw_bface,\n 'draw_bg': self.draw_bg,\n 'scale': self.get_scale()\n }.copy()\n\n def draw_buttons_ext(self, context, layout):\n row = layout.row(align=True)\n box = layout.box()\n little_width = 0.735\n\n col = box.column(align=True)\n row = col.row(align=True)\n row.label(text='Colors')\n\n for _icon in ['VERTEXSEL', 'EDGESEL', 'FACESEL']:\n colz = row.column(align=True)\n colz.scale_x = little_width\n colz.label(icon=_icon, text=' ')\n\n colprops = [\n ['Numbers :', ['numid_verts_col', 'numid_edges_col', 'numid_faces_col']],\n ['Background :', ['bg_verts_col', 'bg_edges_col', 'bg_faces_col']]\n ]\n\n for label, geometry_types in colprops:\n row = col.row(align=True)\n row.label(text=label)\n for colprop in geometry_types:\n colx = row.column(align=True)\n colx.scale_x = little_width\n colx.prop(self, colprop, text=\"\")\n\n layout.row().prop(self, 'draw_obj_idx', text=\"Draw Object Index\", toggle=True)\n layout.row().prop(self, 'text_scale', text=\"Text Scale\")\n\n\n def get_face_extras(self, geom):\n face_medians = []\n face_normals = []\n for obj_index, faces in enumerate(geom.faces):\n\n verts = geom.verts[obj_index]\n\n medians = []\n normals = []\n concat_median = medians.append\n concat_normal = normals.append\n\n for face in faces:\n poly_verts = [verts[idx] for idx in face]\n concat_normal(normal(poly_verts))\n concat_median(calc_median(poly_verts))\n\n face_medians.append(medians)\n face_normals.append(normals)\n\n return face_medians, face_normals\n\n\n def get_geometry(self):\n inputs = self.inputs\n geom = lambda: None\n\n for socket in ['matrix', 'verts', 'edges', 'faces', 'text']:\n input_stream = inputs[socket].sv_get(default=[])\n if socket == 'verts' and input_stream:\n\n # ensure they are Vector()\n input_stream = Vector_generate(input_stream)\n\n # ensure they are Matrix() multiplied\n for obj_index, verts in enumerate(input_stream):\n if geom.matrix:\n matrix_index = obj_index if obj_index < len(geom.matrix) else -1\n matrix = geom.matrix[matrix_index] \n input_stream[obj_index] = [matrix @ v for v in verts]\n\n setattr(geom, socket, input_stream)\n\n prefix_if_needed = lambda obj_index, chars: (f'{obj_index}: {chars}') if self.draw_obj_idx else chars\n\n fixed_text = []\n if geom.text:\n for obj_index, final_verts in 
enumerate(geom.verts):\n text_size = max(len(final_verts),\n len(geom.edges[obj_index] if geom.edges else []),\n len(geom.faces[obj_index] if geom.faces else []))\n text_items = self.get_text_of_correct_length(obj_index, geom, text_size)\n for text_item in text_items:\n\n # yikes, don't feed this function nonsense :)\n\n if isinstance(text_item, float):\n chars = prefix_if_needed(obj_index, text_item)\n elif isinstance(text_item, list) and len(text_item) == 1:\n chars = prefix_if_needed(obj_index, text_item[0])\n\n else:\n # in case it receives [0, 0, 0] or (0, 0, 0).. etc\n chars = prefix_if_needed(obj_index, text_item)\n\n fixed_text.append(chars)\n\n if not self.draw_bface:\n geom.face_medians, geom.face_normals = self.get_face_extras(geom)\n geom.text_data = fixed_text\n return geom\n\n else:\n # pass only data onto the draw callback that you intend to show.\n display_topology = lambda: None\n display_topology.vert_data = []\n display_topology.edge_data = []\n display_topology.face_data = []\n display_topology.text_data = fixed_text\n\n concat_vert = display_topology.vert_data.append\n concat_edge = display_topology.edge_data.append\n concat_face = display_topology.face_data.append\n\n for obj_index, final_verts in enumerate(geom.verts):\n\n # can't display vert idx and text simultaneously - ...\n if self.display_vert_index:\n for idx, vpos in enumerate(final_verts):\n chars = prefix_if_needed(obj_index, idx)\n concat_vert((chars, vpos))\n\n if self.display_edge_index and obj_index < len(geom.edges):\n for edge_index, (idx1, idx2) in enumerate(geom.edges[obj_index]):\n loc = final_verts[idx1].lerp(final_verts[idx2], 0.5)\n chars = prefix_if_needed(obj_index, edge_index)\n concat_edge((chars, loc))\n\n if self.display_face_index and obj_index < len(geom.faces):\n for face_index, f in enumerate(geom.faces[obj_index]):\n poly_verts = [final_verts[idx] for idx in f]\n median = calc_median(poly_verts)\n chars = prefix_if_needed(obj_index, face_index)\n concat_face((chars, median))\n\n return display_topology\n\n def get_text_of_correct_length(self, obj_index, geom, num_elements_to_fill):\n \"\"\" get text elements, and extend if needed\"\"\"\n if obj_index < len(geom.text):\n text_items = geom.text[obj_index]\n else:\n text_items = geom.text[len(geom.text)-1]\n\n if not (len(text_items) == num_elements_to_fill):\n\n # ---- this doesn't touch the data, but returns a copy, or a modified copy -----\n if len(text_items) < num_elements_to_fill:\n return text_items + [text_items[-1], ] * (num_elements_to_fill - len(text_items))\n else:\n return text_items[:num_elements_to_fill]\n\n return text_items\n\n def end_early(self):\n if not self.id_data.sv_show:\n return True\n\n if not (self.activate and self.inputs['verts'].is_linked):\n return True\n\n verts = self.inputs['verts'].sv_get()\n if not verts:\n return True\n\n def process(self):\n n_id = node_id(self)\n callback_disable(n_id)\n if self.end_early():\n return\n\n config = self.get_settings_dict()\n config[\"scale\"] = config[\"scale\"] * self.text_scale\n geom = self.get_geometry()\n\n draw_data = {\n 'tree_name': self.id_data.name[:],\n 'custom_function': draw_indices_2D_wbg if self.draw_bg else draw_indices_2D,\n 'args': (geom, config)}\n\n callback_enable(n_id, draw_data, overlay='POST_PIXEL')\n\n def sv_free(self):\n callback_disable(node_id(self))\n\n def sv_copy(self, node):\n ''' reset n_id on copy '''\n self.n_id = ''\n\n def show_viewport(self, is_show: bool):\n \"\"\"It should be called by node tree to show/hide objects\"\"\"\n if 
not self.activate:\n # just ignore request\n pass\n else:\n if is_show:\n self.process()\n else:\n callback_disable(node_id(self))\n\n\ndef register():\n bpy.utils.register_class(SvIDXViewer28)\n\n\ndef unregister():\n bpy.utils.unregister_class(SvIDXViewer28)\n","repo_name":"nortikin/sverchok","sub_path":"nodes/viz/viewer_idx28.py","file_name":"viewer_idx28.py","file_ext":"py","file_size_in_byte":12662,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"1226006621","text":"from __future__ import annotations\n\nimport abc\nimport contextlib\nimport os\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Generator,\n Type,\n)\n\nimport attr\nimport tomlkit\n\nfrom dl_repmanager.fs_editor import FilesystemEditor\nfrom dl_repmanager.package_meta_reader import PackageMetaIOFactory\nfrom dl_repmanager.primitives import (\n LocalReqPackageSpec,\n PackageInfo,\n RequirementList,\n)\nfrom dl_repmanager.toml_tools import (\n TOMLIOFactory,\n TOMLWriter,\n)\n\n\nif TYPE_CHECKING:\n from dl_repmanager.package_index import PackageIndex\n from dl_repmanager.repository_env import RepoEnvironment\n\n\n@attr.s\nclass RepositoryManagementPlugin(abc.ABC):\n repository_env: RepoEnvironment = attr.ib(kw_only=True)\n package_index: PackageIndex = attr.ib(kw_only=True)\n base_path: Path = attr.ib(kw_only=True)\n config_data: dict = attr.ib(kw_only=True)\n fs_editor: FilesystemEditor = attr.ib(init=False)\n\n @fs_editor.default\n def _make_fs_editor(self) -> FilesystemEditor:\n return self.repository_env.get_fs_editor()\n\n @abc.abstractmethod\n def register_package(self, package_info: PackageInfo) -> None:\n raise NotImplementedError\n\n @abc.abstractmethod\n def unregister_package(self, package_info: PackageInfo) -> None:\n raise NotImplementedError\n\n def re_register_package(self, old_package_info: PackageInfo, new_package_info: PackageInfo) -> None:\n self.unregister_package(old_package_info)\n self.register_package(new_package_info)\n\n\n@attr.s\nclass CommonToolingRepositoryManagementPlugin(RepositoryManagementPlugin):\n _package_list_rel_path: Path = attr.ib(init=False)\n\n @_package_list_rel_path.default\n def _make_package_list_rel_path(self) -> Path:\n return Path(self.config_data[\"package_list_path\"])\n\n def register_package(self, package_info: PackageInfo) -> None:\n def transform_package_list(old_text: str) -> str:\n pkg_list = old_text.strip().split()\n pkg_rel_path = package_info.get_relative_path(self.base_path)\n pkg_list.append(str(pkg_rel_path))\n pkg_list.sort()\n return \"\\n\".join(pkg_list) + \"\\n\"\n\n pkg_list_path = self.base_path / self._package_list_rel_path\n self.fs_editor.replace_file_content(pkg_list_path, replace_callback=transform_package_list)\n\n def unregister_package(self, package_info: PackageInfo) -> None:\n def transform_package_list(old_text: str) -> str:\n pkg_list = old_text.strip().split()\n pkg_rel_path = package_info.get_relative_path(self.base_path)\n pkg_list.remove(str(pkg_rel_path))\n pkg_list.sort()\n return \"\\n\".join(pkg_list) + \"\\n\"\n\n pkg_list_path = self.base_path / self._package_list_rel_path\n self.fs_editor.replace_file_content(pkg_list_path, replace_callback=transform_package_list)\n\n\n@attr.s\nclass MainTomlRepositoryManagementPlugin(RepositoryManagementPlugin):\n _metapackages_by_package_type: dict[str, list[str]] = attr.ib(init=False)\n\n @_metapackages_by_package_type.default\n def _make_metapackages_by_package_type(self) -> dict[str, list[str]]:\n result: 
dict[str, list[str]] = {}\n for metapkg_config in self.config_data[\"metapackages\"]:\n for package_type in metapkg_config[\"package_types\"]:\n if package_type not in result:\n result[package_type] = []\n result[package_type].append(metapkg_config[\"name\"])\n return result\n\n def _get_metapackage_paths(self, package_type: str) -> list[Path]:\n metapackage_names = self._metapackages_by_package_type.get(package_type, ())\n return [\n self.repository_env.get_metapackage_spec(metapackage_name).toml_path\n for metapackage_name in metapackage_names\n ]\n\n def _get_path_for_toml(self, metapackage_path: Path, package_info: PackageInfo) -> Path:\n toml_abs_dir = (self.base_path / metapackage_path).parent\n return package_info.get_relative_path(toml_abs_dir)\n\n def _register_main(self, metapackage_path: Path, package_info: PackageInfo) -> None:\n package_path_for_toml = self._get_path_for_toml(metapackage_path=metapackage_path, package_info=package_info)\n package_dep_table = tomlkit.inline_table()\n package_dep_table.add(\"path\", str(package_path_for_toml))\n with self._metapackage_toml_writer(metapackage_path=metapackage_path) as toml_writer:\n section = toml_writer.get_editable_section(\"tool.poetry.group.ci.dependencies\")\n section[package_info.package_reg_name] = package_dep_table\n\n def _unregister_main(self, metapackage_path: Path, package_info: PackageInfo) -> None:\n with self._metapackage_toml_writer(metapackage_path=metapackage_path) as toml_writer:\n with toml_writer.suppress_non_existent_key():\n toml_writer.get_editable_section(\"tool.poetry.dependencies\").remove(package_info.package_reg_name)\n with toml_writer.suppress_non_existent_key():\n toml_writer.get_editable_section(\"tool.poetry.group.dev.dependencies\").remove(\n package_info.package_reg_name\n )\n with toml_writer.suppress_non_existent_key():\n toml_writer.get_editable_section(\"tool.poetry.group.ci.dependencies\").remove(\n package_info.package_reg_name\n )\n\n def _register_app(self, metapackage_path: Path, package_info: PackageInfo) -> None:\n package_path_for_toml = self._get_path_for_toml(metapackage_path=metapackage_path, package_info=package_info)\n package_base_name = package_info.abs_path.name\n package_dep_table = tomlkit.inline_table()\n package_dep_table.add(\"path\", str(package_path_for_toml))\n with self._metapackage_toml_writer(metapackage_path=metapackage_path) as toml_writer:\n section = toml_writer.add_section(f\"tool.poetry.group.app_{package_base_name}.dependencies\")\n section.add(package_info.package_reg_name, package_dep_table)\n section.add(tomlkit.nl())\n\n def _unregister_app(self, metapackage_path: Path, package_info: PackageInfo) -> None:\n package_base_name = package_info.abs_path.name\n with self._metapackage_toml_writer(metapackage_path=metapackage_path) as toml_writer:\n toml_writer.delete_section(f\"tool.poetry.group.app_{package_base_name}.dependencies\")\n\n @contextlib.contextmanager\n def _metapackage_toml_writer(self, metapackage_path: Path) -> Generator[TOMLWriter, None, None]:\n toml_path = self.base_path / metapackage_path\n toml_io_factory = TOMLIOFactory(fs_editor=self.fs_editor)\n with toml_io_factory.toml_writer(toml_path) as toml_writer:\n yield toml_writer\n\n def register_package(self, package_info: PackageInfo) -> None:\n for metapackage_path in self._get_metapackage_paths(package_type=package_info.package_type):\n if \"main_dependency_group\" in self.repository_env.get_tags(package_info.package_type):\n self._register_main(metapackage_path=metapackage_path, 
package_info=package_info)\n if \"own_dependency_group\" in self.repository_env.get_tags(package_info.package_type):\n self._register_app(metapackage_path=metapackage_path, package_info=package_info)\n\n def unregister_package(self, package_info: PackageInfo) -> None:\n for metapackage_path in self._get_metapackage_paths(package_type=package_info.package_type):\n if \"main_dependency_group\" in self.repository_env.get_tags(package_info.package_type):\n self._unregister_main(metapackage_path=metapackage_path, package_info=package_info)\n if \"own_dependency_group\" in self.repository_env.get_tags(package_info.package_type):\n self._unregister_app(metapackage_path=metapackage_path, package_info=package_info)\n\n\n@attr.s\nclass DependencyReregistrationRepositoryManagementPlugin(RepositoryManagementPlugin):\n \"\"\"Updates requirements in other packages dependent on this one\"\"\"\n\n def _is_package_dependent_on(\n self,\n section_name: str,\n dependent_package_info: PackageInfo,\n base_package_info: PackageInfo,\n ) -> bool:\n req_specs = dependent_package_info.requirement_lists.get(section_name, RequirementList()).req_specs\n for req_spec in req_specs:\n if req_spec.package_name == base_package_info.package_reg_name:\n # It really is a dependent package\n assert isinstance(req_spec, LocalReqPackageSpec)\n return True\n\n return False\n\n def register_package(self, package_info: PackageInfo) -> None:\n pass\n\n def unregister_package(self, package_info: PackageInfo) -> None:\n package_meta_io_factory = PackageMetaIOFactory(fs_editor=self.fs_editor)\n\n # Scan other packages to see if they are dependent on this one and update these dependency entries\n for other_package_info in self.package_index.list_package_infos():\n if other_package_info == package_info:\n continue\n\n for section_name in other_package_info.requirement_lists:\n if other_package_info.is_dependent_on(package_info, section_name=section_name):\n with package_meta_io_factory.package_meta_writer(other_package_info.toml_path) as pkg_meta_writer:\n pkg_meta_writer.remove_requirement_item(\n section_name=section_name,\n item_name=package_info.package_reg_name,\n )\n\n def re_register_package(self, old_package_info: PackageInfo, new_package_info: PackageInfo) -> None:\n package_meta_io_factory = PackageMetaIOFactory(fs_editor=self.fs_editor)\n\n # Scan other packages to see if they are dependent on this one and update these dependency entries\n for other_package_info in self.package_index.list_package_infos():\n if other_package_info == old_package_info:\n continue\n\n for section_name in other_package_info.requirement_lists:\n if other_package_info.is_dependent_on(old_package_info, section_name=section_name):\n new_req_rel_path = Path(os.path.relpath(new_package_info.abs_path, other_package_info.abs_path))\n with package_meta_io_factory.package_meta_writer(other_package_info.toml_path) as pkg_meta_writer:\n pkg_meta_writer.update_requirement_item(\n section_name=section_name,\n item_name=old_package_info.package_reg_name,\n new_item_name=new_package_info.package_reg_name,\n new_path=new_req_rel_path,\n )\n\n # Update own dependency entries if the package has moved to another dir\n old_pkg_dir_path = old_package_info.abs_path.parent\n new_pkg_dir_path = new_package_info.abs_path.parent\n if new_pkg_dir_path != old_pkg_dir_path:\n for section_name, req_list in old_package_info.requirement_lists.items():\n updated_requirements: dict[str, PackageInfo] = {} # {: }\n for other_package_spec in req_list.req_specs:\n if not 
isinstance(other_package_spec, LocalReqPackageSpec):\n continue\n req_package_info = self.package_index.get_package_info_by_reg_name(other_package_spec.package_name)\n updated_requirements[other_package_spec.package_name] = req_package_info\n\n with package_meta_io_factory.package_meta_writer(new_package_info.toml_path) as pkg_meta_writer:\n for (\n _req_package_reg_name,\n req_package_info,\n ) in updated_requirements.items():\n updated_req_path = Path(os.path.relpath(req_package_info.abs_path, new_package_info.abs_path))\n pkg_meta_writer.update_requirement_item(\n section_name=section_name,\n item_name=req_package_info.package_reg_name,\n new_item_name=req_package_info.package_reg_name,\n new_path=updated_req_path,\n )\n\n\n_PLUGIN_CLASSES: dict[str, Type[RepositoryManagementPlugin]] = {\n \"common_tooling\": CommonToolingRepositoryManagementPlugin,\n \"toml_registration\": MainTomlRepositoryManagementPlugin,\n \"dependency_registration\": DependencyReregistrationRepositoryManagementPlugin,\n}\n\n\ndef get_plugin_cls(plugin_type: str) -> Type[RepositoryManagementPlugin]:\n return _PLUGIN_CLASSES[plugin_type]\n","repo_name":"datalens-tech/datalens-backend","sub_path":"terrarium/dl_repmanager/dl_repmanager/management_plugins.py","file_name":"management_plugins.py","file_ext":"py","file_size_in_byte":12860,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"54"} +{"seq_id":"30964658577","text":"import time\n\n# from selenium import webdriver\n# from selenium.webdriver.edge.service import Service as EdgeService\n# from webdriver_manager.microsoft import EdgeChromiumDriverManager\n# \n# driver = webdriver.Edge(service=EdgeService(EdgeChromiumDriverManager().install()))\n# \n# time.sleep(1)\nprint('driver installed')\n#------------------------------------------\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport re\n\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\ndesired_capabilities = DesiredCapabilities.EDGE\ndesired_capabilities[\"pageLoadStrategy\"] = \"none\"\n\n\ndriver = webdriver.Edge()\nprint(0)\ndriver.get('https://www.kickstarter.com/projects/xgorobot/xgo-2-worlds-first-raspberry-pi-robotic-dog-with-an-arm?ref=discovery')\nprint(1)\ntime.sleep(10)\nprint(2)\nmoney_data=driver.find_element(By.XPATH, '//*[@id=\"react-project-header\"]/div/div[1]/div[3]/div/div[2]/div[1]/div[2]/span/span')\npeople_data=money=driver.find_element(By.XPATH, '//*[@id=\"react-project-header\"]/div/div[1]/div[3]/div/div[2]/div[2]/div/span')\nmoney=re.findall(\"\\d+\",money_data.text) \npeople=re.findall(\"\\d+\",people_data.text) \npeople=people[0]\nprint(people)\nmm=''\nfor m in money:\n mm+=str(m)\nmoney=int(mm)\nprint(money)\n\n\nimport requests\nimport json\nheaders={\"api-key\":\"VqL9j56HfXisdr8nOvGT=iLl95g=\"}\nurl='http://api.heclouds.com/devices/1039865113/datapoints'\npayload = {'datastreams': [{\"id\": \"followers\", \"datapoints\": [{\"value\": int(people)}]}]}\npayload=json.dumps(payload)\nok=requests.post(headers=headers,url=url,data=payload)\nprint(ok.text)\n\npayload = {'datastreams': [{\"id\": \"moneys\", \"datapoints\": [{\"value\": 
money}]}]}\npayload=json.dumps(payload)\nok=requests.post(headers=headers,url=url,data=payload)\nprint(ok.text)\n\ndriver.close()\n\n\n","repo_name":"jd3096-mpy/xgo2-kickstarter","sub_path":"getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35249492390","text":"from typing import List, Iterator, Dict, Tuple, Any, Type\nimport torch.nn.functional as func \nimport numpy as np\nimport torch\nfrom copy import deepcopy\n\nnp.random.seed(1901)\n\nclass Attack:\n def __init__(\n self,\n vm, device, attack_path,\n epsilon = 0.01,\n alpha = 0.005,\n steps=10,\n min_val = 0,\n max_val = 1\n ):\n \"\"\"\n This file contains code for untargeted FGSM attack\n args:\n vm: virtual model is wrapper used to get outputs/gradients of a model.\n device: system on which code is running \"cpu\"/\"cuda\"\n epsilon: magnitude of perturbation that is added\n\n \"\"\"\n self.vm = vm\n self.steps=10\n self.device = device\n self.attack_path = attack_path\n self.epsilon = 0.11\n self.alpha=self.epsilon*1/self.steps\n self.min_val = 0\n self.max_val = 1\n\n def attack(\n self, original_images: np.ndarray, labels: List[int], target_label = None,\n ):\n original_images = original_images.to(self.device)\n original_images = torch.unsqueeze(original_images, 0)\n labels = torch.tensor(labels).to(self.device)\n target_labels = target_label * torch.ones_like(labels).to(self.device)\n original_images.requires_grad = True\n perturbed_images=self.step(original_images,target_labels)\n for i in range(self.steps-1):\n perturbed_images=self.step(perturbed_images,target_labels)\n \n perturbed_images.retain_grad()\n # get gradient with repect to labels\n data_grad = self.grad1(perturbed_images, target_labels)\n \n \n sign_data_grad = data_grad.sign()\n\n # perturd image\n perturbed_images = perturbed_images - self.alpha*sign_data_grad\n perturbed_images = torch.clamp(perturbed_images, self.min_val, self.max_val)\n\n adv_outputs = self.vm.get_batch_output(perturbed_images)\n final_pred = adv_outputs.max(1, keepdim=True)[1]\n correct = 0\n correct += (final_pred == target_labels).sum().item()\n #print(\"---------------------\")\n #print(correct)\n #print(\"---------------------\")\n self.alpha = 0.997 * self.alpha\n\n if correct == original_images.size(dim=0):\n #print(\"------------------\")\n #print(\"!!!early stopping!!!\")\n #print(\"------------------\")\n break\n \n\n adv_outputs = self.vm.get_batch_output(perturbed_images)\n final_pred = adv_outputs.max(1, keepdim=True)[1]\n correct = 0\n correct += (final_pred == target_labels).sum().item()\n return np.squeeze(perturbed_images.cpu().detach().numpy()), correct\n\n def step(self,original_images, target_labels):\n \n original_images.retain_grad()\n # get gradient with repect to labels\n data_grad = self.grad1(original_images, target_labels)\n \n \n sign_data_grad = data_grad.sign()\n\n # perturd image\n perturbed_image = original_images - self.alpha*sign_data_grad\n perturbed_image = torch.clamp(perturbed_image, self.min_val, self.max_val)\n return perturbed_image\n def grad1(self, batch, labels):\n self.vm.gradient_queries += batch.shape[0]\n return self.grad2(batch, labels)\n def grad2(self, original_images, labels):\n self.vm.defender.model.eval()\n outputs = self.vm.defender.model(original_images)\n loss = func.nll_loss(outputs, labels)\n self.vm.defender.model.zero_grad()\n loss.backward(retain_graph=True)\n data_grad = 
original_images.grad.data\n    return data_grad","repo_name":"Tenebranas/CS-175-Adversarial-Abominations","sub_path":"attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70263646882","text":"import pygame\n\n# Initialise Pygame\npygame.init()\n\n# Set up the display\nscreen = pygame.display.set_mode((400, 300))\n\n# set clock\nclock = pygame.time.Clock()\n\n# Load the sprite image\nsprite_images = []\nfor i in range(1, 9):\n    sprite_images.append(pygame.image.load(f\"images/Run/Warrior_Run_{i}.png\"))\n    #scale the sprite image\n    sprite_images[i - 1] = pygame.transform.scale(sprite_images[i - 1], (sprite_images[i - 1].get_width() * 2, sprite_images[i - 1].get_height() * 2))\n\n# Define the sprite's position\nsprite_x = 200\nsprite_y = 300\n\n# Define the sprite's velocity\nsprite_vel_x = 0\nsprite_vel_y = 0\n\n# Define the sprite's acceleration\nsprite_acc_x = 0\nsprite_acc_y = 0.5\n\n# Define the sprite's jump velocity\njump_vel = -10\n\n# Define the sprite's movement speed\nmove_speed = 5\n\n# Define the current frame of the sprite animation\ncurrent_frame = 0\n\n# Define the animation speed\nanimation_speed = 10\n\n# Define the game loop\nrunning = True\nwhile running:\n    # Handle events\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n        elif event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_LEFT:\n                sprite_vel_x = -move_speed\n            elif event.key == pygame.K_RIGHT:\n                sprite_vel_x = move_speed\n            elif event.key == pygame.K_SPACE:\n                sprite_vel_y = jump_vel\n\n    # Update the sprite's position and velocity\n    sprite_x += sprite_vel_x\n    sprite_y += sprite_vel_y\n    sprite_vel_x += sprite_acc_x\n    sprite_vel_y += sprite_acc_y\n\n    # Clamp the sprite's position to the screen bounds\n    if sprite_x < 0:\n        sprite_x = 0\n    elif sprite_x > screen.get_width() - sprite_images[0].get_width():\n        sprite_x = screen.get_width() - sprite_images[0].get_width()\n    if sprite_y < 0:\n        sprite_y = 0\n    elif sprite_y > screen.get_height() - sprite_images[0].get_height():\n        sprite_y = screen.get_height() - sprite_images[0].get_height()\n        sprite_vel_y = 0\n\n    #fill the screen with black\n    screen.fill((0,0,0))\n    \n    # Draw the sprite\n    screen.blit(sprite_images[current_frame // animation_speed], (sprite_x, sprite_y))\n\n\n    # Update the current frame of the sprite animation\n    current_frame = (current_frame + 1) % (animation_speed * len(sprite_images))\n\n    # Update the display\n    pygame.display.update()\n\n    # Tick the clock\n    clock.tick(120)\n\n\n# Quit Pygame\npygame.quit()","repo_name":"deangroom/space_eggs","sub_path":"walkcycle.py","file_name":"walkcycle.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23614475680","text":"#coding: utf-8\n# script to run all the images through the HOG\n# filter and write them to an output folder\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage.io import imread, imshow\nfrom skimage.feature import hog\nfrom skimage import color\nimport os\nimport pickle\nimport sys\n\nnp.set_printoptions(threshold=sys.maxsize) # print ndarrays in full on stdout instead of truncating them\n\ndef main(argv):\n\n\tif(len(argv) < 4):\n\t\tprint(\"Numero errado de argumentos!\")\n\t\tprint(\"Usagem do hog.py:\")\n\t\tprint(\"argumento-01: Inteiro que define o numero de pixels por celula\")\n\t\tprint(\"argumento-02: Inteiro que define o numero de orientacoes\")\n\t\tprint(\"argumento-03: Inteiro que define o numero de celulas por bloco\")\n\t\tprint(\"argumento-04: (opcional) '-verbose' (sem aspas): prints para debugging\")\n\t\treturn\n\n\tppc = int(argv[1]) # pixels per cell, defined by the user\n\tori = int(argv[2]) # orientations, defined by the user\n\tcpb = int(argv[3]) # cells_per_block, defined by the user\n\n\tverbose = False\t# the program's \"-verbose\" input option turns on debugging prints\n\tif(len(argv) == 5):\n\t\tif(argv[4] == \"-verbose\"):\n\t\t\tverbose = True\n\n\t# caminhoEntrada = os.getcwd() # os.getcwd ==> current folder of the hog.py file\n\tpastaBase = \"/home/arthur/SI/IA/EP\" # folder selected by the user\n\n\tcaminhos = []\n\t# part 1\n\t#caminhos.append(os.path.join(pastaBase, \"dataset1\", \"testes\"))\n\t#caminhos.append(os.path.join(pastaBase, \"dataset1\", \"treinamento\"))\n\t\n\t# part 2\n\tcaminhos.append(os.path.join(pastaBase, \"dataset2\", \"testes\"))\n\tcaminhos.append(os.path.join(pastaBase, \"dataset2\", \"treinamento\"))\n\n\tfor caminhoEntrada in caminhos:\n\t\ttry:\n\t\t\tarquivosPasta = os.listdir(caminhoEntrada)\n\t\texcept OSError as err:\n\t\t\tprint(\"Erro no acesso a pasta com as imagens de entrada: \",err)\n\t\t\treturn\n\n\t\tarquivosImagem = list(filter(lambda k: '.png' in k, arquivosPasta))\n\n\t\tif len(arquivosImagem) == 0:\n\t\t\tprint(\"Pasta selecionada nao contem imagens .png\")\n\t\t\treturn\n\t\t\n\t\ti = 1\n\t\texistePasta = True\n\t\ttry:\n\t\t\twhile(existePasta == True):\n\t\t\t\tcaminhoSaida = os.path.join(caminhoEntrada,\"HOG\" + str(i))\n\t\t\t\tif os.path.exists(caminhoSaida):\n\t\t\t\t\ti += 1\n\t\t\t\telse:\n\t\t\t\t\texistePasta = False\n\t\t\tos.makedirs(caminhoSaida)\n\t\texcept OSError as err:\n\t\t\tprint(\"Erro de acesso a pasta de saida: \", err)\n\t\t\treturn\n\n\t\tfor imagem in arquivosImagem:\n\t\t\tif (verbose == True):\n\t\t\t\tprint(\"\\tProcessando imagem \" + imagem)\n\t\t\ttry:\n\t\t\t\tA = color.rgb2gray(imread(os.path.join(caminhoEntrada, imagem)))\n\t\t\texcept IOError as err:\n\t\t\t\tprint(\"Erro na leitura da imagem \", imagem, \": \", err)\n\t\t\t\tcontinue\n\n\t\t\tv, B = hog(A,orientations=ori, pixels_per_cell=(ppc, ppc),\n\t\t\t\tcells_per_block=(cpb, cpb), visualize=True)\n\n\t\t\t'''\n\t\t\t# plots to show the generated images:\n\t\t\tfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)\n\n\t\t\tax1.axis('off')\n\t\t\tax1.imshow(A, cmap=plt.cm.gray)\n\t\t\tax1.set_title('Imagem de Entrada')\n\t\t\tax1.set_adjustable('box-forced')\n\n\t\t\tax2.axis('off')\n\t\t\tax2.imshow(B)\n\t\t\tax2.set_title('HOG')\n\t\t\tax2.set_adjustable('box-forced')\n\n\t\t\tplt.show()\n\t\t\t'''\n\t\t\t\n\t\t\t# swapping the .png extension for .txt:\n\t\t\tsaida = imagem[:-3]\n\t\t\tsaida = saida + \"txt\"\n\n\t\t\ttry:\n\t\t\t\tf = open(os.path.join(caminhoSaida, saida), 'w')\n\t\t\t\t# f.write(str(v)[2:-1])\t# the [2:-1] slice strips '[' and ']' from the output\n\t\t\t\tfor x in np.nditer(v):\n\t\t\t\t\tf.write(str(x) + \"\\n\")\n\t\t\texcept IOError as err:\n\t\t\t\tprint(\"Erro na escrita do arquivo \", saida, \": \", err)\n\n\t\ttry:\n\t\t\tarqConfig = open(os.path.join(caminhoSaida,\"configExtrator.dat\"), \"wb\")\n\t\texcept IOError as err:\n\t\t\tprint(\"Erro na escrita do arquivo configExtrator.dat\", err)\n\t\t\treturn\n\n\t\tdata = (ppc, ori, cpb)\n\t\tpickle.dump(data, arqConfig, protocol=2)\n\t\tarqConfig.close()\n\n\t\ttry:\n\t\t\tarqLog = 
open(os.path.join(os.getcwd(),\"logExtratores.txt\"), \"a\")\n\t\texcept IOError as err:\n\t\t\tprint(\"Erro na escrita do arquivo logExtratores.txt\", err)\n\t\t\treturn\n\n\t\ttexto = [\"HOG\" + str(i) + \": \" + caminhoSaida + \"\\n\",\n\t\t\t\t \"ppc = \" + str(ppc) + \"\\n\",\n\t\t\t\t \"ori = \" + str(ori) + \"\\n\",\n\t\t\t\t \"cpb = \" + str(cpb) + \"\\n\\n\"\n\t\t]\n\t\tfor linha in texto:\n\t\t\tarqLog.write(linha)\n\t\tarqLog.close()\n\nif __name__ == \"__main__\":\n main(sys.argv)","repo_name":"art-carnieto/estudosIA","sub_path":"python/hog.py","file_name":"hog.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18746743137","text":"import logging\nimport traceback\nfrom collections import defaultdict\n\nfrom odoo import api, models\nfrom odoo.tools import pycompat\nfrom .lib.management import LogManage\n\n_logger = logging.getLogger(__name__)\n\nLogManage.register_type('write', '修改')\nLogManage.register_type('delete', '删除')\nLogManage.register_type('create', '创建')\n\nmodel = models.BaseModel\n\nsuper_create = model.create\n\nsuper_unlink = model.unlink\n\nsuper_write = model.write\n\n\ndef make_fields_log_value(record, res_id=None, values=None, mode='edit'):\n if mode == 'write':\n field_names = list(values.keys())\n res = record.search_read([('id', '=', res_id)], field_names)\n if not res:\n return False\n old_values = res[0]\n new_values = values\n elif mode == 'create':\n old_values = defaultdict(str)\n new_values = values\n field_names = list(values.keys())\n elif mode == 'delete':\n res = record.search_read([('id', '=', res_id)])\n if not res:\n return False\n old_values = record.search_read([('id', '=', res_id)])[0]\n field_names = list(old_values.keys())\n new_values = defaultdict(str)\n else:\n return\n values = []\n\n for field_name in field_names:\n\n if field_name not in record._fields:\n continue\n field = getattr(record, '_fields')[field_name]\n\n field_type = field.type\n old_v = old_values[field_name]\n new_v = new_values[field_name]\n if field_type in ['one2many', 'many2many', 'one2many']:\n new_v = str(new_v)\n old_v = str(old_v)\n elif field_type in ['datetime', 'date']:\n new_v = str(new_v)\n old_v = str(old_v)\n elif field_type == 'text':\n new_v = field.convert_to_column(new_v, record)\n old_v = field.convert_to_column(old_v, record)\n if old_v == new_v:\n continue\n display_name = field.string\n values.append({\n 'filed_name': field_name,\n 'filed_display': display_name,\n 'new_value': new_v,\n 'old_value': old_v,\n 'field_type': field_type\n })\n return values\n\n\ndef write_log(records, mode, values=None):\n if not getattr(records, '_track_log', False):\n return False\n for record in records:\n try:\n field_values = make_fields_log_value(record, values=values, res_id=record.id, mode=mode)\n if not field_values:\n continue\n LogManage.put_log(record=record, mode=mode, res_id=record.id, fields=field_values)\n except:\n _logger.info('write log fail, mode is %s', mode)\n traceback.print_exc()\n\n\nclass Base(models.AbstractModel):\n _inherit = 'base'\n\n @api.multi\n def unlink(self):\n write_log(self, 'delete')\n res = super(Base, self).unlink()\n return res\n\n @api.multi\n def write(self, vals):\n write_log(self, 'write', values=vals)\n res = super(Base, self).write(vals)\n return res\n\n @api.model_create_multi\n @api.returns('self', lambda value: value.id)\n def create(self, vals_list):\n result = super(Base, self).create(vals_list)\n for record, 
values in pycompat.izip(result, vals_list):\n if not values:\n continue\n write_log(record, 'create', values)\n return result\n","repo_name":"rezaghanimi/main_mdias","sub_path":"mdias_addons/odoo_operation_log/model_extend.py","file_name":"model_extend.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12164159942","text":"from aiogram import types\r\nfrom aiogram.dispatcher.dispatcher import Dispatcher\r\nfrom aiogram.types import InlineKeyboardMarkup\r\nfrom aiogram.types.inline_keyboard import InlineKeyboardButton\r\nfrom create_bot import dp, bot\r\nfrom keyboards import client_kb as nav\r\nfrom data_base import sqlite_db\r\nimport logging\r\n\r\n\r\n# def check_sub_members(chat_member):\r\n# print(chat_member['status'])\r\n# if chat_member['status'] != 'left':\r\n# return True\r\n# else:\r\n# return False\r\n\r\n\r\n\r\nasync def welcome (message : types.Message):\r\n try:\r\n await bot.send_message(message.from_user.id, 'Приятного апетита', reply_markup=nav.kb_client)\r\n await message.delete()\r\n \r\n except:\r\n await message.reply(f'{message.from_user.first_name}, все заказы производятся напрямую в чате с ботом! Перейдите по ссылке: \\nhttps://t.me/Pizza_Nemo_Bot \\nИ сделайте заказ! 🤡')\r\n\r\n\r\n\r\n\r\nasync def command_operation_mode (message: types.Message):\r\n \r\n await bot.send_message (message.from_user.id, 'ПН - ПТ: 10:00 - 23:00 \\nCб и Вс : 9:00 - 2:00 🗓')\r\n await message.delete()\r\n\r\n\r\n\r\nasync def command_adress (message:types.Message):\r\n await bot.send_message (message.from_user.id, 'Улица: Пушкина 🏘 \\nДом: Калатушкина🏠')\r\n await message.delete()\r\n\r\n\r\nasync def send_photo(message:types.Message):\r\n chat_id = message.chat.id \r\n text = 'Привет, подпишись на канал =)'\r\n markup = InlineKeyboardMarkup (\r\n inline_keyboard=[\r\n [\r\n InlineKeyboardButton (text = 'Подпишись', url = 'https://www.youtube.com/channel/UCXRAoUWcfu-wRxBy58uD7lA'),\r\n InlineKeyboardButton (text = 'Подписан', callback_data = '123')\r\n ],\r\n [\r\n InlineKeyboardButton (text = 'Мой Телеграм', url = 'https://t.me/ILYACHEREMISIN')\r\n ]\r\n ]\r\n )\r\n photo = open(\"TRUE PIZZA.jpg\", 'rb')\r\n #await bot.send_message (chat_id=chat_id, text = text, reply_markup=markup)\r\n await bot.send_photo (caption='Пример нашей пиццы', chat_id=chat_id, photo=photo, reply_markup=markup)\r\n await message.delete()\r\n\r\n\r\n@dp.message_handler(commands=['Меню'])\r\n@dp.message_handler(lambda message: 'Меню' in message.text)\r\nasync def menu_command(message: types.Message):\r\n await sqlite_db.sql_read(message)\r\n\r\n\r\n\r\n\r\ndef register_handlers_client (dp:Dispatcher):\r\n dp.register_message_handler (welcome, commands=['start', 'help'])\r\n dp.register_message_handler (command_operation_mode, commands=['operating_mode', 'Operation_mode'])\r\n dp.register_message_handler (command_operation_mode, lambda message: 'Режим' in message.text)\r\n dp.register_message_handler (command_operation_mode, lambda message: 'режим' in message.text)\r\n dp.register_message_handler (command_adress, lambda message: 'Адрес' in message.text)\r\n dp.register_message_handler (command_adress, lambda message: 'адрес' in message.text)\r\n dp.register_message_handler (command_adress, commands=['adress', 'Adress'])\r\n dp.register_message_handler (send_photo, commands=['varients', 'Varients'])\r\n dp.register_message_handler(send_photo,lambda message: 'Вариант' in message.text)\r\n 
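# NOTE: the keyword filters here are registered once per capitalization; a case-insensitive\r\n    # variant (an editor suggestion, not part of the original handler set) would halve them:\r\n    #   dp.register_message_handler(send_photo, lambda message: 'вариант' in message.text.lower())\r\n    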
dp.register_message_handler(send_photo,lambda message: 'вариант' in message.text)\r\n    dp.register_message_handler(send_photo,lambda message: 'Пример' in message.text)\r\n    dp.register_message_handler(send_photo,lambda message: 'пример' in message.text)\r\n    # dp.register_message_handler(menu_command, commands=['Меню'])\r\n    # dp.register_message_handler(send_photo,lambda message: 'Меню' in message.text)\r\n","repo_name":"Sapfik/Practise-Python","sub_path":"handlers/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5038842145","text":"\"\"\"\nthis is a compilation of functions to analyse BEAM-related data for NYC simulation\n\"\"\"\n\nimport urllib.request\nfrom urllib.error import HTTPError\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nimport datetime as dt\nimport pandas as pd\n\nfrom shapely.geometry import Point\nfrom shapely.geometry.polygon import Polygon\nfrom io import StringIO\n\n\ndef get_output_path_from_s3_url(s3_url):\n    \"\"\"\n    transform s3 output path (from beam runs spreadsheet) into path to s3 output\n    that may be used as part of path to the file.\n\n    s3path = get_output_path_from_s3_url(s3url)\n    beam_log_path = s3path + '/beamLog.out'\n    \"\"\"\n\n    return s3_url \\\n        .strip() \\\n        .replace(\"s3.us-east-2.amazonaws.com/beam-outputs/index.html#\", \"beam-outputs.s3.amazonaws.com/\")\n\n\ndef parse_config(s3url, complain=True):\n    \"\"\"\n    parse beam config of beam run.\n\n    :param s3url: url to s3 output\n    :param complain: it will complain if there are many config values found with the same name\n    :return: dictionary config key -> config value\n    \"\"\"\n\n    s3path = get_output_path_from_s3_url(s3url)\n    url = s3path + \"/fullBeamConfig.conf\"\n    config = urllib.request.urlopen(url)\n\n    config_keys = [\"flowCapacityFactor\", \"speedScalingFactor\", \"quick_fix_minCarSpeedInMetersPerSecond\",\n                   \"activitySimEnabled\", \"transitCapacity\",\n                   \"minimumRoadSpeedInMetersPerSecond\", \"fractionOfInitialVehicleFleet\",\n                   \"agentSampleSizeAsFractionOfPopulation\",\n                   \"simulationName\", \"directory\", \"generate_secondary_activities\", \"lastIteration\",\n                   \"fractionOfPeopleWithBicycle\",\n                   \"parkingStallCountScalingFactor\", \"parkingPriceMultiplier\", \"parkingCostScalingFactor\", \"queryDate\",\n                   \"transitPrice\", \"transit_crowding\", \"transit_crowding_percentile\",\n                   \"maxLinkLengthToApplySpeedScalingFactor\", \"max_destination_distance_meters\",\n                   \"max_destination_choice_set_size\",\n                   \"transit_crowding_VOT_multiplier\", \"transit_crowding_VOT_threshold\",\n                   \"activity_file_path\", \"intercept_file_path\", \"additional_trip_utility\",\n                   \"ModuleProbability_1\", \"ModuleProbability_2\", \"ModuleProbability_3\", \"ModuleProbability_4\",\n                   \"BUS-DEFAULT\", \"RAIL-DEFAULT\", \"SUBWAY-DEFAULT\"]\n    intercept_keys = [\"bike_intercept\", \"car_intercept\", \"drive_transit_intercept\", \"ride_hail_intercept\",\n                      \"ride_hail_pooled_intercept\", \"ride_hail_transit_intercept\", \"walk_intercept\",\n                      \"walk_transit_intercept\", \"transfer\"]\n\n    config_map = {}\n    default_value = \"\"\n\n    for conf_key in config_keys:\n        config_map[conf_key] = default_value\n\n    def set_value(key, line_value):\n        value = line_value.strip().replace(\"\\\"\", \"\")\n\n        if key not in config_map:\n            config_map[key] = value\n        else:\n            old_val = config_map[key]\n            if old_val == default_value or old_val.strip() == value.strip():\n                config_map[key] = value\n            else:\n                if complain:\n                    print(\"an 
attempt to rewrite config value with key:\", key)\n print(\" value in the map \\t\", old_val)\n print(\" new rejected value\\t\", value)\n\n physsim_names = ['JDEQSim', 'BPRSim', 'PARBPRSim', 'CCHRoutingAssignment']\n\n def look_for_physsim_type(config_line):\n for physsim_name in physsim_names:\n if 'name={}'.format(physsim_name) in config_line:\n set_value(\"physsim_type\", \"physsim_type = {}\".format(physsim_name))\n\n for b_line in config.readlines():\n line = b_line.decode(\"utf-8\").strip()\n\n look_for_physsim_type(line)\n\n for ckey in config_keys:\n if ckey + \"=\" in line or ckey + \"\\\"=\" in line or '\"' + ckey + \":\" in line:\n set_value(ckey, line)\n\n for ikey in intercept_keys:\n if ikey in line:\n set_value(ikey, line)\n\n return config_map\n\n\ndef get_from_s3(s3url, file_name,\n s3_additional_output='scripts_output'):\n s3path = get_output_path_from_s3_url(s3url)\n path = \"{}/{}/{}\".format(s3path, s3_additional_output, file_name)\n df = None\n try:\n df = pd.read_csv(path, low_memory=False)\n except HTTPError:\n print('File does not exist by path:', path)\n\n return df\n\n\ndef plot_fake_real_walkers(title, fake_walkers, real_walkers, threshold):\n fig, axs = plt.subplots(2, 2, figsize=(24, 4 * 2))\n fig.tight_layout()\n fig.subplots_adjust(wspace=0.05, hspace=0.2)\n fig.suptitle(title, y=1.11)\n\n ax1 = axs[0, 0]\n ax2 = axs[0, 1]\n\n fake_walkers['length'].hist(bins=50, ax=ax1, alpha=0.3, label='fake walkers')\n real_walkers['length'].hist(bins=50, ax=ax1, alpha=0.3, label='real walkers')\n ax1.legend(loc='upper right', prop={'size': 10})\n ax1.set_title(\"Trip length histogram. Fake vs Real walkers. Min length of trip is {0}\".format(threshold))\n ax1.axvline(5000, color=\"black\", linestyle=\"--\")\n\n fake_walkers['length'].hist(bins=50, ax=ax2, log=True, alpha=0.3, label='fake walkers')\n real_walkers['length'].hist(bins=50, ax=ax2, log=True, alpha=0.3, label='real walkers')\n ax2.legend(loc='upper right', prop={'size': 10})\n ax2.set_title(\n \"Trip length histogram. Fake vs Real walkers. Logarithmic scale. Min length of trip is {0}\".format(threshold))\n ax2.axvline(5000, color=\"black\", linestyle=\"--\")\n\n ax1 = axs[1, 0]\n ax2 = axs[1, 1]\n\n long_real_walkers = real_walkers[real_walkers['length'] >= threshold]\n number_of_top_alternatives = 5\n walkers_by_alternative = long_real_walkers.groupby('availableAlternatives')['length'].count().sort_values(\n ascending=False)\n top_alternatives = set(\n walkers_by_alternative.reset_index()['availableAlternatives'].head(number_of_top_alternatives))\n\n for alternative in top_alternatives:\n label = str(list(set(alternative.split(':')))).replace('\\'', '')[1:-1]\n selected = long_real_walkers[long_real_walkers['availableAlternatives'] == alternative]['length']\n selected.hist(bins=50, ax=ax1, alpha=0.4, linewidth=4, label=label)\n selected.hist(bins=20, ax=ax2, log=True, histtype='step', linewidth=4, label=label)\n\n ax1.set_title(\"Length histogram of top {} alternatives of real walkers\".format(number_of_top_alternatives))\n ax1.legend(loc='upper right', prop={'size': 10})\n ax2.set_title(\n \"Length histogram of top {} alternatives of real walkers. 
Logarithmic scale\".format(number_of_top_alternatives))\n ax2.legend(loc='upper right', prop={'size': 10})\n\n\ndef get_fake_real_walkers(s3url, iteration, threshold=2000):\n s3path = get_output_path_from_s3_url(s3url)\n events_file_path = s3path + \"/ITERS/it.{0}/{0}.events.csv.gz\".format(iteration)\n\n start_time = time.time()\n modechoice = pd.concat([df[(df['type'] == 'ModeChoice') | (df['type'] == 'Replanning')]\n for df in pd.read_csv(events_file_path, low_memory=False, chunksize=100000)])\n print(\"events file url:\", events_file_path)\n print(\"loading took %s seconds\" % (time.time() - start_time))\n\n count_of_replanning = modechoice[modechoice['type'] == 'Replanning'].shape[0]\n modechoice = modechoice[modechoice['type'] == 'ModeChoice']\n count_of_modechouces = len(modechoice) - count_of_replanning\n\n walk_modechoice = modechoice[modechoice['mode'] == 'walk'].copy()\n\n def is_real(row):\n if row['length'] < threshold:\n return True\n\n alternatives = set(row['availableAlternatives'].split(':'))\n\n if len(alternatives) == 0:\n print('+1')\n return False\n\n if len(alternatives) == 1 and ('WALK' in alternatives or 'NaN' in alternatives):\n return False\n\n return True\n\n walk_modechoice[['availableAlternatives']] = walk_modechoice[['availableAlternatives']].fillna('NaN')\n walk_modechoice['isReal'] = walk_modechoice.apply(is_real, axis=1)\n\n fake_walkers = walk_modechoice[~walk_modechoice['isReal']]\n real_walkers = walk_modechoice[walk_modechoice['isReal']]\n\n plot_fake_real_walkers(s3url, fake_walkers, real_walkers, threshold)\n\n columns = ['real_walkers', 'real_walkers_ratio', 'fake_walkers', 'fake_walkers_ratio', 'total_modechoice']\n values = [len(real_walkers), len(real_walkers) / count_of_modechouces,\n len(fake_walkers), len(fake_walkers) / count_of_modechouces, count_of_modechouces]\n\n walkers = pd.DataFrame(np.array([values]), columns=columns)\n return walkers\n\n\ndef save_to_s3(s3url, df, file_name,\n aws_access_key_id, aws_secret_access_key,\n output_bucket='beam-outputs', s3_additional_output='scripts_output'):\n import boto3\n s3 = boto3.resource('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)\n\n require_string = 'index.html#'\n if require_string not in s3url:\n print(\n 's3url does not contain \"{}\". That means there is no way to save df. 
Cancelled.'.format(\n require_string))\n else:\n df.to_csv(file_name)\n folder_path = s3url.split('#')[1].strip()\n out_path = \"{}/{}/{}\".format(folder_path, s3_additional_output, file_name)\n s3.meta.client.upload_file(file_name, output_bucket, out_path)\n print('saved to s3: ', out_path)\n\n\ndef read_traffic_counts(df):\n df['date'] = df['Date'].apply(lambda x: dt.datetime.strptime(x, \"%m/%d/%Y\"))\n df['hour_0'] = df['12:00-1:00 AM']\n df['hour_1'] = df['1:00-2:00AM']\n df['hour_2'] = df['2:00-3:00AM']\n df['hour_3'] = df['2:00-3:00AM']\n df['hour_4'] = df['3:00-4:00AM']\n df['hour_5'] = df['4:00-5:00AM']\n df['hour_6'] = df['5:00-6:00AM']\n df['hour_7'] = df['6:00-7:00AM']\n df['hour_8'] = df['7:00-8:00AM']\n df['hour_9'] = df['9:00-10:00AM']\n df['hour_10'] = df['10:00-11:00AM']\n df['hour_11'] = df['11:00-12:00PM']\n df['hour_12'] = df['12:00-1:00PM']\n df['hour_13'] = df['1:00-2:00PM']\n df['hour_14'] = df['2:00-3:00PM']\n df['hour_15'] = df['3:00-4:00PM']\n df['hour_16'] = df['4:00-5:00PM']\n df['hour_17'] = df['5:00-6:00PM']\n df['hour_18'] = df['6:00-7:00PM']\n df['hour_19'] = df['7:00-8:00PM']\n df['hour_20'] = df['8:00-9:00PM']\n df['hour_21'] = df['9:00-10:00PM']\n df['hour_22'] = df['10:00-11:00PM']\n df['hour_23'] = df['11:00-12:00AM']\n df = df.drop(['Date', '12:00-1:00 AM', '1:00-2:00AM', '2:00-3:00AM', '3:00-4:00AM', '4:00-5:00AM', '5:00-6:00AM',\n '6:00-7:00AM', '7:00-8:00AM', '8:00-9:00AM',\n '9:00-10:00AM', '10:00-11:00AM', '11:00-12:00PM', '12:00-1:00PM', '1:00-2:00PM', '2:00-3:00PM',\n '3:00-4:00PM', '4:00-5:00PM', '5:00-6:00PM',\n '6:00-7:00PM', '7:00-8:00PM', '8:00-9:00PM', '9:00-10:00PM', '10:00-11:00PM', '11:00-12:00AM'],\n axis=1)\n return df\n\n\ndef aggregate_per_hour(traffic_df, date):\n wednesday_df = traffic_df[traffic_df['date'] == date]\n agg_df = wednesday_df.groupby(['date']).sum()\n agg_list = []\n for i in range(0, 24):\n xs = [i, agg_df['hour_%d' % i][0]]\n agg_list.append(xs)\n return pd.DataFrame(agg_list, columns=['hour', 'count'])\n\n\ndef plot_traffic_count(date):\n # https://data.cityofnewyork.us/Transportation/Traffic-Volume-Counts-2014-2018-/ertz-hr4r\n path_to_csv = 'https://data.cityofnewyork.us/api/views/ertz-hr4r/rows.csv?accessType=DOWNLOAD'\n df = read_traffic_counts(pd.read_csv(path_to_csv))\n agg_per_hour_df = aggregate_per_hour(df, date)\n agg_per_hour_df.plot(x='hour', y='count', title='Date is %s' % date)\n\n\ndef get_volume_reference_values():\n nyc_volumes_benchmark_date = '2018-04-11'\n nyc_volumes_benchmark_raw = read_traffic_counts(\n pd.read_csv('https://data.cityofnewyork.us/api/views/ertz-hr4r/rows.csv?accessType=DOWNLOAD'))\n nyc_volumes_benchmark = aggregate_per_hour(nyc_volumes_benchmark_raw, nyc_volumes_benchmark_date)\n return nyc_volumes_benchmark\n\n\ndef plot_simulation_volumes_vs_bench(s3url, iteration, ax, title=\"Volume SUM comparison with reference.\",\n simulation_volumes=None, s3path=None, nyc_volumes_reference_values=None):\n if s3path is None:\n s3path = get_output_path_from_s3_url(s3url)\n\n if nyc_volumes_reference_values is None:\n nyc_volumes_reference_values = get_volume_reference_values()\n\n def calc_sum_of_link_stats(link_stats_file_path, chunksize=100000):\n start_time = time.time()\n df = pd.concat([df.groupby('hour')['volume'].sum() for df in\n pd.read_csv(link_stats_file_path, low_memory=False, chunksize=chunksize)])\n df = df.groupby('hour').sum().to_frame(name='sum')\n # print(\"link stats url:\", link_stats_file_path)\n print(\"link stats downloading and calculation took %s seconds\" % 
(time.time() - start_time))\n return df\n\n if simulation_volumes is None:\n linkstats_path = s3path + \"/ITERS/it.{0}/{0}.linkstats.csv.gz\".format(iteration)\n simulation_volumes = calc_sum_of_link_stats(linkstats_path)\n\n color_reference = 'tab:red'\n color_volume = 'tab:green'\n\n ax1 = ax\n\n ax1.set_title('{} iter {}'.format(title, iteration))\n ax1.set_xlabel('hour of day')\n\n ax1.plot(range(0, 24), nyc_volumes_reference_values['count'], color=color_reference, label=\"reference\")\n ax1.plot(np.nan, color=color_volume, label=\"simulation volume\") # to have both legends on same axis\n ax1.legend(loc=\"upper right\")\n ax1.xaxis.set_ticks(np.arange(0, 24, 1))\n\n ax1.tick_params(axis='y', labelcolor=color_reference)\n\n volume_per_hour = simulation_volumes[0:23]['sum']\n volume_hours = list(volume_per_hour.index)\n\n shifted_hours = list(map(lambda x: x + 1, volume_hours))\n\n ax12 = ax1.twinx() # to plot things on the same graph but with different Y axis\n ax12.plot(shifted_hours, volume_per_hour, color=color_volume)\n ax12.tick_params(axis='y', labelcolor=color_volume)\n\n return simulation_volumes\n\n\n# index is hour\nnyc_activity_ends_reference = [0.010526809, 0.007105842, 0.003006647, 0.000310397, 0.011508960, 0.039378258,\n 0.116178879, 0.300608907, 0.301269741, 0.214196234, 0.220456846, 0.237608230,\n 0.258382041, 0.277933413, 0.281891163, 0.308248524, 0.289517677, 0.333402259,\n 0.221353890, 0.140322664, 0.110115403, 0.068543370, 0.057286657, 0.011845660]\n\n\ndef plot_activities_ends_vs_bench(s3url, iteration, ax, ax2=None, title=\"Activity ends comparison.\", population_size=1,\n activity_ends=None, s3path=None):\n if s3path is None:\n s3path = get_output_path_from_s3_url(s3url)\n\n def load_activity_ends(events_file_path, chunksize=100000):\n start_time = time.time()\n try:\n df = pd.concat([df[df['type'] == 'actend']\n for df in pd.read_csv(events_file_path, low_memory=False, chunksize=chunksize)])\n except HTTPError:\n raise NameError('can not download file by url:', events_file_path)\n df['hour'] = (df['time'] / 3600).astype(int)\n print(\"activity ends loading took %s seconds\" % (time.time() - start_time))\n return df\n\n if activity_ends is None:\n events_path = s3path + \"/ITERS/it.{0}/{0}.events.csv.gz\".format(iteration)\n activity_ends = load_activity_ends(events_path)\n\n color_act_ends = 'tab:blue'\n\n ax.set_title('{} iter {} [{} total act ends]'.format(title, iteration, activity_ends.shape[0]))\n ax.set_xlabel('hour of day')\n ax.xaxis.set_ticks(np.arange(0, 24, 1))\n\n act_ends_24 = activity_ends[activity_ends['hour'] <= 24].copy()\n\n act_ends_total = act_ends_24.groupby('hour')['hour'].count() / population_size\n act_ends_hours = list(act_ends_total.index)\n\n def plot_act_ends(ax_to_plot, act_type):\n df = act_ends_24[act_ends_24['actType'] == act_type].groupby('hour')['hour'].count() / population_size\n ax_to_plot.plot(df.index, df, label='# of {} ends'.format(act_type))\n\n def plot_benchmark_and_legend(ax_to_plot):\n color_benchmark = 'black'\n ax_to_plot.plot(np.nan, color=color_benchmark,\n label='benchmark (right scale)') # to have both legends on same axis\n\n ax_to_plot.legend(loc=\"upper right\")\n ax_to_plot.tick_params(axis='y', labelcolor=color_act_ends)\n\n ax_twinx = ax_to_plot.twinx() # to plot things on the same graph but with different Y axis\n ax_twinx.plot(range(0, 24), nyc_activity_ends_reference, color=color_benchmark)\n ax_twinx.tick_params(axis='y', labelcolor=color_benchmark)\n\n ax.plot(act_ends_hours, act_ends_total, 
color=color_act_ends, label='# of activity ends', linewidth=3)\n plot_act_ends(ax, 'Work')\n plot_act_ends(ax, 'Home')\n\n plot_benchmark_and_legend(ax)\n\n if ax2 is not None:\n ax2.set_title('other activities')\n ax2.set_xlabel('hour of day')\n ax2.xaxis.set_ticks(np.arange(0, 24, 1))\n\n plot_act_ends(ax2, 'Meal')\n plot_act_ends(ax2, 'SocRec')\n plot_act_ends(ax2, 'Shopping')\n plot_act_ends(ax2, 'Other')\n\n plot_benchmark_and_legend(ax2)\n\n return activity_ends\n\n\ndef plot_volumes_comparison_on_axs(s3url, iteration, suptitle=\"\", population_size=1,\n simulation_volumes=None, activity_ends=None,\n plot_simulation_volumes=True, plot_activities_ends=True):\n fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(25, 7))\n fig1.tight_layout(pad=0.1)\n fig1.subplots_adjust(wspace=0.25, hspace=0.1)\n plt.xticks(np.arange(0, 24, 2))\n plt.suptitle(suptitle, y=1.05, fontsize=17)\n\n if plot_simulation_volumes:\n plot_simulation_volumes_vs_bench(s3url, iteration=iteration, ax=ax1,\n title=\"Volume SUM comparison with benchmark.\",\n simulation_volumes=simulation_volumes)\n\n if plot_activities_ends:\n plot_activities_ends_vs_bench(s3url, iteration=iteration, ax=ax2, title=\"Activity ends comparison.\",\n population_size=population_size, activity_ends=activity_ends)\n\n\ndef read_nyc_ridership_counts_absolute_numbers_for_mta_comparison(s3url, iteration=0):\n holland_tunnel = {1110292, 1110293, 1110294, 1110295, 540918, 540919, 782080, 782081}\n linkoln_tunnel = {1057628, 1057629, 1057630, 1057631, 308, 309, 817812, 817813, 817814, 817815, 87180, 87181}\n george_washingtone_bridge = {735454, 735455, 767820, 767821, 781014, 781015, 781086, 781087, 781156, 781157, 782128,\n 782129, 796856, 796857, 796858, 796859, 796870, 796871, 866324, 866325, 87174, 87175,\n 87176, 87177, 88110, 88111, 886008, 886009, 968272, 968273, 781094, 781095}\n henry_hudson_bridge = {1681043, 1681042, 542015, 542014, 88230, 88231}\n robert_f_kennedy_bridge = {1235912, 1235913, 1247588, 1247589, 21094, 21095, 23616, 23617, 29774, 29775, 30814,\n 30815, 763932, 763933, 782436, 782437, 782438, 782439, 782440, 782441, 782560, 782561,\n 782570, 782571, 782702, 782703, 782706, 782707, 782708, 782709, 782718, 782719, 870348,\n 870349, 782720, 782721, 782722, 782723, 782724, 782725, 782726, 782727, 782728, 782729,\n 782914, 782915, 853900, 853901, 1230075, 1233314, 1233315, 1299262, 1299263, 1299264,\n 1299265, 1299266, 1299267, 1299268, 1299269, 1299274, 1299275, 1299278, 1299279, 958834,\n 958835, 958836, 958837, 916655, 1041132, 1041133, 1078046, 1078047, 1078048, 1078049,\n 1078050, 1078051, 1078052, 1078053, 1078056, 1078057, 1078058, 1078059, 1078060, 1078061,\n 1089632, 1089633, 1089634, 1089635, 1101864, 1101865, 1101866, 1101867, 1230068, 1230069,\n 1230070, 1230071, 1230072, 1230073, 1230074, 916652, 916653, 916654, 757589, 757588,\n 853929, 853928, 779898, 779899, 1339888, 1339889, 1339890, 1339891, 1433020, 1433021,\n 154, 155, 731748, 731749, 731752, 731753, 731754, 731755, 731766, 731767, 731768, 731769,\n 731770, 731771, 731786, 731787, 853892, 853893, 868400, 868401, 868410, 868411}\n queens_midtown_tunnel = {1367889, 1367888, 487778, 487779}\n hugh_l_carey_tunnel = {1071576, 1071577, 1109400, 1109401, 13722, 13723, 1658828, 1658829, 19836, 19837}\n bronx_whitestone_bridge = {62416, 62417, 729848, 729849, 765882, 765883, 853914, 853915}\n throgs_neck_bridge = {1090614, 1090615, 1090616, 1090617, 1090618, 1090619, 765880, 765881}\n varrazzano_narrows_bridge = {788119, 788118, 1341065, 1341064, 788122, 788123, 788140, 
788141}\n marine_parkwaygil_hodges_memorial_bridge = {1750240, 1750241, 53416, 53417, 732358, 732359, 761184, 761185, 761186,\n 761187, 793744, 793745}\n cross_bay_veterans_memorial_bridge = {1139186, 1139187, 1139198, 1139199, 1139200, 1139201, 1139208, 1139209,\n 1139214, 1139215, 1139222, 1139223, 1139300, 1139301, 1139302, 1139303,\n 1517804, 1517805, 1517806, 1517807, 1517808, 1517809, 1743514, 1743515,\n 1749330, 1749331, 1749332, 1749333, 48132, 48133, 51618, 51619, 51620, 51621,\n 59452, 59453, 68364, 68365, 793786, 793787, 865036, 865037, 865060, 865061,\n 865062, 865063, 953766, 953767, 953768, 953769, 999610, 999611, 999626,\n 999627, 999628, 999629, 1297379}\n\n mta_briges_tunnels_links = holland_tunnel \\\n .union(linkoln_tunnel) \\\n .union(george_washingtone_bridge) \\\n .union(henry_hudson_bridge) \\\n .union(robert_f_kennedy_bridge) \\\n .union(queens_midtown_tunnel) \\\n .union(hugh_l_carey_tunnel) \\\n .union(bronx_whitestone_bridge) \\\n .union(throgs_neck_bridge) \\\n .union(varrazzano_narrows_bridge) \\\n .union(marine_parkwaygil_hodges_memorial_bridge) \\\n .union(cross_bay_veterans_memorial_bridge)\n\n s3path = get_output_path_from_s3_url(s3url)\n\n events_file_path = \"{0}/ITERS/it.{1}/{1}.events.csv.gz\".format(s3path, iteration)\n columns = ['type', 'person', 'vehicle', 'vehicleType', 'links', 'time', 'driver']\n pte = pd.concat([df[(df['type'] == 'PersonEntersVehicle') | (df['type'] == 'PathTraversal')][columns]\n for df in pd.read_csv(events_file_path, chunksize=100000, low_memory=False)])\n\n print('read pev and pt events of shape:', pte.shape)\n\n pev = pte[(pte['type'] == 'PersonEntersVehicle')][['type', 'person', 'vehicle', 'time']]\n pte = pte[(pte['type'] == 'PathTraversal')][['type', 'vehicle', 'vehicleType', 'links', 'time', 'driver']]\n\n walk_transit_modes = {'BUS-DEFAULT', 'RAIL-DEFAULT', 'SUBWAY-DEFAULT'}\n drivers = set(pte[pte['vehicleType'].isin(walk_transit_modes)]['driver'])\n pev = pev[~pev['person'].isin(drivers)]\n\n def get_gtfs_agency(row):\n veh_id = row['vehicle'].split(\":\")\n if len(veh_id) > 1:\n agency = veh_id[0]\n return agency\n return \"\"\n\n def car_by_mta_bridges_tunnels(row):\n if pd.isnull(row['links']):\n return False\n\n for link_str in row['links'].split(\",\"):\n link = int(link_str)\n if link in mta_briges_tunnels_links:\n return True\n\n return False\n\n pte['carMtaRelated'] = pte.apply(car_by_mta_bridges_tunnels, axis=1)\n pte['gtfsAgency'] = pte.apply(get_gtfs_agency, axis=1)\n\n vehicle_info = pte.groupby('vehicle')[['vehicleType', 'gtfsAgency']].first().reset_index()\n\n pev_advanced = pd.merge(pev, vehicle_info, on='vehicle')\n pev_advanced = pev_advanced.sort_values('time', ignore_index=True)\n\n gtfs_agency_to_count = pev_advanced.groupby('gtfsAgency')['person'].count()\n\n # calculate car\n car_mode = {'Car', 'Car-rh-only', 'PHEV', 'BUS-DEFAULT'}\n car_mta_related = pte[(pte['vehicleType'].isin(car_mode)) &\n (pte['carMtaRelated'])]['time'].count()\n transit_car_to_count = gtfs_agency_to_count.append(pd.Series([car_mta_related], index=['Car']))\n\n # calculate subway\n person_pevs = pev_advanced.groupby('person').agg(list)[['vehicleType', 'gtfsAgency']]\n\n def calc_number_of_subway_trips(row):\n vehicle_list = row['vehicleType']\n count_of_trips = 0\n last_was_subway = False\n for vehicle in vehicle_list:\n if vehicle == 'SUBWAY-DEFAULT':\n if not last_was_subway:\n count_of_trips = count_of_trips + 1\n last_was_subway = True\n else:\n last_was_subway = False\n return count_of_trips\n\n 
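# calc_number_of_subway_trips (defined above) collapses consecutive SUBWAY-DEFAULT boardings\n    # into one trip so that in-system transfers are not double-counted: e.g. the sequence\n    # ['SUBWAY-DEFAULT', 'SUBWAY-DEFAULT', 'BUS-DEFAULT', 'SUBWAY-DEFAULT'] yields 2 subway trips.\n    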
person_pevs['subway_trips'] = person_pevs.apply(calc_number_of_subway_trips, axis=1)\n subway_trips = person_pevs['subway_trips'].sum()\n\n triptype_to_count = transit_car_to_count.append(pd.Series([subway_trips], index=['Subway']))\n triptype_to_count = triptype_to_count.to_frame().reset_index()\n\n print('calculated:\\n', pev_advanced.groupby('vehicleType')['person'].count())\n\n return triptype_to_count\n\n\ndef calculate_nyc_ridership_and_save_to_s3_if_not_calculated(s3url, iteration, aws_access_key_id, aws_secret_access_key,\n force=False, output_bucket='beam-outputs'):\n if force:\n print('\"force\" set to True, so, ridership will be recalculated independent of it existence in s3')\n else:\n print('\"force\" set to False (by default) so, ridership will be calculated only if it does not exist in s3')\n\n import boto3\n s3 = boto3.resource('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)\n\n s3_additional_output = 'scripts_output'\n\n ridership = None\n\n require_string = 'index.html#'\n if require_string not in s3url:\n print(\n 's3url does not contain \"{}\". That means there is no way to save result of the function. Calculation '\n 'cancelled.'.format(\n require_string))\n else:\n ridership_file_name = '{}.nyc_mta_ridership.csv.gz'.format(iteration)\n folder_path = s3url.split('#')[1].strip()\n\n s3path = get_output_path_from_s3_url(s3url)\n path = \"{}/{}/{}\".format(s3path, s3_additional_output, ridership_file_name)\n\n def calculate():\n print(\"Ridership calculation...\")\n ridership_df = read_nyc_ridership_counts_absolute_numbers_for_mta_comparison(s3url, iteration)\n ridership_df.to_csv(ridership_file_name)\n out_path = \"{}/{}/{}\".format(folder_path, s3_additional_output, ridership_file_name)\n s3.meta.client.upload_file(ridership_file_name, output_bucket, out_path)\n print('\\nuploaded\\nto: backet {}, path {}\\n\\n'.format(output_bucket, out_path))\n return ridership_df\n\n if force:\n ridership = calculate()\n else:\n try:\n ridership = pd.read_csv(path, low_memory=False)\n print(\"file exist with path '{}'\".format(path))\n except HTTPError:\n print(\"Looks like file does not exits with path '{}'\".format(path))\n ridership = calculate()\n\n return ridership\n\n\ndef calculate_ridership_and_fake_walkers_for_s3urls(s3urls, iteration, aws_access_key_id, aws_secret_access_key):\n for s3url in s3urls:\n print(s3url)\n ridership = calculate_nyc_ridership_and_save_to_s3_if_not_calculated(s3url, iteration, aws_access_key_id,\n aws_secret_access_key)\n print('ridership done\\n')\n\n for s3url in s3urls:\n print(s3url)\n\n fake_walkers_file_name = \"{}.fake_real_walkers.csv.gz\".format(iteration)\n walkers = get_from_s3(s3url, fake_walkers_file_name)\n if walkers is None:\n walkers = get_fake_real_walkers(s3url, iteration)\n save_to_s3(s3url, walkers, fake_walkers_file_name, aws_access_key_id, aws_secret_access_key)\n else:\n print('file {} already exist for url {}'.format(fake_walkers_file_name, s3url))\n print(walkers)\n\n\ndef read_nyc_gtfs_trip_id_to_route_id():\n base_path = \"https://beam-outputs.s3.us-east-2.amazonaws.com/new_city/newyork/gtfs_trips_only_per_agency/\"\n files = ['MTA_Bronx_20200121_trips.csv.gz', 'MTA_Brooklyn_20200118_trips.csv.gz',\n 'MTA_Manhattan_20200123_trips.csv.gz', 'MTA_Queens_20200118_trips.csv.gz',\n 'MTA_Staten_Island_20200118_trips.csv.gz', 'NJ_Transit_Bus_20200210_trips.csv.gz']\n\n urls = map(lambda file_name: base_path + file_name, files)\n trip_id_to_route_id = {}\n\n for url in urls:\n trips = 
pd.read_csv(url.strip(), low_memory=False)[['route_id', 'trip_id']]\n for index, row in trips.iterrows():\n trip_id_to_route_id[str(row['trip_id'])] = row['route_id']\n print(len(trip_id_to_route_id))\n\n return trip_id_to_route_id\n\n\ndef read_bus_ridership_by_route_and_hour(s3url, gtfs_trip_id_to_route_id=None, iteration=0):\n if not gtfs_trip_id_to_route_id:\n gtfs_trip_id_to_route_id = read_nyc_gtfs_trip_id_to_route_id()\n\n s3path = get_output_path_from_s3_url(s3url)\n\n events_file_path = \"{0}/ITERS/it.{1}/{1}.events.csv.gz\".format(s3path, iteration)\n columns = ['type', 'person', 'vehicle', 'vehicleType', 'time', 'driver']\n pte = pd.concat([df[(df['type'] == 'PersonEntersVehicle') | (df['type'] == 'PathTraversal')][columns]\n for df in pd.read_csv(events_file_path, chunksize=100000, low_memory=False)])\n\n print('read PEV and PT events of shape:', pte.shape)\n\n pev = pte[(pte['type'] == 'PersonEntersVehicle')][['person', 'vehicle', 'time']]\n pev['hour'] = pev['time'] // 3600\n\n pte = pte[(pte['type'] == 'PathTraversal') & (pte['vehicleType'] == 'BUS-DEFAULT')]\n drivers = set(pte['driver'])\n\n pev = pev[~pev['person'].isin(drivers)]\n\n print('got PEV {} and PT {}'.format(pev.shape, pte.shape))\n\n def get_gtfs_agency_trip_id_route_id(row):\n agency = \"\"\n trip_id = \"\"\n route_id = \"\"\n\n veh_id = row['vehicle'].split(\":\")\n if len(veh_id) > 1:\n agency = veh_id[0]\n trip_id = str(veh_id[1])\n route_id = gtfs_trip_id_to_route_id.get(trip_id, \"\")\n\n return [agency, trip_id, route_id]\n\n pte[['gtfsAgency', 'gtfsTripId', 'gtfsRouteId']] = pte \\\n .apply(get_gtfs_agency_trip_id_route_id, axis=1, result_type=\"expand\")\n\n print('calculated gtfs agency, tripId and routeId')\n\n columns = ['vehicleType', 'gtfsAgency', 'gtfsTripId', 'gtfsRouteId']\n vehicle_info = pte.groupby('vehicle')[columns].first().reset_index()\n\n pev = pd.merge(pev, vehicle_info, on='vehicle')\n\n print('got advanced version of PEV:', pev.shape, 'with columns:', pev.columns)\n\n walk_transit_modes = {'BUS-DEFAULT'} # ,'RAIL-DEFAULT', 'SUBWAY-DEFAULT'\n bus_to_agency_to_trip_to_hour = pev[(pev['vehicleType'].isin(walk_transit_modes))] \\\n .groupby(['gtfsAgency', 'gtfsRouteId', 'hour'])['person'].count()\n\n return bus_to_agency_to_trip_to_hour\n\n\ndef plot_nyc_ridership(s3url_to_ridership, function_get_run_name_from_s3url, names_to_plot_separately=None, multiplier=20, figsize=(20, 7)):\n columns = ['date', 'subway', 'bus', 'rail', 'car', 'transit (bus + subway)']\n\n suffix = '\\n mta.info'\n reference_mta_info = [['09 2020' + suffix, 1489413, 992200, 130600, 810144, 2481613],\n ['08 2020' + suffix, 1348202, 1305000, 94900, 847330, 2653202],\n ['07 2020' + suffix, 1120537, 1102200, 96500, 779409, 2222737],\n ['06 2020' + suffix, 681714, 741200, 56000, 582624, 1422914],\n ['05 2020' + suffix, 509871, 538800, 29200, 444179, 1048671],\n ['04 2020' + suffix, 516174, 495400, 24100, 342222, 1011574],\n [' 2019' + suffix, 5491213, 2153913, 622000, 929951, 7645126]]\n\n def get_graph_data_row_from_dataframe(triptype_to_count_df, run_name, agency_column='index', value_column='0'):\n\n def get_agency_data(agency):\n return triptype_to_count_df[triptype_to_count_df[agency_column] == agency][value_column].values[0]\n\n def get_sum_agency_data(agencies):\n agencies_sum = 0\n for agency in agencies:\n agencies_sum = agencies_sum + get_agency_data(agency)\n return agencies_sum\n\n mta_bus = get_sum_agency_data(['MTA_Bronx_20200121', 'MTA_Brooklyn_20200118',\n 'MTA_Manhattan_20200123', 
'MTA_Queens_20200118',\n 'MTA_Staten_Island_20200118'])\n\n mta_rail = get_sum_agency_data(['Long_Island_Rail_20200215',\n 'Metro-North_Railroad_20200215'])\n\n mta_subway = get_agency_data('Subway')\n car = get_agency_data('Car')\n transit = mta_subway + mta_bus\n\n return [run_name,\n mta_subway * multiplier,\n mta_bus * multiplier,\n mta_rail * multiplier,\n car * multiplier,\n transit * multiplier]\n\n graph_data = []\n\n for s3url, triptype_to_count in s3url_to_ridership.items():\n title = function_get_run_name_from_s3url(s3url)\n row = get_graph_data_row_from_dataframe(triptype_to_count, title)\n graph_data.append(row)\n\n result = pd.DataFrame(graph_data, columns=columns)\n reference_df = pd.DataFrame(reference_mta_info, columns=columns)\n result = result.append(reference_df).groupby('date').sum()\n\n def plot_bars(df, ax, ax_title, columns_to_plot):\n df[columns_to_plot].plot(kind='bar', ax=ax)\n # ax.grid('on', which='major', axis='y')\n ax.set_title(ax_title)\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.7))\n\n fig, axs = plt.subplots(1, 1, sharex='all', figsize=figsize)\n ax_main = axs\n\n plot_bars(result, ax_main,\n 'reference from mta.info vs BEAM simulation\\nrun data multiplied by {}'.format(multiplier),\n ['subway', 'bus', 'rail', 'car', 'transit (bus + subway)'])\n\n if names_to_plot_separately:\n def plot_bars_2(df, ax, ax_title, columns_to_plot):\n df[columns_to_plot].plot(kind='bar', ax=ax)\n ax.set_title(ax_title)\n # ax.legend(loc='upper right')\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.7))\n\n result_t = result[['subway', 'bus', 'rail', 'car']].transpose()\n\n fig, axs = plt.subplots(1, len(names_to_plot_separately), sharey='all', figsize=figsize)\n fig.subplots_adjust(wspace=0.3, hspace=0.3)\n\n if len(names_to_plot_separately) == 1:\n axs = [axs]\n\n for (name, ax) in zip(names_to_plot_separately, axs):\n selected_columns = []\n for column in result_t.columns:\n if str(column).startswith(name):\n selected_columns.append(column)\n\n plot_bars_2(result_t, ax, \"\", selected_columns)\n\n plt.suptitle('reference from mta.info vs BEAM simulation\\nrun data multiplied by {}'.format(20))\n\ndef read_ridership_from_s3_output(s3url, iteration):\n ridership = None\n s3_additional_output = 'scripts_output'\n\n require_string = 'index.html#'\n if require_string not in s3url:\n print(\n 's3url does not contain \"{}\". 
That means there is no way read prepared output.'.format(require_string))\n else:\n ridership_file_name = '{}.nyc_mta_ridership.csv.gz'.format(iteration)\n s3path = get_output_path_from_s3_url(s3url)\n path = \"{}/{}/{}\".format(s3path, s3_additional_output, ridership_file_name)\n\n try:\n ridership = pd.read_csv(path, low_memory=False)\n print(\"downloaded ridership from \", path)\n except HTTPError:\n print(\"Looks like file does not exits -> '{}'\".format(path))\n\n return ridership\n\n\ndef compare_riderships_vs_baserun_and_benchmark(title_to_s3url, iteration, s3url_base_run, date_to_calc_diff=None,\n figsize=(20, 5), rot=15, suptitle=\"\",\n plot_columns=None, plot_reference=True):\n columns = ['date', 'subway', 'bus', 'rail', 'car', 'transit']\n\n suffix = '\\n mta.info'\n\n benchmark_mta_info = [['09 2020' + suffix, -72.90, -54.00, -78.86, -12.90, -68.42],\n ['08 2020' + suffix, -75.50, -40.00, -83.32, -08.90, -66.68],\n ['07 2020' + suffix, -79.60, -49.00, -83.91, -16.20, -71.90],\n ['06 2020' + suffix, -87.60, -66.00, -90.95, -37.40, -82.17],\n ['05 2020' + suffix, -90.70, -75.00, -95.00, -52.30, -86.89],\n ['04 2020' + suffix, -90.60, -77.00, -96.13, -63.20, -87.47]]\n\n if not plot_columns:\n plot_columns = columns[1:]\n\n date_to_benchmark = {}\n for row in benchmark_mta_info:\n date_to_benchmark[row[0]] = row[1:]\n\n print('reference dates:', date_to_benchmark.keys())\n\n def column_name_to_passenger_multiplier(column_name):\n if column_name == '0':\n return 1\n\n delimeter = '-'\n if delimeter in column_name:\n nums = column_name.split(delimeter)\n return (int(nums[0]) + int(nums[1])) // 2\n else:\n return int(column_name)\n\n def get_sum_of_passenger_per_trip(df, ignore_hour_0=True):\n sum_df = df.sum()\n total_sum = 0\n\n for column in df.columns:\n if column == 'hours':\n continue\n if ignore_hour_0 and column == '0':\n continue\n multiplier = column_name_to_passenger_multiplier(column)\n total_sum = total_sum + sum_df[column] * multiplier\n\n return total_sum\n\n def get_car_bus_subway_trips(beam_s3url):\n s3path = get_output_path_from_s3_url(beam_s3url)\n\n def read_csv(filename):\n file_url = s3path + \"/ITERS/it.{0}/{0}.{1}.csv\".format(iteration, filename)\n try:\n return pd.read_csv(file_url)\n except HTTPError:\n print('was not able to download', file_url)\n\n sub_trips = read_csv('passengerPerTripSubway')\n bus_trips = read_csv('passengerPerTripBus')\n car_trips = read_csv('passengerPerTripCar')\n rail_trips = read_csv('passengerPerTripRail')\n\n sub_trips_sum = get_sum_of_passenger_per_trip(sub_trips, ignore_hour_0=True)\n bus_trips_sum = get_sum_of_passenger_per_trip(bus_trips, ignore_hour_0=True)\n car_trips_sum = get_sum_of_passenger_per_trip(car_trips, ignore_hour_0=False)\n rail_trips_sum = get_sum_of_passenger_per_trip(rail_trips, ignore_hour_0=True)\n\n return car_trips_sum, bus_trips_sum, sub_trips_sum, rail_trips_sum\n\n (base_car, base_bus, base_sub, base_rail) = get_car_bus_subway_trips(s3url_base_run)\n\n graph_data = []\n\n for (run_title, s3url_run) in title_to_s3url:\n (minus_car, minus_bus, minus_sub, minus_rail) = get_car_bus_subway_trips(s3url_run)\n\n def calc_diff(base_run_val, minus_run_val):\n return (minus_run_val - base_run_val) / base_run_val * 100\n\n diff_transit = calc_diff(base_sub + base_bus + base_rail, minus_sub + minus_bus + minus_rail)\n diff_sub = calc_diff(base_sub, minus_sub)\n diff_bus = calc_diff(base_bus, minus_bus)\n diff_car = calc_diff(base_car, minus_car)\n diff_rail = calc_diff(base_rail, minus_rail)\n\n 
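# calc_diff is the percentage change of this run relative to the base run: e.g.\n        # base=100 trips, run=80 trips -> (80 - 100) / 100 * 100 = -20.0, on the same scale as\n        # the negative percentages in the benchmark_mta_info rows above.\n        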
graph_data.append(['{0}'.format(run_title), diff_sub, diff_bus, diff_rail, diff_car, diff_transit])\n\n def plot_bars(df, ax, title, columns_to_plot):\n df.groupby('date').sum()[columns_to_plot].plot(kind='bar', ax=ax, rot=rot)\n ax.grid('on', which='major', axis='y')\n ax.set_title(title)\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.7))\n\n if date_to_calc_diff:\n fig, axs = plt.subplots(1, 2, sharey='all', figsize=figsize)\n ax_main = axs[0]\n else:\n fig, axs = plt.subplots(1, 1, sharey='all', figsize=figsize)\n ax_main = axs\n\n fig.tight_layout(pad=0.1)\n fig.subplots_adjust(wspace=0.25, hspace=0.1)\n\n plt.suptitle('Comparison of difference vs baseline and vs real data from MTI.info\\n{}'.format(suptitle), y=1.2,\n fontsize=17)\n\n result = pd.DataFrame(graph_data, columns=columns)\n if plot_reference:\n reference_df = pd.DataFrame(benchmark_mta_info, columns=columns)\n result = result.append(reference_df)\n\n plot_bars(result, ax_main, 'reference from mta.info vs BEAM simulation', plot_columns)\n\n if date_to_calc_diff:\n df_to_compare = pd.DataFrame(graph_data, columns=columns)\n diff = df_to_compare[columns[1:]].sub(date_to_benchmark[date_to_calc_diff + suffix], axis=1)\n diff[columns[0]] = df_to_compare[columns[0]]\n plot_bars(diff, axs[1], 'runs minus reference at {}'.format(date_to_calc_diff), plot_columns)\n\n\ndef people_flow_in_cbd_s3(s3url, iteration):\n s3path = get_output_path_from_s3_url(s3url)\n events_file_path = s3path + \"/ITERS/it.{0}/{0}.events.csv.gz\".format(iteration)\n return people_flow_in_cbd_file_path(events_file_path)\n\n\ndef people_flow_in_cbd_file_path(events_file_path, chunksize=100000):\n events = pd.concat([events[events['type'] == 'PathTraversal'] for events in\n pd.read_csv(events_file_path, low_memory=False, chunksize=chunksize)])\n return people_flow_in_cdb(events)\n\n\ndef diff_people_flow_in_cbd_s3(s3url, iteration, s3url_base, iteration_base):\n s3path = get_output_path_from_s3_url(s3url)\n events_file_path = s3path + \"/ITERS/it.{0}/{0}.events.csv.gz\".format(iteration)\n s3path_base = get_output_path_from_s3_url(s3url_base)\n events_file_path_base = s3path_base + \"/ITERS/it.{0}/{0}.events.csv.gz\".format(iteration_base)\n return diff_people_flow_in_cbd_file_path(events_file_path, events_file_path_base)\n\n\ndef diff_people_flow_in_cbd_file_path(events_file_path, events_file_path_base, chunksize=100000):\n events = pd.concat([events[events['type'] == 'PathTraversal'] for events in\n pd.read_csv(events_file_path, low_memory=False, chunksize=chunksize)])\n events_base = pd.concat([events[events['type'] == 'PathTraversal'] for events in\n pd.read_csv(events_file_path_base, low_memory=False, chunksize=chunksize)])\n return diff_people_in(events, events_base)\n\n\ndef people_flow_in_cdb(df):\n polygon = Polygon([\n (-74.005088, 40.779100),\n (-74.034957, 40.680314),\n (-73.968867, 40.717604),\n (-73.957924, 40.759091)\n ])\n\n def inside(x, y):\n point = Point(x, y)\n return polygon.contains(point)\n\n def num_people(row):\n mode = row['mode']\n if mode in ['walk', 'bike']:\n return 1\n elif mode == 'car':\n return 1 + row['numPassengers']\n else:\n return row['numPassengers']\n\n def benchmark():\n data = \"\"\"mode,Entering,Leaving\nsubway,2241712,2241712\ncar,877978,877978\nbus,279735,279735\nrail,338449,338449\nferry,66932,66932\nbike,33634,33634\ntram,3528,3528\n \"\"\"\n return pd.read_csv(StringIO(data)).set_index('mode')\n\n f = df[(df['type'] == 'PathTraversal')][['mode', 'numPassengers', 'startX', 'startY', 'endX', 
'endY']].copy(\n deep=True)\n\n f['numPeople'] = f.apply(lambda row: num_people(row), axis=1)\n f = f[f['numPeople'] > 0]\n\n f['startIn'] = f.apply(lambda row: inside(row['startX'], row['startY']), axis=1)\n f['endIn'] = f.apply(lambda row: inside(row['endX'], row['endY']), axis=1)\n f['numIn'] = f.apply(lambda row: row['numPeople'] if not row['startIn'] and row['endIn'] else 0, axis=1)\n\n s = f.groupby('mode')[['numIn']].sum()\n b = benchmark()\n\n t = pd.concat([s, b], axis=1)\n t.fillna(0, inplace=True)\n\n t['percentIn'] = t['numIn'] * 100 / t['numIn'].sum()\n t['percent_ref'] = t['Entering'] * 100 / t['Entering'].sum()\n\n t = t[['numIn', 'Entering', 'percentIn', 'percent_ref']]\n\n t['diff'] = t['percentIn'] - t['percent_ref']\n t['diff'].plot(kind='bar', title=\"Diff: current - reference, %\", figsize=(7, 5), legend=False, fontsize=12)\n\n t.loc[\"Total\"] = t.sum()\n return t\n\n\ndef get_people_in(df):\n polygon = Polygon([\n (-74.005088, 40.779100),\n (-74.034957, 40.680314),\n (-73.968867, 40.717604),\n (-73.957924, 40.759091)\n ])\n\n def inside(x, y):\n point = Point(x, y)\n return polygon.contains(point)\n\n def num_people(row):\n mode = row['mode']\n if mode in ['walk', 'bike']:\n return 1\n elif mode == 'car':\n return 1 + row['numPassengers']\n else:\n return row['numPassengers']\n\n f = df[(df['type'] == 'PathTraversal') & (df['mode'].isin(['car', 'bus', 'subway']))][\n ['mode', 'numPassengers', 'startX', 'startY', 'endX', 'endY']].copy(deep=True)\n\n f['numPeople'] = f.apply(lambda row: num_people(row), axis=1)\n f = f[f['numPeople'] > 0]\n\n f['startIn'] = f.apply(lambda row: inside(row['startX'], row['startY']), axis=1)\n f['endIn'] = f.apply(lambda row: inside(row['endX'], row['endY']), axis=1)\n f['numIn'] = f.apply(lambda row: row['numPeople'] if not row['startIn'] and row['endIn'] else 0, axis=1)\n\n s = f.groupby('mode')[['numIn']].sum()\n\n s.fillna(0, inplace=True)\n\n s['percentIn'] = s['numIn'] * 100 / s['numIn'].sum()\n\n return s['percentIn']\n\n\ndef diff_people_in(current, base):\n def reference():\n data = \"\"\"date,subway,bus,car\n07/05/2020,-77.8,-35,-21.8\n06/05/2020,-87.2,-64,-30.8\n05/05/2020,-90.5,-73,-50.3\n04/05/2020,-90.5,-71,-78.9\n03/05/2020,0.0,4,-0.1\n \"\"\"\n ref = pd.read_csv(StringIO(data), parse_dates=['date'])\n ref.sort_values('date', inplace=True)\n ref['month'] = ref['date'].dt.month_name()\n ref = ref.set_index('month').drop('date', 1)\n return ref\n\n b = get_people_in(base)\n c = get_people_in(current)\n b.name = 'base'\n c.name = 'current'\n\n t = pd.concat([b, c], axis=1)\n t['increase'] = t['current'] - t['base']\n\n pc = reference()\n\n run = t['increase'].to_frame().T\n run = run.reset_index().drop('index', 1)\n run['month'] = 'Run'\n run = run.set_index('month')\n result = pd.concat([run, pc], axis=0)\n\n result.plot(kind='bar', title=\"Diff current - reference, %\", figsize=(10, 10), legend=True, fontsize=12)\n return result\n\n\ndef plot_calibration_parameters(title_to_s3url,\n suptitle=\"\", figsize=(23, 6), rot=70,\n calibration_parameters=None,\n removal_probabilities=None):\n if calibration_parameters is None:\n calibration_parameters = ['additional_trip_utility', 'walk_transit_intercept']\n\n calibration_values = []\n\n for (title, s3url) in title_to_s3url:\n s3path = get_output_path_from_s3_url(s3url)\n config = parse_config(s3path + \"/fullBeamConfig.conf\", complain=False)\n\n def get_config_value(conf_value_name, split_character=\"=\"):\n return config.get(conf_value_name, 
'=default').split(split_character)[-1]\n\n param_values = [title]\n for param in calibration_parameters:\n param_value = get_config_value(param)\n if not param_value:\n param_value = get_config_value(param, \":\")\n if not param_value:\n param_value = 0\n\n param_values.append(float(param_value))\n\n calibration_values.append(param_values)\n\n calibration_parameters.insert(0, 'name')\n result = pd.DataFrame(calibration_values, columns=calibration_parameters)\n\n linewidth = 4\n removal_probabilities_color = 'black'\n\n ax = result.plot(x='name', figsize=figsize, rot=rot, linewidth=linewidth)\n\n if removal_probabilities:\n ax.plot(np.NaN, np.NaN, '--', label='removal probabilities (right scale)',\n color=removal_probabilities_color, linewidth=linewidth)\n\n ax.set_title('calibration parameters {}'.format(suptitle))\n ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n\n ax.grid('on', which='major', axis='y')\n\n if removal_probabilities:\n ax2 = ax.twinx()\n ax2.plot(range(len(removal_probabilities)), removal_probabilities, '--',\n color=removal_probabilities_color, alpha=0.5, linewidth=linewidth)","repo_name":"LBNL-UCB-STI/beam","sub_path":"src/main/python/city_specific_analysis/nyc_analysis.py","file_name":"nyc_analysis.py","file_ext":"py","file_size_in_byte":49044,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"54"} +{"seq_id":"34359083498","text":"from django.db import models\nfrom users.models import MyUser\n\n\nclass Registry(models.Model):\n query_string = models.CharField('String that user was searching for', max_length=100)\n date = models.DateTimeField('query time', auto_now_add=True)\n user = models.ForeignKey(MyUser, on_delete=models.CASCADE, related_name='registries')\n\n\nclass Entry(models.Model):\n name = models.CharField(\n 'String that user was searching for', max_length=100)\n price = models.IntegerField()\n link = models.CharField(max_length=200)\n photo = models.CharField(blank=True, max_length=200)\n","repo_name":"vix000/final","sub_path":"scraper-django/registers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40219695038","text":"from miniworldmaker.containers import toolbar\nfrom miniworldmaker.containers import widgets\n\n\nclass ColorToolbar(toolbar.Toolbar):\n \"\"\"\n A toolbar to get the background color at a specific pixel\n \"\"\"\n\n def __init__(self, board):\n super().__init__()\n self.registered_events.add(\"all\")\n self.registered_events.add(\"debug\")\n self.board = board\n self.default_size = 220\n self.color_label = ColorLabel(\"Color\")\n self.add_widget(self.color_label)\n\n def get_event(self, event, data):\n if \"mouse_left\" in event and self.board.is_in_container(data[0], data[1]):\n self.color_label.set_text(str(self.board.get_color_from_pixel(data)))\n self.color_label.set_color(self.board.get_color_from_pixel(data))\n\n\nclass ColorLabel(widgets.ToolbarLabel):\n def __init__(self, text):\n super().__init__(text)\n\n def set_color(self, color):\n self.background_color = color\n self.dirty = 1\n","repo_name":"asbl/miniworldmaker","sub_path":"source/miniworldmaker/containers/color_toolbar.py","file_name":"color_toolbar.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"18877657368","text":"# -*- coding:utf-8 
-*-\n\"\"\"\n@author:WangYong\n@workNumber:xy04952\n@fileName: views.py\n@creatTime: 2019/08/05\n\"\"\"\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom django.core.paginator import PageNotAnInteger, EmptyPage\nfrom download.common.customPaginator import KingPaginator\nfrom download.common.insertSql import requestdata, getSoftid_ordertime, initdownload\nfrom download.models import CaseResult, TestSoftId, TestCase\nfrom download.common.performCase import runCase, stepRunCase, runGetid, getResult\nfrom download.common.UserCaseManner import caseOperate\nfrom download.common.initEnvironment import initEnviron\nfrom dateutil.parser import parse\nimport threading\nimport json\nimport time\n\n\n# 初始化系统数据\ndef initDate(req):\n if req.method == \"POST\":\n threading.Thread(target=initEnviron).start()\n\n result = {'result': '初始化数据进行中!'}\n return HttpResponse(json.dumps(result))\n\n\n# 首页\ndef index(request):\n if not request.session.get(\"username\"):\n return redirect(\"login\")\n else:\n role = request.COOKIES.get(\"userRoles\")\n return render(request, 'index.html', {\"role\": role})\n\n\n'''\n function:研一项目功能***start\n'''\n\n\n# 研一项目\ndef projectName(request):\n if not request.session.get(\"username\"):\n return redirect(\"login\")\n role = request.COOKIES.get(\"userRoles\")\n return render(request, 'channel/rbcHome.html', {\"role\": role})\n\n\n# 用例管理页面\ndef case(request, project):\n if not request.session.get(\"username\"):\n return redirect(\"login\")\n role = request.COOKIES.get(\"userRoles\")\n username = request.session.get(\"username\")\n caseList = TestCase.objects.filter(newCaseUser=username, project=project).order_by(\"-createTime\")\n paginator = KingPaginator(caseList, 13)\n page = int(request.GET.get(\"page\", 1))\n try:\n pages = paginator.page(page)\n except PageNotAnInteger:\n pages = paginator.page(1)\n except EmptyPage:\n pages = paginator.page(paginator.num_pages)\n return render(request, 'channel/{}Case.html'.format(project), {\"pages\": pages, \"role\": role})\n\n\n# 用例执行详情\ndef details(request, project):\n if not request.session.get(\"username\"):\n return redirect(\"login\")\n role = request.COOKIES.get(\"userRoles\")\n typeName = [\"高中高端网校通\", \"初中高端网校通\"]\n case_list = CaseResult.objects.filter(executionUser=request.session.get(\"username\"), project=project).order_by(\n '-executionTime')\n paginator = KingPaginator(case_list, 13)\n page = int(request.GET.get(\"page\", 1))\n try:\n pages = paginator.page(page)\n except PageNotAnInteger:\n pages = paginator.page(1)\n except EmptyPage:\n pages = paginator.page(paginator.num_pages)\n return render(request, 'channel/{}Detail.html'.format(project),\n {\"pages\": pages, \"role\": role, \"typeName\": typeName})\n\n\n# 记点通道报告\ndef report(request, project):\n if not request.session.get(\"username\"):\n return redirect(\"login\")\n role = request.COOKIES.get(\"userRoles\")\n username = request.session.get(\"username\")\n allCase = CaseResult.objects.filter(executionUser=username, project=project).count()\n failCase = CaseResult.objects.filter(assertResult=False, executionUser=username,\n project=project).count()\n successCase = CaseResult.objects.filter(assertResult=True, executionUser=username,\n project=project).count()\n return render(request, \"channel/{}Report.html\".format(project), {\"all\": allCase,\n \"failed\": failCase,\n \"success\": successCase,\n \"role\": role})\n\n\n'''\n function:研一项目功能***end\n'''\n\n\n# 检查用例编号是否唯一\ndef check(request):\n if request.method == \"POST\":\n get_caseId = 
request.POST.get(\"data[caseId]\")\n\n if len(get_caseId) == 0:\n result = {'result': '用例编号不能为空'}\n return HttpResponse(json.dumps(result))\n try:\n count = TestCase.objects.filter(caseId=get_caseId)\n except Exception as e:\n print(e)\n count = None\n if count:\n result = {'result': '用例编号已存在,请重新输入'}\n return HttpResponse(json.dumps(result))\n else:\n result = {'result': '可以使用'}\n return HttpResponse(json.dumps(result))\n\n\n\"\"\"\n00 \"时间输入不合法!\"\n01 \"存在用例编号、用例名称、下载次数、上传时间、预期结果为空!\"\n02 \"新增用例成功!\"\n03 \"资料下载量输入有误!\"\n04 \"返利金额输入有误!\"\n05 \"资料ID不能为空!\"\n06 \"用例名称不能为空!\"\n07 \"资料上传人不能为空!\"\n08 \"修改用例成功!\"\n09 \"资源ID已用完,请联系管理员再次分配!\"\n\"\"\"\n\n\n# 用例新增、修改操作\ndef caseManner(request):\n user = request.session.get(\"username\")\n if request.method == \"POST\":\n flag = caseOperate(request, user)\n if flag == \"00\":\n return HttpResponse(\"时间输入不合法!\")\n elif flag == \"01\":\n return HttpResponse(\"存在用例编号、用例名称、下载次数、上传时间、预期结果为空!\")\n elif flag == \"02\":\n return HttpResponse(\"新增用例成功!\")\n elif flag == \"03\":\n return HttpResponse(\"资料下载量输入有误!\")\n elif flag == \"04\":\n return HttpResponse(\"返利金额输入有误!\")\n elif flag == \"05\":\n return HttpResponse(\"资料ID不能为空!\")\n elif flag == \"06\":\n return HttpResponse(\"用例名称不能为空!\")\n elif flag == \"07\":\n return HttpResponse(\"资料上传人不能为空!\")\n elif flag == \"08\":\n return HttpResponse(\"修改用例成功!\")\n else:\n return HttpResponse(\"资料softID已用完,请联系管理员分配!\")\n\n\n# 删除用例\ndef deleteCase(req):\n if req.method == \"POST\":\n get_caseId = req.POST.get(\"data[caserid]\")\n try:\n count = TestCase.objects.filter(caseId=get_caseId).delete()\n except Exception as e:\n print(e)\n count = None\n if count:\n result = {'result': '用例删除成功!'}\n return HttpResponse(json.dumps(result))\n else:\n result = {'result': '删除失败,服务器出现异常!'}\n return HttpResponse(json.dumps(result))\n\n\n# 执行用例\ndef startTestCase(request):\n if request.method == \"POST\":\n ids = request.POST.get(\"data[caseId]\")\n project = request.POST.get(\"data[project]\")\n username = request.session.get(\"username\")\n role = request.COOKIES.get(\"userRoles\")\n\n userList = ['testers', 'Administrator']\n if role not in userList:\n result = {'result': '当时用户无权限执行用例!'}\n return HttpResponse(json.dumps(result))\n\n # 开启一个线程去执行用例\n threading.Thread(target=runCase, args=(ids, username, project,)).start()\n\n # 返回信息给前端\n result = {'result': '提交用例执行操作成功!'}\n return HttpResponse(json.dumps(result))\n\n\n# 步骤化执行用例功能\ndef stepTestCase(request):\n if request.method == \"POST\":\n data = request.POST.get(\"data[data]\").split(',')\n caseId = data[0]\n step = data[1]\n print(caseId, step)\n\n role = request.COOKIES.get(\"userRoles\")\n userList = ['testers', 'Administrator']\n if role not in userList:\n result = {'result': '当时用户无权限执行用例!'}\n return HttpResponse(json.dumps(result))\n # 开启一个线程运行\n threading.Thread(target=stepRunCase, args=(step, caseId,)).start()\n\n result = {'result': '用例执行中....'}\n return HttpResponse(json.dumps(result))\n\n\n# 工具页\ndef tools(req):\n return render(req, \"tools.html\")\n\n\n# 抽奖功能\ndef luckydraw(req):\n if req.method == \"POST\":\n userId = req.POST.get(\"userid\")\n number = req.POST.get(\"number\")\n startTime = parse(req.POST.get(\"starttime\"))\n endTime = parse(req.POST.get(\"endtime\"))\n if len(userId) == 0:\n return HttpResponse(\"用户id不能为空!\")\n solids = getSoftid_ordertime(int(number), startTime, endTime)\n for sifted in solids:\n initdownload(sifted[0], userId)\n time.sleep(5)\n for sifted in solids:\n requestdata(userId, sifted[0])\n return HttpResponse(\"抽奖完成!\")\n\n\ndef 
threadtest(req):\n    if req.method == \"POST\":\n        lock = threading.Lock()\n        threads = []  # initialize the thread list\n        number = int(TestSoftId.objects.all().count() / 10)  # get the total count and split it into 10 slices\n        started = TestSoftId.objects.all().first().id  # get the starting id\n        for i in range(10):\n            if i < 9:  # compute the last id of this slice\n                ended = started + number\n            else:  # the last slice takes the final id\n                ended = TestSoftId.objects.all().last().id\n            t = threading.Thread(target=runGetid, args=(lock, started, ended,))  # initialize the thread\n            threads.append(t)  # add the thread to the threads list\n            started += number  # compute the first id of the next slice\n        for j in threads:  # start the threads\n            j.start()\n\n        result = {'result': '多线程执行中....'}\n        return HttpResponse(json.dumps(result))\n\n\ndef threadGetResult(req):\n    if req.method == \"POST\":\n        lock = threading.Lock()\n        threads = []  # initialize the thread list\n        number = int(TestSoftId.objects.all().count() / 10)  # get the total count and split it into 10 slices\n        started = TestSoftId.objects.all().first().id  # get the starting id\n        for i in range(10):\n            if i < 9:  # compute the last id of this slice\n                ended = started + number\n            else:  # the last slice takes the final id\n                ended = TestSoftId.objects.all().last().id\n            t = threading.Thread(target=getResult, args=(lock, started, ended,))  # initialize the thread\n            threads.append(t)  # add the thread to the threads list\n            started += number  # compute the first id of the next slice\n        for j in threads:  # start the threads\n            j.start()\n\n        result = {'result': '多线程执行中....'}\n        return HttpResponse(json.dumps(result))\n","repo_name":"king152/interfaceAutoTest","sub_path":"download/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"20830823330","text":"from utils.git import *\n\ndef check_for_updates():\n    local = local_last_commit_hash()\n    remote = remote_last_commit_hash()\n\n    if (local != remote):\n        short_local = local[:SHORT_COMMIT_HASH_LENGTH]\n        short_remote = remote[:SHORT_COMMIT_HASH_LENGTH]\n\n        print_error(f\"Updates are available! 
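# Standalone sketch of the id-range fan-out pattern used in threadtest() above,
# with a plain function in place of the Django ORM; all names are illustrative.
import threading

def work(lock, start, end):
    with lock:  # serialize access to the shared resource
        print(f"processing ids {start}..{end - 1}")

def fan_out(first_id, last_id, n_threads=10):
    lock = threading.Lock()
    total = last_id - first_id + 1
    chunk = total // n_threads
    threads = []
    start = first_id
    for i in range(n_threads):
        # the last slice absorbs any remainder left by integer division
        end = last_id + 1 if i == n_threads - 1 else start + chunk
        t = threading.Thread(target=work, args=(lock, start, end))
        threads.append(t)
        start += chunk
    for t in threads:
        t.start()
    for t in threads:
        t.join()  # unlike the view above, wait for all workers to finish

fan_out(1, 103)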
Please pull the latest changes and try again\")\n print_error(f\"{short_local} < {short_remote}\")\n exit()\n","repo_name":"martinalebachew/Asunder","sub_path":"scripts/utils/updates.py","file_name":"updates.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"41102421230","text":"from django import forms\nfrom django.core.validators import RegexValidator\nfrom phone_field import PhoneField\nimport phone_field\n\n\nclass ContactForm(forms.Form):\n name = forms.CharField(\n label='Name',\n help_text=\"Enter your name\",\n widget=forms.TextInput(attrs={\n 'placeholder': 'Name',\n 'class': 'form-control',\n 'id': 'name',\n 'type': 'text',\n 'required': \"required\",\n 'data - validation - required - message': \"Please enter your name.\"\n })\n )\n email_address = forms.EmailField(\n label='Email Address',\n help_text=\"Enter your email address\",\n widget=forms.EmailInput(attrs={\n 'placeholder': 'Email Address',\n 'class': 'form-control',\n 'id': 'email',\n 'type': 'email',\n 'required': \"required\",\n 'data - validation - required - message': \"Please enter your email address.\"\n })\n )\n # cell = PhoneField()\n phone_number = forms.CharField(\n label='Contact number',\n validators=[RegexValidator(r'^[0-9]+$', 'Enter a valid phone number.')],\n help_text=\"Enter your contact number\",\n widget=forms.TextInput(attrs={\n 'placeholder': 'Contact Number',\n 'class': 'form-control',\n 'id': 'phone',\n 'type': 'tel',\n 'required': \"required\",\n 'data - validation - required - message': \"Please enter your phone number.\"\n })\n )\n message = forms.CharField(\n label='Message',\n empty_value='Message',\n widget=forms.Textarea(attrs={\n 'placeholder': 'Message',\n 'class': 'form-control',\n 'id': 'message',\n 'rows': '5',\n 'required': \"required\",\n 'data - validation - required - message': \"Please enter a message.\"\n })\n )\n\n\n\n\n\n","repo_name":"kamaun/resume-website","sub_path":"resume/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38611645323","text":"import datetime\nimport requests\nimport json\nfrom bitcoin import ecdsa_verify, ecdsa_recover, ecdsa_sign, pubtoaddr, privtoaddr\n\nfrom .exceptions import InvalidSignature\nfrom .consensus import validate_timestamp\nfrom .network import SEED_NODES\nimport dateutil.parser\n\ndef make_peer_registration(pk, domain):\n timestamp = datetime.datetime.now().isoformat()\n address = privtoaddr(pk)\n to_sign = \"%s%s%s\" % (domain, address, timestamp)\n return {\n 'domain': domain,\n 'payout_address': address,\n 'timestamp': timestamp,\n 'signature': ecdsa_sign(to_sign, pk)\n }\n\ndef validate_peer_registration(reg, now=None):\n ts = dateutil.parser.parse(reg['timestamp'])\n validate_timestamp(ts, now=now)\n\n to_sign = \"{domain}{payout_address}{timestamp}\".format(**reg)\n try:\n pubkey = ecdsa_recover(to_sign, reg['signature'])\n except:\n raise InvalidSignature(\"Can't recover pubkey from signature\")\n\n valid_address = pubtoaddr(pubkey) == reg['payout_address']\n valid_sig = ecdsa_verify(to_sign, reg['signature'], pubkey)\n\n if not valid_sig or not valid_address:\n raise InvalidSignature(\"Invalid Signature\")\n return True\n\ndef get_peerlist():\n \"\"\"\n Tries seed nodes until a peerlist is returned\n \"\"\"\n response = None\n for seed in SEED_NODES:\n url = \"http://%s/staeon/peerlist?top\" % 
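# Roundtrip check of the sign/recover/verify flow used above, with the same
# `bitcoin` (pybitcointools) helpers; the private key is a throwaway value.
from bitcoin import ecdsa_sign, ecdsa_recover, ecdsa_verify, privtoaddr, pubtoaddr

pk = 'a' * 64  # hypothetical 256-bit private key in hex
message = 'example.com' + privtoaddr(pk) + '2020-01-01T00:00:00'

sig = ecdsa_sign(message, pk)        # sign with the private key
pub = ecdsa_recover(message, sig)    # recover the public key from the signature

assert ecdsa_verify(message, sig, pub)
assert pubtoaddr(pub) == privtoaddr(pk)  # same address check as validate_peer_registration
print('signature roundtrip ok')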
seed\n print(url)\n try:\n response = requests.get(url).json()\n except (requests.exceptions.ConnectionError, ValueError) as exc:\n print(exc)\n continue\n break\n\n if not response:\n raise Exception(\"Can't get peerlist\")\n\n return response['peers']\n\n\ndef push_peer_registration(reg, peers=None, verbose=True):\n if not peers: peers = get_peerlist()\n\n for peer in peers:\n domain = peer['domain']\n url = \"http://%s/peerlist\" % domain\n if verbose: print(\"Pushing to: \" + domain)\n try:\n response = requests.post(url, {'registration': json.dumps(reg)})\n except requests.exceptions.ConnectionError as exc:\n print(exc)\n\n if verbose: print(\"...\" + response.content)\n\ndef register_peer(domain, pk, peers=None, verbose=True):\n reg = make_peer_registration(pk, domain)\n push_peer_registration(reg, peers=peers, verbose=verbose)\n","repo_name":"priestc/libstaeon","sub_path":"staeon/peer_registration.py","file_name":"peer_registration.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15479636049","text":"class Solution:\n # @param A, a list of integers\n # @return an integer\n def firstMissingPositive(self, A):\n '''\n Given an unsorted integer array, find the first missing positive integer.\n\nFor example,\nGiven [1,2,0] return 3,\nand [3,4,-1,1] return 2.\n\nYour algorithm should run in O(n) time and uses constant space. \n '''\n if len(A) == 0:\n return 1\n for i in range(len(A)):\n while 0 < A[i] < len(A) and A[i] != i + 1 and A[A[i] - 1] != A[i]:\n j = A[i] - 1\n A[i], A[j] = A[j], A[i]\n i = 0\n while i < len(A):\n if A[i] != i + 1:\n break\n i += 1\n return i + 1\n\na0 = [-10,-3,-100,-1000,-239,1]\na1 = [-1,4,2,1,9,10]\na2 = [1, 1]\na = a2\nprint(a)\nprint(Solution.firstMissingPositive(Solution(), a))","repo_name":"KnightChan/LeetCode-Python","sub_path":"First Missing Positive.py","file_name":"First Missing Positive.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71349917281","text":"import logging\nimport urllib.parse\nimport uuid\n\nimport requests\nfrom click import ClickException\nfrom tldextract import tldextract\n\nfrom nudge_bot.api.congito_helper import Cognito\n\nnudge_url_target = \"https://www.nudgesecurity.io\"\n\n\ndef _transform_app_name(app_name):\n is_domain = _is_domain(app_name)\n if is_domain:\n loc = tldextract.extract(app_name)\n return str(loc.domain)\n return app_name\n\n\ndef _is_domain(app_name):\n return 'http' in app_name\n\n\nclass NudgeClient:\n\n def __init__(self, refresh_token) -> None:\n super().__init__()\n self.fields = None\n self.cognito_config = self.get(\"/api/config/auth\", auth=False)['config']\n self.cognito = Cognito(user_pool_id=self.cognito_config['aws_user_pools_id'],\n user_pool_region=self.cognito_config['aws_project_region'],\n client_id=self.cognito_config['aws_user_pools_web_client_id'],\n refresh_token=refresh_token)\n self.cognito.renew_access_token()\n self.session = requests.session()\n\n def get_bearer_token(self):\n pass\n\n def get(self, url, auth=True):\n response = requests.get(f\"{nudge_url_target}{url}\", headers=self._get_auth_header() if auth else None)\n if response.status_code == 200:\n return response.json()\n elif response.status_code == 401:\n self.cognito.renew_access_token()\n if self.cognito.is_token_expired():\n raise Exception(\"Unable to renew token - need new auth\")\n else:\n return 
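# Standalone restatement of the in-place O(n) cyclic-swap idea above, checked
# against a brute-force oracle on a few inputs (including the docstring cases).
def first_missing_positive(a):
    n = len(a)
    for i in range(n):
        # Keep swapping a[i] into its home slot a[a[i]-1] while that makes progress.
        while 0 < a[i] <= n and a[a[i] - 1] != a[i]:
            j = a[i] - 1
            a[i], a[j] = a[j], a[i]
    for i in range(n):
        if a[i] != i + 1:
            return i + 1
    return n + 1

def oracle(a):
    s, k = set(a), 1
    while k in s:
        k += 1
    return k

for case in ([1, 2, 0], [3, 4, -1, 1], [1, 1], [], [-10, -3, -100, 1]):
    assert first_missing_positive(list(case)) == oracle(case)
print('all cases match')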
self.get(url,auth=auth)\n else:\n logging.debug(response)\n raise Exception(f\"Request failed {url} - {response.status_code}\")\n\n def post(self, api, body):\n response = self.session.post(f\"{nudge_url_target}{api}\", json=body,\n headers=self._get_auth_header(csrf=True))\n if response.status_code == 200:\n return response.json()\n elif response.status_code == 401:\n self.cognito.renew_access_token()\n if self.cognito.is_token_expired():\n raise Exception(\"Unable to renew token - need new auth\")\n else:\n return self.post(api,body)\n else:\n raise ClickException(f\"Error with post {api} {response.json()}\")\n\n def put(self, api, body):\n response = self.session.put(f\"{nudge_url_target}{api}\", json=body,\n headers=self._get_auth_header(csrf=True))\n if response.status_code == 200:\n return response.json()\n else:\n raise ClickException(f\"Error with put {api} {response.json()}\")\n\n def _get_auth_header(self, csrf=False):\n if self.cognito.is_token_expired():\n self.cognito.renew_access_token()\n bearer_token = self.cognito.access_token\n headers = {\"authorization\": f\"Bearer {bearer_token}\"}\n if csrf:\n token = self._get_csrf_token()\n if token:\n headers['x-csrftoken'] = token\n return headers\n\n def list_fields(self):\n if not self.fields:\n response_json = self.get(f\"/api/fields/\")\n self.fields = response_json['fields']\n return self.fields\n\n def get_ids_for_field_and_value(self, field: str, value=None):\n field_id = None\n value_id = None\n field_list = self.list_fields()\n for field_def in field_list:\n if field.lower() == field_def['name'].lower():\n field_id = field_def['id']\n for value_def in field_def['allowed_values']:\n if value and value.lower() == value_def['value'].lower():\n value_id = value_def['id']\n if not field_id or (not value_id and value):\n raise ClickException(f\"Can not locate field and value {field} - {value}\")\n return field_id, value_id\n\n def _get_csrf_token(self):\n token = self._get_csrf_token_from_cookies()\n if not token:\n # run a get request in case this is our first call and there is no token\n self.session.get(f\"{nudge_url_target}/api/fields/\", headers=self._get_auth_header())\n token = self._get_csrf_token_from_cookies()\n return token\n\n def _get_csrf_token_from_cookies(self):\n return next((cookie.value for cookie in self.session.cookies if cookie.name == \"_csrf_token\"), None)\n\n def set_app_field(self, entity, field_id, value_id):\n body = {\n \"value\": str(value_id)\n }\n api = f\"/api/fields/{field_id}/saas/{entity}\"\n self.post(api, body)\n return True\n\n\n\n def find_app_by_field(self, field_name=None, field_value=None, page=None):\n search = {\"search\": [],\n \"filters\": [], \"page\": 1 if not page else page, \"per_page\": 500, \"sort\": \"account_count\", \"sort_dir\": \"desc\"}\n for field, value in zip(field_name, field_value):\n if value == 'None':\n op = \"withNoField\"\n field_id, value_id =self.get_ids_for_field_and_value(field, None)\n constraint = f\"{field_id}\"\n else:\n field_id, value_id = self.get_ids_for_field_and_value(field, value)\n op = \"withFieldAndValue\"\n constraint = f\"{field_id}###{value_id}\"\n search['search'].append({\"field\": \"fields\", \"op\": op, \"value\": constraint})\n response = self.post(\"/api/analysis/app/search\", search)\n ret = response['values']\n if response['next_page']:\n ret.extend(self.find_app_by_field(field_name,field_value,response['next_page']))\n return ret\n\n def find_app(self, app_name, page=None, exact=False):\n is_domain = _is_domain(app_name)\n 
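# Generic restatement of the renew-on-401 pattern in NudgeClient.get/post,
# written against a hypothetical renew() callback; bounding the retries avoids
# the unbounded recursion that a bare self-call can produce.
import requests

def get_with_refresh(url, token, renew, retries=1):
    for attempt in range(retries + 1):
        response = requests.get(url, headers={'authorization': f'Bearer {token}'})
        if response.status_code != 401 or attempt == retries:
            response.raise_for_status()
            return response.json()
        token = renew()  # on 401: refresh the access token, then try again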
app_name = _transform_app_name(app_name)\n # {\"search\":[{\"field\":\"service_info.name\",\"op\":\"ilike\",\"value\":\"%Zoom%\"},{\"field\":\"service_info.category.name\",\"op\":\"ilike\",\"value\":\"%Zoom%\"}],\"filters\":[],\"page\":1,\"per_page\":50,\"sort\":\"account_count\",\"sort_dir\":\"desc\"}\n if exact:\n op = \"=\"\n val=f\"{app_name}\"\n else:\n op = \"ilike\"\n val=f\"%{app_name}%\"\n search = {\"search\": [],\n \"filters\": [], \"page\": 1 if not page else page, \"per_page\": 50, \"sort\": \"account_count\", \"sort_dir\": \"desc\"}\n if is_domain:\n search['search'].append({\"field\": \"service_info.service_canonical_domain\", \"op\": op, \"value\": val})\n else:\n search['search'].append({\"field\": \"service_info.name\", \"op\": op, \"value\": val})\n search['search'].append({\"field\": \"name\", \"op\": op, \"value\": val})\n response = self.post(\"/api/analysis/app/search\", search)\n ret = response['values']\n if response['next_page']:\n ret.extend(self.find_app(app_name,response['next_page']))\n return ret\n\n def find_field(self, field_name, field_identifier=None):\n fields = self.list_fields()\n for field_def in fields:\n if field_identifier and field_def['identifier'] == field_identifier:\n return field_def\n if field_def['name'] == field_name:\n return field_def\n return None\n\n def create_field(self, field_name, field_type, allowed_values, field_scope):\n body = {\n \"name\": field_name,\n \"field_type\": field_type.upper(),\n \"scopes\": [field_s.upper() for field_s in field_scope]\n }\n if allowed_values:\n body['allowed_values'] = [{'identifier': str(uuid.uuid4()), \"value\": value} for value in allowed_values]\n self.post('/api/fields/', body=body)\n\n def update_field(self, field_identifier, field_name, allowed_values, field_scope):\n body = {\n \"identifier\":field_identifier,\n }\n if field_name:\n body['name'] = field_name\n if field_scope and len(field_scope) >0:\n body['scopes'] = [field_s.upper() for field_s in field_scope]\n if allowed_values:\n body['allowed_values'] = [{'identifier': str(uuid.uuid4()), \"value\": value} for value in allowed_values]\n self.put('/api/fields/', body=body)\n\n def get_supply_chain(self, canonical_domain):\n return self.get(f'/api/service/vendors/{canonical_domain}')['vendors']\n\n def get_service_info(self, canonical_domain):\n return self.get(f'/api/service/details/{canonical_domain}')\n","repo_name":"Nudge-Security/nudge-bot","sub_path":"src/main/python/nudge_bot/api/nudge.py","file_name":"nudge.py","file_ext":"py","file_size_in_byte":8531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26022582638","text":"\"\"\"\nNode is defined as\nself.left (the left child of the node)\nself.right (the right child of the node)\nself.data (the value of the node)\n\"\"\"\n# expeceted output is to print in a single line\ndef preOrder(root):\n if not root :\n return\n print(root.data),\n preOrder(root.left)\n preOrder(root.right)\n return\n","repo_name":"codingkohli/Algorithms","sub_path":"Binary Tree/preOrderTraversal.py","file_name":"preOrderTraversal.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74443023521","text":"from dateutil.parser import parse as dparse\nimport pytz\nimport re\nfrom jsonpath_ng.ext import parse\nfrom functools import reduce\n\n\ndef kinds_count(kinds_dict):\n return sum(len(values) for values in kinds_dict.values())\n\n\ndef 
openactive_item_mapper(**kwargs):\n r = kwargs.get('item')\n mappings = kwargs.get('mappings')\n # These are both used in f strings so do not remove\n org = kwargs.get('org')\n license = kwargs.get('license', 'Not found')\n kind = r.get('kind', 'errors')\n record = {}\n\n try:\n kind = kind.lower() # to cope with Event, event etc...\n # to cope with scheduledsession.sessionseries or facilityuse/slot etc...\n kind = re.split(r'[^a-zA-Z]', kind)[0]\n except Exception as error:\n kind = 'errors'\n\n # Only deal with kinds that are registered in mappings\n if kind not in mappings:\n return None, None\n\n state = r.get('state')\n\n # No point mapping deleted records\n if state == 'deleted':\n return kind, {'oa_org': org, 'oa_id': r.get('id'), 'published': False, 'state': 'deleted',\n 'modified': int(r.get('modified')), 'rawdata': r}\n\n # Map incoming data to our schema\n\n model_map = mappings[kind]\n if model_map:\n\n for key, value in model_map.items():\n\n if key == 'model':\n continue\n\n # Enter json packet stored for debug\n if key == 'rawdata' and value == '*':\n # Add the url we got this data from if passed in\n r['sourceurl'] = kwargs.get('url', '')\n record[key] = r\n continue\n\n # 'oa_org' = 'f{{variable}}' or oa_org = 'fixed value'\n if isinstance(value, str):\n record[key] = eval(f\"f'{value}'\")\n\n # 'title': {'paths': ['title', 'data.title'], 'default' : ''}\n elif isinstance(value, dict) and 'paths' in value:\n val = None\n for path in value['paths']:\n try:\n # jsonpath\n if path[0] == '$':\n jp = parse(path)\n val = [match.value for match in jp.find(r)]\n else:\n # standard key1.key2.key3 method\n keys = path.split('.')\n val = reduce(lambda acc, i: acc[i], keys, r)\n if val:\n break\n except (KeyError, TypeError):\n pass\n\n # Carry out any type conversions\n if 'type' in value:\n record[key] = cnv(val, value['type'])\n else:\n record[key] = val\n\n # Set defaults as last resort\n if record[key] is None and 'default' in value:\n record[key] = value['default']\n\n # Fixed numerics\n elif isinstance(value, int) or isinstance(value, float):\n record[key] = value\n\n # Nothing found\n else:\n record[key] = None\n\n return kind, record\n\n\ndef cnv(item, type):\n type_map = {\n 'int': int,\n 'float': float,\n 'str': str,\n }\n\n # Boolean checks can work on None values\n if type == 'exists':\n if item:\n return True\n return False\n\n # No point doing anything if there's no data\n if item is None:\n return None\n\n # Force a specific type\n if type in type_map:\n try:\n return type_map[type](item)\n except ValueError:\n return None\n except TypeError:\n return None\n\n # Convert to an array\n elif type == 'array':\n if isinstance(item, list):\n return item\n elif isinstance(item, (str, int, float)):\n return [item]\n else:\n return []\n\n # Pick the first item in an array and return as a value\n elif type == 'array_first':\n if isinstance(item, list):\n return item[0]\n\n # Convert date/time strings to a date, if many are returned then choose the first\n elif type == 'datetime' or type == 'time':\n if isinstance(item, (str, list)):\n item = item[0] if isinstance(item, list) and len(item) > 0 else None if isinstance(item, list) else item\n\n if item:\n try:\n parsed_date = dparse(item)\n\n if parsed_date.tzinfo is None:\n parsed_date = parsed_date.replace(tzinfo=pytz.UTC)\n\n if type == 'time':\n return parsed_date.time()\n\n return parsed_date\n\n except ValueError:\n pass\n\n return None\n\n\ndef insert_blocks(model_map, reusable_blocks):\n for model, fields in 
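# The two nested-lookup strategies used by openactive_item_mapper, side by side
# on a toy record; jsonpath_ng is assumed installed, as in the module above.
from functools import reduce
from jsonpath_ng.ext import parse

record = {'data': {'title': 'Morning Yoga', 'ages': [{'min': 18}, {'min': 16}]}}

# 1) Plain dotted-key walk, as in the non-'$' branch:
keys = 'data.title'.split('.')
print(reduce(lambda acc, k: acc[k], keys, record))   # Morning Yoga

# 2) JSONPath for anything fancier, as in the '$' branch:
jp = parse('$.data.ages[*].min')
print([m.value for m in jp.find(record)])            # [18, 16]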
model_map.items():\n for block_key in reusable_blocks:\n if block_key in fields:\n block = reusable_blocks[block_key]\n fields.update(block)\n del fields[block_key]\n","repo_name":"citizenfish/prototyping","sub_path":"openactivedj/openactive/common/util/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70884903523","text":"import unittest\nfrom io import StringIO\nfrom textwrap import dedent\n\nimport pandas as pd\nfrom pandas.api.types import is_object_dtype, is_datetime64_any_dtype, is_numeric_dtype\nfrom pandas.testing import assert_frame_equal\n\nfrom gurobi_optimods.workforce import solve_workforce_scheduling\nfrom gurobi_optimods.datasets import load_workforce\n\n\ndef read_csv(text):\n return pd.read_csv(StringIO(dedent(text)))\n\n\nclass TestWorkforceScheduling(unittest.TestCase):\n def test_dataset(self):\n data = load_workforce()\n self.assertEqual(set(data.keys()), {\"preferences\", \"shift_requirements\"})\n\n self.assertEqual(\n set(data.preferences.columns), {\"Worker\", \"Shift\", \"Preference\"}\n )\n self.assertTrue(is_object_dtype(data.preferences[\"Worker\"]))\n self.assertTrue(is_numeric_dtype(data.preferences[\"Preference\"]))\n self.assertTrue(is_datetime64_any_dtype(data.preferences[\"Shift\"]))\n\n self.assertEqual(set(data.shift_requirements.columns), {\"Shift\", \"Required\"})\n self.assertTrue(is_datetime64_any_dtype(data.shift_requirements[\"Shift\"]))\n self.assertTrue(is_numeric_dtype(data.shift_requirements[\"Required\"]))\n\n def test_no_option(self):\n # Simple example where there is only one way to cover requirements\n availability = read_csv(\n \"\"\"\n Worker,Shift,Preference\n Bob,2022-07-02,1.0\n Alice,2022-07-03,1.0\n \"\"\"\n )\n shift_requirements = read_csv(\n \"\"\"\n Shift,Required\n 2022-07-02,1\n 2022-07-03,1\n \"\"\"\n )\n\n assignments = solve_workforce_scheduling(\n availability=availability, shift_requirements=shift_requirements\n )\n\n self.assertIsInstance(assignments, pd.DataFrame)\n self.assertIsNot(assignments, availability)\n assert_frame_equal(assignments, availability)\n\n def test_preferences(self):\n # Choose an assignment which maximises preferences\n availability = read_csv(\n \"\"\"\n Worker,Shift,Preference\n Alice,2022-07-02,1.0\n Alice,2022-07-03,2.0\n Bob,2022-07-02,2.0\n Bob,2022-07-03,1.0\n \"\"\"\n )\n shift_requirements = read_csv(\n \"\"\"\n Shift,Required\n 2022-07-02,1\n 2022-07-03,1\n \"\"\"\n )\n\n assignments = solve_workforce_scheduling(\n availability=availability, shift_requirements=shift_requirements\n )\n\n expected = read_csv(\n \"\"\"\n Worker,Shift,Preference\n Alice,2022-07-03,2.0\n Bob,2022-07-02,2.0\n \"\"\"\n )\n self.assertIsInstance(assignments, pd.DataFrame)\n self.assertIsNot(assignments, availability)\n assert_frame_equal(\n assignments.sort_values([\"Worker\", \"Shift\"]).reset_index(drop=True),\n expected,\n )\n","repo_name":"stevedwards/gurobi-optimods-broken","sub_path":"tests/test_workforce.py","file_name":"test_workforce.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73164174243","text":"import pyvista as pv\nimport numpy as np\n\n# Parametric equations for a trefoil knot\n\n\ndef trefoil_knot(theta):\n x = np.sin(theta) + 2 * np.sin(2 * theta)\n y = np.cos(theta) - 2 * np.cos(2 * theta)\n z = -np.sin(3 * theta)\n return x, y, z\n\n\n# Generate points for 
the trefoil knot\ntheta_values = np.linspace(0, 2 * np.pi, 1000)\npoints = np.array([trefoil_knot(theta) for theta in theta_values])\n\n\ndef polyline_from_points(points):\n poly = pv.PolyData()\n poly.points = points\n the_cell = np.arange(0, len(points), dtype=np.int_)\n the_cell = np.insert(the_cell, 0, len(points))\n poly.lines = the_cell\n return poly\n\npolyline = polyline_from_points(points)\npolyline[\"scalars\"] = np.arange(polyline.n_points)\ntube = polyline.tube(radius=0.5)\ntube.plot(smooth_shading=True)\n","repo_name":"martinlejko/pyvista-game-of-life","sub_path":"shape_renderer.py","file_name":"shape_renderer.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22734947904","text":"'''Crie um programa que tenha uma FUNÇÃO chamada VOTO() que vai receber como parametro de uma pessoa nascimento\nde uma pessoa, retornando um valor literal indicado se uma pessoa tem voto NEATIVO, OPCIONAL ou OBRIGATÓRIO nas eleições'''\n\n\ndef voto(ano):\n from datetime import date\n\n print('-'*30)\n data_atual = date.today().year\n idade = data_atual - ano\n\n if idade < 16:\n return f'Com {idade} anos: NÃO VOTA.'\n elif 18 <= idade < 60:\n return f'Com {idade} anos: VOTO OBRIGATÓRIO.'\n elif 16 <= idade < 18 or idade >= 60:\n return f'Com {idade} anos: VOTO OPCIONAL.'\n\n\n# Programa Principal\nnasc = int(input('Em que ano você nasceu? '))\nprint(voto(nasc))\n","repo_name":"Murilo831/Aulas-python-modulo-3","sub_path":"aula_21/ex101.py","file_name":"ex101.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74282488483","text":"import time\n\nprint(\"Deixar a letra em minusculo.\\n\")\ntime.sleep(2)\n\nselecionar = \"s\"\n\nwhile(selecionar == \"s\"):\n\n text = input(\"Digite algo para teste: \").lower() #Usando a função lower no python, para deixar as letras minusculas.\n print(text)\n\n time.sleep(1)\n selecionar = input(\"\\nDeseja continuar ? 
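# Alternative to the manual polyline built above: PyVista ships a Spline helper
# that constructs the connectivity for you (same trefoil points assumed).
import numpy as np
import pyvista as pv

theta = np.linspace(0, 2 * np.pi, 1000)
points = np.column_stack((
    np.sin(theta) + 2 * np.sin(2 * theta),
    np.cos(theta) - 2 * np.cos(2 * theta),
    -np.sin(3 * theta),
))

spline = pv.Spline(points, 1000)  # interpolating spline through the points
tube = spline.tube(radius=0.5)
tube.plot(smooth_shading=True)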
(S / N) \").lower()\n","repo_name":"jeangsilva/atv_programacao","sub_path":"Módulo 4/man string/minuscula.py","file_name":"minuscula.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6711172167","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport numpy as np\nnp.set_printoptions(linewidth=np.inf)\nnp.set_printoptions(precision=7)\nimport matplotlib.pyplot as plt\nimport math \nimport pandas as pd\nfrom numpy import linalg as LA\nfrom sympy import * \n\n\n# In[2]:\n\n\n# Scale function\ndef scale(vec_1):\n \n # Test\n v1_scale = np.square(np.linalg.norm(vec_1))\n# print(v1_scale)\n\n v1 = np.multiply(1/np.sqrt(v1_scale + 1e-15),vec_1)\n# print(v1)\n \n return list(v1)\n \n \ndef QR(span):\n \n # Setup arrays\n # Original Values\n vector = []\n # Normalized Values\n vec_norm = []\n\n # Size of array\n row,col = span.shape\n \n # First Column\n v1 = span[:,0]\n vec_1 = v1\n vector.append(vec_1)\n\n v1 = scale(vec_1)\n vec_norm.append(v1)\n \n # Subsequent Columns\n\n start = 1\n end = col\n\n for j in range(start,end):\n\n # Column\n projection = span[:,j]\n v = span[:,j]\n\n #Orthonormal vector\n for i in range(len(vec_norm)):\n #projection\n # dot\n proj_dot = np.dot(v,vec_norm[i])/(np.dot(vec_norm[i],vec_norm[i]) + 1e-15)\n proj = np.multiply(proj_dot,vec_norm[i])\n projection = projection - proj\n\n v_norm = scale(projection)\n\n vec_norm.append(v_norm)\n \n # Calculate R\n Q = vec_norm\n Q = np.array(Q)\n R = np.dot(Q,span)\n R = np.array(R)\n\n \n return Q.T,R\n\ndef eigenspace(span):\n \n eig_vec = np.eye(span.shape[0])\n X = span.copy()\n\n for _ in range(100):\n Q,R = QR(X)\n eig_vec = np.dot(eig_vec,Q)\n X = np.dot(R,Q)\n eig_val = np.diag(X)\n \n return eig_val,eig_vec, X\n\n\n# In[3]:\n\n\ndef SVD(A):\n \n # Copy of array\n B = A.copy()\n \n # Size of array\n row,col = A.shape\n \n A1 = A.dot(A.T)\n print(A1)\n A2 = np.dot(A.T,A)\n print(A2)\n \n if row < col:\n # Eigenvalues\n eig_val_A1,eig_vec_A1,x_A1 = eigenspace(A1)\n \n # U: left Singular Array\n U = eig_vec_A1\n\n # Sigma: Middle Singular Array\n # Initialize Sigma array\n Sigma = np.zeros((row,col))\n # Initialize Eigenvalues array\n Eig = eig_val_A1*np.eye(len(eig_val_A1))\n\n # Create Sigma\n if row < col:\n sig = row\n elif row > col:\n sig = col\n Sigma[0:sig,0:sig] = Eig[0:sig,0:sig]\n Sigma = np.sqrt(Sigma)\n\n # Solve for V: Right Singular Array\n # Initialize V\n V = np.zeros((col,col))\n\n for i in range(len(U)):\n\n u1 = A.T.dot(U[:,i])\n # Normalize\n u1 = u1/(np.linalg.norm(u1) + 1e-20)\n\n # Update V array\n V[i,:] = u1\n\n # Determine V\n V = A.T.dot(U).dot(Sigma)\n\n for i in range(row):\n V[:,i] = V[:,i]/(np.linalg.norm(V[:,i]) + 1e-20)\n \n else:\n # Eigenvalues\n eig_val_A1,eig_vec_A1,x_A1 = eigenspace(A1)\n eig_val_A2,eig_vec_A2,x_A2 = eigenspace(A2)\n \n # Right Singular Vector\n V = eig_vec_A2\n \n # Sigma\n # Initialize Sigma array\n Sigma = np.zeros((row,col))\n# print(Sigma)\n # Initialize Eigenvalues array\n Eig = eig_val_A2*np.eye(len(eig_val_A2))\n# print(Eig)\n\n # Create Sigma\n if row < col:\n sig = row\n else:\n sig = col\n Sigma[0:sig,0:sig] = Eig[0:sig,0:sig]\n Sigma = np.sqrt(Sigma)\n# print(Sigma)\n \n # Solve for U\n # Initialize U\n # U = np.eye((row))\n U = np.zeros((row,row))\n# print(U)\n\n eig = eig_val_A2\n for i in range(len(eig)): \n\n u1 = (1/(np.sqrt(eig[i]) + 1e-20))*A.dot(V[:,i])\n# print(u1)\n\n u1 = 
u1/(np.linalg.norm(u1) + 1e-20)\n# print(u1)\n\n U[:,i] = u1\n \n \n \n return U,Sigma,V\n \n\n\n# In[4]:\n\n\n# A = np.array([[1,1],[1,1],[1,-1]])\n# A = np.array([[2,4],[1,3],[0,0],[0,0]])\n# A = np.array([[-5,1,-2,5],[0,-2,4,8]])\n\nrow = 5\ncol = 6\nA = np.random.randint(-10,10,size=(row,col))\n\nrow,col = A.shape\n\nprint(A)\n\nB = A.copy()\n\n\n# In[5]:\n\n\nU,Sigma,V = SVD(B)\nprint(U)\nprint(Sigma)\nprint(V)\n\n\n# In[6]:\n\n\nprint(B)\nA = U.dot(Sigma).dot(V.T)\nprint(A)\n\n# Check for identity matrices\nU_test = U.T.dot(U)\nprint(U_test)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"DizzleMoon/Example-Codes-Edit-V2","sub_path":"Eigen/test_SVD_v9.py","file_name":"test_SVD_v9.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"182300069","text":"import csv\nfrom PIL import Image,ImageFont,ImageDraw\n\ndef application2(str):\n list=[]\n with open('大学内专业评级(正式).csv', 'r',encoding='utf-8') as f:\n reader = csv.reader(f)\n #print(type(reader))\n for row in reader:\n if(row[1]==str):\n list.append([row[2]+' '+row[3]])\n print(list)\n text = \"'字数长度限制(字数长度限制AAA) D'\"\n font = ImageFont.truetype(\"font.ttf\", 18)\n lines = list\n line_height = font.getsize(text)[1]\n img_height = line_height * (lines.__len__()+1)\n im = Image.new(\"RGB\", (600, img_height), (255, 255, 255))\n dr = ImageDraw.Draw(im)\n x, y = 5, 5\n for line in lines:\n dr.text((x, y), line[0], font=font, fill=\"#000000\")\n y += line_height\n im.save(\"majorRank.jpg\")\n\n#测试\n#application2('兰州大学')\n","repo_name":"destinyvoilet/REC2020","sub_path":"aiB/app2_use.py","file_name":"app2_use.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18259917828","text":"\"\"\"Sequence model function test case.\"\"\"\n\nimport os\nimport os.path as path\n\nimport numpy as np\nimport emloop as el\nimport pytest\n\nfrom emloop.constants import EL_CONFIG_FILE\nfrom emloop.datasets import StreamWrapper\nfrom emloop.models.sequence import Sequence\n\n\n_STEP1_CONFIG = \"\"\"\nmodel:\n name: step1\n class: emloop.tests.models.sequence_test.Step1\n inputs: [images]\n outputs: [masks]\n\n\"\"\"\n\n\n_STEP2_CONFIG = \"\"\"\nmodel:\n name: step2\n class: emloop.tests.models.sequence_test.Step2\n inputs: [images, masks]\n outputs: [classes]\n\n\"\"\"\n\n\n_STEP3_CONFIG = \"\"\"\nmodel:\n name: step3\n class: emloop.tests.models.sequence_test.Step3\n inputs: [masks, classes]\n outputs: [results]\n\n\"\"\"\n\n_IMAGES = [[2.], [2.], [2.]]\n\n\nclass Step1:\n def __init__(self, dataset, **_):\n self.dataset = dataset\n\n @property\n def input_names(self):\n return ['images']\n\n @property\n def output_names(self):\n return ['masks']\n\n def run(self, batch: el.Batch, train: bool, stream: StreamWrapper) -> el.Batch:\n return {'masks': np.zeros_like(batch['images'])}\n\n\nclass Step2:\n def __init__(self, **_):\n pass\n\n @property\n def input_names(self):\n return ['images', 'masks']\n\n @property\n def output_names(self):\n return ['classes']\n\n def run(self, batch: el.Batch, train: bool, stream: StreamWrapper) -> el.Batch:\n assert 'images' in batch\n assert 'masks' in batch\n return {'classes': np.ones(np.array(batch['images']).shape[0])}\n\n\nclass Step3:\n def __init__(self, **_):\n pass\n\n @property\n def input_names(self):\n return ['masks', 'classes']\n\n @property\n def output_names(self):\n return ['results']\n\n def run(self, batch: el.Batch, 
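# Sanity check one would typically run next to a hand-rolled SVD like the one
# above: compare the reconstruction against numpy's reference implementation.
import numpy as np

rng = np.random.default_rng(0)
A = rng.integers(-10, 10, size=(5, 6)).astype(float)

U, s, Vt = np.linalg.svd(A, full_matrices=True)
Sigma = np.zeros(A.shape)
Sigma[:len(s), :len(s)] = np.diag(s)  # singular values on the diagonal

assert np.allclose(U @ Sigma @ Vt, A)  # exact reconstruction
print('max reconstruction error:', np.abs(U @ Sigma @ Vt - A).max())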
train: bool, stream: StreamWrapper) -> el.Batch:\n return {'results': np.ones(np.array(batch['masks']).shape[0])}\n\n\n@pytest.fixture\ndef create_models(tmpdir):\n\n def _create_models():\n \"\"\"Create two step models in the tmp dir.\"\"\"\n for name, config in zip(['step1', 'step2', 'step3'], [_STEP1_CONFIG, _STEP2_CONFIG, _STEP3_CONFIG]):\n model_dir = path.join(tmpdir, name)\n os.mkdir(model_dir)\n with open(path.join(model_dir, EL_CONFIG_FILE), 'w') as config_file:\n config_file.write(config)\n\n return _create_models\n\n\ndef test_init(create_models, tmpdir):\n \"\"\"Test if Sequence model ``__init__`` works properly\"\"\"\n create_models()\n sequence = Sequence(models_root=tmpdir, model_paths=['step1', 'step2'])\n assert sequence._models is None\n\n # test eager loading\n sequence2 = Sequence(models_root=tmpdir, model_paths=['step1', 'step2'], eager_loading=True)\n assert sequence2._models is not None\n\n assert sequence2.input_names == ['images']\n assert list(sequence2.output_names) == ['masks', 'classes']\n\n\ndef test_run(create_models, tmpdir):\n \"\"\"Test if Sequence model accumulates the outputs properly.\"\"\"\n create_models()\n sequence = Sequence(models_root=tmpdir, model_paths=['step1', 'step2', 'step3'], dataset='my_dataset')\n\n # outputs accumulating\n output = sequence.run({'images': _IMAGES}, False, None)\n assert 'masks' in output\n assert 'classes' in output\n assert 'results' in output\n np.testing.assert_array_equal(np.zeros_like(_IMAGES), output['masks'])\n np.testing.assert_array_equal(np.ones((3,)), output['classes'])\n np.testing.assert_array_equal(np.ones((3,)), output['results'])\n\n # test dataset is propagated\n assert 'my_dataset' == sequence._models[0].dataset\n\n\ndef test_raising(create_models, tmpdir):\n \"\"\"Test if Sequence model raises the exceptions as expected.\"\"\"\n create_models()\n sequence = Sequence(models_root=tmpdir, model_paths=['step1', 'step2'])\n\n with pytest.raises(ValueError):\n sequence.run(None, True, None)\n with pytest.raises(NotImplementedError):\n sequence.save()\n\n # test ValueError raised if models don't follow up correctly\n sequence2 = Sequence(models_root=tmpdir, model_paths=['step1', 'step3'])\n with pytest.raises(ValueError):\n sequence2.run({'images': _IMAGES}, False, None)\n","repo_name":"iterait/emloop","sub_path":"emloop/tests/models/sequence_test.py","file_name":"sequence_test.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"12469464182","text":"import datetime\nimport logging\nimport sys\nimport json\nimport shutil\nimport schedule\nimport time\nimport os\n\ndef service(config):\n\n logging.basicConfig(level=logging.DEBUG)\n logger = logging.getLogger(\"MAIN\")\n\n \n #DATE\n today = datetime.date.today()\n delta = datetime.timedelta(1)\n yesterday = today - delta\n date_str = yesterday.strftime(\"%Y%m%d\")\n\n #DOWNLOAD\n topic = 'ztf_%s_programid1' % (date_str)\n group = '%s_%s' %( config['group'], date_str )\n output_path = os.path.join(config['working_dir'],topic)\n avro_directory = output_path\n command = 'python3 /app/scripts/download_topic.py %s %s %s %s' % ( \n config['kafka_server'],\n topic,\n output_path,\n group\n )\n logger.info(\"DOWNLOADING topic %s from %s\" % (topic,config['kafka_server']) )\n os.system(command)\n\n #CONCAT\n input_path = output_path\n output_path = os.path.join(config['working_dir'],'%s_concatened' % (topic) )\n concat_directory = output_path\n command = 'python3 
/app/scripts/concat_avros.py %s %s' % (input_path, output_path)\n logger.info(\"CONCATENATING topic %s\" %(topic) )\n os.system(command)\n\n #UPLOAD\n input_path = output_path\n output_path = os.path.join(config['output_bucket'],topic)\n command = 'python3 /app/scripts/upload_to_s3.py %s %s' % (input_path,output_path)\n logger.info(\"UPLOADING topic %s\" %(topic) )\n os.system(command)\n\n #REMOVE \n logger.info(\"REMOVING topic directories %s\" %(topic) )\n shutil.rmtree(avro_directory)\n shutil.rmtree(concat_directory)\n\n#READ CONFIG\ninfile = open('/app/config.json','r')\nconfig = json.load(infile)\ninfile.close()\n\nschedule.every().day.at(config['start_time']).do(service,config=config)\nwhile True:\n schedule.run_pending()\n time.sleep(3600)\n","repo_name":"alercebroker/save_kafka_topic","sub_path":"scripts/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72958594723","text":"#!/usr/bin/env python3\n#--------------------------------------------------------------------------\n# Program: Exercise 1.2.21 - Continuously Compounded Interest\n# Programmer: Joseph Cunningham\n# Project: CsC 15 - Python\n# Date: March 14, 2021\n#\n# This program will take 3 command line arguments:\n#\n# t - number of years - float\n# p - principle of the loan - float\n# r - annual interest rates - float\n#\n# It will then calculate the future value of the load.\n#--------------------------------------------------------------------------\n\nimport stdio, sys, math\n\nt = float(sys.argv[1])\np = float(sys.argv[2])\nr = float(sys.argv[3])\n\nfV = p * math.exp(r * t)\n\nstdio.writeln('$' + str(fV))\n","repo_name":"Gnasch1972/CsC20","sub_path":"Chapter_01/Section01_02/Exercises/Exercise01_02_21.py","file_name":"Exercise01_02_21.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22601136537","text":"class test:\n def __init__(self):\n self.first = 0\n self.second = 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n num = self.first + self.second\n self.first = self.second\n self.second = num\n return num\n\n\nclass Alphabet:\n def __init__(self):\n self.letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n self.index = -1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= len(self.letters) - 1:\n raise StopIteration\n self.index += 1\n return self.letters[self.index]\n\n\ndef initial():\n samp_string = iter(\"Sample\")\n print(\"Char :\", next(samp_string))\n print(\"Char :\", next(samp_string))\n print(\"Char :\", next(samp_string))\n print(\"Char :\", next(samp_string))\n alpha = Alphabet()\n for letter in alpha:\n print(letter, end=\" \")\n print()\n return\n\n\nif __name__ == \"__main__\":\n # initial()\n fib = test()\n for i in range(10):\n print(\"FIB :\", next(fib))\n","repo_name":"sanyamcodes26/Efforts","sub_path":"DerekBanas_UDEMY/Section_26.py","file_name":"Section_26.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37168339275","text":"# Analisando Triângulos v2.0\nn1 = float(input('Primeiro segmento: '))\nn2 = float(input('Segundo segmento: '))\nn3 = float(input('Terceiro segmento: '))\nif n1 <(n2 + n3) and n2 < (n1 + n3) and n3 < (n1 + n2):\n if n1 == n2 == n3:\n print('Triângulo equilátero')\n elif n1 == n2 or n1 == n3 or 
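# Numeric sanity check for the formula in the interest exercise above: the
# continuously compounded value p*e^(r*t) is the limit of discrete compounding
# as the number of periods per year n grows.
import math

t, p, r = 10.0, 1000.0, 0.05

continuous = p * math.exp(r * t)
for n in (1, 12, 365, 100000):
    discrete = p * (1 + r / n) ** (n * t)
    print(f'n={n:>6}: {discrete:.4f} (continuous: {continuous:.4f})')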
n2 == n3:\n print('Triangulo isóceles')\n else:\n print('Triangulo escaleno')\nelse:\n print('{:.1f}, {:.1f} e {:.1f} nao podem formar triangulo'.format(n1, n2, n3))\n","repo_name":"myller-silva/Python","sub_path":"Exercicios-CURSOEMVIDEO/ex042.py","file_name":"ex042.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37781035320","text":"\"\"\"\nLimited implementation of reading yaml files. Should be replaced when such\nlibrary is available in build-in Python source.\n\"\"\"\n\n\ndef load(file):\n with open(file) as yaml_lines:\n stack: list = []\n for raw_line in yaml_lines:\n line = YamlLine(raw_line)\n if line.is_blank or line.is_comment:\n continue\n while len(stack) > line.indent_level:\n stack.pop()\n current = stack[-1] if stack else ...\n\n if line.is_list_value:\n\n # init root list\n if not stack:\n data = []\n stack.append(data)\n current = data\n\n # replace None dict value with a list\n if current is None: # all dicts can keep only dicts for now\n new_list = []\n for key in stack[-2].keys(): # should have only one key\n stack[-2][key] = new_list\n break\n stack[-1] = new_list\n current = new_list\n\n if line.is_dict_value: # it is list item and new dictionary\n if not isinstance(current, list):\n raise TypeError(\"A list was expected here\")\n new_dict = {line.key: line.dict_value}\n current.append(new_dict)\n stack.append(new_dict)\n stack.append(line.dict_value)\n\n else: # new list item to a list\n stack[-1].append(line.list_value)\n\n elif line.is_dict_value:\n raise TypeError(f'Dictionary values are excepted only as part of some list - \"{raw_line}\"')\n else:\n raise TypeError(f'Any value should be either list of dictionary key - \"{raw_line}\"')\n return data\n\n\nclass YamlLine:\n def __init__(self, line):\n self._line: str = self._remove_comment(line)\n\n @property\n def is_comment(self):\n return self._line.strip().startswith('#')\n\n @property\n def is_blank(self):\n return not self._line.strip()\n\n @property\n def is_list_value(self):\n return self._line.strip().startswith('-')\n\n @property\n def is_dict_value(self):\n line = self._line.expandtabs(1).strip()\n return ': ' in line or ':' == line[-1]\n\n @property\n def indent_level(self):\n # https://docs.python.org/3/reference/lexical_analysis.html#indentation\n first_letter, *_ = self._line.strip()\n indention = self._line.expandtabs(4).index(first_letter)\n return (indention // 4) * 2 + 1 # fragile\n\n @property\n def key(self):\n line = self._line.expandtabs(1).strip()\n if line.startswith('-'):\n line = line[1:].strip()\n if ':' == line[-1]:\n return line[:-1].strip()\n key, value = line.split(': ')\n return key.strip()\n\n @property\n def list_value(self):\n line = self._line.expandtabs(1).strip()\n if self.is_list_value:\n return line.split('-', 1)[1].strip()\n\n @property\n def dict_value(self):\n line = self._line.expandtabs(1).strip()\n if ':' == line[-1]:\n return None\n key, value = line.split(': ')\n return value.strip().strip(\"'\\\"\") # always return as a string for now\n\n @staticmethod\n def _remove_comment(line):\n line, _, comment = line.partition(' #') # can be incorrect inside string values\n return line\n\n\nif __name__ == '__main__':\n from pprint import pprint\n 
pprint(load('../index.yaml'))\n","repo_name":"nortikin/sverchok","sub_path":"utils/yaml_parser.py","file_name":"yaml_parser.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"42293373132","text":"\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport sys\nimport os\nfrom msvcrt import getch\n\n\ndef get_thread_subject(url):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n subject = soup.findAll(\"span\", {\"class\": \"subject\"})[1].get_text()\n if subject == \"\":\n subject = soup.find(\"blockquote\", {\"class\": \"postMessage\"}).get_text()\n\n return subject\n\n\ndef get_files(url):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n links = soup.findAll(\"div\", {\"class\": \"fileText\"})\n\n files = []\n spoiler_count = 0\n for link in links:\n filename = link.find(\"a\").get_text()\n\n if filename == \"Spoiler Image\":\n spoiler_count += 1\n filename = f\"spoiler-image{spoiler_count}.png\"\n\n url = \"https:\" + link.find(\"a\")[\"href\"]\n files.append([filename, url])\n\n return files\n\n\ndef download(files, url):\n board = url.split(\".org/\")[1].split(\"/\")[0]\n thread = url.split(\"/thread/\")[1]\n\n get_thread_subject(url)\n\n # Create folder\n folder = \"threads/\" + board + \" - \" + thread + \"/\"\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n # Download images\n for file in files:\n file_name = file[0]\n file_url = file[1]\n\n r = requests.get(file_url)\n open(folder + file_name, \"wb\").write(r.content)\n\n # Create file with info\n with open(folder + \"info.txt\", \"w\") as f:\n f.write(f\"Board: /{board}/ \\n\")\n f.write(f\"Thread Subject: {get_thread_subject(url)} \\n\")\n\n date = datetime.now().strftime(\"%m/%d/%Y %H:%M:%S\")\n f.write(f\"Downloaded on: {date} \\n\")\n\n f.write(f\"Images downloaded: {len(files)} \\n\")\n f.write(\"\\n\" + url)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n url = sys.argv[1]\n else:\n url = input(\"Thread URL: \")\n\n files = get_files(url)\n print(f\"Downloading {len(files)} images/videos\")\n download(files, url)\n\n print(\"Done!\")\n print(\"Press any key to exit\")\n getch()\n","repo_name":"JosefVesely/4chan-thread-image-downloader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24695707858","text":"from queue import Queue\nfrom queue import Node\nimport pytest\n\n# Create an empty queue for testing.\n@pytest.fixture(scope='function')\ndef make_empty_q():\n return Queue()\n\n\n# Create a queue with entries for testing.\n@pytest.fixture(scope='function')\ndef make_popd_q():\n q = Queue()\n for x in range(20):\n q.enqueue(x)\n q.enqueue('asdf')\n return q\n\n\ndef test_node_init():\n b = Node(u'a')\n assert b.data == u'a'\n c = Node(u'a', b)\n assert c.next.data == u'a'\n\n\ndef test_queue_init(make_empty_q):\n # Create an empty Queue.\n d = make_empty_q\n assert d\n assert d.head == None\n assert d.tail == None\n\n\ndef test_enqueue(make_empty_q, make_popd_q):\n q = make_empty_q\n # Test enqueue() for an empty queue.\n q.enqueue(1)\n assert q.tail.data == 1\n # Test enqueue() for a queue with something in it.\n q.enqueue(u'a')\n assert q.tail.data =='a'\n # Test enqueue() for a queue with many items in it.\n k = make_popd_q\n k.enqueue(u'hello')\n 
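# Memory-friendlier variant of the open(...).write(r.content) download used in
# the thread downloader above: stream the body in chunks instead of holding
# whole videos in RAM.
import requests

def download_file(url, dest, chunk_size=1 << 16):
    with requests.get(url, stream=True, timeout=30) as r:
        r.raise_for_status()
        with open(dest, 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)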
assert k.tail.data == u'hello'\n\n\ndef test_dequeue(make_empty_q, make_popd_q):\n # Test dequeue() for an empty queue.\n q = make_empty_q\n with pytest.raises(IndexError):\n q.dequeue()\n # Test dequeue() for a queue containing items.\n k = make_popd_q\n for x in range(20):\n assert k.dequeue() == x\n assert k.dequeue() == 'asdf'\n\n\ndef test_size(make_empty_q, make_popd_q):\n q = make_empty_q\n # Test for size() returning 0 for an empty queue.\n assert q.size() == 0\n # Test for size() changing according to number of items in queue.\n for a in range(1, 6):\n q.enqueue(a)\n assert q.size() == a\n q.dequeue()\n assert q.size() == 4\n\n k = make_popd_q\n # Test for size() for a queue already containing more items.\n assert k.size() == 21\n k.dequeue()\n assert k.size() == 20\n k.enqueue(u'A')\n assert k.size() == 21\n","repo_name":"jefimenko/data-structures","sub_path":"test_queue.py","file_name":"test_queue.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3664658338","text":"from itertools import product\n\nimport numpy as np\nimport nevergrad as ng\n\n\ndef n_dim_inf_generator(n=1):\n \"\"\"Iterates in n-dimensional box from 0 to 1 in each dimension.\n\n Perform sort of exhaustive search with steps decreasing as power of 2\"\"\"\n for idx in product(*[[0, 1]]*n):\n yield np.array(idx)\n\n power = 1\n while True:\n k = 2**power\n one_d_vals = np.linspace(0, 1, k+1)\n for idxs in product(*[range(k+1)]*n):\n if all(not i % 2 for i in idxs):\n continue\n yield one_d_vals[list(idxs)]\n power += 1\n\n\nclass BaseOptimizer(ng.optimization.base.Optimizer):\n def __init__(self, *args, parametrization=None, **kwargs):\n super(BaseOptimizer, self).__init__(*args, parametrization=parametrization, **kwargs)\n self.grid_state = n_dim_inf_generator(n=parametrization.dimension)\n\n def _internal_ask(self):\n \"\"\"Called every time `ask` request processed.\n\n Should be implemented for optimizer to work.\n Return 1-d array-like structure with shape same as parametrization.dimension\n Returned values should be standardized to [-3, 3] interval.\n Further mapping on hyperparameters space performed automatically\n (i.e. 
for scalar hyperparameter -3 will be mapped to lower limit, 3 to upper limit)\n Values out of [-3, 3] interval are truncated\n (may be useful to consider that as truncated z-index of normal distribution)\"\"\"\n return next(self.grid_state)*6-3\n\n\ndef get_optimizer(title):\n \"\"\"Mapping of optimizer name to implementation\"\"\"\n custom_registry = {\n 'GridSearch': BaseOptimizer,\n # New optimizers may be added here\n }\n optimizer = custom_registry.get(title, None)\n if optimizer is not None:\n return optimizer\n optimizer = ng.optimizers.registry.get(title, None)\n if optimizer is None:\n raise ValueError(f'unknown optimizer {title!r}')\n return optimizer\n","repo_name":"Alex314/optimum","sub_path":"source/optimizers.py","file_name":"optimizers.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27114943216","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 15 20:35:53 2019\n\n@author: Pedro\n\n'final' code for calculating DRDT and plotting results\n\"\"\"\n\nfrom scipy.interpolate import interp1d\nimport scipy.signal as sig\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#######First read data from files#######\ndirec = r'C:\\Users\\Pedro\\Dropbox\\1-Three Omega with Ara\\new_python\\3omega-lockin-Pedro\\TARA8\\DRDT\\Old_Fashion'\n\n\nhigher_temp = 52\n\nlower_temp = 28\n\nsample = ''\n\nfilename = str(lower_temp) + '_' + str(higher_temp) + '.csv'\n\ntemp_data = pd.read_csv(direc + '/' + sample + '200_temp.csv')\n\nT = temp_data['Temp'].values\ntime_T = temp_data['Time'].values\n\nvolt_data = pd.read_csv(direc +'/'+ sample + '200_v.csv')\nV = volt_data['volt'].values\ntime_V = volt_data['time'].values\n\nshunt_data = pd.read_csv(direc + '/' + sample + '200_vsh.csv')\nVsh = shunt_data['volt'].values\n'''\ntemp_data = pd.read_csv(direc + '/' + 'temp_taramethod_' + '2' + '.csv')\n\nT = temp_data['Temp'].values\ntime_T = temp_data['Time'].values\n\nvolt_data = pd.read_csv(direc +'/'+ 'voltA1_taramethod_' + '2' + '.csv')\nV = volt_data['volt'].values\ntime_V = volt_data['time'].values\n\nshunt_data = pd.read_csv(direc + '/' + 'vshA1_taramethod_' + '2' + '.csv')\nVsh = shunt_data['volt'].values\n'''\nVsh = [i for i in Vsh if i!=0]\n#this is to check how stable Vsh has been. Have had trouble with this in the past. 
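# Quick check one might run at the bottom of optimizers.py above: the first few
# points n_dim_inf_generator yields for n=1 make the power-of-two refinement
# concrete (these are the raw [0, 1] values, before the [-3, 3] rescaling).
from itertools import islice

gen = n_dim_inf_generator(n=1)
print([float(v[0]) for v in islice(gen, 7)])
# expected: [0.0, 1.0, 0.5, 0.25, 0.75, 0.125, 0.375]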
\nfig = plt.figure()\nplt.ion()\nfig.suptitle('Vsh(V) vs sample number', fontsize=26)\nplt.errorbar([n for n in range(len(Vsh))],Vsh, fmt='o')\nVsh=np.array( Vsh[80:120])\n#######Calculate Resistance from Volt and Shunt measurement#######\nVsh_avg = Vsh.mean()\nVsh = np.array([Vsh_avg for i in V])\nR_shunt = 0.099\nI = Vsh/R_shunt\nR = V/I\n\n#######Interpolate#######\n\nif len(time_T) > len(time_V):\n\ttime = time_V\nelse:\n\ttime = time_T\n\nfig = plt.figure()\nplt.ion()\nfig.suptitle('Temp raw data (Temp(C) vs Time(s))', fontsize=26)\nplt.errorbar(time_T,T)\n\nfig = plt.figure()\nfig.suptitle('Resist raw data (Resist(\\u03A9) vs Time(s)', fontsize=26)\nplt.plot(time_V,R)\n\nplt.show()\n\ntry:\n    print('to set to beginning of data input nothing')\n    lower = float(input('Input lower bound on the range that will be fitted: '))\nexcept:\n    print('invalid input')\n    print('setting to default lower range bound of the very beginning of data')\n    lower = time_T[0]\n    \ntry:\n    print('to set to beginning of data input nothing')\n    upper = float(input('Input upper bound on the range that will be fitted: '))\nexcept:\n    print('invalid input')\n    print('setting to default higher range bound of the very end of data')    \n    if time_T[-1] > time_V[-1]:\n        upper = time_V[-1]\n    else:\n    \tupper = time_T[-1]\n    \ntimeV=[]\ntimeT=[]\n\n\nfor tV,tT in zip(time_V,time_T):\n    if tV<upper and tV>lower:\n        timeV.append(tV)\n    if tT<upper and tT>lower:\n        timeT.append(tT)\n\nV_lower_ind = int(np.where(time_V == timeV[0])[0])\nV_upper_ind = int(np.where(time_V == timeV[-1])[0])\n\nT_lower_ind = int(np.where(time_T == timeT[0])[0])\nT_upper_ind = int(np.where(time_T == timeT[-1])[0])\n \nT = T[T_lower_ind:T_upper_ind + 1]  \nR = R[V_lower_ind:V_upper_ind + 1]  \nupper = timeT[-1]\nlower = timeT[0]\nspacing = (upper - lower) / (len(timeT) - 1)\nfor t in T:\n    timeTT = [lower + i*spacing for i in range(len(timeT))]\n\ntimeT = timeTT\nif len(T)%2==0:\n    lenT = len(T)-1\nelse:\n    lenT = len(T)\n\nif len(R)%2==0:\n    lenR = len(R)-1\nelse:\n    lenR = len(R)\n\n#apply savitzky-golay filter to smooth data\nT2 = sig.savgol_filter(T, window_length = lenT, polyorder = 4)\nR2 = sig.savgol_filter(R, window_length = lenR, polyorder = 4)\n\nfig = plt.figure()\nfig.suptitle('Temp data filtered (Temp(C) vs Time(s))', fontsize=26)\nplt.plot(timeT,T2)\n\nfig = plt.figure()\nfig.suptitle('Resist data filtered (Resist(\\u03A9) vs Time(s)', fontsize=26)\nplt.plot(timeV,R2)\n\nfT = interp1d(timeT, T2,'quadratic')\nfR = interp1d(timeV, R2,'quadratic')\n\nif timeT[0] < timeV[0]:\n    time = timeV\n    if timeV[-1] > timeT[-1]:\n        time = time[:-2]\nelse:\n    time = timeT\n    if timeT[-1] > timeV[-1]:\n        time = time[:-2]\n\ntemp = np.array([fT(t) for t in time])\nresist = np.array([fR(t) for t in time])\n\nfig = plt.figure()\nfig.suptitle('Temp filtered and interpolated (Temp(C) vs Time(s))')\nplt.plot(time,temp)\nplt.plot(timeT,T2)\n\nfig = plt.figure()\nfig.suptitle('Resist filtered and interpolated (Resist(\\u03A9) vs Time(s)')\nplt.plot(time,resist)\nplt.plot(timeV,R2)\n\ndel_r = np.diff(resist)\ndel_t = np.diff(temp)\n\nfig = plt.figure()\nfig.suptitle('\\u0394R', fontsize=26)\nplt.plot(time[:-1],del_r)\n\nfig = plt.figure()\nfig.suptitle('\\u0394T', fontsize=26)\nplt.plot(time[:-1],del_t)\nplt.show()\n\nDRDT = del_r/del_t\n\nfig = plt.figure()\nfig.suptitle('\\u0394R savitzky-golay')\nplt.plot(del_r)\n\nfig = plt.figure()\nfig.suptitle('\\u0394T savitzky-golay')\nplt.plot(del_t)\n\nfig2 = plt.figure()\nfig2.suptitle('savitzky-golay drdt', fontsize=26)\nplt.plot(DRDT)\n\n\nprint('DRDT_median: ' + 
str(np.median(DRDT)))\nprint('savitzky-golay drdt' + str(DRDT.mean()))","repo_name":"PedroOliviera/3-omega","sub_path":"Current code/DRDT.py","file_name":"DRDT.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27011227577","text":"import os\nimport re\nimport logging\nimport time\nimport copy\nimport threading\nimport datetime\nfrom shutil import copyfile\nimport requests\nfrom flask import Flask, jsonify, redirect, make_response\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_cors import CORS\nimport influxdb\n\nlock = threading.Lock()\nflush_out_after = 90 # Flush object into the database older than this value\nimage_list_len = 10 # maximum number of images per shape_type\nstate_db = [] # keep track of recent events\nimage_db = {} # dictionary with key shape_type, data is a list of images (oldest last)\n\n# setup logging\ntry:\n log_level = os.environ['LOG_LEVEL']\nexcept:\n log_level = \"DEBUG\"\nFORMAT = '%(levelname)s %(message)s'\nlogging.basicConfig(format=FORMAT, level=getattr(logging, log_level))\nlog = logging.getLogger()\n\napp = Flask(__name__)\napi = Api(app)\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\ndef time2epoch(s):\n match = re.match(\"^(\\d{4})-(\\d{2})-(\\d{2})-(\\d{2}):(\\d{2}).*\", s)\n appendix_time_influx = '000000000'\n if match:\n year = int(match.group(1))\n month = int(match.group(2))\n day = int(match.group(3))\n hour = int(match.group(4))\n minute = int(match.group(5))\n local_time = int(datetime.datetime(year,month,day,hour,minute).timestamp())\n print(\"local_time\", local_time, time.timezone)\n time_utc = local_time - time.timezone\n epoch = str(time_utc)\n epoch = epoch + appendix_time_influx\n return epoch\n return \"0\"\n\ndef age_db():\n while(1):\n time.sleep(5)\n lock.acquire()\n for record in reversed(state_db):\n if int(time.time()) > record['timestamp']+flush_out_after:\n print(\"remove record from state_db\", record)\n state_db.remove(record)\n lock.release()\n\ndef get_shape_ojects():\n url = \"http://\"+cia+\":8000/rest/analytics_shapes/\"\n try:\n r = requests.get(url)\n if r.status_code == 200:\n alist = r.json()\n alist.append({\"shape\": \"all\", \"id\": 99})\n return alist\n else:\n log.error(\"GetShapeObjects status code {}\".format(r.status_code))\n except Exception as e:\n log.error(str(e))\n\n return []\n\nclass ShapesVideoInflux(Resource):\n '''\n REST API class getting shapes for a given recording\n '''\n def __init__(self):\n # self.reqparse = reqparse.RequestParser()\n # self.reqparse.add_argument('recording_id', type = str, required = True, location = 'json')\n super(ShapesVideoInflux, self).__init__()\n\n def get(self, recording_id):\n '''\n Receive request to find shapes in an image file or video file\n Supports two file formats (file suffixes) : jpg and mp4\n '''\n # args = self.reqparse.parse_args()\n\n client = influxdb.InfluxDBClient(host=cia, port=8086)\n client.switch_database('concierge')\n query = \"select * FROM camera_shapes_detected_video WHERE \\\"recording_id\\\"='{}'\".format(recording_id)\n results = client.query(query)\n try:\n columns = results.raw['series'][0]['columns']\n except:\n return [], 200\n object_link_template = None\n try:\n r = requests.get(\"http://\"+cia+\":8000/rest/recordings/\"+str(recording_id))\n if r.status_code == 200:\n object_link_template = r.json()['url_video']\n except Exception as e:\n log.error(\"get recording error {}\".format(str(e)))\n 
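The time2epoch helper defined earlier in this file converts a local 'YYYY-MM-DD-HH:MM' string into the nanosecond epoch string that InfluxDB expects. A standalone sketch of the same arithmetic; note that datetime.timestamp() already interprets a naive datetime as local time, so the extra time.timezone subtraction performed by the helper is worth verifying against your own data:

```python
import datetime, time

# equivalent of time2epoch('2019-08-15-20:35'), step by step
local = int(datetime.datetime(2019, 8, 15, 20, 35).timestamp())
utc = local - time.timezone            # DST-unaware shift, mirroring the helper
print(str(utc) + '000000000')          # InfluxDB wants nanosecond epochs
```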
index2name = {}\n for i, name in enumerate(columns):\n index2name[i] = name\n recording_shape_list = []\n for rec in results.raw['series'][0]['values']:\n adict = {}\n for i, aval in enumerate(rec):\n adict[index2name[i]] = aval\n if object_link_template:\n object_link = object_link_template.replace('.mp4', \"_object_\"+str(recording_id)+\"_\"+str(adict['frame_nbr'])+\".jpg\")\n adict['object_link'] = object_link\n recording_shape_list.append(adict)\n\n return recording_shape_list, 200\n\nclass GetRecordings(Resource):\n '''\n REST API class getting shapes for a given recording\n '''\n def __init__(self):\n super(GetRecordings, self).__init__()\n\n def get(self, shape, start_time, end_time):\n '''\n get list of recording ids based on the filter arguments \n - shape : all or specific shape (person, car, ...)\n - start_time, end_time\n '''\n\n client = influxdb.InfluxDBClient(host=cia, port=8086)\n client.switch_database('concierge')\n query = \"select recording_id from camera_shapes_detected_video where \\\"time\\\">={} and \\\"time\\\"<={}\".format(start_time, end_time)\n\n if shape != \"all\":\n query = \"select recording_id from camera_shapes_detected_video where \\\"time\\\">={} and \\\"time\\\"<={} and \\\"shape\\\"='{}'\".format(start_time, end_time, shape)\n results = client.query(query)\n parsed_list = list(results.get_points(measurement='camera_shapes_detected_video'))\n recording_set = set()\n for rec in parsed_list:\n recording_set.add(rec['recording_id'])\n\n return list(recording_set), 200\n\n\nclass GetVideoShapes(Resource):\n '''\n REST API class getting shapes for a given period\n '''\n def __init__(self):\n super(GetVideoShapes, self).__init__()\n\n def get(self, shape, start_time, end_time):\n '''\n get list of shapes in a specific timeframe (start_time, end_time)\n '''\n appendix_time_influx = '000000000'\n\n start_time_influx = str(start_time)+appendix_time_influx\n end_time_influx = str(end_time)+appendix_time_influx\n start_time_influx = time2epoch(start_time)\n end_time_influx = time2epoch(end_time)\n client = influxdb.InfluxDBClient(host=cia, port=8086)\n client.switch_database('concierge')\n query = \"select * from camera_shapes_detected_video where \\\"time\\\">={} and \\\"time\\\"<={}\".format(start_time_influx, end_time_influx)\n\n results = client.query(query)\n parsed_list = list(results.get_points(measurement='camera_shapes_detected_video'))\n shape_list = []\n for rec in parsed_list:\n if shape == 'all':\n shape_list.append(rec)\n else:\n if rec['shape'] == shape:\n shape_list.append(rec)\n return shape_list, 200, {'Access-Control-Allow-Origin': '*'}\n\nclass GetShapeObjects(Resource):\n '''\n REST API class getting shapes for a given recording\n '''\n def __init__(self):\n super(GetShapeObjects, self).__init__()\n\n def get(self):\n # shape_objects = []\n # shape_objects.append({\"id\": \"all\", \"name\": \"all\"})\n # shape_objects.append({\"id\": \"person\", \"name\": \"person\"})\n shape_list = get_shape_ojects()\n\n return shape_list, 200, {'Access-Control-Allow-Origin': '*'}\n\nclass CreateKnownObject(Resource):\n '''\n REST API class for the reception of motion on a given camera\n '''\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('name', type = str, required = True, location = 'json')\n self.reqparse.add_argument('recording_id', type = str, required = True, location = 'json')\n self.reqparse.add_argument('frame_nbr', type = str, required = True, location = 'json')\n self.reqparse.add_argument('snapshot', 
type = str, required = True, location = 'json')\n self.reqparse.add_argument('shape', type = str, required = True, location = 'json')\n super(CreateKnownObject, self).__init__()\n\n def post(self):\n '''\n Create a known object into the database\n '''\n args = self.reqparse.parse_args()\n src_fn = args['snapshot']\n dummy_path, fn = os.path.split(src_fn)\n if os.path.exists(src_fn):\n copyfile(src_fn, \"/root/static/ko/\"+fn)\n url = \"http://\"+cia+\":8000/rest/known_objects/\"\n data = {}\n data['recording_id'] = args['recording_id']\n data['frame_nbr'] = args['frame_nbr']\n data['file_path_image'] = \"/root/static/ko/\"+fn\n if len(args['name']) == 0:\n data['identified'] = False\n else:\n data['name'] = args['name']\n shape_list = get_shape_ojects()\n fnd_shape = None\n for rec in shape_list:\n if rec['shape'] == args['shape']:\n data['object_type'] = rec['id']\n print(\"data\", data)\n try:\n r = requests.post(url, json=data)\n print(r.status_code)\n except Exception as e:\n log.error(str(e))\n \n \n return {}, 201, {'Access-Control-Allow-Origin': '*'}\n\nclass RegisterDetectedShapes(Resource):\n '''\n Concierge detected shapes, keep track of detected objects for a while so that automation\n systems (like home assistant) can poll for state\n '''\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('shape_type', type = str, required = True, location = 'json')\n self.reqparse.add_argument('camera_name', type = str, required = True, location = 'json')\n self.reqparse.add_argument('snapshot_url', type = str, required = True, location = 'json')\n self.reqparse.add_argument('time', type = str, required = True, location = 'json')\n super(RegisterDetectedShapes, self).__init__()\n\n def post(self):\n '''\n Create a detected shape into the state database\n '''\n args = self.reqparse.parse_args()\n t = args['time']\n record = {'type': 'shape'}\n shape_type = args['shape_type']\n record['shape_type'] = shape_type\n record['camera_name'] = args['camera_name']\n record['snapshot_url'] = args['snapshot_url']\n record['time'] = t\n record['timestamp'] = int(time.time())\n lock.acquire()\n if shape_type not in image_db:\n image_db[shape_type] = []\n match = re.match(\"(.*)_snapshot\\d+_{}\\d+_frame(\\d+).jpg\".format(shape_type), args['snapshot_url'])\n image_url = None\n if match:\n image_url = match.group(1) + \"_frame{}.jpg\".format(match.group(2))\n else:\n image_url = args['snapshot_url']\n log.error(\"espr_debug {}\".format(image_url))\n image_db[shape_type].insert(0, image_url)\n if len(image_db[shape_type]) > image_list_len:\n del image_db[shape_type][-1]\n state_db.append(record) \n lock.release()\n\n return {}, 201, {'Access-Control-Allow-Origin': '*'}\n\nclass ShapesDetected(Resource):\n '''\n REST API class getting shapes for a given recording\n '''\n def __init__(self):\n super(ShapesDetected, self).__init__()\n\n def get(self, camera_name, shape_type):\n shape_dict = {\"detected\": False}\n if camera_name== \"all_cameras\":\n lock.acquire()\n copy_db = copy.deepcopy(state_db)\n lock.release()\n for record in copy_db:\n if record['shape_type'] == shape_type:\n if record['type'] == 'shape':\n shape_dict['detected'] = True\n shape_dict['camera_name'] = record['camera_name']\n shape_dict['shape_type'] = shape_type\n shape_dict['snapshot_url'] = record['snapshot_url']\n else:\n # specific camera\n pass\n\n return shape_dict, 200, {'Access-Control-Allow-Origin': '*'}\n\nclass LastDetectedImages(Resource):\n '''\n REST API return last image detected\n 
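The RegisterDetectedShapes/ShapesDetected pair above implements a poll-based state store for automation systems such as Home Assistant. A hypothetical client poller, assuming the service runs locally on port 5107 as in the app.run call at the bottom of this file:

```python
import time
import requests

BASE = 'http://127.0.0.1:5107/interworking/api/v1.0'

while True:
    # ask whether any camera currently reports a person
    r = requests.get(BASE + '/shape_detected/all_cameras/person', timeout=5)
    state = r.json()
    if state.get('detected'):
        print('person seen on', state.get('camera_name'), '->', state.get('snapshot_url'))
    time.sleep(5)  # records age out of state_db after flush_out_after (90 s)
```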
'''\n    def __init__(self):\n        super(LastDetectedImages, self).__init__()\n\n    def get(self, shape_type, sequence_nbr):\n        # all_types\n        seq_nbr = int(sequence_nbr)\n        if seq_nbr >= image_list_len:\n            seq_nbr = image_list_len - 1\n        print(image_db, shape_type, seq_nbr)\n        if shape_type != 'all_shapes':\n            try:\n                url = image_db[shape_type][seq_nbr]\n                print(url)\n                return redirect(url)\n            except Exception as e:\n                print(str(e))\n\n        return [], 200, {'Access-Control-Allow-Origin': '*'}\n\n\n@app.route('/interworking/api/v1.0/last_detected_images/<shape_type>/<sequence_nbr>.jpg', methods=['GET'])\ndef last_detected_images(shape_type, sequence_nbr):\n    seq_nbr = int(sequence_nbr)\n    if seq_nbr >= image_list_len:\n        seq_nbr = image_list_len - 1\n    if shape_type != 'all_shapes':\n        url = image_db[shape_type][seq_nbr]\n        r = requests.get(url)\n        if r.status_code == 200:\n            data = None\n            for chunk in r.iter_content(1024):\n                if data:\n                    data += chunk\n                else:\n                    data = chunk\n            response = make_response(data)\n            response.headers.set('Content-Type', 'image/jpeg')\n            response.headers.set('Content-Disposition', 'attachment', filename='%s.jpg' % seq_nbr)\n            return response\n\n\n\n# bind resource for REST API service\napi.add_resource(ShapesVideoInflux, '/interworking/api/v1.0/shapes_video/<recording_id>', endpoint = 'shapes_video')\napi.add_resource(GetRecordings, '/interworking/api/v1.0/get_recordings/<shape>/<start_time>/<end_time>', endpoint = 'get_recordings')\napi.add_resource(GetVideoShapes, '/interworking/api/v1.0/get_video_shapes/<shape>/<start_time>/<end_time>', endpoint = 'get_video_shapes')\napi.add_resource(GetShapeObjects, '/interworking/api/v1.0/get_shape_objects', endpoint = 'get_shape_objects')\napi.add_resource(CreateKnownObject, '/interworking/api/v1.0/create_known_object', endpoint = 'create_known_object')\napi.add_resource(RegisterDetectedShapes, '/interworking/api/v1.0/register_detected_shape', endpoint = 'register_detected_shape')\napi.add_resource(ShapesDetected, '/interworking/api/v1.0/shape_detected/<camera_name>/<shape_type>', endpoint = 'shape_detected')\n# api.add_resource(LastDetectedImages, '/interworking/api/v1.0/last_detected_images/<shape_type>/<sequence_nbr>', endpoint = 'last_detected_images')\n\n\ntry:\n    cia = os.environ['CONCIERGE_IP_ADDRESS']\nexcept:\n    cia = '127.0.0.1'\n\nlog.info(\"CONCIERGE_IP_ADDRESS %s\", cia)\n\nt1 = threading.Thread(target=age_db, args=())\nt1.start()\napp.run(host=\"0.0.0.0\", port=5107)\n","repo_name":"sprenge/concierge","sub_path":"interworking/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14576,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"2856819121","text":"# import json\nimport os\n\n\nimport numpy as np\nfrom flask import Flask, request, jsonify\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport urllib.request\n\n\napp = Flask(__name__)\n\nmodel= tf.keras.models.load_model(\n    ('croprice_damage_mobileNetv5.h5'),\n    custom_objects={'KerasLayer': hub.KerasLayer}\n)\n\n@app.route('/get', methods= ['GET'])\ndef get():\n    return jsonify({'message':'hello'})\n\n\n@app.route('/estimate_damage', methods= ['POST'])\ndef calculate_damage_estimate():\n    data = request.get_json(force=True)\n    for i in range(5):\n        urllib.request.urlretrieve(data['img'+str(i+1)],\n                           \"./images/img\"+str(i+1)+\".png\")\n\n\n    # collect the five downloaded images into one batch for a single predict() call\n    batch = []\n    for i in os.listdir('./images'):\n        test = np.array(tf.keras.utils.load_img(\"./images/\"+i, target_size=(224, 224))) / 255\n        batch.append(test)\n    batch = np.stack(batch)\n    print(batch.shape)\n    predictions = model.predict(batch)\n    # average the per-class scores over the 5 images (3 damage classes)\n    l = [0.0, 0.0, 0.0]\n    for j in range(3):\n        for i in range(5):\n            l[j] = l[j] + predictions[i][j]\n        l[j] = l[j]/5.0\n\n    k = np.argmax(l)\n    return 
jsonify({\"prediction\":str(k), \"confidence\": str(l[k])})\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)\n\n","repo_name":"krishna9304/Ekaci","sub_path":"machine_learning/crop_damage_estimation/prod/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"71494174561","text":"from __future__ import division, absolute_import, print_function\r\nfrom CoolProp.CoolProp import PropsSI\r\nimport CoolProp as CP\r\n\r\nfrom scipy.optimize import brentq\r\nfrom math import pi,exp,log,sqrt,tan,cos,sin,pow,atan\r\n# from ACHP.convert_units import cms2gpm, psi2kPa, C2K, in2m\r\nfrom convert_units import cms2gpm, psi2kPa, C2K, in2m\r\n\r\nclass ExpDevClass():\r\n \"\"\"\r\n Expansion devices models\r\n \"\"\"\r\n def __init__(self,**kwargs):\r\n #Load the parameters passed in\r\n # using the dictionary\r\n self.__dict__.update(kwargs)\r\n \r\n def Update(self,**kwargs):\r\n #Update the parameters passed in\r\n # using the dictionary\r\n self.__dict__.update(kwargs)\r\n \r\n def OutputList(self): #TODO: fix this list of outputs\r\n \"\"\"\r\n Return a list of parameters for this component for further output\r\n \r\n It is a list of tuples, and each tuple is formed of items:\r\n [0] Description of value\r\n [1] Units of value\r\n [2] The value itself\r\n \"\"\"\r\n \r\n return [\r\n ('Expansion Device Type','-',self.ExpType),\r\n ('Upstream Pressure','Pa',self.pin_r),\r\n ('Upstream Enthalpy','j/kg',self.hin_r),\r\n ('Downstream Pressure','Pa',self.pout_r),\r\n ('Downstream Quality','-',self.xout_r),\r\n ('Mass flow rate','kg/s',self.mdot_r),\r\n\r\n ]\r\n \r\n def Initialize(self):\r\n \r\n # AbstractState\r\n assert hasattr(self,'AS'), 'Please specify the Abstract State'\r\n \r\n # If the user doesn't include the ExpType, fail\r\n assert hasattr(self,'ExpType'), 'Please specify the type of the expansion device'\r\n \r\n def natlconv(self, g,beta,nu,Pr,T_s,T_amb,L,g_fac):\r\n \" Used in Viper expander \"\r\n # Natural convection function\r\n \r\n Gr_L = (g*beta*(abs(T_s - T_amb))*L**3)/nu**2 #Grashof number\r\n Ra_L = Gr_L*Pr\r\n \r\n if (g_fac == 1): #vertical surface\r\n Nu_L = (0.825 + (0.387*(Ra_L**(1/6))/(1 + (0.492/Pr)**(9/16))**(8/27)))**2 \r\n elif (g_fac == 2): #horizontal surface, top\r\n Nu_L = 0.25*(Ra_L**0.25)\r\n elif (g_fac == 3 and Ra_L>10^4): #horizontal surface, bottom\r\n Nu_L = 0.54*(Ra_L**0.25)\r\n else:\r\n Nu_L = 0.15*Ra_L**(1/3)\r\n return Nu_L\r\n\r\n def phasesep(self, x_th,del_x_sep,m_dot):\r\n \" Used in Viper expander \"\r\n # Phase separation\r\n x_vap = 1 - del_x_sep\r\n x_liq = del_x_sep\r\n m_dot_vap = x_th*m_dot\r\n m_dot_liq = (1 - x_th)*m_dot\r\n return x_vap, m_dot_vap, x_liq, m_dot_liq\r\n \r\n def mv(self, n,c_v,m_dot,p,x):\r\n \" Used in Viper expander \"\r\n # Metering valve pressure drop calculation\r\n n_max = 9.5 # max # of metering valve turns [-]\r\n n_frac = n/n_max\r\n self.AS.update(CP.PQ_INPUTS, p, x)\r\n v = 1/self.AS.rhomass() # [m^3/kg]\r\n V_dot = m_dot*v\r\n V_dot_IP = cms2gpm(V_dot) #convert 'm^3/sec' to 'gal/min' \r\n cap = (87.17*(n_frac**2) + 14.821*n_frac - 0.6503)/100\r\n c_v_eff = c_v*cap\r\n delta_p_IP = c_v_eff*V_dot_IP\r\n delta_p = psi2kPa(delta_p_IP)*1000 #convert 'psi' to 'Pa'\r\n return delta_p\r\n \r\n def bv(self, c_v,m_dot,p,x):\r\n \" Used in Viper expander \"\r\n # Ball valve pressure drop calculation\r\n self.AS.update(CP.PQ_INPUTS, p, x)\r\n v = 1/self.AS.rhomass() # [m^3/kg]\r\n V_dot = 
m_dot*v\r\n V_dot_IP = cms2gpm(V_dot) #convert 'm^3/sec' to 'gal/min'\r\n delta_p_IP = c_v*V_dot_IP\r\n delta_p = psi2kPa(delta_p_IP)*1000 #convert 'psi' to 'Pa'\r\n return delta_p\r\n \r\n \r\n def Calculate(self):\r\n \r\n # Initialize\r\n self.Initialize()\r\n # AbstractState\r\n AS = self.AS\r\n \r\n if self.ExpType == 'Ideal':\r\n #===================================================================\r\n # No information about expansion device is given\r\n #===================================================================\r\n # inlet state\r\n if self.pin_r > AS.p_critical(): #Supercritical\r\n AS.update(CP.HmassP_INPUTS, self.hin_r, self.pin_r)\r\n self.sin_r = AS.smass() #[J/kg-K]\r\n self.Tin_r = AS.T() #[K]\r\n else: #other refrigerants \r\n AS.update(CP.PQ_INPUTS, self.pin_r, 0.0)\r\n Tbubble_in = AS.T() #[K]\r\n h_l_in = AS.hmass() #[J/kg]\r\n s_l_in = AS.smass() #[J/kg-K]\r\n AS.update(CP.PQ_INPUTS, self.pin_r, 1.0)\r\n Tdew_in = AS.T() #[K]\r\n h_v_in = AS.hmass() #[J/kg]\r\n s_v_in = AS.smass() #[J/kg-K]\r\n \r\n self.xin_r = (self.hin_r-h_l_in)/(h_v_in-h_l_in)\r\n if (self.xin_r>0.999):\r\n print (\"ExpDev :: Upstream state in the expansion device is superheated\")\r\n raise\r\n if (self.xin_r>0.0 and self.xin_r<1.0): #two-phase state at the inlet\r\n self.sin_r = self.xin_r*s_v_in+(1-self.xin_r)*s_l_in #[J/kg-K]\r\n self.Tin_r = self.xin_r*Tdew_in+(1-self.xin_r)*Tbubble_in #[K]\r\n else: #liquid state at the inlet\r\n AS.update(CP.HmassP_INPUTS, self.hin_r, self.pin_r)\r\n self.sin_r = AS.smass() #[J/kg-K]\r\n self.Tin_r = AS.T() #[K]\r\n \r\n # outlet state (assume h = constant)\r\n self.hout_r = self.hin_r #[J/kg]\r\n \r\n AS.update(CP.PQ_INPUTS, self.pout_r, 0.0)\r\n Tbubble_out = AS.T() #[K]\r\n h_l_out = AS.hmass() #[J/kg]\r\n s_l_out = AS.smass() #[J/kg-K]\r\n AS.update(CP.PQ_INPUTS, self.pout_r, 1.0)\r\n Tdew_out = AS.T() #[K]\r\n h_v_out = AS.hmass() #[J/kg]\r\n s_v_out = AS.smass() #[J/kg-K]\r\n \r\n # outlet state (two-phase)\r\n self.xout_r = (self.hout_r-h_l_out)/(h_v_out-h_l_out) #[-]\r\n self.Tout_r = self.xout_r*Tdew_out+(1-self.xout_r)*Tbubble_out #[K]\r\n self.sout_r = self.xout_r*s_v_out+(1-self.xout_r)*s_l_out #[J/kg-K]\r\n \r\n # mass flow rate \r\n self.mdot_r = 'N/A'\r\n \r\n # heat losses\r\n self.Q_amb = 0.0 #[W]\r\n \r\n if self.ExpType == 'Linear-TXV':\r\n #===================================================================\r\n # Global Linear TxV model from Haorong Li paper (2004)\r\n # paper title: Modeling Adjustable throat-Area Expansion Valves\r\n #===================================================================\r\n D = self.D #inside diameter [m] \r\n Tsh_static = self.Tsh_static #[K] \r\n Tsh_max = self.Tsh_max #[K]\r\n Adj = self.Adj #[-] \r\n C = self.C #[m^2/K]\r\n \r\n Tsup = self.Tsup #superheat value (user defined)\r\n \r\n P_up = self.pin_r\r\n P_down = self.pout_r\r\n \r\n A = (Tsup-Tsh_static)\r\n if (A>Tsh_max):\r\n A=Tsh_max\r\n \r\n ## thermodynamic properties\r\n AS.update(CP.PQ_INPUTS, self.pin_r, 0.0)\r\n Tbubble_in = AS.T() #[K]\r\n h_l_in = AS.hmass() #[J/kg]\r\n s_l_in = AS.smass() #[J/kg-K]\r\n rho_l_in = AS.rhomass() #[kg/m^3]\r\n AS.update(CP.PQ_INPUTS, self.pin_r, 1.0)\r\n Tdew_in = AS.T() #[K]\r\n h_v_in = AS.hmass() #[J/kg]\r\n s_v_in = AS.smass() #[J/kg-K]\r\n rho_v_in = AS.rhomass() #[kg/m^3]\r\n \r\n # inlet state\r\n self.xin_r = (self.hin_r-h_l_in)/(h_v_in-h_l_in)\r\n if (self.xin_r>0.999):\r\n print (\"ExpDev :: Upstream state in the expansion device is superheated\")\r\n raise\r\n if (self.xin_r>0.0 and 
self.xin_r<1.0):\r\n # 2phase upstream state\r\n print (\"ExpDev :: Upstream state in the expansion device is 2-phase\")\r\n self.sin_r = self.xin_r*s_v_in+(1-self.xin_r)*s_l_in #[J/kg-K]\r\n self.Tin_r = self.xin_r*Tdew_in+(1-self.xin_r)*Tbubble_in #[K]\r\n else: # liquid state at the inlet\r\n AS.update(CP.HmassP_INPUTS, self.hin_r, self.pin_r)\r\n self.sin_r = AS.smass() #[J/kg-K]\r\n self.Tin_r = AS.T() #[K]\r\n \r\n # upstream saturated liquid density\r\n rho_up = rho_l_in\r\n \r\n # calculate mass flow rate\r\n mdot_r = C*A*pow(rho_up*(P_up-P_down),0.5) \r\n \r\n # adjust the mass flow rate via adjustment factor related with geometry (tuning factor)\r\n self.mdot_r = mdot_r*Adj\r\n \r\n # outlet state (assume h = constant)\r\n self.hout_r = self.hin_r #[J/kg]\r\n \r\n AS.update(CP.PQ_INPUTS, self.pout_r, 0.0)\r\n Tbubble_out = AS.T() #[K]\r\n h_l_out = AS.hmass() #[J/kg]\r\n s_l_out = AS.smass() #[J/kg-K]\r\n AS.update(CP.PQ_INPUTS, self.pout_r, 1.0)\r\n Tdew_out = AS.T() #[K]\r\n h_v_out = AS.hmass() #[J/kg]\r\n s_v_out = AS.smass() #[J/kg-K]\r\n \r\n # outlet state (two-phase)\r\n self.xout_r = (self.hout_r-h_l_out)/(h_v_out-h_l_out) #[-]\r\n self.Tout_r = self.xout_r*Tdew_out+(1-self.xout_r)*Tbubble_out #[K]\r\n self.sout_r = self.xout_r*s_v_out+(1-self.xout_r)*s_l_out #[J/kg-K]\r\n\r\n # heat losses\r\n self.Q_amb = 0.0 #[W]\r\n\r\n if self.ExpType == 'Nonlinear-TXV':\r\n #===================================================================\r\n # Nonlinear TxV model from Haorong Li paper (2004)\r\n # paper title: Modeling Adjustable throat-Area Expansion Valves\r\n #===================================================================\r\n D = self.D #inside diameter [m] \r\n Tsh_static = self.Tsh_static #[K] \r\n Tsh_max = self.Tsh_max #[K]\r\n Adj = self.Adj #[-] \r\n C = self.C #[m^2/K]\r\n \r\n Tsup = self.Tsup #superheat value (user defined)\r\n \r\n P_up = self.pin_r\r\n P_down = self.pout_r\r\n \r\n A = (Tsup-Tsh_static)/Tsh_max\r\n if (A>1):\r\n A=1\r\n \r\n ## thermodynamic properties\r\n AS.update(CP.PQ_INPUTS, self.pin_r, 0.0)\r\n Tbubble_in = AS.T() #[K]\r\n h_l_in = AS.hmass() #[J/kg]\r\n s_l_in = AS.smass() #[J/kg-K]\r\n rho_l_in = AS.rhomass() #[kg/m^3]\r\n AS.update(CP.PQ_INPUTS, self.pin_r, 1.0)\r\n Tdew_in = AS.T() #[K]\r\n h_v_in = AS.hmass() #[J/kg]\r\n s_v_in = AS.smass() #[J/kg-K]\r\n rho_v_in = AS.rhomass() #[kg/m^3]\r\n \r\n # inlet state\r\n self.xin_r = (self.hin_r-h_l_in)/(h_v_in-h_l_in)\r\n if (self.xin_r>0.999):\r\n print (\"ExpDev :: Upstream state in the expansion device is superheated\")\r\n raise\r\n if (self.xin_r>0.0 and self.xin_r<1.0):\r\n # 2phase upstream state\r\n print (\"ExpDev :: Upstream state in the expansion device is 2-phase\")\r\n self.sin_r = self.xin_r*s_v_in+(1-self.xin_r)*s_l_in #[J/kg-K]\r\n self.Tin_r = self.xin_r*Tdew_in+(1-self.xin_r)*Tbubble_in #[K]\r\n else: # liquid state at the inlet\r\n AS.update(CP.HmassP_INPUTS, self.hin_r, self.pin_r)\r\n self.sin_r = AS.smass() #[J/kg-K]\r\n self.Tin_r = AS.T() #[K]\r\n \r\n # upstream saturated liquid density\r\n rho_up = rho_l_in\r\n \r\n # calculate mass flow rate\r\n mdot_r = C*(2*A-A*A)*pow(rho_up*(P_up-P_down),0.5) \r\n \r\n # adjust the mass flow rate via adjustment factor related with geometry (tuning factor)\r\n self.mdot_r = mdot_r*Adj\r\n \r\n # outlet state (assume h = constant)\r\n self.hout_r = self.hin_r #[J/kg]\r\n \r\n AS.update(CP.PQ_INPUTS, self.pout_r, 0.0)\r\n Tbubble_out = AS.T() #[K]\r\n h_l_out = AS.hmass() #[J/kg]\r\n s_l_out = AS.smass() #[J/kg-K]\r\n 
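For readers skimming the Linear-TXV branch above, a small standalone number check helps. The density and pressures below are illustrative assumptions; C, Adj and the superheat settings are taken from the __main__ example at the bottom of this file:

```python
# Rough worked example of the Linear-TXV flow relation:
#   mdot = Adj * C * (Tsup - Tsh_static) * sqrt(rho_liq * (P_up - P_down))
from math import sqrt

C = 1.2656e-6               # [m^2/K], manufacturer constant from the example
Adj = 0.7630                # tuning factor
A = min(5 - 4, 6)           # effective opening (Tsup - Tsh_static), capped at Tsh_max
rho_liq = 900.0             # assumed saturated-liquid density [kg/m^3]
P_up, P_down = 3.8e6, 1.1e6 # assumed upstream/downstream pressures [Pa]

mdot = Adj * C * A * sqrt(rho_liq * (P_up - P_down))
print('mdot = {:.4f} kg/s'.format(mdot))   # ~0.048 kg/s for these inputs
```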
AS.update(CP.PQ_INPUTS, self.pout_r, 1.0)\r\n Tdew_out = AS.T() #[K]\r\n h_v_out = AS.hmass() #[J/kg]\r\n s_v_out = AS.smass() #[J/kg-K]\r\n \r\n # outlet state (two-phase)\r\n self.xout_r = (self.hout_r-h_l_out)/(h_v_out-h_l_out) #[-]\r\n self.Tout_r = self.xout_r*Tdew_out+(1-self.xout_r)*Tbubble_out #[K]\r\n self.sout_r = self.xout_r*s_v_out+(1-self.xout_r)*s_l_out #[J/kg-K]\r\n\r\n # heat losses\r\n self.Q_amb = 0.0 #[W] \r\n \r\n if self.ExpType == 'Short-tube':\r\n #===================================================================\r\n # Short tube expansion from Payne and O'Neal (2004)\r\n # paper title: A Mass Flowrate Correlation for Refrigerants and Refrigerant Mixtures, Journal of HVAC\r\n # based on empirical dimensionless PI correlation, recommended for R-12, R-134a, R-502, R-22, R-407C, and R-410A\r\n #===================================================================\r\n D = self.D #inside diameter of the short-tube[m] \r\n L = self.L #length of the short-tube [m]\r\n Adj = self.Adj #adjusting the inside diameter [-]; \r\n L_c = self.L_c #chamfered length [m]\r\n Ang_c = self.Ang_c #chamfered angle [degree]\r\n BranNum = int(self.BranNum) #Number of Paralelled expansion devices \r\n \r\n A_s = pi/4*D*D\r\n \r\n P_up = self.pin_r\r\n P_down = self.pout_r\r\n \r\n # critical point of refirgerant\r\n P_c = AS.p_critical() #[Pa]\r\n T_c = AS.T_critical() #[K]\r\n \r\n # orifice adjustment parameter\r\n C_c = Adj\r\n \r\n ## thermodynamic properties\r\n AS.update(CP.PQ_INPUTS, self.pin_r, 0.0)\r\n Tbubble_in = AS.T() #[K]\r\n h_l_in = AS.hmass() #[J/kg]\r\n s_l_in = AS.smass() #[J/kg-K]\r\n rho_l_in = AS.rhomass() #[kg/m^3]\r\n AS.update(CP.PQ_INPUTS, self.pin_r, 1.0)\r\n Tdew_in = AS.T() #[K]\r\n h_v_in = AS.hmass() #[J/kg]\r\n s_v_in = AS.smass() #[J/kg-K]\r\n rho_v_in = AS.rhomass() #[kg/m^3]\r\n \r\n # inlet state\r\n self.xin_r = (self.hin_r-h_l_in)/(h_v_in-h_l_in)\r\n if (self.xin_r>0.999):\r\n print (\"ExpDev :: Upstream state in the expansion device is superheated\")\r\n raise\r\n if (self.xin_r>0.0 and self.xin_r<1.0): #two-phase state at the inlet\r\n self.sin_r = self.xin_r*s_v_in+(1-self.xin_r)*s_l_in #[J/kg-K]\r\n self.Tin_r = self.xin_r*Tdew_in+(1-self.xin_r)*Tbubble_in #[K]\r\n else: #liquid state at the inlet\r\n AS.update(CP.HmassP_INPUTS, self.hin_r, self.pin_r)\r\n self.sin_r = AS.smass() #[J/kg-K]\r\n self.Tin_r = AS.T() #[K]\r\n \r\n AS.update(CP.QT_INPUTS, 0, self.Tin_r)\r\n P_sat = AS.p() #P_sat corresponding to upstream temperature (liquid saturation pressure) [Pa]\r\n T_sat = Tbubble_in\r\n T_sub = T_sat - self.Tin_r\r\n \r\n # upstream saturated liquid density\r\n AS.update(CP.PQ_INPUTS, P_sat, 0.0)\r\n rho_f = AS.rhomass() #[kg/m^3]\r\n # upstream saturated gas density\r\n AS.update(CP.PQ_INPUTS, P_sat, 1.0)\r\n rho_g = AS.rhomass() #[kg/m^3]\r\n \r\n # non-dimensional groups\r\n pi_3=(P_up-P_sat)/P_c\r\n pi_6=rho_g/rho_f\r\n pi_9=T_sub/T_c\r\n pi_10=L/D\r\n \r\n # coeffcients\r\n a1=3.8811e-1\r\n a2=1.1427e1\r\n a3=-1.4194e1\r\n a4=1.0703e0\r\n a5=-9.1928e-2\r\n a6=2.1425e1\r\n a7=-5.8195e2\r\n \r\n # single-phase flow rate\r\n pi_1 = (a1+a2*pi_3+a3*pi_9+a4*pi_6+a5*log(pi_10))/(1+a6*pi_3+a7*pi_9*pi_9);\r\n G = pi_1*pow((rho_f*P_c),0.5);\r\n \r\n # mass flow rate\r\n mdot_r = G*A_s\r\n \r\n if(self.xin_r<0.000001): #subcooled upstream state\r\n mdot_r = mdot_r\r\n \r\n else: #two-phase upstream state \r\n x_up = self.xin_r\r\n rho_mup=1/((1-x_up)/rho_f+x_up/rho_g)\r\n \r\n # non-dimensional groups\r\n tp6=rho_mup/rho_f\r\n tp35=(P_c-P_sat)/(P_c)\r\n 
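The single-phase pi-group correlation a few lines above is easier to sanity-check in isolation. A hedged numeric sketch with approximate R410A critical properties and assumed operating conditions (only L, D and the a-coefficients come from the code):

```python
from math import log, sqrt, pi

P_c, T_c = 4.90e6, 344.5          # approx. R410A critical pressure [Pa] / temperature [K]
P_up, P_sat = 3.8e6, 3.4e6        # assumed upstream and saturation pressures [Pa]
T_sub = 5.0                       # assumed subcooling [K]
rho_f = 900.0                     # assumed sat. liquid density [kg/m^3]
L, D = 0.0052324, 0.0006604       # short-tube geometry from the __main__ example [m]

pi_3, pi_9, pi_6, pi_10 = (P_up - P_sat)/P_c, T_sub/T_c, 0.02, L/D
a1, a2, a3, a4, a5, a6, a7 = 3.8811e-1, 1.1427e1, -1.4194e1, 1.0703e0, -9.1928e-2, 2.1425e1, -5.8195e2
pi_1 = (a1 + a2*pi_3 + a3*pi_9 + a4*pi_6 + a5*log(pi_10))/(1 + a6*pi_3 + a7*pi_9**2)
G = pi_1*sqrt(rho_f*P_c)          # mass flux [kg/m^2-s]
print('G = {:.0f} kg/m^2-s, mdot per tube = {:.5f} kg/s'.format(G, G*pi/4*D**2))
```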
tp32=(P_c-P_up)/(P_c)\r\n tp27=L/D\r\n tp34=x_up/(1-x_up)*pow((rho_f/rho_g),0.5)\r\n tp28=P_up/P_c\r\n \r\n # coeffcients\r\n b1=1.1831e0\r\n b2=-1.468e0\r\n b3=-1.5285e-1\r\n b4=-1.4639e1\r\n b5=9.8401e0\r\n b6=-1.9798e-2\r\n b7=-1.5348e0\r\n b8=-2.0533e0\r\n b9=-1.7195e1\r\n \r\n numer = (b1*tp6+b2*pow(tp6,2.0)+b3*pow(log(tp6),2.0)+b4*pow(log(tp35),2.0)+b5*pow(log(tp32),2.0)+b6*pow(log(tp27),2.0))\r\n deno = (1+b7*tp6+b8*tp34+b9*pow(tp28,3.0))\r\n C_tp= numer/deno #two-phase flow rate adjustment\r\n \r\n if(C_tp>1):\r\n C_tp=1 #since C_tp>1 is not right\r\n \r\n # correct the mass flow rate by two-phase entrance\r\n mdot_r = mdot_r*C_tp\r\n \r\n # adjust the mass flow rate via adjustment factor related with geometry (tuning factor)\r\n self.mdot_r = mdot_r*C_c\r\n \r\n # multiply mass flow rate by the number of parallel branches\r\n if BranNum == 0:\r\n self.mdot_r = self.mdot_r\r\n else: \r\n self.mdot_r = self.mdot_r * BranNum\r\n \r\n # outlet state (assume h = constant)\r\n self.hout_r = self.hin_r #[J/kg]\r\n \r\n AS.update(CP.PQ_INPUTS, self.pout_r, 0.0)\r\n Tbubble_out = AS.T() #[K]\r\n h_l_out = AS.hmass() #[J/kg]\r\n s_l_out = AS.smass() #[J/kg-K]\r\n AS.update(CP.PQ_INPUTS, self.pout_r, 1.0)\r\n Tdew_out = AS.T() #[K]\r\n h_v_out = AS.hmass() #[J/kg]\r\n s_v_out = AS.smass() #[J/kg-K]\r\n \r\n # outlet state (two-phase)\r\n self.xout_r = (self.hout_r-h_l_out)/(h_v_out-h_l_out) #[-]\r\n self.Tout_r = self.xout_r*Tdew_out+(1-self.xout_r)*Tbubble_out #[K]\r\n self.sout_r = self.xout_r*s_v_out+(1-self.xout_r)*s_l_out #[J/kg-K]\r\n\r\n if self.ExpType == 'Expander':\r\n #===================================================================\r\n # General expander with given isentropic efficiency\r\n #===================================================================\r\n # inlet state\r\n if self.pin_r > AS.p_critical(): #Supercritical\r\n AS.update(CP.HmassP_INPUTS, self.hin_r, self.pin_r)\r\n self.sin_r = AS.smass() #[J/kg-K]\r\n self.Tin_r = AS.T() #[K]\r\n else: #other refrigerants \r\n AS.update(CP.PQ_INPUTS, self.pin_r, 0.0)\r\n Tbubble_in = AS.T() #[K]\r\n h_l_in = AS.hmass() #[J/kg]\r\n s_l_in = AS.smass() #[J/kg-K]\r\n AS.update(CP.PQ_INPUTS, self.pin_r, 1.0)\r\n Tdew_in = AS.T() #[K]\r\n h_v_in = AS.hmass() #[J/kg]\r\n s_v_in = AS.smass() #[J/kg-K]\r\n \r\n self.xin_r = (self.hin_r-h_l_in)/(h_v_in-h_l_in)\r\n if (self.xin_r>0.999):\r\n print (\"ExpDev :: Upstream state in the expansion device is superheated\")\r\n raise\r\n if (self.xin_r>0.0 and self.xin_r<1.0): #two-phase state at the inlet\r\n self.sin_r = self.xin_r*s_v_in+(1-self.xin_r)*s_l_in #[J/kg-K]\r\n self.Tin_r = self.xin_r*Tdew_in+(1-self.xin_r)*Tbubble_in #[K]\r\n else: #liquid state at the inlet\r\n AS.update(CP.HmassP_INPUTS, self.hin_r, self.pin_r)\r\n self.sin_r = AS.smass() #[J/kg-K]\r\n self.Tin_r = AS.T() #[K]\r\n \r\n # isentropic outlet state\r\n AS.update(CP.PSmass_INPUTS,self.pout_r,self.sin_r)\r\n self.hout_s_r = AS.hmass() #[J/kg]\r\n \r\n # outlet state (assume eta_is = given)\r\n self.hout_r = self.hin_r - self.eta_is*(self.hin_r-self.hout_s_r) #[J/kg]\r\n \r\n AS.update(CP.PQ_INPUTS, self.pout_r, 0.0)\r\n Tbubble_out = AS.T() #[K]\r\n h_l_out = AS.hmass() #[J/kg]\r\n s_l_out = AS.smass() #[J/kg-K]\r\n AS.update(CP.PQ_INPUTS, self.pout_r, 1.0)\r\n Tdew_out = AS.T() #[K]\r\n h_v_out = AS.hmass() #[J/kg]\r\n s_v_out = AS.smass() #[J/kg-K]\r\n \r\n # outlet state (two-phase)\r\n self.xout_r = (self.hout_r-h_l_out)/(h_v_out-h_l_out) #[-]\r\n self.Tout_r = 
self.xout_r*Tdew_out+(1-self.xout_r)*Tbubble_out #[K]\r\n self.sout_r = self.xout_r*s_v_out+(1-self.xout_r)*s_l_out #[J/kg-K]\r\n\r\n # adjust the mass flow rate via adjustment factor related with geometry (tuning factor)\r\n # TODO: need to add a mass flow model \r\n mdot_r = self.mdot\r\n self.mdot_r = mdot_r*self.C_exp\r\n \r\n # heat losses\r\n self.Q_amb = 0.0 #[W]\r\n\r\n if self.ExpType == 'Viper':\r\n #===================================================================\r\n # Viper expander model\r\n #===================================================================\r\n # inlet state\r\n if self.pin_r > AS.p_critical(): #Supercritical\r\n AS.update(CP.HmassP_INPUTS, self.hin_r, self.pin_r)\r\n self.sin_r = AS.smass() #[J/kg-K]\r\n self.Tin_r = AS.T() #[K]\r\n self.rhoin_r = AS.rhomass() # [kg/m^3]\r\n else: #other refrigerants \r\n AS.update(CP.PQ_INPUTS, self.pin_r, 0.0)\r\n Tbubble_in = AS.T() #[K]\r\n h_l_in = AS.hmass() #[J/kg]\r\n s_l_in = AS.smass() #[J/kg-K]\r\n rho_l_in = AS.rhomass() # [kg/m^3]\r\n AS.update(CP.PQ_INPUTS, self.pin_r, 1.0)\r\n Tdew_in = AS.T() #[K]\r\n h_v_in = AS.hmass() #[J/kg]\r\n s_v_in = AS.smass() #[J/kg-K]\r\n rho_v_in = AS.rhomass() # [kg/m^3]\r\n \r\n self.xin_r = (self.hin_r-h_l_in)/(h_v_in-h_l_in)\r\n if (self.xin_r>0.999):\r\n print (\"ExpDev :: Upstream state in the expansion device is superheated\")\r\n raise\r\n if (self.xin_r>0.0 and self.xin_r<1.0): #two-phase state at the inlet\r\n self.sin_r = self.xin_r*s_v_in+(1-self.xin_r)*s_l_in #[J/kg-K]\r\n self.Tin_r = self.xin_r*Tdew_in+(1-self.xin_r)*Tbubble_in #[K]\r\n self.rhoin_r = self.xin_r*rho_v_in+(1-self.xin_r)*rho_l_in # [kg/m^3]\r\n else: #liquid state at the inlet\r\n AS.update(CP.HmassP_INPUTS, self.hin_r, self.pin_r)\r\n self.sin_r = AS.smass() #[J/kg-K]\r\n self.Tin_r = AS.T() #[K]\r\n self.rhoin_r = AS.rhomass() # [kg/m^3]\r\n \r\n \r\n # Nozzel Model based on the work of Lennart's MS Thesis \r\n D_m = self.D_m # inside diameter [m]\r\n D_t = self.D_t # throat diameter [m]\r\n \r\n \r\n AS.update(CP.PSmass_INPUTS, self.pout_r, self.sin_r)\r\n h_out_s = AS.hmass() #[J/kg]\r\n \r\n W_dot_is = self.mdot_r*(self.hin_r - h_out_s)\r\n \r\n # Ambient conditions - air properties\"\r\n AS_air = CP.AbstractState('HEOS', 'air')\r\n #TO DO: need to vary this based on ambient temperature input by user\r\n T_amb = C2K(35) # [C]\r\n p_atm = 101325 # [Pa]\r\n AS_air.update(CP.PT_INPUTS, p_atm, T_amb)\r\n k_air = AS_air.conductivity() # [W/m-K]\r\n mu_air = AS_air.viscosity() # [Pa-s]\r\n rho_air = AS_air.rhomass() #[kg/m^3]\r\n nu = mu_air/rho_air # Kinematic viscosity\r\n beta = 1/T_amb\r\n Pr = AS_air.Prandtl() # Prandtl number\r\n \r\n AS.update(CP.PQ_INPUTS, self.pout_r, 0.0)\r\n T_s = AS.T() #[K]\r\n g = 9.81 # [m/s^2]\r\n \r\n # Natural convection\r\n # Sides, vertical, cold\r\n A_side = self.height*self.diameter\r\n P_side = 2*self.height + 2*self.diameter\r\n L_side = A_side/P_side\r\n g_fac_side = 1\r\n Nusselt_L_side = self.natlconv(g,beta,nu,Pr,T_s,T_amb,L_side,g_fac_side)\r\n h_side = Nusselt_L_side*k_air/L_side\r\n Q_dot_side = h_side*A_side*(T_s - T_amb)\r\n # Top, horizontal, cold\r\n A_top = (pi*(self.diameter**2))/4\r\n P_top = pi*self.diameter\r\n L_top = A_top/P_top\r\n g_fac_top = 2 \r\n Nusselt_L_top = self.natlconv(g,beta,nu,Pr,T_s,T_amb,L_top,g_fac_top)\r\n h_top = Nusselt_L_top*k_air/L_top\r\n Q_dot_top = h_top*A_top*(T_s - T_amb)\r\n # Bottom, horizontal, cold\r\n A_bottom = A_top\r\n L_bottom = L_top\r\n g_fac_bottom = 3\r\n Nusselt_L_bottom = 
self.natlconv(g,beta,nu,Pr,T_s,T_amb,L_bottom,g_fac_bottom)\r\n h_bottom = Nusselt_L_bottom*k_air/L_bottom\r\n Q_dot_bottom = h_bottom*A_bottom*(T_s - T_amb)\r\n \r\n # heat losses\r\n Q_dot_loss_tot = Q_dot_bottom + Q_dot_top + Q_dot_side\r\n self.Q_amb = -Q_dot_loss_tot #[W]\r\n \r\n # basic calculation to start with\r\n A_inlet = pi * pow(D_m, 2) / 4\r\n u_inlet = self.mdot_r / (A_inlet * self.rhoin_r)\r\n A_t = pi * pow(D_t, 2) / 4\r\n \r\n \r\n def ObjectiveViper(delta_p_Viper):\r\n \r\n def ObjectiveNozzle(eta_nozzle):\r\n \r\n P_t = self.pout_r + delta_p_Viper \r\n \r\n # calculation of enthalpy\r\n AS.update(CP.PSmass_INPUTS, P_t, self.sin_r)\r\n h_t_is = AS.hmass() #[J/kg]\r\n h_t = self.hin_r - eta_nozzle * (self.hin_r - h_t_is)\r\n AS.update(CP.PQ_INPUTS, P_t, 1.0)\r\n rho_g = AS.rhomass() # [kg/m^3]\r\n AS.update(CP.PQ_INPUTS, P_t, 0.0)\r\n rho_l = AS.rhomass() # [kg/m^3]\r\n \r\n AS.update(CP.HmassP_INPUTS, h_t, P_t)\r\n s_t = AS.smass() #[J/kg-K]\r\n # calculation of mass fraction vapor at h_t and P_t\r\n x_t = AS.Q() # [-]\r\n \r\n s = 2\r\n \r\n if s == 1:\r\n slip = pow(rho_l / rho_g, 1. / 3)\r\n elif s == 2:\r\n e = 0.12 # entrainment needs to be calculated or kept as a variable\r\n slip = e + (1 - e) * pow(((rho_l / rho_g) + e * ((1 - x_t) / x_t)) / (1 + e * ((1 - x_t) / x_t)),1. / 2)\r\n elif s == 3 and x_t > 0:\r\n slip = sqrt((1 + x_t * (rho_l / rho_g - 1)))\r\n else:\r\n slip = 1\r\n \r\n # calculation of void fraction\r\n alpha_t = 1 / (1 + ((1 - x_t) / x_t) * rho_g / rho_l * slip)\r\n \r\n if x_t <= 0:\r\n x_t = 0\r\n \r\n # calculate of mixing density\r\n if x_t > 0 and x_t < 1:\r\n rho_t = alpha_t * rho_g + (1 - alpha_t) * rho_l\r\n else:\r\n rho_t = AS.rhomass() #density at h_t and P_t\r\n \r\n u_t = sqrt(2 * (self.hin_r - h_t) + pow(u_inlet, 2))\r\n \r\n if alpha_t > 1:\r\n alpha_t = 0\r\n \r\n # now check the calculation with the massflow\r\n m_dot_check = A_t * u_t * rho_t\r\n \r\n #pass Throat nozzle parameters\r\n self.eta_nozzle = eta_nozzle\r\n self.pt_r = P_t\r\n self.xt_r = x_t\r\n self.alphat_r = alpha_t\r\n self.ut_r = u_t\r\n self.st_r = s_t\r\n self.ht_r = h_t\r\n \r\n return self.mdot_r - m_dot_check\r\n \r\n #Actual solver for eta_nozzle\r\n brentq(ObjectiveNozzle,0.1,0.9)\r\n \r\n # Calculate main parameters \r\n self.delta_p_nozzle = self.pin_r - self.pt_r\r\n self.E_dot_nozzle = self.mdot_r*(self.hin_r - self.ht_r) #Power recovered by nozzle [W] \r\n \r\n # #Power losses calculation\r\n # self.W_dot_mech = self.W_dot_elec/self.eta_gen \r\n # self.W_dot_fluid_tot = self.W_dot_mech/self.eta_mech\r\n # \r\n # # Turbine losses\r\n # self.W_dot_fluid = self.W_dot_fluid_tot + Q_dot_loss_tot\r\n # self.eta_flow = self.W_dot_fluid/self.E_dot_nozzle\r\n \r\n \r\n \r\n # Turbine losses\r\n self.eta_flow = -4.3354*(self.pin_r/self.pt_r)**2 + 16.3*(self.pin_r/self.pt_r) - 14.777\r\n self.W_dot_fluid = self.eta_flow*self.E_dot_nozzle\r\n self.W_dot_fluid_tot = self.W_dot_fluid - Q_dot_loss_tot\r\n \r\n # Power losses calculation\r\n self.W_dot_mech = self.W_dot_fluid_tot*self.eta_mech\r\n self.W_dot_elec = self.W_dot_mech*self.eta_gen\r\n self.CycleEnergyOut = -self.W_dot_elec + self.Q_amb\r\n \r\n #save delta_p_Viper\r\n self.delta_p_Viper = delta_p_Viper\r\n \r\n return self.W_dot_elec - self.W_dot_elec_target\r\n \r\n #Actual solver for delta_p_viper\r\n brentq(ObjectiveViper,self.delta_p_viper_init-100000,self.delta_p_viper_init+100000)\r\n \r\n \r\n def ObjectiveSeperation(delta_x_sep):\r\n #Phase separation losses\r\n 
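The vertical-surface branch of the natlconv() helper used above for the Viper heat-loss estimate is the Churchill-Chu correlation. A standalone evaluation with illustrative air-film properties shows the magnitudes involved:

```python
# Churchill-Chu, vertical plate, all Ra; property values below are assumed.
g, beta = 9.81, 1/308.0              # gravity [m/s^2], 1/T_film [1/K]
nu, Pr = 1.6e-5, 0.71                # kinematic viscosity [m^2/s], Prandtl number
k_air = 0.026                        # thermal conductivity of air [W/m-K]
T_s, T_amb, L = 288.0, 308.0, 0.06   # surface/ambient temperatures [K], length [m]

Ra_L = (g*beta*abs(T_s - T_amb)*L**3)/nu**2 * Pr
Nu_L = (0.825 + 0.387*Ra_L**(1/6)/(1 + (0.492/Pr)**(9/16))**(8/27))**2
h = Nu_L*k_air/L
print('Ra = {:.2e}, Nu = {:.1f}, h = {:.1f} W/m^2-K'.format(Ra_L, Nu_L, h))
```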
self.x_vap,self.mdot_vap,self.x_liq,self.mdot_liq = self.phasesep(self.xt_r,delta_x_sep,self.mdot_r)\r\n self.delta_x_sep = delta_x_sep\r\n \r\n # Valve losses\r\n #metering valve\r\n c_v_mv = 0.73 #[psi/(gal/min)]\r\n n_MV = 2 # [-]\r\n #ball valve\"\r\n c_v_bv = 4.4 # [psi/(gal/min)]\r\n #Vapor line valve losses\r\n delta_p_mv_vap = self.mv(n_MV,c_v_mv,self.mdot_vap,self.pout_r,self.x_vap)\r\n delta_p_bv_vap = self.bv(c_v_bv,self.mdot_vap,self.pout_r,self.x_vap)\r\n #Liquid line valve losses\r\n delta_p_bv_liq = self.bv(c_v_bv,self.mdot_liq,self.pout_r,self.x_liq)\r\n \r\n # Outlet state (vapor)\r\n AS.update(CP.PQ_INPUTS, self.pout_r, self.x_vap)\r\n self.Tout_vap = AS.T() #[K]\r\n self.hout_vap = AS.hmass() #[J/kg]\r\n self.sout_vap = AS.smass() #[J/kg-K]\r\n # Outlet state (liquid)\r\n AS.update(CP.PQ_INPUTS, self.pout_r, self.x_liq)\r\n self.Tout_liq = AS.T() #[K]\r\n self.hout_liq = AS.hmass() #[J/kg]\r\n self.sout_liq = AS.smass() #[J/kg-K]\r\n \r\n return self.mdot_r*self.ht_r - (self.mdot_liq*self.hout_liq + self.mdot_vap*self.hout_vap)\r\n \r\n #Actual solver for delta_x_sep\r\n brentq(ObjectiveSeperation,0.0,0.5)\r\n\r\n \r\n \r\nif __name__=='__main__':\r\n #Abstract State\r\n Ref = 'R410A'\r\n Backend = 'HEOS' #choose between: 'HEOS','TTSE&HEOS','BICUBIC&HEOS','REFPROP','SRK','PR'\r\n AS = CP.AbstractState(Backend, Ref)\r\n \r\n print('Example for Ideal expansion device')\r\n params={\r\n 'AS':AS,\r\n 'ExpType':'Ideal', #expansion device type\r\n 'pin_r': PropsSI('P','T',60+273.15,'Q',0,Ref), #upsteam pressure\r\n 'hin_r': PropsSI('H','P',PropsSI('P','T',60+273.15,'Q',0,Ref),'Q',0,Ref), #upstream enthalpy\r\n 'pout_r': PropsSI('P','T',10+273.15,'Q',0,Ref), #downstream pressure \r\n }\r\n Exp=ExpDevClass(**params)\r\n Exp.Calculate()\r\n print(Exp.OutputList())\r\n print()\r\n \r\n \r\n print('Example for Linear expansion device')\r\n params={\r\n 'AS':AS,\r\n 'ExpType':'Linear-TXV', #expansion device type\r\n 'Tsh_static':4, #static superheat\r\n 'Tsh_max':6, #maximum superheat\r\n 'D':0.0006604, #inside diameter [m]\r\n 'C':1.2656e-6, #constant from manufacturer [m^2/K]\r\n 'Adj':0.7630, #Adjust the diameter (tuning factor)\r\n 'Tsup':5, #superheat value (user defined)\r\n 'pin_r': PropsSI('P','T',60+273.15,'Q',0,Ref), #upsteam pressure\r\n 'hin_r': PropsSI('H','P',PropsSI('P','T',60+273.15,'Q',0,Ref),'Q',0,Ref), #upstream enthalpy\r\n 'pout_r': PropsSI('P','T',10+273.15,'Q',0,Ref), #downstream pressure \r\n } \r\n Exp=ExpDevClass(**params)\r\n Exp.Calculate()\r\n print('Tout =',Exp.Tout_r,'[K]')\r\n print('hout =',Exp.hout_r,'[J/kg]')\r\n print('xout =',Exp.xout_r,'[-]')\r\n print('mdot_r =',Exp.mdot_r,'[kg/s]')\r\n print()\r\n \r\n \r\n print('Example for short-tube expansion device')\r\n params={\r\n 'AS':AS,\r\n 'ExpType':'Short-tube', #expansion device type\r\n 'D':0.0006604, #inside diameter [m]\r\n 'L':0.0052324, #length of short-tube[m]\r\n 'L_c':0.0001524, #chamfered length [m] (P.S. not included in the solver yet)\r\n 'Ang_c':45, #chamfered angle [degree] (P.S. 
not included in the solver yet)\r\n 'BranNum':12, #Number of Paralelled short-tubes (0 -- default for 1 short-tube only)\r\n 'Adj':1.094, #Adjust the diameter (tuning factor)\r\n 'pin_r': PropsSI('P','T',60+273.15,'Q',0,Ref), #upsteam pressure\r\n 'hin_r': PropsSI('H','P',PropsSI('P','T',60+273.15,'Q',0,Ref),'Q',0,Ref), #upsteam enthalpy\r\n 'pout_r': PropsSI('P','T',10+273.15,'Q',0,Ref), #downstream pressure \r\n }\r\n Exp.Update(**params)\r\n Exp.Calculate()\r\n print('Tout =',Exp.Tout_r,'[K]')\r\n print('hout =',Exp.hout_r,'[J/kg]')\r\n print('xout =',Exp.xout_r,'[-]')\r\n print('mdot_r =',Exp.mdot_r,'[kg/s]')\r\n print()\r\n\r\n\r\n print('Example for expander device')\r\n params={\r\n 'AS':AS,\r\n 'ExpType':'Expander', #expansion device type\r\n 'eta_is':0.8, #isentropic efficiency [-]\r\n 'C_exp':1, #flow factor [-] \r\n 'mdot':0.01, # mass flow rate [kg/s]\r\n 'pin_r': PropsSI('P','T',60+273.15,'Q',0,Ref), #upsteam pressure\r\n 'hin_r': PropsSI('H','P',PropsSI('P','T',60+273.15,'Q',0,Ref),'Q',0,Ref), #upsteam enthalpy\r\n 'pout_r': PropsSI('P','T',10+273.15,'Q',0,Ref), #downstream pressure \r\n }\r\n Exp.Update(**params)\r\n Exp.Calculate()\r\n print('Tout =',Exp.Tout_r,'[K]')\r\n print('hout =',Exp.hout_r,'[J/kg]')\r\n print('xout =',Exp.xout_r,'[-]')\r\n print('hout_s =',Exp.hout_s_r,'[J/kg]')\r\n print('mdot_r =',Exp.mdot_r,'[kg/s]')\r\n print()\r\n \r\n \r\n print('Example for Viper expander')\r\n params={\r\n #Original code from Ammar\r\n # 'AS':AS,\r\n # 'ExpType':'Viper', #expansion device type \r\n # 'mdot_r': 0.1025, # mass flow rate [kg/s]\r\n # 'pin_r': 2709000, #upsteam pressure\r\n # 'hin_r': PropsSI('H','P',2709000,'T',42.64+ 273.15,'R410A'), #upsteam enthalpy\r\n # 'pout_r': 1297000, #downstream pressure \r\n # \r\n # 'D_m': 0.3 * 0.0254, # inside diameter pipe [m]\r\n # 'D_t': 0.09 * 0.0254, # inside diameter throat [m] v\r\n # 'diameter': in2m(2.75), # diameter of viper [m]\r\n # 'height' : in2m(9.75), # height of viper [m]\r\n # 'delta_p_Viper': 176000, #pressure drop across viper\r\n # 'eta_gen' : 0.88, # generator efficiency [-]\r\n # 'eta_mech' : 0.9, # shaft mechanical efficiency [-]\r\n # 'del_x_sep' : 0.025, # percentage vapor/liquid separation [-]\r\n # 'W_dot_elec': 59, # generator power output [W] \r\n \r\n #Constants over all data test points \r\n \r\n 'AS':AS,\r\n 'ExpType':'Viper', #expansion device type \r\n 'D_m': 0.3 * 0.0254, # inside diameter pipe [m]\r\n 'D_t': 0.09 * 0.0254, # inside diameter throat [m] v\r\n 'diameter': in2m(2.75), # diameter of viper [m]\r\n 'height' : in2m(9.75), # height of viper [m]\r\n 'delta_p_viper_init': 176000, #pressure drop across viper\r\n 'eta_gen' : 0.88, # generator efficiency [-]\r\n 'eta_mech' : 0.9, # shaft mechanical efficiency [-] \r\n \r\n\r\n \r\n # # Hybrid_Control_12_11_2018_Viper_I752_O95\r\n# 'mdot_r': 0.1015, # mass flow rate [kg/s]\r\n# 'pin_r': 2690000, #upsteam pressure\r\n# 'hin_r': PropsSI('H','P',2690000,'T',42.5+ 273.15,'R410A'), #upsteam enthalpy\r\n# 'W_dot_elec_target': 58.6, # generator power output [W] \r\n# 'pout_r': 1279000,\r\n \r\n # # Hybrid_Control_12_11_2018_Viper_I743_O95\r\n# 'mdot_r': 0.1009, # mass flow rate [kg/s]\r\n# 'pin_r': 2680000, #upsteam pressure\r\n# 'hin_r': PropsSI('H','P',2680000,'T',42.54+ 273.15,'R410A'), #upsteam enthalpy\r\n# 'W_dot_elec_target': 58.4, # generator power output [W] \r\n# 'pout_r': 1270000,\r\n \r\n \r\n # # Hybrid_Control_12_11_2018_Viper_I80_O95\r\n# 'mdot_r': 0.1049, # mass flow rate [kg/s]\r\n# 'pin_r': 2723000, #upsteam pressure\r\n# 'hin_r': 
PropsSI('H','P',2723000,'T',42.32+ 273.15,'R410A'), #upsteam enthalpy\r\n# 'W_dot_elec_target': 58, # generator power output [W] \r\n# 'pout_r': 1325000,\r\n \r\n # # Hybrid_Control_12_11_2018_Viper_I77_O1076\r\n# 'mdot_r': 0.1049, # mass flow rate [kg/s]\r\n# 'pin_r': 3110000, #upsteam pressure\r\n# 'hin_r': PropsSI('H','P',3110000,'T',49.94 + 273.15,'R410A'), #upsteam enthalpy\r\n# 'W_dot_elec_target': 76.4, # generator power output [W] \r\n# 'pout_r': 1405000,\r\n \r\n \r\n # # Hybrid_Control_12_11_2018_Viper_I77_O104\r\n# 'mdot_r': 0.1043, # mass flow rate [kg/s]\r\n# 'pin_r': 2991000, #upsteam pressure\r\n# 'hin_r': PropsSI('H','P',2991000,'T',47.68+ 273.15,'R410A'), #upsteam enthalpy\r\n# 'W_dot_elec_target': 71.7, # generator power output [W] \r\n# 'pout_r': 1377000,\r\n \r\n \r\n # # Hybrid_Control_12_11_2018_Viper_I77_O95\r\n 'mdot_r': 0.1025, # mass flow rate [kg/s]\r\n 'pin_r': 2709000, #upsteam pressure\r\n 'hin_r': PropsSI('H','P',2709000,'T',42.64+ 273.15,'R410A'), #upsteam enthalpy\r\n 'W_dot_elec_target': 59, # generator power output [W] \r\n 'pout_r': 1297000,\r\n \r\n \r\n # # Oct. 12th 2017 0.90\" straight nozzle data, no separation\r\n# 'mdot_r': 0.09937, # mass flow rate [kg/s]\r\n# 'pin_r': 2629230, #upsteam pressure\r\n# 'hin_r': PropsSI('H','P',2629230,'T',41.38 + 273.15,'R410A'), #upsteam enthalpy\r\n# 'W_dot_elec_target': 26.4, # generator power output [W] \r\n# 'pout_r': 1154620,\r\n\r\n }\r\n Exp.Update(**params)\r\n Exp.Calculate()\r\n print('pin_r =',Exp.pin_r/1000,'[kPa]')\r\n print('pt_r =',Exp.pt_r/1000,'[kPa]')\r\n print('pout_r =',Exp.pout_r/1000,'[kPa]')\r\n print('xt_r =',Exp.xt_r,'[-]')\r\n print('ht_r =',Exp.ht_r,'[J/kg]')\r\n print('mdot_r =',Exp.mdot_r,'[kg/s]')\r\n print('eta_nozzle =',Exp.eta_nozzle,'[-]')\r\n print('eta_flow =',Exp.eta_flow,'[-]')\r\n print('W_dot_elec =',Exp.W_dot_elec,'[W]')\r\n print('delta_x_sep =',Exp.delta_x_sep,'[-]')\r\n print()","repo_name":"abahman/Trunk","sub_path":"Riley/ExpDev_1.py","file_name":"ExpDev_1.py","file_ext":"py","file_size_in_byte":41921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29821615712","text":"import pygame\nimport sys\nimport time\nfrom itertools import product, repeat\nfrom functools import partial as pf\nfrom operator import mul, truediv, attrgetter, itemgetter\n\nfrom intersection import intersect\nfrom intersection import distance, intersect_circle_point, intersect_aabb_circle\nfrom PartitionTree import partition_tree\nfrom PQ import PQ\nfrom shapes import AABB, Sphere\nfrom utils import vect\n\nimage_shape = (1000,1000)\nblack = origin = (0,0,0)\n\ndef count_intersections(point):\n return sum(map(lambda c: intersect_circle_point(c, point), drones))\n\ndef min_distance(aabb):\n return min(map(pf(distance, origin), product(aabb.min_vert, aabb.max_vert)))\n\ndef priority(n):\n return len(n), -min_distance(n.bounds)\n\ndef find_highest_priority(aabb):\n ranges = [range(start, end+1)\n for start, end in zip(aabb.min_vert, aabb.max_vert)]\n points = product(*ranges)\n\n point_priorities = map(lambda point: (\n point, (count_intersections(point), -distance(origin, point))\n ), points)\n\n return max(point_priorities, key=itemgetter(1))\n\ndrones = [*map(Sphere.from_string, sys.stdin)]\n\nTree = partition_tree(pos_fn=attrgetter(\"centre\"),\n membership_fn=intersect_aabb_circle)\n\ntree = Tree(drones)\n\nz_min = tree.bounds.min_vert[2]\nz_range = tree.bounds.shape[2]\nscale = min(vect(truediv, image_shape, tree.bounds.shape))\nunit = 
int(1e7*scale)\n\ndef T(point):\n return (*(int(x*scale + w//2) for x, w in zip(point, image_shape)),)\n\npygame.init()\n\n#image = pygame.Surface(image_shape, pygame.SRCALPHA, 32)\ndrone_image = pygame.Surface(image_shape,0, 24)\n#image.fill((0xFF,0xFF,0x00))\ndrone_image.fill((0xFF,0xFF,0xFF))\n\ndef get_colour(z):\n h = int(300*(z - z_min) / z_range)\n c = pygame.Color(*black,255)\n c.hsva = (h, 100, 100, 100)\n return c\n\nfor circ in drones:\n r = int(circ.r * scale)\n centre = T(circ.centre)\n\n Sphere(centre, r).draw(drone_image, colour=get_colour(circ.centre[2]))\n\n\nnode_queue = PQ([(tree, priority(tree))])\nbest_priority = -sys.maxsize, -sys.maxsize\nbest_point = None\n\ndef draw_aabb(surface, aabb, col=None):\n if not col:\n col = get_colour(aabb.min_vert[2] + aabb.shape[2]//2)\n min_vert = T(aabb.min_vert)\n max_vert = T(aabb.max_vert)\n\n AABB(min_vert, max_vert).draw(surface, col)\n\ndef draw_tree(surface):\n bounds = []\n nodes = [tree]\n\n while nodes:\n node = nodes.pop()\n if node.children:\n nodes.extend(node.children)\n else:\n bounds.append(node.bounds)\n\n for aabb in bounds:\n draw_aabb(surface, aabb)\n\nfont = pygame.font.Font(None, 8*unit)\nfont_pos = (unit, unit)\ni = 0\nwhile node_queue:\n image = drone_image.copy()\n node, priority_metric = node_queue.popitem()\n\n if priority_metric < best_priority:\n break\n\n children = node.split()\n for c in children:\n if not c.done():\n node_queue[c] = priority(c)\n\n elif priority(c) >= best_priority and len(c) > Tree.max_bucket:\n point, p = find_highest_priority(c.bounds)\n if p > best_priority:\n best_priority = p\n best_point = point\n\n\n\n draw_tree(image)\n image.blit(font.render(f\"{i}\", False, black), font_pos)\n for c in children:\n draw_aabb(image, c.bounds, (0xFF,0,0))\n pygame.image.save(image, f\"img/img{i:03d}.png\")\n i += 1\n\ndrone_image.blit(font.render(f\"{i}: Finished\", False, black), font_pos)\npygame.draw.circle(drone_image, (0xFF,0,0), T(best_point), 2)\nfor i in range(i,i+10):\n pygame.image.save(drone_image, f\"img/img{i:03d}.png\")\n","repo_name":"qualiaa/aoc","sub_path":"2018/23/visualiser.py","file_name":"visualiser.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32547115641","text":"from gurobipy import GRB, Model\r\nimport networkx as nx \r\n\r\nclass SofdaiLp(object):\r\n def __init__(self, G, source, destination, Vms, VNFS):\r\n \"\"\"\r\n This method is used for initialization\r\n Args:\r\n G: networkx graph\r\n source: Source node in the network\r\n destination: destination node in the network\r\n Vms: List of nodes which where we can place Vms\r\n VNFS: number of Vnfs \r\n\r\n \"\"\"\r\n self.model = Model()\r\n# self.enabled_vm = dict()\r\n# self.edge = dict()\r\n self.Vars = dict()\r\n self.F = list()\r\n\r\n# \r\n #Parameters\r\n self.G = G\r\n self.source = source\r\n self.destination = destination\r\n self.Vms = Vms\r\n self.VNFS = VNFS\r\n \r\n \r\n \r\n def check_source(self):\r\n \"\"\"\r\n It check that source is the part of the network or invalid source. if \r\n invalid then raise exception\r\n Return:\r\n True or false\r\n \"\"\"\r\n source_bool = False\r\n for a in G.nodes():\r\n if a in self.source:\r\n source_bool = True\r\n if source_bool == False:\r\n raise Exception('Source is not in Network')\r\n \r\n return source_bool\r\n \r\n def check_destination(self):\r\n \"\"\"\r\n It check that destination is the part of the network or invalid \r\n destination. 
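Before the constraint methods below, it may help to see the bare gurobipy pattern the SofdaiLp class scales up. A minimal sketch (variable names are illustrative, and a Gurobi license is required to run it):

```python
from gurobipy import GRB, Model

m = Model()
x = m.addVar(vtype=GRB.BINARY, name='x')
y = m.addVar(vtype=GRB.BINARY, name='y')
# same addConstr(expr, sense, rhs) form used throughout the class
m.addConstr(x + y, GRB.GREATER_EQUAL, 1)
m.setObjective(2*x + 3*y, GRB.MINIMIZE)
m.optimize()
print([v.VarName for v in m.getVars() if v.X > 0.5])
```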
if invalid then raise exception\r\n Return:\r\n True or false\r\n \"\"\"\r\n destination_bool = {}\r\n destinations_bool = False\r\n for b in self.destination:\r\n destination_bool[b] = False\r\n for a in G.nodes():\r\n if a in self.destination:\r\n destination_bool [a] = True\r\n \r\n for c in destination_bool:\r\n if destination_bool[c] == False:\r\n raise Exception('Destination is not in Network')\r\n else:\r\n destinations_bool = True\r\n \r\n \r\n return destinations_bool\r\n\r\n\r\n def check_Vms(self):\r\n \"\"\"\r\n It check that Vms is the part of the network. if invalid then raise \r\n exception.\r\n Return:\r\n True or false\r\n \"\"\"\r\n vm_bool = {}\r\n vms_bool = False\r\n for b in self.Vms:\r\n vm_bool[b] = False\r\n for a in G.nodes():\r\n if a in self.Vms:\r\n vm_bool [a] = True\r\n \r\n\r\n for c in vm_bool:\r\n if vm_bool[c] == False:\r\n raise Exception('Node for Vm assignment is not in Network')\r\n if c in self.source:\r\n raise Exception('Source is assgined as VM')\r\n if c in self.destination:\r\n raise Exception('Destination is assnied as VM')\r\n else:\r\n vms_bool = True\r\n \r\n return vms_bool\r\n \r\n \r\n def Creatvarriables(self): # for creating decision varriables q,x,y,p\r\n \"\"\"\r\n This method creates Gurobi Decision Varriables\r\n r_: denote if node u is assigned as the enabled VM for VNF f in the \r\n walk to destination d.\r\n lemda : denote if edge e u,v is located in the walk connecting the \r\n enabled VM of VNF f and the enabled VM of the next VNF fN.\r\n lemda2 : denote if edge e v,u is located in the walk connecting the \r\n enabled VM of VNF f and the enabled VM of the next VNF fN.\r\n T_: if edge e u,v is located in the forest.\r\n o_: represents if node u is assigned as the enabled VM of service f\r\n for the whole service forest.\r\n \"\"\"\r\n self.F = self.source + self.VNFS\r\n \r\n self.r_template = \"r_{:s}_{:s}_{:s}\"\r\n self.lembda_template = \"lambda_{:s}_{:s}_{:s}_{:s}\"\r\n self.T_template = \"T_{:s}_{:s}_{:s}\"\r\n self.o_template = \"o_{:s}_{:s}\"\r\n self.lambda2_template = \"lambda2_{:s}_{:s}_{:s}_{:s}\"\r\n self.r1_template = \"r_{:s}_{:s}_{:s}\"\r\n \r\n for d in self.destination:\r\n for f in G.nodes():\r\n for u in G.nodes():\r\n name = self.r_template.format(d, f, u)\r\n self.Vars[name] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name = name)\r\n \r\n for u,v in G.edges():\r\n name_lembda = self.lembda_template.format(d, f, u, v)\r\n self.Vars[name_lembda] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name= name_lembda)\r\n for u,v in G.edges():\r\n name_T = self.T_template.format(f, u, v)\r\n self.Vars[name_T] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name= name_T)\r\n for u in G.nodes():\r\n name_o = self.o_template.format(f, u) \r\n self.Vars[name_o] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name= name_o)\r\n for u,v in G.edges():\r\n name_lambda2 = self.lambda2_template.format(d, f, v, u)\r\n self.Vars[name_lambda2] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name= name_lambda2)\r\n \r\n for d in self.destination:\r\n for fn in self.VNFS:\r\n for u in G.nodes():\r\n name_r1 = self.r1_template.format(d, f, u)\r\n self.Vars[name_r1] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name= name_r1)\r\n\r\n self.model.update()\r\n\r\n def Source_Selection(self):\r\n\r\n \"\"\"\r\n Constraint 1 ensures that each destination chooses one source s in S\r\n as its service source.\r\n Returns:\r\n None\r\n \"\"\"\r\n self.add_fs = 0\r\n for d in self.destination:\r\n for f in self.source:\r\n for s 
in self.source:\r\n name_r = self.r_template.format(d, f, s)\r\n add1 = self.model.getVarByName(name_r)\r\n \r\n self.add_fs = self.add_fs + add1\r\n self.model.addConstr(self.add_fs, GRB.EQUAL, 1)\r\n \r\n \r\n def enabled_VM(self):\r\n \r\n \"\"\"\r\n Constraint 2 finds a node u from M as the enabled VM of each VNF f for\r\n each destination.\r\n Returns:\r\n None\r\n \"\"\"\r\n self.add = 0\r\n for d in self.destination:\r\n for f in self.source:\r\n for u in self.Vms:\r\n name_r = self.r_template.format(d, f, u)\r\n add1 = self.model.getVarByName(name_r)\r\n \r\n self.add = self.add + add1\r\n self.model.addConstr(self.add, GRB.EQUAL, 1)\r\n \r\n\r\n def destination_assignment1(self):\r\n \r\n \"\"\"\r\n There are two constraints which deal with the assignment of destinations\r\n for Function Fd; Constraint 3 is the first of them.\r\n Constraint 3 assigns only one destination for Function Fd.\r\n Returns:\r\n None\r\n \"\"\"\r\n self.assign = 0\r\n for d in self.destination:\r\n for f in self.destination:\r\n for u in self.destination:\r\n name_r = self.r_template.format(d, f, u)\r\n self.assign = self.model.getVarByName(name_r)\r\n \r\n self.model.addConstr(self.assign, GRB.EQUAL, 1)\r\n\r\n\r\n def destination_assignment2(self):\r\n \r\n \"\"\"\r\n The 2nd constraint for destination assignment.\r\n Constraint 4 assigns only one destination for Function Fd.\r\n Returns:\r\n None\r\n \"\"\"\r\n self.add1 = 0\r\n for d in self.destination:\r\n for f in self.source:\r\n for u in self.Vms:\r\n name_r = self.r_template.format(d, f, u)\r\n add2 = self.model.getVarByName(name_r)\r\n \r\n self.add1 = add2\r\n self.model.addConstr(self.add1, GRB.EQUAL, 0)\r\n\r\n\r\n def assignment_of_enabled_VM(self):\r\n \r\n \"\"\"\r\n Constraint 5 assigns u as the enabled VM of VNF f for the whole service\r\n forest if u has been selected by at least one destination d for VNF f.\r\n Returns:\r\n None\r\n \"\"\"\r\n for d in self.destination:\r\n for f in self.VNFS:\r\n for u in G.nodes():\r\n name_r = self.r_template.format(d, f, u)\r\n name_o = self.o_template.format(f, u)\r\n LHS = self.model.getVarByName(name_r)\r\n RHS = self.model.getVarByName(name_o)\r\n self.model.addConstr(LHS, GRB.LESS_EQUAL, RHS)\r\n \r\n \r\n def atmost_one_VNF(self):\r\n \r\n \"\"\"\r\n Constraint 6 ensures that each node u is in charge of at most one VNF.\r\n Returns:\r\n None\r\n \"\"\"\r\n self.sum_o = 0\r\n for f in self.VNFS:\r\n for u in G.nodes():\r\n name_o = self.o_template.format(f, u)\r\n RHS = self.model.getVarByName(name_o)\r\n self.sum_o += RHS\r\n self.model.addConstr(self.sum_o, GRB.LESS_EQUAL, 1)\r\n\r\n \r\n def routing_of_service_chain(self):\r\n \"\"\"\r\n Constraint 7 first finds the routing of the service chain for each \r\n destination d. 
It ensures that at least one edge e u,v incident from u\r\n is selected for the service chain because no edge e v,u incident \r\n to u is chosen.\r\n Returns:\r\n None\r\n \"\"\"\r\n add_lemda1 = 0\r\n add_lemda2 = 0\r\n self.final_lemda = 0\r\n for d in self.destination:\r\n for f in self.F:\r\n for u,v in G.edges():\r\n name_lembda = self.lembda_template.format(d, f, u, v)\r\n LS = self.model.getVarByName(name_lembda)\r\n add_lemda1 += LS\r\n for d in self.destination:\r\n for f in self.F:\r\n for u,v in G.edges():\r\n name_lembda2 = self.lambda2_template.format(d, f, v, u)\r\n RS = self.model.getVarByName(name_lembda2)\r\n add_lemda2 += RS\r\n \r\n self.final_lemda = add_lemda1 - add_lemda2\r\n \r\n for d in self.destination:\r\n for f in self.F:\r\n for fn in self.VNFS:\r\n for u in G.nodes():\r\n name_r = self.r_template.format(d, f, u)\r\n name_r1 = self.r1_template.format(d, fn, u)\r\n LS1 = self.model.getVarByName(name_r)\r\n RS1 = self.model.getVarByName(name_r1)\r\n final_r = LS1 - RS1\r\n \r\n self.model.addConstr(self.final_lemda, \r\n GRB.GREATER_EQUAL, final_r)\r\n \r\n \r\n def edge_in_the_service_forest(self):\r\n \"\"\"\r\n Constraint 8 states that any edge e u,v is in the service forest if it\r\n is in the service chain for at least one destination d.\r\n Returns:\r\n None\r\n \"\"\"\r\n for d in self.destination:\r\n for f in self.F:\r\n for u,v in G.edges():\r\n name_lembda = self.lembda_template.format(d, f, u, v)\r\n name_T = self.T_template.format(f, u, v)\r\n LS = self.model.getVarByName(name_lembda)\r\n RS = self.model.getVarByName(name_T)\r\n self.model.addConstr(LS, GRB.LESS_EQUAL, RS)\r\n\r\n \r\n def optimization(self):\r\n cost_nodes = 0\r\n cost_edges = 0\r\n for f in self.VNFS:\r\n for u in G.nodes:\r\n name_o = self.o_template.format(f, u)\r\n LS = G.nodes[u]['Cost'] * self.model.getVarByName(name_o)\r\n cost_nodes += LS \r\n for f in self.VNFS:\r\n for u,v in G.edges():\r\n name_T = self.T_template.format(f, u, v)\r\n RS = G.edges[u,v]['Cost'] * self.model.getVarByName(name_T)\r\n cost_edges += RS\r\n final_cost = cost_nodes + cost_edges\r\n self.model.setObjective(final_cost, GRB.MINIMIZE)\r\n self.model.optimize()\r\n status = self.model.status\r\n\r\n return status\r\n \r\n def build(self):\r\n self.check_source()\r\n self.check_destination()\r\n self.check_Vms()\r\n self.create_variables()\r\n self.Source_Selection()\r\n self.enabled_VM()\r\n self.destination_assignment1()\r\n self.destination_assignment2()\r\n self.assignment_of_enabled_VM()\r\n self.atmost_one_VNF()\r\n self.routing_of_service_chain()\r\n self.edge_in_the_service_forest()\r\n \r\n \r\n \r\n\r\ntopo = 'Ibm.graphml'\r\nG = nx.read_graphml(topo)\r\ndestination = ['15','16']\r\nvnfs = ['1','2','3','4','5']\r\nS = ['1']\r\nVms = ['4','5','6','7','8']\r\n\r\nfor u in G.nodes():\r\n G.nodes[u]['Cost'] = 1\r\nfor u,v in G.edges():\r\n G.edges[u,v]['Cost'] = 1\r\n \r\n \r\nif __name__ == \"__main__\":\r\n newobject = SofdaiLp(G, S, destination, Vms, vnfs)\r\n newobject.build()\r\n newobject.optimization()\r\n\r\n","repo_name":"jalaltareen/Sofda","sub_path":"Sofdailp_inprogress.py","file_name":"Sofdailp_inprogress.py","file_ext":"py","file_size_in_byte":13528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33708211231","text":"import os\nimport json\nimport argparse\nfrom tqdm import trange\nimport matplotlib.pyplot as plt\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow as tf\n\nfrom models import dcgan, resnet\nfrom 
datasets import get_dataset\nfrom losses import BCEWithLogits\n\n\nnet_G_models = {\n 'dcgan.32': dcgan.Generator32,\n 'dcgan.48': dcgan.Generator48,\n 'dcgan.64': dcgan.Generator64,\n 'resnet.32': resnet.ResGenerator32,\n 'resnet.48': resnet.ResGenerator48,\n 'resnet.64': resnet.ResGenerator64,\n 'resnet.128': resnet.ResGenerator128,\n 'resnet.256': resnet.ResGenerator256,\n}\n\n\nnet_D_models = {\n 'dcgan.32': dcgan.Discriminator32,\n 'dcgan.48': dcgan.Discriminator48,\n 'dcgan.64': dcgan.Discriminator64,\n 'resnet.32': resnet.ResDiscriminator32,\n 'resnet.48': resnet.ResDiscriminator48,\n 'resnet.64': resnet.ResDiscriminator64,\n 'resnet.128': resnet.ResDiscriminator128,\n 'resnet.256': resnet.ResDiscriminator256,\n}\n\n\nloss_fns = {\n 'bce': BCEWithLogits,\n}\n\n\ndatasets = ['car_brand', 'car_brand_color']\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser(description=\"GNGAN\")\n parser.add_argument(\"--resume\", action='store_true', help=\"resume from checkpoint\")\n # model and training\n parser.add_argument(\"--dataset\", type=str, default=\"car_brand\", choices=datasets, help=\"options: {}\".format(datasets))\n parser.add_argument(\"--rootpath\", type=str, default=\"./confirmed_fronts\", help=\"path to dataset folder\")\n parser.add_argument(\"--arch\", type=str, default=\"dcgan.64\", choices=net_G_models.keys(), help=\"options: {}\".format(net_G_models.keys()))\n parser.add_argument(\"--loss\", type=str, default=\"bce\", choices=loss_fns.keys(), help=\"options: {}\".format(loss_fns.keys()))\n parser.add_argument(\"--total_steps\", type=int, default=200000, help=\"total number of training steps\")\n parser.add_argument(\"--batch_size_D\", type=int, default=43, help=\"batch size for discriminator\")\n parser.add_argument(\"--batch_size_G\", type=int, default=129, help=\"batch size for generator\")\n parser.add_argument(\"--lr_D\", type=float, default=2e-4, help=\"Discriminator learning rate\")\n parser.add_argument(\"--lr_G\", type=float, default=2e-4, help=\"Generator learning rate\")\n parser.add_argument(\"--beta_1\", type=float, default=0.0, help=\"for Adam\")\n parser.add_argument(\"--beta_2\", type=float, default=0.9, help=\"for Adam\")\n parser.add_argument(\"--n_dis\", type=int, default=1, help=\"update Generator every this steps\")\n parser.add_argument(\"--z_dim\", type=int, default=128, help=\"latent space dimension\")\n parser.add_argument(\"--n_classes\", type=int, default=3, help=\"# classes for condition GAN\")\n # logging\n parser.add_argument(\"--sample_step\", type=int, default=1000, help=\"sample image every this steps\")\n parser.add_argument(\"--save_step\", type=int, default=5000, help=\"save model every this step\")\n parser.add_argument(\"--num_images\", type=int, default=16384, help=\"# images for evaluation\")\n parser.add_argument(\"--logdir\", type=str, default=\"./logdir\", help=\"log folder\") \n return parser.parse_args()\n \n\ndef make_grid(sample, idx):\n plt.figure(figsize=(10,4))\n for i in range(args.n_classes*8):\n plt.subplot(args.n_classes, 8, i+1)\n plt.imshow(sample[i])\n plt.axis('off')\n plt.subplots_adjust(wspace=0, hspace=0)\n plt.savefig(os.path.join(args.logdir, 'sample', 'image_{:d}.png'.format(idx)))\n plt.show()\n\n\n@tf.function\ndef G_infer(z, label):\n return net_G(tf.concat([z, label], -1))\n\n\n@tf.function\ndef train_D_step(images, label, wrong_label):\n z = tf.random.normal((args.batch_size_D, args.z_dim))\n\n with tf.GradientTape() as disc_tape, tf.GradientTape() as gn_tape:\n fake_images = 
tf.stop_gradient(net_G(tf.concat([z, label], -1)))\n # discriminator will get 3 kinds of pair:\n # (real_img, real_label), (fake_img, real_label), (real_img, wrong_label)\n # only the first pair is valid pair\n label_map = label[:, tf.newaxis, tf.newaxis, :] * tf.ones((images.shape[0], images.shape[1], images.shape[2], args.n_classes))\n wrong_label_map = wrong_label[:, tf.newaxis, tf.newaxis, :] * tf.ones((images.shape[0], images.shape[1], images.shape[2], args.n_classes))\n pair1 = tf.concat([images, label_map], -1)\n pair2 = tf.concat([fake_images, label_map], -1)\n pair3 = tf.concat([images, wrong_label_map], -1)\n D_input = tf.concat([pair1, pair2, pair3], 0)\n gn_tape.watch(D_input)\n y = net_D(D_input)\n\n # gradnorm\n grad = gn_tape.gradient(y, D_input)\n grad_norm = tf.norm(tf.reshape(grad, (tf.shape(grad)[0], -1)), ord=2, axis=1)\n grad_norm = grad_norm[:, tf.newaxis]\n pred = (y / (grad_norm + tf.abs(y)))\n\n pred_real, pred_fake = pred[:args.batch_size_D], pred[args.batch_size_D:]\n loss, loss_real, loss_fake = loss_fn(pred_real, pred_fake)\n\n gradients_of_D = disc_tape.gradient(loss, net_D.trainable_variables)\n optim_D.apply_gradients(zip(gradients_of_D, net_D.trainable_variables))\n return loss, loss_real, loss_fake\n\n\n@tf.function\ndef train_G_step(label):\n z = tf.random.normal((args.batch_size_G, args.z_dim))\n\n with tf.GradientTape() as gen_tape, tf.GradientTape() as gn_tape:\n fake_images = net_G(tf.concat([z, label], -1))\n label_map = label[:, tf.newaxis, tf.newaxis, :] * tf.ones((fake_images.shape[0], fake_images.shape[1], fake_images.shape[2], args.n_classes))\n D_input = tf.concat([fake_images, label_map], -1)\n gn_tape.watch(D_input)\n y = net_D(D_input)\n\n # gradnorm\n grad = gn_tape.gradient(y, D_input)\n grad_norm = tf.norm(tf.reshape(grad, (tf.shape(grad)[0], -1)), ord=2, axis=1)\n grad_norm = grad_norm[:, tf.newaxis]\n pred_fake = (y / (grad_norm + tf.abs(y)))\n\n loss = loss_fn(pred_fake)\n\n gradients_of_G = gen_tape.gradient(loss, net_G.trainable_variables)\n optim_G.apply_gradients(zip(gradients_of_G, net_G.trainable_variables))\n return loss\n\n\ndef train():\n # fixed z\n fixed_z = tf.random.normal((args.n_classes*8, args.z_dim))\n fixed_z = tf.Variable(fixed_z) # trackable for tf.train.Checkpoint\n # fixed z\n fixed_label = []\n for i in range(args.n_classes):\n fixed_label += [i]*8\n fixed_label = tf.one_hot((fixed_label), args.n_classes)\n fixed_label = tf.Variable(fixed_label) # trackable for tf.train.Checkpoint\n\n writer = tf.summary.create_file_writer(str(args.logdir), max_queue=1000, flush_millis=20000)\n writer.set_as_default()\n\n model_path = os.path.join(args.logdir, 'model')\n model_ckpt = tf.train.Checkpoint(net_G=net_G)\n model_manager = tf.train.CheckpointManager(model_ckpt, model_path, max_to_keep=10)\n \n checkpoint_path = os.path.join(args.logdir, 'checkpoints')\n ckpt = tf.train.Checkpoint(net_G=net_G, net_D=net_D, optim_G=optim_G, optim_D=optim_D, fixed_z=fixed_z, fixed_label=fixed_label)\n ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=2)\n if args.resume:\n if ckpt_manager.latest_checkpoint:\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('{} restored!!'.format(ckpt_manager.latest_checkpoint))\n else:\n print('checkpoint not found!!')\n else:\n os.makedirs(os.path.join(args.logdir, 'sample'), exist_ok=True)\n os.makedirs(os.path.join(args.logdir, 'model'), exist_ok=True)\n os.makedirs(os.path.join(args.logdir, 'checkpoints'), exist_ok=True)\n\n with trange(1, args.total_steps + 1, ncols=0, 
initial=0, total=args.total_steps) as pbar:\n for step in pbar:\n loss_sum = 0\n loss_real_sum = 0\n loss_fake_sum = 0\n\n x, labels = next(dataset)\n wrong_labels = tf.math.floormod(labels + tf.random.uniform(labels.shape, 1, args.n_classes, dtype=tf.int32), args.n_classes)\n labels = tf.one_hot(tf.reshape(labels, (x.shape[0])), args.n_classes)\n wrong_labels = tf.one_hot(tf.reshape(wrong_labels, (x.shape[0])), args.n_classes)\n x = iter(tf.split(x, num_or_size_splits=args.n_dis))\n labels = iter(tf.split(labels, num_or_size_splits=args.n_dis))\n wrong_labels = iter(tf.split(wrong_labels, num_or_size_splits=args.n_dis))\n # Discriminator\n for _ in range(args.n_dis):\n x_real = next(x)\n label = next(labels)\n wrong_label = next(wrong_labels)\n loss, loss_real, loss_fake = train_D_step(x_real, label, wrong_label)\n\n loss_sum += loss\n loss_real_sum += loss_real\n loss_fake_sum += loss_fake\n\n loss = loss_sum / args.n_dis\n loss_real = loss_real_sum / args.n_dis\n loss_fake = loss_fake_sum / args.n_dis\n\n pbar.set_postfix(loss_real='%.3f' % loss_real, loss_fake='%.3f' % loss_fake)\n\n # Generator\n label = tf.random.uniform((args.batch_size_G,), 0, args.n_classes, dtype=tf.int32)\n label = tf.one_hot(label, args.n_classes)\n loss_G = train_G_step(label)\n\n # write summaries\n tf.summary.scalar('Discriminator/loss', loss, step=step)\n tf.summary.scalar('Discriminator/loss_real', loss_real, step=step)\n tf.summary.scalar('Discriminator/loss_fake', loss_fake, step=step)\n tf.summary.scalar('Generator/loss', loss_G, step=step)\n writer.flush()\n\n # sample from fixed z\n if step == 1 or step % args.sample_step == 0:\n sample = G_infer(fixed_z, fixed_label)\n sample = (sample + 1) / 2\n make_grid(sample, step)\n\n if step == 1 or step % args.save_step == 0:\n ckpt_save_path = ckpt_manager.save()\n model_manager.save()\n pbar.write(f'Step: {step}, save checkpoint at {ckpt_save_path}')\n\n k = len(str(args.total_steps))\n pbar.write(f\"{step:{k}d}/{args.total_steps} \")\n\n writer.close()\n\n\nif __name__ == '__main__':\n args = get_arguments()\n os.makedirs(args.logdir, exist_ok=True)\n with open('{}/config.json'.format(args.logdir), 'w') as fp:\n json.dump(vars(args), fp, indent=4)\n\n # model\n net_G = net_G_models[args.arch](args.z_dim)\n net_D = net_D_models[args.arch]()\n\n # loss\n loss_fn = loss_fns[args.loss]\n\n # optimizer\n optim_G = tf.keras.optimizers.Adam(learning_rate=args.lr_G, beta_1=args.beta_1, beta_2=args.beta_2)\n optim_D = tf.keras.optimizers.Adam(learning_rate=args.lr_D, beta_1=args.beta_1, beta_2=args.beta_2)\n\n # dataset\n dataset = get_dataset(args.dataset, args.rootpath, args.batch_size_D * args.n_dis)\n\n train()\n","repo_name":"JNNNNYao/GNGAN-Tensorflow","sub_path":"train_cGAN.py","file_name":"train_cGAN.py","file_ext":"py","file_size_in_byte":10776,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"7206290811","text":"#\n# @lc app=leetcode id=112 lang=python3\n#\n# [112] Path Sum\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def hasPathSum(self, root: Optional[TreeNode], targetSum: int) -> bool:\n if not root:\n return False\n\n return self.addToLeaf(root,targetSum,0)\n \n def addToLeaf(self, root:Optional[TreeNode], targetSum, parentSum) -> bool:\n if root:\n sum = parentSum + root.val\n # leaf -> judge ans\n if not root.left and not 
root.right:\n if sum == targetSum:\n return True\n else:\n return False\n # none leaf -> keep add\n else:\n return self.addToLeaf(root.left,targetSum,sum) or self.addToLeaf(root.right,targetSum,sum)\n \n \n \n\n# @lc code=end\n\n","repo_name":"WeiHan00457030/LeetCodePython","sub_path":"Problem/112.path-sum.py","file_name":"112.path-sum.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69991614562","text":"from typing import List\n\nclass Solution:\n\n def permute(self, nums: List[int]) -> List[List[int]]:\n\n def backtrack(first = 0):\n\n # if first is at the end of nums, append to out a copy of what is in nums\n if first == n: out.append(nums[:])\n\n # for each num after first\n # swap num with first\n # backtrack with first+1\n # un-swap\n for i in range(first, n):\n nums[first], nums[i] = nums[i], nums[first]\n backtrack(first+1)\n nums[first], nums[i] = nums[i], nums[first]\n\n n = len(nums)\n out = []\n backtrack()\n return out\n\nif __name__ == \"__main__\":\n s = Solution()\n input = [1,2,3]\n\n print(s.permute(input))","repo_name":"NicolasADavid/PythonChallenges","sub_path":"Leetcode/permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74322329122","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.paginator import Paginator\nfrom django.http import JsonResponse\nimport copy\nimport os\nfrom . import query\n\n\n# 全局变量,用于缓存查询语句,查询算法,查询结果\nlast_keywords = \"\"\nlast_algorithms = \"\"\nlast_query_results = {}\n\n\ndef search(request):\n global last_keywords, last_algorithms, last_query_results\n # 设置request编码方式, 获取参数\n request.encoding = 'utf-8'\n keywords = request.GET['keywords'].replace(\"%20\", \" \").replace(\"%22\", \"\\\"\")\n algorithm = request.GET['algorithm']\n page = int(request.GET['page'])\n if not (keywords == last_keywords and algorithm == last_algorithms):\n last_query_results = query.query(keywords, algorithm)\n last_keywords = keywords\n last_algorithms = algorithm\n response = last_query_results.copy()\n response[\"content\"] = response[\"content\"][5 * (page - 1): 5 * page]\n response = copy.deepcopy(response)\n keys = response['maybefinding']\n count = 0\n for content in response['content']:\n count += 1\n content['title'] = query.highlight(content['title'], keys)[0]\n ab, flag = query.highlight(content['abstract'], keys)\n if flag == 0:\n ab, flag = query.highlight2(content['content'], keys)\n if flag == 0:\n if content['abstract'] != 'Unknown':\n ab = content['abstract']\n else:\n ab = '...'\n content['abstract'] = ab\n # print('id=',count,ab)\n del content['content']\n response[\"currentPage\"] = str(page)\n return JsonResponse(response)\n","repo_name":"bbjbbjbbj/IR","sub_path":"IR/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73592769123","text":"\"\"\"\n @author: JiaGuo\n @emil: 1520047927@qq.com\n @date: Created in 2022/8/13 17:12\n @description: \n @modified By:\n @version: 1.0\n\"\"\"\n\n\nclass Solution:\n def question(self, num, nums):\n # num = len(nums)\n if num < 3:\n return 0\n count = 0\n for i in range(0, num):\n for j in range(i + 1, num):\n for k in range(j + 1, num):\n if nums[j] < 0 and nums[i]+nums[k] > 0:\n break\n if nums[k] == (3*nums[j] - nums[i]):\n 
count += 1\n return count\n\n\nif __name__ == '__main__':\n num = int(input())\n nums = input().split()\n nums = [int(i) for i in nums]\n s = Solution().question(num, nums)\n print(s)\n","repo_name":"gj-hat/Leetcode","sub_path":"笔试/笔试-美团/22/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"42079924322","text":"from tqdm import tqdm\r\nimport numpy as np\r\nimport logging\r\nimport torch\r\nfrom sklearn.decomposition import PCA\r\n\r\n\r\ndef pcc(u, v, eps=1e-8):\r\n u, v = u - torch.mean(u, dim=-1, keepdims=True), v - torch.mean(v, dim=-1, keepdims=True)\r\n u, v = torch.unsqueeze(u, 1), torch.unsqueeze(v, 0)\r\n return torch.sum(u * v, dim=-1) / (torch.sqrt(torch.sum(u ** 2, dim=-1)) * torch.sqrt(torch.sum(v ** 2, dim=-1)) + eps)\r\n\r\n\r\ndef extractConstraints(representation):\r\n\r\n # Laplace\r\n representation = representation + np.eye(representation.shape[0])\r\n D = representation.sum(axis=1)\r\n D_ = np.diag(np.power(D, -0.5))\r\n representation = np.dot(np.dot(D_, representation), D_)\r\n\r\n # PCA\r\n pca = PCA(n_components=600)\r\n representation = pca.fit_transform(representation)\r\n\r\n representation = torch.from_numpy(representation).float().cuda().detach()\r\n\r\n pcc_mat = np.zeros((representation.shape[0], representation.shape[0]), dtype='float')\r\n for i in tqdm(range(0, representation.shape[0], 10)):\r\n pcc_mat[i:i + 10] = pcc(representation[i:i + 10], representation).cpu().numpy()\r\n\r\n pcc_mat = np.abs(pcc_mat)\r\n\r\n return pcc_mat\r\n\r\n\r\ndef obtain_constraints(net_numbs, emb, symbols, topN, idx_layer):\r\n\r\n pcc_mats = []\r\n for idx_net in range(net_numbs):\r\n pcc_mat = extractConstraints(emb[idx_net])\r\n pcc_mats.append(pcc_mat)\r\n\r\n must_links = []\r\n for i, pcc_mat in enumerate(pcc_mats):\r\n np.fill_diagonal(pcc_mat, 0)\r\n\r\n pcc_order = np.sort(pcc_mat.flatten())\r\n threshold_max = pcc_order[-topN[i]]\r\n\r\n must_link = (pcc_mat >= threshold_max) #.astype('float')\r\n must_links.append(must_link)\r\n \r\n #############################################################################################################\r\n for i in range(net_numbs):\r\n for j in range(i):\r\n xy, x_indx, y_inds = np.intersect1d(symbols[i], symbols[j], return_indices=True)\r\n tmp = must_links[i][x_indx][:, x_indx] + must_links[j][y_inds][:, y_inds]\r\n\r\n must_links[i][np.ix_(x_indx, x_indx)] = tmp \r\n must_links[j][np.ix_(y_inds, y_inds)] = tmp\r\n \r\n logging.info('### Network {}: Number of Must link: {}'.format(i, must_links[i].sum()))\r\n #############################################################################################################\r\n return must_links\r\n","repo_name":"MedicineBiology-AI/brainMI","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"5469330854","text":"import pickle\nimport os\n\nfold = f'users/'\ndata_file = f'data/'\n\nusers = os.listdir('users')\n\n\n\ndef load_badge():\n al = os.listdir('badges')\n o = {}\n for x in al:\n with open(f'badges/{x}', 'rb') as f:\n obr = f.read()\n o[x.split('.')[0]] = obr\n\n return o\n\ndef load_pics():\n al = os.listdir('pictures')\n o = {}\n for x in al:\n with open(f'pictures/{x}', 'rb') as f:\n obr = f.read()\n o[x.split('.')[0]] = obr\n\n return o\n\ndef save(user):\n with 
open(f'{fold}{user.nick}.user', 'wb') as f:\n pickle.dump(user, f)\n return False\n\ndef load(uid):\n with open(f'{fold}{uid}.user', 'rb') as f:\n user = pickle.load(f)\n return user\n\n\nclass User:\n def __init__(self, nick, passwd):\n self.nick = nick\n self.passwd = passwd\n self.admin = False\n self.pfp = f'default'\n self.badges = ['default']\n self.badge = 'default'\n self.avatar = ['default']\n self.history = []\n self.rc = 0\n self.title = 'Wruszka'\n self.titles = ['Wruszka']\n self.huj = {}\n self.wordly_won = 0\n self.wordly_max_streak = 0\n self.wordly_cur_streak = 0\n self.wordly_win_today = False\n self.wordly_tries = []\n self.wordly_won_after = []\n\ndef end_game(info):\n\n ig = len(info)\n print(info)\n for x in info:\n r = 0\n u = load(x['name'])\n u.history.append(x)\n\n if 'Przezyl' in x['check'] and 'Wygral' in x['check']:\n r += 100\n elif 'Wygral' in x['check']:\n r += 50\n elif 'Przezyl' in x['check']:\n r += 50\n else:\n r += 25\n\n if x['role'] == 'Heroine':\n r += 50\n elif x['role'] == 'Rival':\n r += 50\n elif x['role'] == 'Partner':\n r += 0\n elif x['role'] == 'EX Midboss':\n r += 25\n elif x['role'] == 'One True Partner':\n r += 75\n elif x['role'] == 'Stage Boss':\n r += 0\n elif x['role'] == 'Final Boss':\n r += 25\n elif x['role'] == 'Challenger':\n r += 50\n elif x['role'] == 'Anti-Heroine':\n r += 50\n else:\n r += 25\n\n\n if ig == 5:\n r = round(r*1.1)\n if ig == 6:\n r = round(r*1.3)\n if ig == 7:\n r = round(r*1.6)\n if ig == 8:\n r = round(r*2)\n\n cirno = 0\n for y in u.history:\n if y['postac'] == 'Cirno':\n cirno += 1\n if y['role'] =='Anti-Heroine':\n cirno += 1\n if cirno == 9:\n if 'Baka' not in u.badges:\n u.badges.append('Baka')\n u.titles.append('The Baka')\n r += 400\n\n u.rc += r\n save(u)\n\ndef read_data(data):\n with open(f'{data_file}{data}.txt', 'r') as f:\n data_list = f.readline().split(', ')\n return [x.replace('\\'', '') for x in data_list]\n\ndef add_data(data,to_add):\n with open(f'{data_file}{data}.txt', 'a') as f:\n f.write(\", '\"+to_add+\"'\")\n\n","repo_name":"RemilSca/danmakuweb","sub_path":"mod.py","file_name":"mod.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27208148966","text":"import os\n\nimport plotly.graph_objs as go\nfrom statsmodels.tsa.stattools import kpss, adfuller\nfrom tqdm import tqdm\n\nfrom libs import compute_lib\nfrom libs.experiments import load, filtering, compute, paths, config\nfrom libs.experiments.config import QUANTIFICATION_WINDOW_LENGTH_IN_CELL_DIAMETER, \\\n QUANTIFICATION_WINDOW_HEIGHT_IN_CELL_DIAMETER, QUANTIFICATION_WINDOW_WIDTH_IN_CELL_DIAMETER, all_experiments, \\\n OUT_OF_BOUNDARIES\nfrom plotting import save\n\nOFFSET_X = 0\nOFFSET_Y = 0\nOFFSET_Z = 0\n\nPAIR_DISTANCE_RANGE = [4, 10]\n\nDERIVATIVES = [0, 1, 2]\nDERIVATIVES_TEXT = ['D', 'D\\'', 'D\\'\\'']\n\n\ndef main():\n _experiments = all_experiments()\n _experiments = filtering.by_categories(\n _experiments=_experiments,\n _is_single_cell=False,\n _is_high_temporal_resolution=False,\n _is_bleb=False,\n _is_dead_dead=False,\n _is_live_dead=False,\n _is_bead=False,\n _is_metastasis=False\n )\n\n _tuples = load.experiments_groups_as_tuples(_experiments)\n _tuples = filtering.by_time_frames_amount(_tuples, compute.density_time_frame(_experiments[0]))\n _tuples = filtering.by_real_pairs(_tuples)\n _tuples = filtering.by_band(_tuples)\n _tuples = filtering.by_pair_distance_range(_tuples, PAIR_DISTANCE_RANGE)\n print('Total 
tuples:', len(_tuples))\n\n _arguments = []\n for _tuple in _tuples:\n _experiment, _series_id, _group = _tuple\n _latest_time_frame = compute.latest_time_frame_before_overlapping(_experiment, _series_id, _group, OFFSET_X)\n for _cell_id in ['left_cell', 'right_cell']:\n _arguments.append({\n 'experiment': _experiment,\n 'series_id': _series_id,\n 'group': _group,\n 'length_x': QUANTIFICATION_WINDOW_LENGTH_IN_CELL_DIAMETER,\n 'length_y': QUANTIFICATION_WINDOW_HEIGHT_IN_CELL_DIAMETER,\n 'length_z': QUANTIFICATION_WINDOW_WIDTH_IN_CELL_DIAMETER,\n 'offset_x': OFFSET_X,\n 'offset_y': OFFSET_Y,\n 'offset_z': OFFSET_Z,\n 'cell_id': _cell_id,\n 'direction': 'inside',\n 'time_points': _latest_time_frame\n })\n\n _windows_dictionary, _windows_to_compute = \\\n compute.windows(_arguments, _keys=['experiment', 'series_id', 'group', 'cell_id'])\n _fiber_densities = compute.fiber_densities(_windows_to_compute, _subtract_border=True)\n\n _experiments_fiber_densities = {\n _key: [_fiber_densities[_tuple] for _tuple in _windows_dictionary[_key]]\n for _key in _windows_dictionary\n }\n\n _kpss_y_arrays = [[] for _i in DERIVATIVES]\n _adf_y_arrays = [[] for _i in DERIVATIVES]\n for _tuple in tqdm(_tuples, desc='Experiments loop'):\n _experiment, _series_id, _group = _tuple\n _properties = load.group_properties(_experiment, _series_id, _group)\n\n _left_cell_fiber_densities = _experiments_fiber_densities[(_experiment, _series_id, _group, 'left_cell')]\n _left_cell_fiber_densities = compute.remove_blacklist(\n _experiment, _series_id, _properties['cells_ids']['left_cell'], _left_cell_fiber_densities)\n _right_cell_fiber_densities = _experiments_fiber_densities[(_experiment, _series_id, _group, 'right_cell')]\n _right_cell_fiber_densities = compute.remove_blacklist(\n _experiment, _series_id, _properties['cells_ids']['right_cell'], _right_cell_fiber_densities)\n\n if not OUT_OF_BOUNDARIES:\n _left_cell_fiber_densities = \\\n compute.longest_fiber_densities_ascending_sequence(_left_cell_fiber_densities)\n _right_cell_fiber_densities = \\\n compute.longest_fiber_densities_ascending_sequence(_right_cell_fiber_densities)\n else:\n _left_cell_fiber_densities = [_fiber_density[0] for _fiber_density in _left_cell_fiber_densities]\n _right_cell_fiber_densities = [_fiber_density[0] for _fiber_density in _right_cell_fiber_densities]\n\n # ignore small arrays\n _minimum_time_frames_for_correlation = compute.minimum_time_frames_for_correlation(_experiment)\n if len(_left_cell_fiber_densities) < _minimum_time_frames_for_correlation or \\\n len(_right_cell_fiber_densities) < _minimum_time_frames_for_correlation:\n continue\n\n for _derivative_index, _derivative in enumerate(DERIVATIVES):\n for _cell_fiber_densities in [_left_cell_fiber_densities, _right_cell_fiber_densities]:\n _cell_fiber_densities_derivative = compute_lib.derivative(_cell_fiber_densities, _n=_derivative)\n _, _kpss_p_value, _, _ = kpss(_cell_fiber_densities_derivative, nlags='legacy')\n _kpss_y_arrays[_derivative_index].append(_kpss_p_value)\n _, _adf_p_value, _, _, _, _ = adfuller(_cell_fiber_densities_derivative)\n _adf_y_arrays[_derivative_index].append(_adf_p_value)\n\n print('Total pairs:', len(_kpss_y_arrays[0]) / 2)\n\n # print results\n print('KPSS:')\n for _derivative_index, _derivative in enumerate(DERIVATIVES):\n _stationary_count = len([_value for _value in _kpss_y_arrays[_derivative_index] if _value > 0.05])\n print('Derivative:', _derivative, 'Stationary:',\n str(_stationary_count / len(_kpss_y_arrays[_derivative_index]) * 100) + '%')\n 
print('ADF:')\n for _derivative_index, _derivative in enumerate(DERIVATIVES):\n _stationary_count = len([_value for _value in _adf_y_arrays[_derivative_index] if _value < 0.05])\n print('Derivative:', _derivative, 'Stationary:',\n str(_stationary_count / len(_adf_y_arrays[_derivative_index]) * 100) + '%')\n\n # plot\n _colors_array = config.colors(3)\n for _test_name, _y_title, _y_tickvals, _p_value_line, _y_arrays in \\\n zip(\n ['kpss', 'adf'],\n ['KPSS test p-value', 'ADF test p-value'],\n [[0.05, 0.1], [0.05, 1]],\n [0.05, 0.05],\n [_kpss_y_arrays, _adf_y_arrays]\n ):\n _fig = go.Figure(\n data=[\n go.Box(\n y=_y,\n name=_derivative,\n boxpoints='all',\n jitter=1,\n pointpos=0,\n line={\n 'width': 1\n },\n fillcolor='white',\n marker={\n 'size': 10,\n 'color': _color\n },\n opacity=0.7,\n showlegend=False\n ) for _y, _derivative, _color in zip(_y_arrays, DERIVATIVES_TEXT, _colors_array)\n ],\n layout={\n 'xaxis': {\n 'title': 'Fiber density derivative',\n 'zeroline': False\n },\n 'yaxis': {\n 'title': _y_title,\n 'zeroline': False,\n 'tickmode': 'array',\n 'tickvals': _y_tickvals\n },\n 'shapes': [\n {\n 'type': 'line',\n 'x0': DERIVATIVES[0] - 0.75,\n 'y0': _p_value_line,\n 'x1': DERIVATIVES[-1] + 0.75,\n 'y1': _p_value_line,\n 'line': {\n 'color': 'red',\n 'width': 2,\n 'dash': 'dash'\n }\n }\n ]\n }\n )\n\n save.to_html(\n _fig=_fig,\n _path=os.path.join(paths.PLOTS, save.get_module_name()),\n _filename='plot_' + _test_name\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"assafna/cell-ecm-project","sub_path":"fiber_density/experiments/stationary_vs_inner_dynamics_derivatives_cell_pairs.py","file_name":"stationary_vs_inner_dynamics_derivatives_cell_pairs.py","file_ext":"py","file_size_in_byte":7851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8407152548","text":"import math, time\n\nt = time.time()\n\ndef process(intv, n, val, n_mult):\n\tdenum = (n - val * val) / n_mult\n\ta = 0\n\twhile val-denum >= -intv:\n\t\tval -= denum\n\t\ta += 1\n\tval = -val\n\treturn [a, val, denum] #intv, val, n_mult\n\nresult = 0\n\nfor n in range(10000):\n\tsqrt = math.sqrt(n)\n\ta = int(sqrt)\n\tif sqrt - a > 0:\n\t\tintv = a\n\t\tafirst = process(a, n, a, 1)\n\t\ti = 1\n\t\tr = afirst\n\t\twhile True:\n\t\t\tr = process(intv, n, r[1], r[2])\n\t\t\tif r == afirst:\n\t\t\t\tbreak\t\t\t\n\t\t\ti += 1\n\t\tif i % 2 != 0:\n\t\t\tresult += 1\n\nprint(result, (time.time()-t)*1000)\n","repo_name":"Derexas/Euler","sub_path":"64.py","file_name":"64.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31369234706","text":"#################################################################\r\n# Code that allows to get arduino data from the serial monitor #\r\n# and to visualize this data live using the \"drawnow\" library #\r\n#################################################################\r\n\r\nimport serial # will enable us to start the communication with the Arduino software\r\nfrom drawnow import drawnow, plt # import necessary methods from drawnow\r\n\r\n# create an object and opens the port, in this case COM3. 
\r\n# note that the baudrate, 9600 in our case, must be specified.\r\narduino_serial = serial.Serial(\"COM4\", 9600) # adjust com number as necessary\r\n\r\n# create empty lists to capture the degree and LDR values as they arrive\r\ndegree = []\r\nLDR = []\r\n\r\n# turn on interactive mode for data visualization\r\nplt.ion()\r\n\r\n# this variable initializes a counter that will allow us to delete the oldest point on\r\n# both the degree and the LDR data so that we are always visualizing only the most recent data points. In Pycharm,\r\n# a snapshot is taken for the graphs of the previous data as the plot shifts to accommodate newer data. This is\r\n# good if we only want to focus on the most recent data for our project and takes fewer resources, which means\r\n# a lower possibility for 'overload' or the likes of it. Furthermore, it will allow the graph to not be too squished\r\n# as all data are being plotted on the same graph. This can be removed simply by deleting the variable below and\r\n# the last 4 lines in the while loop.\r\n\r\ncount = 0\r\n\r\n# In order to know which column is which\r\nprint(\"degree, LDR\")\r\n\r\n# define a function that will plot the data to be visualized.\r\ndef plot_live_data():\r\n plt.title(\"Live Streaming Data - Godin rules!\")\r\n plt.grid(True)\r\n plt.xlabel(\"Degree (o)\")\r\n plt.ylabel(\"LDR (OHMS)\")\r\n plt.plot(degree, LDR)\r\n\r\n# create an infinite while loop to continuously fetch the data from the arduino serial monitor\r\nwhile True:\r\n\r\n # we use the readline() method to read the data line by line but it is in byte mode\r\n # so we need to decode it using utf-8 so that our data can be represented as a string\r\n arduino_string_data = arduino_serial.readline().decode(\"utf-8\")\r\n\r\n # our data is now in this form using the split() method:\r\n # [degree_value, ldr_value] but these values are not numbers yet\r\n dataArray = arduino_string_data.split(\",\")\r\n\r\n # convert both degree and LDR values to floats\r\n # for them to be interpreted as numbers so that the plot can be made\r\n # indexing is used to get each individual value into its respective variable\r\n d = float(dataArray[0])\r\n l = float(dataArray[1])\r\n\r\n # print the data to see what the current values are and look at your graph to\r\n # visualize the effect. Also for potential debugging purposes.\r\n print(f\"{d}, {l}\")\r\n\r\n # add each individual value to its respective array after each while loop execution\r\n degree.append(d)\r\n LDR.append(l)\r\n\r\n # pass in our function to drawnow to plot the data live and pause some fraction of a second\r\n # to avoid potential overload of the program due to live plotting\r\n drawnow(plot_live_data)\r\n plt.pause(0.0001)\r\n\r\n # After every while loop one data point is appended for both the degree and the LDR readings,\r\n # and the count will correspond to the same number. So here we are basically saying: if there\r\n # are more points on the plot than we want to keep (500 in the code below), delete the first point of each array or list.\r\n # For example, suppose the threshold were 30 and there were now 31 points in degree.\r\n # degree = [1st point, 2nd point, ..., 31st point]\r\n # The condition below will be met and therefore the 1st point is deleted by\r\n # degree.pop(0) because index starts at 0. 
pop(indexNumber) removed the item at index indexNumber.\r\n # So now the data being visualized is from the 2nd point\r\n # to the 31st point and the process continues ...\r\n count += 1\r\n if count > 500:\r\n LDR.pop(0)\r\n degree.pop(0)\r\n","repo_name":"Pizza-zip/Python-Projects","sub_path":"live_data_plot.py","file_name":"live_data_plot.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20926372127","text":"import json\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom oauth2_provider.models import AccessToken\n\nfrom authentication.models import Shop\nfrom dashboard.models import Service, ServiceImage, Customer, Employee, Booking, BookingDetail\t\n\nfrom dashboard.serializers import ShopSerializerCustomer, ShopSerializerEmployee, ServiceSerializer, ServiceImageSerializer, BookingSerializer\n\nimport stripe\nfrom tribarbDesktop.settings import STRIPE_API_KEY\n\nstripe.api_key = STRIPE_API_KEY\n\n\n\n###### CUSTOMERS ######\ndef customer_get_shops(request):\n shops = ShopSerializerCustomer(\n Shop.objects.all().order_by(\"-id\"),\n many = True,\n context = {\"request\": request}\n ).data\n\n return JsonResponse({\"shops\": shops})\n\n\ndef customer_get_services(request, shop_id):\n services = ServiceSerializer(\n Service.objects.filter(shop_id = shop_id).order_by(\"-id\"),\n many = True,\n context = {\"request\": request}\n ).data\n\n return JsonResponse({\"services\": services})\n \n\ndef customer_get_service_album(request, service_id):\n\n album = ServiceImageSerializer(\n ServiceImage.objects.filter(service_id = service_id),\n many = True,\n context = {\"request\": request}\n ).data\n\n return JsonResponse({\"album\": album})\n\n\n\n@csrf_exempt\ndef customer_add_booking(request):\n \"\"\"\n params:\n access_token\n barber_id\n booking_type\n requested_employee\n requested_time\n address\n booking_details (json format), example:\n [{\"service_id\": 1},{\"service_id\": 2}]\n stripe_token\n\n return:\n {\"status\": \"success\"}\n \"\"\"\n if request.method == \"POST\":\n # Get token\n access_token = AccessToken.objects.get(token = request.POST.get(\"access_token\"),\n expires__gt = timezone.now())\n\n # Get profile\n customer = access_token.user.customer\n\n # Get Stripe token\n global stripe_token\n stripe_token = request.POST.get(\"stripe_token\")\n\n # Check whether customer has any booking that is not completed\n if Booking.objects.filter(customer = customer).exclude(status__in = [Booking.COMPLETED, Booking.DECLINED]):\n return JsonResponse({\"status\": \"failed\", \"error\": \"Your last booking must be completed.\"})\n\n # Check Address\t\n if request.POST[\"booking_type\"] == \"1\":\n if not request.POST[\"address\"]:\n return JsonResponse({\"status\": \"failed\", \"error\": \"Address is required.\"})\n\n\n # Get Booking Details \n booking_details = json.loads(request.POST[\"booking_details\"])\n\n booking_total = 0\n for service in booking_details:\n booking_total += Service.objects.get(id = service[\"service_id\"]).price\n\n\n if len(booking_details) > 0:\n\n booking = Booking.objects.create(\n customer = customer,\n shop_id = request.POST[\"shop_id\"],\n booking_type = request.POST[\"booking_type\"],\n payment_mode = request.POST[\"payment_mode\"],\n requests = request.POST[\"requests\"],\n requested_time = request.POST[\"requested_time\"],\n total = 
booking_total,\n status = Booking.PLACED,\n address = request.POST[\"address\"],\n )\n\n\n # Step 3 - Create Order details \n for service in booking_details:\n BookingDetail.objects.create(\n booking = booking,\n service_id = service[\"service_id\"],\n sub_total = Service.objects.get(id = service[\"service_id\"]).price\n )\n\n return JsonResponse({\"status\": \"success\"})\n\n else:\n return JsonResponse({\"status\": \"failed\", \"error\": \"Failed to connect to Stripe.\"})\n\n\ndef customer_get_latest_booking(request):\n\taccess_token = AccessToken.objects.get(token = request.GET.get(\"access_token\"),\n\t\texpires__gt = timezone.now())\n\n\tcustomer = access_token.user.customer\n\tbooking = BookingSerializer(Booking.objects.filter(customer = customer).last()).data\n\n\treturn JsonResponse({\"booking\": booking})\n\n\n\ndef customer_employee_location(request):\n\n access_token = AccessToken.objects.get(token = request.GET.get(\"access_token\"),\n expires__gt = timezone.now())\n\n customer = access_token.user.customer\n\n # Get driver's location related to this customer's current order.\n current_booking = Booking.objects.filter(customer = customer, status = Booking.ONTHEWAY).last()\n location = current_booking.employee.location\n\n return JsonResponse({\"location\": location})\n\n\n\n\n\n###### EMPLOYEES ######\ndef employee_get_shop(request, shop_id):\n shop = ShopSerializerEmployee(\n Shop.objects.filter(id = shop_id),\n many = True,\n context = {\"request\": request}\n ).data\n \n return JsonResponse({\"shop\": shop})\n\n\ndef employee_get_placed_bookings(request, shop_id):\n bookings = BookingSerializer(\n Booking.objects.filter(shop_id = shop_id, status = Booking.PLACED, employee = None).order_by(\"-id\"),\n many = True\n ).data\n\n return JsonResponse({\"bookings\": bookings})\n\n\n\n\n@csrf_exempt\ndef employee_accept_booking(request):\n if request.method == \"POST\":\n access_token = AccessToken.objects.get(token = request.POST.get(\"access_token\"),\n\t\texpires__gt = timezone.now())\n\n employee = access_token.user.employee\n \n try:\n booking = Booking.objects.get(\n id = request.POST[\"booking_id\"],\n employee = None,\n status = Booking.PLACED\n )\n booking.employee = employee\n booking.status = Booking.ACCEPTED\n booking.accepted_at = timezone.now()\n booking.save()\n\n return JsonResponse({\"status\": \"success\"})\n\n except Booking.DoesNotExist:\n return JsonResponse({\"status\": \"failed\", \"error\": \"This booking has already been accepted.\"})\n \n\n\n@csrf_exempt\ndef employee_decline_booking(request):\n if request.method == \"POST\":\n access_token = AccessToken.objects.get(token = request.POST.get(\"access_token\"),\n\t\texpires__gt = timezone.now())\n\n employee = access_token.user.employee \n\n try:\n booking = Booking.objects.get(\n id = request.POST[\"booking_id\"],\n )\n booking.employee = employee\n booking.status = Booking.DECLINED\n booking.save()\n\n return JsonResponse({\"status\": \"success\"})\n\n except Booking.DoesNotExist:\n return JsonResponse({\"status\": \"failed\", \"error\": \"This booking has already been declined.\"})\n\n\n\n\n@csrf_exempt\ndef employee_enroute(request):\n if request.method == \"POST\":\n access_token = AccessToken.objects.get(token = request.POST.get(\"access_token\"),\n\t\texpires__gt = timezone.now())\n\n employee = access_token.user.employee\n\n try:\n booking = Booking.objects.get(\n id = request.POST[\"booking_id\"],\n status = Booking.ACCEPTED,\n booking_type = 1,\n )\n\n booking.status = Booking.ONTHEWAY\n 
booking.save()\n\n return JsonResponse({\"status\": \"success\"})\n\n except Booking.DoesNotExist:\n return JsonResponse({\"status\": \"failed\", \"error\": \"This booking has been accepted by another barber.\"})\n \n\n\n\n@csrf_exempt\ndef employee_complete_booking(request):\n access_token = AccessToken.objects.get(token = request.POST.get(\"access_token\"),\n expires__gt = timezone.now())\n\n employee = access_token.user.employee\n\n booking = Booking.objects.get(id = request.POST[\"booking_id\"], employee = employee)\n booking_total = booking.total\n print(\"TOTAL\")\n print(booking_total)\n\n if booking.payment_mode == 1:\n charge = stripe.Charge.create(\n amount = int(booking_total * 100),\n currency = \"gbp\",\n source = stripe_token,\n description = \"Tribarb Booking\"\n )\n\n if charge.status != \"failed\":\n booking.status = Booking.COMPLETED \n booking.save()\n return JsonResponse({\"status\": \"success\"})\n else:\n return JsonResponse({\"status\": \"failed\", \"error\": \"Failed to connect to Stripe.\"})\n else:\n booking.status = Booking.COMPLETED \n booking.save()\n return JsonResponse({\"status\": \"success\"})\n\n \n\n\n\ndef employee_get_booking_info(request, booking_id):\n booking = BookingSerializer(\n Booking.objects.filter(id = booking_id),\n many = True,\n context = {\"request\": request}\n ).data\n\n return JsonResponse({\"booking\": booking})\n\n\n\n\ndef employee_get_revenue(request):\n access_token = AccessToken.objects.get(token = request.GET.get(\"access_token\"),\n expires__gt = timezone.now())\n\n employee = access_token.user.employee\n\n from datetime import timedelta\n\n revenue = {}\n today = timezone.now()\n current_weekdays = [today + timedelta(days = i) for i in range(0 - today.weekday(), 7 - today.weekday())]\n\n for day in current_weekdays:\n bookings = Booking.objects.filter(\n employee = employee,\n status = Booking.COMPLETED,\n created_at__year = day.year,\n created_at__month = day.month,\n created_at__day = day.day\n )\n\n revenue[day.strftime(\"%a\")] = sum(booking.total for booking in bookings)\n\n return JsonResponse({\"revenue\": revenue})\n\n\n\n@csrf_exempt\ndef employee_update_location(request):\n if request.method == \"POST\":\n access_token = AccessToken.objects.get(token = request.POST.get(\"access_token\"),\n expires__gt = timezone.now())\n\n employee = access_token.user.employee\n\n # Set location string => database\n employee.location = request.POST[\"location\"]\n employee.save()\n\n return JsonResponse({\"status\": \"success\"})\n\n\n\n\n###### NOTIFICATION ######\ndef shop_booking_notification(request, last_request_time):\n #notification = Booking.objects.filter(shop = request.user.shop,\n # created_at__gt = last_request_time).count()\n \n notification = Booking.objects.filter(shop = request.user.shop, status=1).count()\n\n\n return JsonResponse({\"notification\": notification})","repo_name":"anith-manu/tribarb","sub_path":"dashboard/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":10558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1801577724","text":"import numpy as np; import matplotlib.pyplot as plt\n\ndef get_X(textfile):\n return np.array([[int(n) for n in line] for line in open(textfile, \"r\").read().splitlines()], dtype=np.uint8)\n\n\ndef get_low(X): # Get low points\n L, R = np.pad(X[:, :-1], [(0, 0), (1, 0)], constant_values=10), np.pad(X[:, 1:], [(0, 0), (0, 1)], constant_values=10)\n T, B = np.pad(X[:-1, :], [(1, 0), (0, 0)], 
constant_values=10), np.pad(X[1:, :], [(0, 1), (0, 0)], constant_values=10)\n return (X < L) * (X < R) * (X < T) * (X < B)\n\n\ndef padding(textfile): # Part 1 solution\n X = get_X(textfile=textfile)\n P = get_low(X)\n plt.imshow(P) # Just for fun\n plt.savefig(\"low_points.png\", dpi=300)\n return np.sum(X[P] + 1)\n\n\ndef slicing(textfile): # Part 2 solution\n X = get_X(textfile)\n P = get_low(X)\n n_low = np.sum(P)\n\n slices = {\"L\": (slice(None, None), slice(None, -1)), \"R\": (slice(None, None), slice(1, None)), \n \"T\": (slice(None, -1), slice(None, None)), \"B\": (slice(1, None), slice(None, None))}\n dirs = [(\"L\", \"R\"), (\"R\", \"L\"), (\"T\", \"B\"), (\"B\", \"T\")]\n\n M = np.zeros(shape=X.shape, dtype=np.int16)\n M[P] = np.arange(1, n_low + 1)\n M[X == 9] = -1\n\n def transfer(receive_dir, give_dir):\n receive, give = M[slices[receive_dir]], M[slices[give_dir]]\n available, marked = (receive == 0), (give > 0)\n idx = available * marked\n receive[idx] = give[idx]\n\n while (M == 0).any():\n for dir in dirs:\n transfer(*dir)\n \n plt.imshow(M) # Just for fun \n plt.savefig(\"numbered_basins.png\", dpi=300)\n\n sizes = np.array([np.sum(M == j) for j in range(1, n_low + 1)], dtype=np.uint16)\n return np.product(sizes[np.argpartition(sizes, -3)[-3:]])\n\n\nprint(\"Part 1:\", padding(textfile=\"input.txt\"))\nprint(\"Part 2:\", slicing(textfile=\"input.txt\"))\n","repo_name":"jessebmurray/Advent-21","sub_path":"day09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73817103201","text":"def validBraces(string):\n\tcounter_par = 0\n\tcounter_brackets = 0\n\tcounter_braces = 0\n\tfor letter in string:\n\t\tif letter == \"(\":\n\t\t\tcounter_par +=1\n\t\tif letter == \")\":\n\t\t\tif counter_par == 0:\n\t\t\t\treturn False\n\t\t\tcounter_par -=1\n\t\tif letter == \"[\":\n\t\t\tcounter_brackets +=1\n\t\tif letter == \"]\":\n\t\t\tif counter_brackets == 0:\n\t\t\t\treturn False\n\t\t\tcounter_brackets -=1\n\t\tif letter == \"{\":\n\t\t\tcounter_braces +=1\n\t\tif letter == \"}\":\n\t\t\tif counter_braces == 0:\n\t\t\t\treturn False\n\t\t\tcounter_braces -=1\n\tif counter_par ==0 and counter_brackets == 0 and counter_braces ==0:\n\t\treturn True\n\telse:\n\t\treturn False","repo_name":"binnie869/CodeWars","sub_path":"kyu4/ValidBraces.py","file_name":"ValidBraces.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19370890456","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport colorsys\n\n\ndef grid_plot(images, shape=None, cmap=\"gray\"):\n \"\"\"Plot the given images in a grid.\n\n Parameters\n ----------\n images : array_like\n List of images to show in the grid. Images should have\n the correct shape, unless the `shape` parameter is\n explicitly set.\n shape : tuple\n Shape of each individual image. 
If ``None`` the original\n shape is kept, otherwise all images are reshaped to the\n given shape.\n cmap : colormap\n Colormap to use.\n \n Returns\n -------\n R, C : int\n Number of rows and columns in the grid.\n\n \"\"\"\n N = len(images)\n C = int(np.ceil(np.sqrt(N)))\n R = int(np.ceil(N/C))\n\n plt.figure(figsize=(5, 5*R/C))\n plt.subplots_adjust(wspace=0.05, hspace=0.05)\n for i, img in enumerate(images):\n if shape is not None:\n img = np.reshape(img, shape)\n plt.subplot(R, C, i+1)\n plt.imshow(img, cmap=cmap)\n plt.axis('off')\n\n return R, C\n\n\ndef _adjust_lightness(rgba, lightness):\n r, g, b, a = rgba\n h, l, s = colorsys.rgb_to_hls(r, g, b)\n return (*colorsys.hls_to_rgb(h, lightness, s), a)\n\n\ndef confusion_plot(y_true, y_pred):\n \"\"\"Create and show the confusion plot for the given predictions and ground truths.\n\n Parameters\n ----------\n y_true : array_like\n The ground-truth class labels. Both the 'class index' (shape Dx1) and 'one-hot'\n encodings (shape DxC) are supported, where D is the size of the dataset and C\n is the number of classes.\n y_pred : array_like\n The predicted class labels by a trained model. Both the 'class index' (shape\n Dx1) and 'one-hot'/'probability' encodings (shape DxC) are supported, where D\n is the size of the dataset and C is the number of classes.\n\n \"\"\"\n # Convert one-hot encoding to class labels\n if len(y_true.shape) == 2:\n y_true = tf.argmax(y_true, axis=1)\n if len(y_pred.shape) == 2:\n y_pred = tf.argmax(y_pred, axis=1)\n # Calculate confusion matrix\n cm = tf.math.confusion_matrix(y_true, y_pred)\n N = tf.reduce_sum(cm)\n C = cm.shape[0]\n # Calculate marginal and overall accuracy\n d = np.diag(cm)\n mp = d / np.sum(cm, axis=0)\n mt = d / np.sum(cm, axis=1)\n o = np.sum(d) / N\n\n # Create plot (based on https://matplotlib.org/stable/gallery/images_contours_and_fields/image_annotated_heatmap.html)\n f, ax = plt.subplots(figsize=(C, C))\n # Plot the heatmap\n cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\n \"red_yellow_green\", # Custom color map between red, yellow and green\n [(0.98, 0.77, 0.75), (0.98, 0.96, 0.71), (0.74, 0.90, 0.77)]\n )\n im = ax.imshow(cm, cmap=cmap)\n # Show all ticks and label them with the respective list entries.\n ax.set_xticks(np.arange(C+1))\n ax.set_xticklabels(np.arange(C))\n ax.set_yticks(np.arange(C+1))\n ax.set_yticklabels(np.arange(C))\n ax.minorticks_off()\n ax.set_xlabel(\"Predicted class\", fontsize=18)\n ax.set_ylabel(\"True class\", fontsize=18)\n ax.set_title(\"Confusion matrix\", fontsize=24)\n # Turn spines off and create white grid.\n for spine in ax.spines.values():\n spine.set_visible(False)\n ax.set_xticks(np.arange(C+1)-0.5, minor=True)\n ax.set_yticks(np.arange(C+1)-0.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"both\", bottom=False, left=False)\n # Create black lines to separate marginals\n ax.plot([-0.5, C+0.5], [C-0.5, C-0.5], color=\"k\", linewidth=2)\n ax.plot([C-0.5, C-0.5], [-0.5, C+0.5], color=\"k\", linewidth=2)\n # Loop over the data and create a `Text` for each \"pixel\".\n for i in range(C):\n for j in range(C):\n val = f\"$\\mathbf{{{cm[i, j]}}}$\\n${100 * cm[i, j] / N:.1f}\\%$\"\n im.axes.text(j, i, val, ha=\"center\", va=\"center\")\n # Create a `Text` for every marginal accuracy\n for i in range(C):\n val = f\"${100 * mp[i]:.1f}\\%$\"\n ax.text(i, C, val, color=_adjust_lightness(cmap(mp[i]), 0.3), ha=\"center\", va=\"center\")\n val = f\"${100 * mt[i]:.1f}\\%$\"\n ax.text(C, i, 
val, color=_adjust_lightness(cmap(mt[i]), 0.3), ha=\"center\", va=\"center\")\n # Create a `Text` for the overall accuracy\n val = f\"$\\mathbf{{{100 * o:.1f}}}\\%$\"\n ax.text(C, C, val, color=_adjust_lightness(cmap(o), 0.3), ha=\"center\", va=\"center\")\n\n # Show the plot\n plt.show()\n","repo_name":"KULasagna/ANN_DL_public","sub_path":"session3/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22662772638","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, unicode_literals, absolute_import, division\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nfrom domain_admin.service.file_service import resolve_log_file\nfrom domain_admin.config import APP_MODE\n\nlogger = logging.getLogger('domain-admin')\n\n# 单个日志文件最大为1M\nhandler = RotatingFileHandler(\n filename=resolve_log_file(\"domain-admin.log\"),\n maxBytes=1024 * 1024 * 1,\n encoding='utf-8'\n)\n\n# 设置日志格式\nformatter = logging.Formatter(\n fmt='%(asctime)s [%(levelname)s] %(filename)s/%(funcName)s:\\n%(message)s\\n',\n datefmt='%Y-%m-%d %H:%M:%S')\nhandler.setFormatter(formatter)\n\n# logger.addHandler(logging.FileHandler(resolve_log_file(\"domain-admin.log\")))\nlogger.addHandler(handler)\nlogger.setLevel(logging.ERROR)\n\n# development\nif APP_MODE == 'development':\n logger.setLevel(logging.DEBUG)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n","repo_name":"mouday/domain-admin","sub_path":"domain_admin/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":1065,"dataset":"github-code","pt":"54"} +{"seq_id":"10726484055","text":"import argparse\n\nfrom bench.metrics import efficiencies, speedups\n\n\noptions = {\n \"--all\": {\n \"action\": \"store_true\",\n \"help\": \"consider all benchmarks in configuration file\",\n \"required\": False,\n },\n \"--speedup\": {\n \"action\": \"store_const\",\n \"const\": speedups,\n \"help\": \"calculate parallel speedups\",\n \"required\": False,\n\n },\n \"--efficiency\": {\n \"action\": \"store_const\",\n \"const\": efficiencies,\n \"help\": \"calculate parallel efficiencies\",\n \"required\": False,\n }\n}\n\n\ndef add_argument(parser, option):\n parser.add_argument(option, **options[option])\n","repo_name":"aprell/benchmark","sub_path":"bench/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3847941484","text":"import json\nfrom collections import Counter\n\nimport numpy as np\n\nfrom osgeo import osr\nfrom osgeo import gdal\nfrom osgeo.gdalconst import (\n GDT_Float32\n)\n\nspatial_vars = ('Ksat', 'Cly', 'Snd', 'OM', 'BlkDns', 'DpthB',\n 'Hydrc', 'Drng', 'WTDp', 'FldFrq', 'WS25', 'WS150',\n 'HydGr', 'DrngCl', 'SolThk')\n\ndef classifier_factory(lookup):\n return lambda x: (lookup.get(x, -9999), -9999)[x == -9999]\n\n\nclass SurgoSpatializer(object):\n def __init__(self, ssurgo_c, ssurgo_map):\n from wepppy.soils.ssurgo import SurgoMap, SurgoSoilCollection\n\n assert isinstance(ssurgo_c, SurgoSoilCollection)\n assert isinstance(ssurgo_map, SurgoMap)\n\n self.ssurgo_c = ssurgo_c\n self.ssurgo_map = ssurgo_map\n\n def getFirstHorizonVar(self, mukey, var):\n ssurgo_c = self.ssurgo_c\n\n if mukey in ssurgo_c.weppSoils:\n return 
getattr(ssurgo_c.weppSoils[mukey].getFirstHorizon(), var, -9999)\n return -9999\n\n def getHorizonsVar(self, mukey, var, aggregator=np.sum):\n ssurgo_c = self.ssurgo_c\n\n if mukey not in ssurgo_c.weppSoils:\n return -9999\n\n horizons = ssurgo_c.weppSoils[mukey].horizons\n\n if horizons is None:\n return -9999\n\n x = []\n for h in ssurgo_c.weppSoils[mukey].horizons:\n if h is None:\n continue\n\n v = getattr(h, var, None)\n\n if v is None:\n continue\n\n x.append(v)\n\n if len(x) > 0:\n return aggregator(x)\n\n return -9999\n\n def getMajorComponentVar(self, mukey, var, classifier=None):\n ssurgo_c = self.ssurgo_c\n\n if mukey in ssurgo_c.weppSoils:\n x = getattr(ssurgo_c.weppSoils[mukey].majorComponent, var, -9999)\n if classifier is None:\n return x\n\n return classifier(x)\n return -9999\n\n def spatialize_var(self, var, dst_fname, drivername='GTiff', nodata_value=-9999):\n \"\"\"\n Creates a raster of the variable specified by var\n \"\"\"\n drainage_classifier = classifier_factory(\n {\"Very poorly drained\": 0,\n \"Poorly drained\": 1,\n \"Somewhat poorly drained\": 2,\n \"Well drained\": 3,\n \"Moderately well drained\": 4,\n \"Somewhat excessively drained\": 5,\n \"Excessively drained\": 6})\n\n _spatial_vars = dict([\n ('Ksat', lambda mukey: self.getFirstHorizonVar(mukey, 'ksat_r')),\n ('Cly', lambda mukey: self.getFirstHorizonVar(mukey, 'claytotal_r')),\n ('Snd', lambda mukey: self.getFirstHorizonVar(mukey, 'sandtotal_r')),\n ('OM', lambda mukey: self.getFirstHorizonVar(mukey, 'om_r')),\n ('BlkDns', lambda mukey: self.getFirstHorizonVar(mukey, 'dbthirdbar_r')),\n ('DpthB', lambda mukey: self.getFirstHorizonVar(mukey, 'hzdepb_r')),\n ('Hydrc', lambda mukey: self.getMajorComponentVar(mukey, 'hydricrating',\n classifier_factory({'No': 0, 'Yes': 1}))),\n ('Drng', lambda mukey: self.getMajorComponentVar(mukey, 'drainagecl',\n drainage_classifier)),\n ('WTDp', lambda mukey: self.getMajorComponentVar(mukey, 'wtdepannmin')),\n ('FldFrq', lambda mukey: self.getMajorComponentVar(mukey, 'flodfreqdcd',\n classifier_factory(\n {'None': 0, 'Rare': 1, 'Occasional': 2, 'Frequent': 3}))),\n ('WS25', lambda mukey: self.getMajorComponentVar(mukey, 'aws025wta')),\n ('WS150', lambda mukey: self.getMajorComponentVar(mukey, 'aws0150wta')),\n ('HydGr', lambda mukey: self.getMajorComponentVar(mukey, 'hydgrpdcd',\n classifier_factory(\n {\"A\": 0, \"B\": 1, \"A/D\": 2, \"C\": 3, \"C/D\": 4, \"B/D\": 5, \"D\": 6}))),\n ('DrngCl', lambda mukey: self.getMajorComponentVar(mukey, 'drclassdcd',\n drainage_classifier)),\n ('SolThk', lambda mukey: self.getHorizonsVar(mukey, 'hzdepb_r', np.sum))\n ])\n\n assert var in _spatial_vars\n\n func = _spatial_vars[var]\n\n ssurgo_map = self.ssurgo_map\n\n data, mukeys = ssurgo_map.data, ssurgo_map.mukeys\n num_cols, num_rows = data.shape\n proj, transform = ssurgo_map.proj, ssurgo_map.transform\n\n # create empty array to hold data\n var_r = np.ones(data.shape) * nodata_value\n\n # iterate over mukeys and fill data\n meta = Counter()\n for mukey in mukeys:\n indx = np.where(data == mukey)\n\n assert len(indx[0]) > 0\n\n value = func(mukey)\n var_r[indx] = value\n\n meta[str(value)] += len(indx[0])\n\n with open(dst_fname + '.meta', 'w') as fid:\n fid.write(json.dumps(meta, sort_keys=True,\n indent=4, separators=(',', ': '),\n allow_nan=False))\n\n # create raster\n driver = gdal.GetDriverByName(drivername)\n dst = driver.Create(dst_fname, num_cols, num_rows, 1, GDT_Float32)\n\n srs = osr.SpatialReference()\n srs.ImportFromProj4(proj)\n wkt = srs.ExportToWkt()\n\n 
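# Write the projection, geotransform, and pixel data into the new raster, then flag -9999 as nodata.\n        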
dst.SetProjection(wkt)\n dst.SetGeoTransform(transform)\n band = dst.GetRasterBand(1)\n band.WriteArray(var_r.T)\n band.SetNoDataValue(-9999)\n\n del dst # Writes and closes file\n\n\nif __name__ == \"__main__\":\n from wepppy.soils.ssurgo import SurgoMap, SurgoSoilCollection\n\n _map = ('tests/test_maps/ssurgo.tif')\n var = 'Cly'\n ssurgo_map = SurgoMap(_map)\n\n ssurgo_c = SurgoSoilCollection(ssurgo_map.mukeys)\n ssurgo_c.makeWeppSoils(horizon_defaults=None)\n\n spatializer = SurgoSpatializer(ssurgo_c, ssurgo_map)\n\n spatializer.spatialize_var(var, 'tests/test_maps/%s.tif' % var)\n","repo_name":"rogerlew/wepppy","sub_path":"wepppy/soils/ssurgo/spatializer.py","file_name":"spatializer.py","file_ext":"py","file_size_in_byte":6058,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"11478578340","text":"from argparse import ArgumentParser\nimport os\nimport re\nimport sys\n\nfrom . import common\nfrom . import metadata\nfrom . import rewritemeta\n\nconfig = None\noptions = None\n\n\ndef enforce_https(domain):\n return (re.compile(r'.*[^sS]://[^/]*' + re.escape(domain) + r'(/.*)?'),\n domain + \" URLs should always use https://\")\n\nhttps_enforcings = [\n enforce_https('github.com'),\n enforce_https('gitlab.com'),\n enforce_https('bitbucket.org'),\n enforce_https('apache.org'),\n enforce_https('google.com'),\n enforce_https('svn.code.sf.net'),\n]\n\n\ndef forbid_shortener(domain):\n return (re.compile(r'https?://[^/]*' + re.escape(domain) + r'/.*'),\n \"URL shorteners should not be used\")\n\nhttp_url_shorteners = [\n forbid_shortener('goo.gl'),\n forbid_shortener('t.co'),\n forbid_shortener('ur1.ca'),\n]\n\nhttp_checks = https_enforcings + http_url_shorteners + [\n (re.compile(r'.*github\\.com/[^/]+/[^/]+\\.git'),\n \"Appending .git is not necessary\"),\n (re.compile(r'.*://[^/]*(github|gitlab|bitbucket|rawgit)[^/]*/([^/]+/){1,3}master'),\n \"Use /HEAD instead of /master to point at a file in the default branch\"),\n]\n\nregex_checks = {\n 'Web Site': http_checks,\n 'Source Code': http_checks,\n 'Repo': https_enforcings,\n 'Issue Tracker': http_checks + [\n (re.compile(r'.*github\\.com/[^/]+/[^/]+/*$'),\n \"/issues is missing\"),\n (re.compile(r'.*gitlab\\.com/[^/]+/[^/]+/*$'),\n \"/issues is missing\"),\n ],\n 'Donate': http_checks + [\n (re.compile(r'.*flattr\\.com'),\n \"Flattr donation methods belong in the FlattrID flag\"),\n ],\n 'Changelog': http_checks,\n 'Author Name': [\n (re.compile(r'^\\s'),\n \"Unnecessary leading space\"),\n (re.compile(r'.*\\s$'),\n \"Unnecessary trailing space\"),\n ],\n 'License': [\n (re.compile(r'^(|None|Unknown)$'),\n \"No license specified\"),\n ],\n 'Summary': [\n (re.compile(r'^$'),\n \"Summary yet to be filled\"),\n (re.compile(r'.*\\b(free software|open source)\\b.*', re.IGNORECASE),\n \"No need to specify that the app is Free Software\"),\n (re.compile(r'.*((your|for).*android|android.*(app|device|client|port|version))', re.IGNORECASE),\n \"No need to specify that the app is for Android\"),\n (re.compile(r'.*[a-z0-9][.!?]( |$)'),\n \"Punctuation should be avoided\"),\n (re.compile(r'^\\s'),\n \"Unnecessary leading space\"),\n (re.compile(r'.*\\s$'),\n \"Unnecessary trailing space\"),\n ],\n 'Description': [\n (re.compile(r'^No description available$'),\n \"Description yet to be filled\"),\n (re.compile(r'\\s*[*#][^ .]'),\n \"Invalid bulleted list\"),\n (re.compile(r'^\\s'),\n \"Unnecessary leading space\"),\n (re.compile(r'.*\\s$'),\n \"Unnecessary trailing space\"),\n 
(re.compile(r'.*([^[]|^)\\[[^:[\\]]+( |\\]|$)'),\n \"Invalid link - use [http://foo.bar Link title] or [http://foo.bar]\"),\n (re.compile(r'(^|.* )https?://[^ ]+'),\n \"Unlinkified link - use [http://foo.bar Link title] or [http://foo.bar]\"),\n ],\n}\n\n\ndef check_regexes(app):\n for f, checks in regex_checks.items():\n for m, r in checks:\n v = app.get_field(f)\n t = metadata.fieldtype(f)\n if t == metadata.TYPE_MULTILINE:\n for l in v.splitlines():\n if m.match(l):\n yield \"%s at line '%s': %s\" % (f, l, r)\n else:\n if v is None:\n continue\n if m.match(v):\n yield \"%s '%s': %s\" % (f, v, r)\n\n\ndef get_lastbuild(builds):\n lowest_vercode = -1\n lastbuild = None\n for build in builds:\n if not build.disable:\n vercode = int(build.vercode)\n if lowest_vercode == -1 or vercode < lowest_vercode:\n lowest_vercode = vercode\n if not lastbuild or int(build.vercode) > int(lastbuild.vercode):\n lastbuild = build\n return lastbuild\n\n\ndef check_ucm_tags(app):\n lastbuild = get_lastbuild(app.builds)\n if (lastbuild is not None\n and lastbuild.commit\n and app.UpdateCheckMode == 'RepoManifest'\n and not lastbuild.commit.startswith('unknown')\n and lastbuild.vercode == app.CurrentVersionCode\n and not lastbuild.forcevercode\n and any(s in lastbuild.commit for s in '.,_-/')):\n yield \"Last used commit '%s' looks like a tag, but Update Check Mode is '%s'\" % (\n lastbuild.commit, app.UpdateCheckMode)\n\n\ndef check_char_limits(app):\n limits = config['char_limits']\n\n if len(app.Summary) > limits['Summary']:\n yield \"Summary of length %s is over the %i char limit\" % (\n len(app.Summary), limits['Summary'])\n\n if len(app.Description) > limits['Description']:\n yield \"Description of length %s is over the %i char limit\" % (\n len(app.Description), limits['Description'])\n\n\ndef check_old_links(app):\n usual_sites = [\n 'github.com',\n 'gitlab.com',\n 'bitbucket.org',\n ]\n old_sites = [\n 'gitorious.org',\n 'code.google.com',\n ]\n if any(s in app.Repo for s in usual_sites):\n for f in ['Web Site', 'Source Code', 'Issue Tracker', 'Changelog']:\n v = app.get_field(f)\n if any(s in v for s in old_sites):\n yield \"App is in '%s' but has a link to '%s'\" % (app.Repo, v)\n\n\ndef check_useless_fields(app):\n if app.UpdateCheckName == app.id:\n yield \"Update Check Name is set to the known app id - it can be removed\"\n\nfilling_ucms = re.compile(r'^(Tags.*|RepoManifest.*)')\n\n\ndef check_checkupdates_ran(app):\n if filling_ucms.match(app.UpdateCheckMode):\n if not app.AutoName and not app.CurrentVersion and app.CurrentVersionCode == '0':\n yield \"UCM is set but it looks like checkupdates hasn't been run yet\"\n\n\ndef check_empty_fields(app):\n if not app.Categories:\n yield \"Categories are not set\"\n\nall_categories = set([\n \"Connectivity\",\n \"Development\",\n \"Games\",\n \"Graphics\",\n \"Internet\",\n \"Money\",\n \"Multimedia\",\n \"Navigation\",\n \"Phone & SMS\",\n \"Reading\",\n \"Science & Education\",\n \"Security\",\n \"Sports & Health\",\n \"System\",\n \"Theming\",\n \"Time\",\n \"Writing\",\n])\n\n\ndef check_categories(app):\n for categ in app.Categories:\n if categ not in all_categories:\n yield \"Category '%s' is not valid\" % categ\n\n\ndef check_duplicates(app):\n if app.Name and app.Name == app.AutoName:\n yield \"Name '%s' is just the auto name - remove it\" % app.Name\n\n links_seen = set()\n for f in ['Source Code', 'Web Site', 'Issue Tracker', 'Changelog']:\n v = app.get_field(f)\n if not v:\n continue\n v = v.lower()\n if v in links_seen:\n yield \"Duplicate 
link in '%s': %s\" % (f, v)\n        else:\n            links_seen.add(v)\n\n    name = app.Name or app.AutoName\n    if app.Summary and name:\n        if app.Summary.lower() == name.lower():\n            yield \"Summary '%s' is just the app's name\" % app.Summary\n\n    if app.Summary and app.Description and len(app.Description) == 1:\n        if app.Summary.lower() == app.Description[0].lower():\n            yield \"Description '%s' is just the app's summary\" % app.Summary\n\n    seenlines = set()\n    for l in app.Description.splitlines():\n        if len(l) < 1:\n            continue\n        if l in seenlines:\n            yield \"Description has a duplicate line\"\n        seenlines.add(l)\n\n\ndesc_url = re.compile(r'(^|[^[])\\[([^ ]+)( |\\]|$)')\n\n\ndef check_mediawiki_links(app):\n    wholedesc = ' '.join(app.Description)\n    for um in desc_url.finditer(wholedesc):\n        url = um.group(2)  # group 2 is the URL; group 1 is only the preceding character\n        for m, r in http_checks:\n            if m.match(url):\n                yield \"URL '%s' in Description: %s\" % (url, r)\n\n\ndef check_bulleted_lists(app):\n    validchars = ['*', '#']\n    lchar = ''\n    lcount = 0\n    for l in app.Description.splitlines():\n        if len(l) < 1:\n            lcount = 0\n            continue\n\n        if len(l) > 1 and l[0] == lchar and l[1] == ' ':\n            lcount += 1\n            if lcount > 2 and lchar not in validchars:\n                yield \"Description has a list (%s) but it isn't bulleted (*) nor numbered (#)\" % lchar\n                break\n        else:\n            lchar = l[0]\n            lcount = 1\n\n\ndef check_builds(app):\n    for build in app.builds:\n        if build.disable:\n            if build.disable.startswith('Generated by import.py'):\n                yield \"Build generated by `fdroid import` - remove disable line once ready\"\n            continue\n        for s in ['master', 'origin', 'HEAD', 'default', 'trunk']:\n            if build.commit and build.commit.startswith(s):\n                yield \"Branch '%s' used as commit in build '%s'\" % (s, build.version)\n            for srclib in build.srclibs:\n                ref = srclib.split('@')[1].split('/')[0]\n                if ref.startswith(s):\n                    yield \"Branch '%s' used as commit in srclib '%s'\" % (s, srclib)\n\n\ndef check_files_dir(app):\n    dir_path = os.path.join('metadata', app.id)\n    if not os.path.isdir(dir_path):\n        return\n    files = set()\n    for name in os.listdir(dir_path):\n        path = os.path.join(dir_path, name)\n        if not os.path.isfile(path):\n            yield \"Found non-file at %s\" % path\n            continue\n        files.add(name)\n\n    used = set()\n    for build in app.builds:\n        for fname in build.patch:\n            if fname not in files:\n                yield \"Unknown file %s in build '%s'\" % (fname, build.version)\n            else:\n                used.add(fname)\n\n    for name in files.difference(used):\n        yield \"Unused file at %s\" % os.path.join(dir_path, name)\n\n\ndef check_format(app):\n    if options.format and not rewritemeta.proper_format(app):\n        yield \"Run rewritemeta to fix formatting\"\n\n\ndef check_extlib_dir(apps):\n    dir_path = os.path.join('build', 'extlib')\n    files = set()\n    for root, dirs, names in os.walk(dir_path):\n        for name in names:\n            files.add(os.path.join(root, name)[len(dir_path) + 1:])\n\n    used = set()\n    for app in apps:\n        for build in app.builds:\n            for path in build.extlibs:\n                if path not in files:\n                    yield \"%s: Unknown extlib %s in build '%s'\" % (app.id, path, build.version)\n                else:\n                    used.add(path)\n\n    for path in files.difference(used):\n        if any(path.endswith(s) for s in [\n                '.gitignore',\n                'source.txt', 'origin.txt', 'md5.txt',\n                'LICENSE', 'LICENSE.txt',\n                'COPYING', 'COPYING.txt',\n                'NOTICE', 'NOTICE.txt',\n                ]):\n            continue\n        yield \"Unused extlib at %s\" % os.path.join(dir_path, path)\n\n\ndef main():\n\n    global config, options\n\n    # Parse command line...\n    parser = ArgumentParser(usage=\"%(prog)s [options] [APPID [APPID ...]]\")\n    common.setup_global_opts(parser)\n    parser.add_argument(\"-f\", 
\"--format\", action=\"store_true\", default=False,\n                        help=\"Also warn about formatting issues, like rewritemeta -l\")\n    parser.add_argument(\"appid\", nargs='*', help=\"app-id in the form APPID\")\n    metadata.add_metadata_arguments(parser)\n    options = parser.parse_args()\n    metadata.warnings_action = options.W\n\n    config = common.read_config(options)\n\n    # Get all apps...\n    allapps = metadata.read_metadata(xref=True)\n    apps = common.read_app_args(options.appid, allapps, False)\n\n    anywarns = False\n\n    apps_check_funcs = []\n    if len(options.appid) == 0:\n        # otherwise it finds tons of unused extlibs\n        apps_check_funcs.append(check_extlib_dir)\n    for check_func in apps_check_funcs:\n        for warn in check_func(apps.values()):\n            anywarns = True\n            print(warn)\n\n    for appid, app in apps.items():\n        if app.Disabled:\n            continue\n\n        app_check_funcs = [\n            check_regexes,\n            check_ucm_tags,\n            check_char_limits,\n            check_old_links,\n            check_checkupdates_ran,\n            check_useless_fields,\n            check_empty_fields,\n            check_categories,\n            check_duplicates,\n            check_mediawiki_links,\n            check_bulleted_lists,\n            check_builds,\n            check_files_dir,\n            check_format,\n        ]\n\n        for check_func in app_check_funcs:\n            for warn in check_func(app):\n                anywarns = True\n                print(\"%s: %s\" % (appid, warn))\n\n    if anywarns:\n        sys.exit(1)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"touchao123/fdroidserver","sub_path":"fdroidserver/lint.py","file_name":"lint.py","file_ext":"py","file_size_in_byte":12747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15156179559","text":"__author__ = 'Дмитрий Назаркин'\n\n# Write your own utils module and move the currency_rates() function from the previous task into it. Create a script\n# that imports this module and makes several calls to currency_rates().\n# Make sure nothing unnecessary happens.\n# *(instead of 4) Extend the script from the previous task: it should now also work from the console. For example:\n# > python task_4_5.py USD\n# 75.18, 2020-09-05\n\n\nimport utils, sys\n\nif not sys.argv[1:]:\n    print('Specify the currency as an extra argument (e.g. 
Usd, Eur)')\nelse:\n    currency = sys.argv[1:]\n    data = utils.currency_rates(currency[0])\n\n    if data is None:\n        print('No data found')\n    else:\n        print(f'The {currency[0]} rate on {data[0]} is {data[1]}')","repo_name":"Shadow48lip/GB_Python","sub_path":"1_Python_basics/lesson_4/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7870355042","text":"from django.contrib import admin\nfrom django.urls import path\nfrom Rating import views\n\nurlpatterns = [\n    path(\"\",views.index , name = 'Form'),\n    path(\"submitData\",views.submitData, name = 'submitData'),\n    path(\"add_movie\",views.add_movie,name='add_movie'),\n    path(\"AddMovieName\",views.AddMovieName,name='AddMovieName'),\n    path(\"recommend_movie\",views.recommend_movie,name='recommend_movie'),\n    path(\"recommend_movie_display\",views.recommend_movie_display,name='recommend_movie_display'),\n    path(\"redirect_to_rate\",views.redirect_to_rate,name='redirect_to_rate'),\n    path(\"add_user\",views.add_user,name='add_user'),\n    path(\"AddUserName\",views.AddUserName,name='AddUserName'),\n    path('find_movie',views.find_movie,name='find_movie')\n]","repo_name":"CodeWithAaru/MovieRecommender","sub_path":"Movie Recommender/Rating/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12843441298","text":"import pandas as pd\nfrom constants import FEATURES, TITLE, MAKE_NORMALIZED, MODELNAME_NORMALIZED, MODELNO_NORMALIZED\nfrom pipeline import model\nfrom preprocessor import preprocess\n\ndef extract_tags(title:str) -> pd.DataFrame:\n    df = pd.DataFrame([{'title':title, 'doc':None, 'span': None, MODELNAME_NORMALIZED:None, MAKE_NORMALIZED: None}])\n    df['doc'] = df.title.apply(lambda x: model(preprocess(x)))\n    df['span'] = _add_span(df)\n    for tag in FEATURES:\n        df[tag.upper()] = df['span'].apply(lambda x: x.get(tag.upper())).fillna('')\n    df = df.drop(columns=['span'])\n    df[MAKE_NORMALIZED] = df.doc.apply(lambda x: x._.make_normalized)\n    df[MODELNAME_NORMALIZED] = df.doc.apply(lambda x: x._.model_normalized)\n    df[MODELNO_NORMALIZED] = df.doc.apply(lambda x: x._.modelno_normalized)\n    columns = [feature.upper() for feature in FEATURES] + [MAKE_NORMALIZED, MODELNAME_NORMALIZED, MODELNO_NORMALIZED]\n    columns = sorted(columns)\n    columns = [TITLE]+columns\n    df = df[columns]\n    return df\n\ndef _add_span(df:pd.DataFrame) -> pd.Series:\n    return df.doc.apply(lambda x: dict([(span.label_,str(span)) for span in x.spans['sc'] if str(span)]))","repo_name":"NaelsonDouglas/NER","sub_path":"src/scripts/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32120667247","text":"def nextPrime(n):\n    while True: \n        if n < 0: \n            print('Invalid input, enter again:')\n            n = float(input())\n        elif n != int(n): \n            print('Invalid input, enter again:')\n            n = float(input())\n        else: \n            break\n    n = int(n)\n    n = n+1\n    while True:\n        for i in range(2,n): \n            if n%i == 0: \n                n = n+1\n                break\n        else: \n            print(n)\n            break\ndef Nhap():\n    number = float(input())\n    
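# read a number from the user, then find and print the next prime after it\n    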
nextPrime(number)\nNhap()","repo_name":"tranletuanh2801/CoSoLapTrinh","sub_path":"Chuong4/BTN/Ex93.py","file_name":"Ex93.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36236162872","text":"\"\"\"\nHandlers for Bugout Slack CLI\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport os\nimport textwrap\nfrom typing import Any, cast, Dict, List, Optional\nimport urllib\nimport urllib.parse\n\nimport requests # type: ignore\nfrom sqlalchemy.orm import Session\n\nfrom . import admin as slack_admin\nfrom . import indices as slack_indices\nfrom .models import SlackOAuthEvent, SlackMention, SlackBugoutUser\nfrom .parse import SlackTextTokenType, parse_raw_text\nfrom .handlers import HTMLToText, InstallationNotFound\nfrom .. import db\nfrom ..broodusers import get_bugout_user, BugoutUserNotFound, Existence\nfrom ..utils.settings import BUGOUT_CLIENT_ID_HEADER\n\nlogger = logging.getLogger(__name__)\n\n\nSLACK_MAX_BLOCK_LENGTH = 2900\nSLACK_MESSAGE_CONTINUATION = \"...\"\n\n\nclass SlackArgumentParseError(Exception):\n \"\"\"\n Raised when there is an error parsing arguments for a CLI invocation from Slack.\n \"\"\"\n\n\nclass CustomHelpAction(argparse._HelpAction):\n \"\"\"\n Custom argparse action that handles -h and --help flags in Bugout Slack argument parsers.\n\n This is part of the dirty hack to get around the annoying exit behaviour of argparse. The other\n part of this is the custom ArgumentParser subclass we use (defined below).\n \"\"\"\n\n def __init__(\n self,\n option_strings,\n dest=argparse.SUPPRESS,\n default=argparse.SUPPRESS,\n help=None,\n ):\n super().__init__(option_strings, dest, default, help)\n\n def __call__(self, parser, namespace, values, option_string=None):\n raise SlackArgumentParseError(parser.format_help())\n\n\nclass BugoutSlackArgumentParser(argparse.ArgumentParser):\n \"\"\"\n Parser for CLI invocations via Slack\n \"\"\"\n\n def error(self, message):\n message_with_usage = f\"{self.format_usage()}\\n{message}\"\n raise SlackArgumentParseError(message_with_usage)\n\n def register(self, registry_name, value, object):\n registry = self._registries.setdefault(registry_name, {})\n if value == \"help\":\n registry[value] = CustomHelpAction\n else:\n registry[value] = object\n\n\ndef generate_bugout_parser() -> BugoutSlackArgumentParser:\n logger.info(\"Generating Bugout Slack argument parser\")\n\n bugout_description = textwrap.dedent(\n \"\"\"\\\n Bugout: The Search Engine for Programmers\n\n Welcome to Bugout. You got here by sending a message to @bugout. @bugout helps you execute Bugout search queries directly from your Slack workspace.\n\n @bugout is a command-line tool with Slack as its command line. 
Everything you know about CLIs applies.\n\n        Try a web search:\n        $ @bugout search web how to exit vim\n\n        If you have any questions or comments, please reach out to Neeraj, the creator of Bugout, at neeraj@simiotics.com.\n        \"\"\"\n    )\n    parser = BugoutSlackArgumentParser(\n        prog=\"@bugout\",\n        formatter_class=argparse.RawDescriptionHelpFormatter,\n        description=bugout_description,\n    )\n\n    parser.add_argument(\n        \"-n\",\n        \"--nothread\",\n        action=\"store_true\",\n        help=\"Set this flag if you do not want @bugout to use threads\",\n    )\n    subparsers = parser.add_subparsers(title=\"Commands\", dest=\"command\")\n\n    search_parser = subparsers.add_parser(\n        \"search\", description=\"Perform a Bugout search against an available index\"\n    )\n    search_parser.add_argument(\n        \"index\",\n        help=\"Name of index to search against (view available indices with `@bugout indices list`)\",\n    )\n    search_parser.add_argument(\n        \"-b\",\n        \"--browser\",\n        action=\"store_true\",\n        help=(\n            \"Set this flag if you would like @bugout to give you a link to view your search results\"\n            \" in your browser\"\n        ),\n    )\n    search_parser.add_argument(\"query\", nargs=\"+\", help=\"Bugout search query\")\n\n    indices_parser = subparsers.add_parser(\n        \"indices\", description=\"Information about the available indices\"\n    )\n    slack_indices.populate_indices_parser(indices_parser)\n\n    admin_parser = subparsers.add_parser(\n        \"admin\", description=\"Administrative actions you can take with Bugout\"\n    )\n    slack_admin.populate_admin_parser(admin_parser)\n\n    return parser\n\n\nasync def search_blocks_modifier(\n    db_session: Session,\n    blocks: List[Dict[str, Any]],\n    args: argparse.Namespace,\n    team_id: str,\n    user_id: str,\n    bot_installation: SlackOAuthEvent,\n    channel_id: Optional[str] = None,\n):\n    \"\"\"\n    Modifies Slack message blocks array to present appropriate output back to Slack users on a\n    Bugout search.\n    \"\"\"\n    query_string = \" \".join(args.query)\n    query_string_urlencoded = urllib.parse.quote_plus(query_string)\n    client_id = f\"slack-{team_id}-{user_id}\"\n\n    bugout_web_url = os.environ.get(\"BUGOUT_WEB_URL\")\n    if bugout_web_url is not None:\n        blocks.append(\n            {\n                \"type\": \"section\",\n                \"text\": {\n                    \"type\": \"mrkdwn\",\n                    \"text\": f\"<{bugout_web_url}/?clientID={client_id}&q={query_string_urlencoded}&auto=search|View results in your browser>\",\n                },\n            }\n        )\n    elif args.browser:\n        blocks.append(\n            {\n                \"type\": \"section\",\n                \"text\": {\n                    \"type\": \"mrkdwn\",\n                    \"text\": (\n                        \"Your @bugout backend is not configured to connect to a Bugout \"\n                        f\"web instance.\\nContact <@{bot_installation.user_id}> to fix \"\n                        \"this problem.\"\n                    ),\n                },\n            }\n        )\n\n    if not args.browser:\n        available_indices = slack_indices.get_installation_indices(\n            db_session, bot_installation\n        )\n        available_index_mapping = {\n            available_index.index_name: available_index\n            for available_index in available_indices\n        }\n\n        blocks.append({\"type\": \"divider\"})\n\n        if args.index not in available_index_mapping:\n            return blocks.append(\n                {\n                    \"type\": \"section\",\n                    \"text\": {\n                        \"type\": \"mrkdwn\",\n                        \"text\": (\n                            f\"There is no available index named {args.index}. 
Choose from one of: \"\n f\"{', '.join(available_index_mapping)}\"\n ),\n },\n }\n )\n\n specified_index = available_index_mapping[args.index]\n headers: Dict[str, str] = {}\n if specified_index.use_bugout_client_id:\n # TODO(neeraj): Change this to BUGOUT_CLIENT_ID_HEADER once you've updated parasite and\n # usage backends to live in Spire.\n headers[BUGOUT_CLIENT_ID_HEADER] = client_id\n\n if specified_index.use_bugout_auth:\n try:\n bugout_user = get_bugout_user(\n db_session,\n bot_installation.id,\n throw_on=Existence.DoesNotExist,\n )\n except BugoutUserNotFound:\n return blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": (\n f\"Index ({specified_index.index_name}) requires your @bugout \"\n \"installation to be authenticated. Ask your @bugout administrator \"\n f\"(<@{bot_installation.authed_user_id}>) to authenticate using: \"\n \"`@bugout admin register` or `@bugout admin login`.\"\n ),\n },\n }\n )\n\n bugout_user = cast(SlackBugoutUser, bugout_user)\n headers[\"Authorization\"] = f\"Bearer {bugout_user.bugout_access_token}\"\n\n logger.info(f\"Executing search against: {specified_index.index_url}\")\n r = requests.get(\n specified_index.index_url,\n params={\"q\": query_string},\n headers=headers,\n timeout=5,\n )\n r.raise_for_status()\n bugout_response = r.json()\n results = bugout_response.get(\"results\", [])\n num_results = len(results)\n\n if not results:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"Sorry, I found no results for your query.\",\n },\n }\n )\n\n # TODO(neeraj): This is the quick and hacky solution to different result formats from\n # different search engines. Find a more elegant solution to this SOON.\n if args.index == \"journal\":\n for i, result in enumerate(results):\n entry_number = i + 1\n rendered_result = (\n f\"*Title:* {result['title']}\\n\"\n f\"*Tags:* {', '.join(result['tags'])}\\n\"\n f\"{result['content']}\"\n )\n if len(rendered_result) > SLACK_MAX_BLOCK_LENGTH:\n rendered_result = (\n rendered_result[\n : max(\n 0,\n SLACK_MAX_BLOCK_LENGTH\n - len(SLACK_MESSAGE_CONTINUATION),\n )\n ]\n + SLACK_MESSAGE_CONTINUATION\n )\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": rendered_result},\n }\n )\n if entry_number < num_results:\n blocks.append({\"type\": \"divider\"})\n else:\n html_to_text = HTMLToText()\n\n for i, result in enumerate(results):\n entry_number = i + 1\n result_url = result.get(\"url\", \"\")\n raw_result_name = result.get(\"name\", \"\")\n raw_result_snippet = result.get(\"snippet\", \"\")\n\n html_to_text.reset()\n html_to_text.feed(raw_result_name)\n html_to_text.close()\n result_name = html_to_text.generate()\n\n html_to_text.reset()\n html_to_text.feed(raw_result_snippet)\n html_to_text.close()\n result_snippet = html_to_text.generate()\n\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"<{result_url}>\\n{result_name}\\n```{result_snippet}```\",\n },\n }\n )\n if entry_number < num_results:\n blocks.append({\"type\": \"divider\"})\n\n\nasync def handle_mention(\n team_id: str,\n user_id: str,\n channel_id: str,\n text: str,\n thread_ts: Optional[str],\n bugout_parser: BugoutSlackArgumentParser,\n spire_api_url: str,\n) -> None:\n \"\"\"\n Handles a mention of bugout by:\n 1. Checking that it is authorized to respond with a search (if not, it will post a friendly\n message in slack)\n 2. 
Extracting the @bugout command from the given text.\n 3. Handling the @bugout command\n 4. Responding to the message with an explicit mention of the user with the given ID (if\n thread_ts is specified, responds in that thread).\n \"\"\"\n with db.yield_connection_from_env_ctx() as db_session:\n query = (\n db_session.query(SlackOAuthEvent)\n .filter(SlackOAuthEvent.team_id == team_id)\n .order_by(SlackOAuthEvent.updated_at.desc())\n )\n\n bot_installation: Optional[SlackOAuthEvent] = query.first()\n if bot_installation is None:\n raise InstallationNotFound(\n f\"Did not find active installation of @bugout in team: {team_id}\"\n )\n\n if user_id == bot_installation.bot_user_id:\n return None\n\n lines = text.split(\"\\n\")\n invocations: List[List[str]] = []\n for line in lines:\n raw_tokens = line.split()\n tokens = [parse_raw_text(raw_token) for raw_token in raw_tokens]\n bot_mention_indices: List[int] = [\n index\n for index, token in enumerate(tokens)\n if token.token_type == SlackTextTokenType.USER\n and token.token == bot_installation.bot_user_id\n ]\n # On each line, only process the final mention as issuing a command to the Slackbot\n # This allows users to discuss the behaviour of the Slackbot and issue a command on the\n # same line.\n if len(bot_mention_indices) > 0:\n raw_args: List[str] = [\n token.raw for token in tokens[bot_mention_indices[-1] + 1 :]\n ]\n invocations.append(raw_args)\n\n for invocation in invocations:\n invocation_text = \"@bugout \" + \" \".join(invocation)\n blocks: List[Dict[str, Any]] = [\n {\n \"type\": \"header\",\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": \"Bugout\",\n \"emoji\": True,\n },\n },\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"Query:\\n```{invocation_text}```\",\n },\n },\n {\"type\": \"divider\"},\n ]\n payload = {\n \"token\": bot_installation.bot_access_token,\n \"text\": \"Bugout response\",\n \"channel\": channel_id,\n }\n\n proceed = True\n try:\n args = bugout_parser.parse_args(invocation)\n except SlackArgumentParseError as e:\n proceed = False\n if thread_ts:\n payload[\"thread_ts\"] = thread_ts\n\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": f\"```{str(e)}```\"},\n }\n )\n\n if proceed:\n if thread_ts and (not args.nothread):\n payload[\"thread_ts\"] = thread_ts\n\n if args.command is None:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"```{bugout_parser.format_help()}```\",\n },\n }\n )\n elif args.command == \"indices\":\n slack_indices.indices_blocks_modifier(\n db_session,\n blocks,\n args,\n team_id,\n user_id,\n channel_id,\n bot_installation,\n spire_api_url,\n )\n elif args.command == \"admin\":\n try:\n await slack_admin.admin_handler(\n blocks,\n args,\n team_id,\n user_id,\n channel_id,\n spire_api_url,\n bot_installation,\n )\n except Exception as e:\n logger.error(f\"ERROR processing admin command -- {str(e)}\")\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"Something went wrong handling your request. 
I am very sorry.\",\n                            },\n                        }\n                    )\n            elif args.command == \"search\":\n                await search_blocks_modifier(\n                    db_session,\n                    blocks,\n                    args,\n                    team_id,\n                    user_id,\n                    bot_installation,\n                    channel_id,\n                )\n\n        payload[\"blocks\"] = json.dumps(blocks)\n        responded = True\n        try:\n            r = requests.post(\n                \"https://slack.com/api/chat.postMessage\",\n                data=payload,\n                timeout=3,\n            )\n            r.raise_for_status()\n        except requests.HTTPError:\n            responded = False\n\n        slack_response = r.json()\n        if not slack_response.get(\"ok\"):\n            if slack_response.get(\"error\"):\n                logger.error(\n                    f\"Error returning response to Slack: {slack_response.get('error')}.\"\n                )\n            responded = False\n\n        slack_mention = SlackMention(\n            slack_oauth_event_id=bot_installation.id,\n            team_id=team_id,\n            user_id=user_id,\n            channel_id=channel_id,\n            invocation=invocation_text,\n            thread_ts=thread_ts,\n            responded=responded,\n        )\n        db_session.add(slack_mention)\n\n        db_session.commit()\n","repo_name":"bugout-dev/spire","sub_path":"spire/slack/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":17972,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"14975543437","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql import functions as F\n\nfrom datetime import datetime\nfrom processing.schema import schema\n\n\nBOOTSTRAP_SERVERS = \"172.31.65.164:9092,172.31.67.180:9092,172.31.67.236:9092\"\n\n\ndef write_to_cassandra(streaming_df, keyspace, table, trigger_time):\n    query = (streaming_df.\n             writeStream.\n             format(\"org.apache.spark.sql.cassandra\").\n             option(\"table\", table).\n             option(\"keyspace\", keyspace).\n             trigger(processingTime=trigger_time)\n             )\n\n    return query\n\n\nspark = SparkSession.builder \\\n    .appName(f\"Meetups_{str(datetime.now())}\") \\\n    .getOrCreate()\n\nspark.conf.set(\"spark.sql.streaming.checkpointLocation\", '.')\nspark.conf.set(\"spark.cassandra.auth.username\", \"cassandra\")\nspark.conf.set(\"spark.cassandra.auth.password\", \"cassandra\")\n\n\ndf = (spark.\n      readStream.\n      format(\"kafka\").\n      option(\"kafka.bootstrap.servers\", BOOTSTRAP_SERVERS).\n      option(\"subscribe\", \"meetups\").\n      option(\"startingOffsets\", \"earliest\").\n      load())\n\ndf = (df.\n      selectExpr(\"CAST(key AS STRING)\", \"CAST(value AS STRING)\").\n      withColumn('value', F.from_json('value', schema)).\n      selectExpr('value.*'))\n\ndf1 = (df.\n       select(\n        F.col('group.group_country').alias('country'),\n        F.col('group.group_city').alias('city')\n))\n\ndf2 = (df.\n       select(\n        F.col('event.event_id').alias('event_id'),\n        F.col('event.event_name').alias('event_name'),\n        F.col('event.time').alias('event_time'),\n        F.array_join('group.group_topics.topic_name', delimiter=';').alias('topics'),\n        F.col('group.group_name').alias('group_name'),\n        F.col('group.group_country').alias('country'),\n        F.col('group.group_city').alias('city'),\n))\n\ndf3 = (df.\n       select(\n        F.col('group.group_city').alias('city_name'),\n        F.col('group.group_name').alias('group_name'),\n        F.col('group.group_id').alias('group_id'),\n))\n\ndf4 = (df.\n       select(\n        F.col('group.group_id').alias('group_id'),\n        F.col('event.event_id').alias('event_id'),\n        F.col('event.event_name').alias('event_name'),\n        F.col('event.time').alias('event_time'),\n        F.array_join('group.group_topics.topic_name', delimiter=';').alias('topics'),\n        F.col('group.group_name').alias('group_name'),\n        F.col('group.group_country').alias('country'),\n        F.col('group.group_city').alias('city'),\n))\n\nq1 = write_to_cassandra(df1, keyspace='meetups', 
table='event_cities', trigger_time='1 minute').start()\nq2 = write_to_cassandra(df2, keyspace='meetups', table='events', trigger_time='1 minute').start()\nq3 = write_to_cassandra(df3, keyspace='meetups', table='cities_groups', trigger_time='1 minute').start()\nq4 = write_to_cassandra(df4, keyspace='meetups', table='groups_events', trigger_time='1 minute').start()\n\nq1.awaitTermination()\nq2.awaitTermination()\nq3.awaitTermination()\nq4.awaitTermination()\n","repo_name":"ostapViniavskyi/MeetupsBigData","sub_path":"processing/stream_processing/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71533616483","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.bakeries, name='bakeries'),\n path('zone/', views.bakerieszone, name='bakeries-zone'),\n path('mahsoolat//', views.mahsoolat, name='mahsoolat'),\n path('mahsool-jadid//', views.mahsool_jadid , name='mahsool-jadid'),\n path('mahsool-detail//', views.mahsool_detail, name='mahsool-detail'),\n path('search/', views.search, name='search'),\n path('submit_review//', views.submit_review, name='submit_review'),\n\n\n\n\n]\n ","repo_name":"liberouf/bakeryxa","sub_path":"bakeries/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32314987535","text":"from django.contrib import admin\nfrom django.contrib.admin import DateFieldListFilter\nfrom django.utils.translation import ugettext as _\n\nfrom vital_records.admin.filter import GenderFieldListFilter\nfrom ..forms import BirthNoteAdminForm\nfrom ..models import BirthNote, BirthEvidence, BirthNoteLaw, BirthPlace\n\nadmin.site.register(BirthPlace)\nadmin.site.register(BirthEvidence)\nadmin.site.register(BirthNoteLaw)\n\n\n@admin.register(BirthNote)\nclass BirthNoteAdmin(admin.ModelAdmin):\n form = BirthNoteAdminForm\n fieldsets = [\n ('Common note info', {\n 'fields': (\n 'note_number',\n 'compose_date',\n ('was_restored', 'was_revoked'),\n 'language',\n 'official_info',\n 'created_by',\n 'notes',\n 'registrar'\n )\n }),\n ('Child info', {\n 'fields': (\n 'birth_date',\n 'birth_place',\n 'child_name',\n 'child_last_name',\n 'child_patronymic',\n 'child_gender',\n 'child_number',\n 'stillborn'\n )\n }),\n ('Birth note details', {\n 'fields': (\n 'law',\n 'deadline_passed',\n 'children_born_count',\n 'birth_evidences',\n 'applicant'\n )\n }),\n ('Parents info', {\n 'fields': (\n 'parents',\n 'father_info_reason'\n )\n })\n ]\n readonly_fields = ['created_by']\n list_display = (\n 'note_number', 'compose_date', 'child_name', 'child_last_name', 'child_patronymic',\n 'children_born_count', 'child_number'\n )\n date_hierarchy = 'compose_date'\n list_filter = (\n ('child_gender', GenderFieldListFilter),\n 'children_born_count', 'created_by',\n ('birth_date', DateFieldListFilter),\n 'language'\n )\n filter_horizontal = ['birth_evidences', 'parents']\n search_fields = (\n 'note_number', 'child_name', 'child_last_name', 'child_patronymic'\n )\n\n def save_model(self, request, obj, form, change):\n obj.created_by = request.user\n super().save_model(request, obj, form, 
change)\n","repo_name":"wi110w/vital-records-registry","sub_path":"registry/vital_records/admin/birth.py","file_name":"birth.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30020769928","text":"from tkinter import *\nfrom PIL import Image,ImageTk\nfrom random import *\n\n#main window\nroot=Tk()\nroot.title(\"ROCK PAPER AND SCISSOR \")\nroot.configure(background=\"#9b59b6\")\nroot.geometry(\"600x160\")\n\n\n\n#picture\nrock_img=ImageTk.PhotoImage(Image.open(\"rock.png\"))\npaper_img=ImageTk.PhotoImage(Image.open(\"paper.png\"))\nsciccor_img=ImageTk.PhotoImage(Image.open(\"scissor.png\"))\n\n\n\n\n#insert picture\n\nuser_label=Label(root,image=sciccor_img,bg=\"#9b59b6\")\ncomp_label=Label(root,image=sciccor_img,bg=\"#9b59b6\")\ncomp_label.grid(row=1,column=0)\nuser_label.grid(row=1,column=4)\n\n\n#scores\n\nplayerscore=Label(root,text=0,font=100,bg=\"#9b59b6\",fg=\"white\")\ncomputerscore=Label(root,text=0,font=100,bg=\"#9b59b6\",fg=\"white\")\ncomputerscore.grid(row=1,column=1)\nplayerscore.grid(row=1,column=3)\n\n\n#Indicator\nuser_indicator=Label(root,font=50,text=\"USER\",bg=\"#9b59b6\",fg=\"white\")\ncomp_indicator=Label(root,font=50,text=\"COMPUTER\",bg=\"#9b59b6\",fg=\"white\")\nuser_indicator.grid(row=0,column=3)\ncomp_indicator.grid(row=0,column=1)\n\n#Message\n\nmsg=Label(root,font=50,bg=\"#9b59b6\",fg=\"white\")\nmsg.grid(row=3,column=2)\n#update msg\ndef updateMsg(x):\n msg['text']=x\n \n \ndef updateUserScore():\n score=int(playerscore['text'])\n score+=1\n playerscore['text']=str(score)\n \ndef updateCompScore():\n score=int(computerscore['text'])\n score+=1\n computerscore['text']=str(score)\n\n\ndef checkWinner(player,computer):\n if player == computer:\n updateMsg(\"It's a Draw !!!\")\n elif player == \"rock\":\n if computer ==\"paper\":\n updateMsg(\"You Loose :( .\")\n updateCompScore()\n else:\n updateMsg(\"You Win :) .\")\n updateUserScore()\n \n elif player == \"paper\":\n if computer ==\"scissor\":\n updateMsg(\"You Loose :( .\")\n updateCompScore()\n else:\n updateMsg(\"You Win :) .\")\n updateUserScore()\n elif player==\"scissor\":\n if computer==\"rock\":\n updateMsg(\"You Loose :( .\")\n updateCompScore()\n else:\n updateMsg(\"You Win :) .\")\n updateUserScore()\n else:\n pass \n \nchoices=[\"rock\",\"paper\",\"scissor\"]\n\n#upadte choice\ndef updatechoice(x):\n #for computer\n \n compchoice=choices[randint(0,2)]\n if compchoice == \"rock\":\n comp_label.configure(image=rock_img)\n elif compchoice==\"paper\":\n comp_label.configure(image=paper_img)\n else:\n comp_label.configure(image=sciccor_img)\n \n \n \n #for ueser\n if x==\"rock\":\n user_label.configure(image=rock_img)\n elif x==\"paper\":\n user_label.configure(image=paper_img)\n else:\n user_label.configure(image=sciccor_img)\n checkWinner(x,compchoice) \n \n#Buttons\n\nrock=Button(root,width=20,height=2,text=\"ROCK\",bg=\"#FF3E4D\",fg=\"white\",command=lambda:updatechoice(\"rock\")).grid(row=2,column=1)\npaper=Button(root,width=20,height=2,text=\"PAPER\",bg=\"#FAD02E\",fg=\"white\",command=lambda:updatechoice(\"paper\")).grid(row=2,column=2)\nscissor=Button(root,width=20,height=2,text=\"SCISSOR\",bg=\"#0ABDE3\",fg=\"white\",command=lambda:updatechoice(\"scissor\")).grid(row=2,column=3)\n\n\n\nroot.mainloop()","repo_name":"Justmephoenix/CODSOFT","sub_path":"ROCK PAPER SCISSOR GAME TASK 
-4/rockpaperscissor.py","file_name":"rockpaperscissor.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"17708949271","text":"\nfrom pathlib import Path\n\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\n\nfrom cartopy.io import shapereader\nfrom shapely.geometry import Polygon\n\nfrom esmvaltool.diag_scripts.shared import run_diagnostic\nfrom esmvaltool.diag_scripts.shared._base import get_plot_filename\n\ndef rect_from_bound(xmin, xmax, ymin, ymax):\n    \"\"\"Returns list of (x,y)'s for a rectangle\"\"\"\n    xs = [xmax, xmin, xmin, xmax, xmax]\n    ys = [ymax, ymax, ymin, ymin, ymax]\n    return [(x, y) for x, y in zip(xs, ys)]\n\n\ndef get_country_polygon(country):\n    resolution = '10m'\n    category = 'cultural'\n    name = 'admin_0_countries'\n\n    shpfilename = shapereader.natural_earth(resolution, category, name)\n    reader = shapereader.Reader(shpfilename)\n    records = list(reader.records())\n\n    for record in records:\n        if record.attributes[\"ADMIN\"] == country:\n            return [record.geometry]\n\n\ndef plot_australia(data, name, cmap):\n    poly = get_country_polygon(\"Australia\")\n    # Make the figure larger\n    fig = plt.figure(figsize=(11,8.5))\n    \n    # Set the axes using the specified map projection\n    ax = plt.axes(projection=ccrs.PlateCarree())\n    ax.add_geometries(poly, crs=ccrs.PlateCarree(), facecolor='none', edgecolor='black')\n    \n    # fixed plotting extent for Australia: [min_lon, max_lon, min_lat, max_lat]\n    exts = [110, 165, -45, -9]\n    ax.set_extent(exts, crs=ccrs.PlateCarree())\n\n    min_lon, max_lon, min_lat, max_lat = exts\n    pad2 = 1 # padding, degrees unit\n    data = data.sel(lat=slice(min_lat - pad2, max_lat + pad2), lon=slice(min_lon - pad2,max_lon + pad2))\n    data = data.mean(\"time\")\n\n    # make a mask polygon by polygon difference: the base polygon is a rectangle,\n    # the other is the simplified country outline\n    msk = Polygon(rect_from_bound(*exts)).difference( poly[0].simplify(0.01) )\n    msk_stm = ccrs.PlateCarree().project_geometry(msk, ccrs.PlateCarree()) # project the mask geometry to the map projection\n    \n    # Make a filled contour plot\n    cs=ax.contourf(data['lon'], data['lat'], data['tas'],\n                   transform = ccrs.PlateCarree(),cmap='coolwarm',extend='both')\n    \n    # plot the mask (fully opaque) over the area outside the country outline\n    ax.add_geometries( msk_stm, ccrs.PlateCarree(), zorder=12, facecolor='white', edgecolor='none', alpha=1.0)\n    \n    # Add gridlines\n    #ax.gridlines(draw_labels=True)\n    \n    # Add colorbar\n    cbar = plt.colorbar(cs,shrink=0.7,orientation='vertical',label='Surface Air Temperature (K)')\n    \n    # Add title\n    plt.title('Australia 
Temperature')\n\n return fig\n\ndef main(cfg):\n \"\"\"Compute the time average for each input dataset.\"\"\"\n # Get a description of the preprocessed data that we will use as input.\n input_data = cfg['input_data'].values()\n\n # Example of how to loop over variables/datasets in alphabetical order\n for dataset in input_data:\n # Load the data\n input_file = dataset['filename']\n name = dataset['variable_group']\n data = xr.open_dataset(input_file)\n\n cmap = \"coolwarm\"\n fig = plot_australia(data, name, cmap)\n\n # Save output\n output_file = Path(input_file).stem.replace('tas', name)\n output_path = get_plot_filename(output_file, cfg)\n fig.savefig(output_path)\n\n\nif __name__ == '__main__':\n\n with run_diagnostic() as config:\n main(config)\n","repo_name":"rbeucher/esmvaltool_australia_recipes","sub_path":"australia.py","file_name":"australia.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3186790658","text":"import create_datasets as cd\nimport time, os, pickle\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\n\n# to avoid Error #15: Initializing libiomp5.dylib on Mac OS\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\ndef train(model='CNN',rep=5):\n\n # define a CNN model\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.fc1 = nn.Linear(16*22*22, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 8)\n # Define proportion or neurons to dropout\n self.dropout = nn.Dropout(0.1)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16*22*22)\n x = self.dropout(x)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n # choose a model to train\n if model == 'CNN':\n net = Net()\n elif model == 'AlexNet':\n net = torchvision.models.alexnet()\n elif model == 'ResNet18':\n net = torchvision.models.resnet18()\n elif model == 'ResNet50':\n net = torchvision.models.resnet50()\n \n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(net.parameters(), lr=0.001)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)\n\n # datasets\n train_label_list = cd.create_datasets(type='train')\n \n os.chdir('..')\n\n train = []\n for i,row in train_label_list.iterrows():\n img_path = 'CW_Dataset/' + 'train' + '/'\n filename = row[0]\n label = row[1]\n img_path += str(filename.split('.')[0])+'_aligned.jpg'\n img = Image.open(img_path).convert('RGB')\n img = transforms.ToTensor()(img)\n train.append([img,label])\n trainLoader = torch.utils.data.DataLoader(train, shuffle=True)\n\n # training\n start_time = time.time()\n rep = rep # number of epochs (5 or 20 were selected for the report)\n for epoch in range(rep): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i,data in enumerate(trainLoader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 1000 == 999: # print every 1000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i 
+ 1, running_loss / 1000))\n                running_loss = 0.0\n\n        scheduler.step()\n\n    end_time = time.time() - start_time\n    print('Finished Training ', end_time)\n\n    # save only the trained weights: Net is defined inside train(), so pickling the\n    # whole module object would fail for the local class, while a state dict is portable\n    os.chdir('..')\n    torch.save(net.state_dict(),'Models/' + model + '_' + str(rep) + '.p')\n    pickle.dump(end_time, open('Code/' + model + '_' + str(rep) + '_speed.p', 'wb'))\n    os.chdir('Code')\n    \ndef test(model='CNN', rep=5):\n    \n    # this architecture must match the Net defined in train(), otherwise the saved\n    # state dict cannot be loaded\n    class Net(nn.Module):\n        def __init__(self):\n            super().__init__()\n            self.conv1 = nn.Conv2d(3, 6, 5)\n            self.conv2 = nn.Conv2d(6, 16, 5)\n            self.pool = nn.MaxPool2d(2, 2)\n            self.fc1 = nn.Linear(16*22*22, 120)\n            self.fc2 = nn.Linear(120, 84)\n            self.fc3 = nn.Linear(84, 8)\n            # Define proportion of neurons to dropout\n            self.dropout = nn.Dropout(0.1)\n        \n        def forward(self, x):\n            x = self.pool(F.relu(self.conv1(x)))\n            x = self.pool(F.relu(self.conv2(x)))\n            x = x.view(-1, 16*22*22)\n            x = self.dropout(x)\n            x = F.relu(self.fc1(x))\n            x = F.relu(self.fc2(x))\n            x = self.fc3(x)\n            return x\n    \n    model_name = model + '_' + str(rep)\n    os.chdir('..')\n    \n    # rebuild the architecture, then load the saved state dict into it\n    if model == 'CNN':\n        net = Net()\n    elif model == 'AlexNet':\n        net = torchvision.models.alexnet()\n    elif model == 'ResNet18':\n        net = torchvision.models.resnet18()\n    elif model == 'ResNet50':\n        net = torchvision.models.resnet50()\n    net.load_state_dict(torch.load('Models/' + model_name + '.p', map_location=torch.device('cpu')))\n    net.eval()  # disable dropout during evaluation\n\n    # datasets\n    os.chdir('Code')\n    test_label_list = cd.create_datasets(type='test')\n    os.chdir('..')\n    test = []\n    for i,row in test_label_list.iterrows():\n        img_path = 'CW_Dataset/' + 'test' + '/'\n        filename = row[0]\n        label = row[1]\n        img_path += str(filename.split('.')[0])+'_aligned.jpg'\n        img = Image.open(img_path).convert('RGB')\n        img = transforms.ToTensor()(img)\n        test.append([img,label])\n    testLoader = torch.utils.data.DataLoader(test, shuffle=True)\n\n    # test\n    correct = 0\n    total = 0\n    classes = (1, 2, 3, 4, 5, 6, 7)\n    class_correct = list(0 for i in range(7))\n    fp = list(0 for i in range(7))\n    tn = list(0 for i in range(7))\n    fn = list(0 for i in range(7))\n    class_total = list(0 for i in range(7))\n    with torch.no_grad():\n        for data in testLoader:\n            images, labels = data\n            outputs = net(images)\n            _, predicted = torch.max(outputs.data, 1)\n            total += labels.size(0)\n            correct += (predicted == labels).sum().item()\n            label = labels - 1\n            if predicted == labels:\n                class_correct[label] += 1\n            l = labels\n            p = predicted\n            if p != l:\n                fn[label] += 1\n            for i in range(7):\n                i += 1\n                if i != l:\n                    if i == p:\n                        fp[i-1] += 1\n                    elif i != p:\n                        tn[i-1] += 1\n            class_total[label] += 1\n\n    print('Accuracy of the network on the testset: %d %%' % (\n        100 * correct / total))\n    for i in range(7):\n        print('Accuracy of %5s : %2d %%' % (\n            classes[i], 100 * class_correct[i] / class_total[i]))\n    \n    recall = []\n    precision = []\n    f1 = []\n    tp = class_correct\n    for i in range(7):\n        r = tp[i]/(tp[i]+fn[i])\n        pre = tp[i]/(tp[i]+fp[i])\n        f = 2*(r*pre)/(r+pre)\n        recall.append(round(r,2))\n        precision.append(round(pre,2))\n        f1.append(round(f,2))\n    \n    print(len(recall))\n    print('recall',recall)\n    print('precision',precision)\n    print('f1',f1)","repo_name":"ChikazeMori/FacialEmotionRecognition","sub_path":"CW_Folder_ChikazeMori_200038013/Code/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":6719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72263735200","text":"import os\nimport pandas as pd\nimport openpyxl\nimport re\n\npatron_mes = r'(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre)'\ncarpeta = 
r\"C:\\Users\\JFROJAS\\Desktop\\Consolidado Incidencias\\Coecillo-20230711T184742Z-001\\Coecillo\\GE\"\nsalida = r\"C:\\Users\\JFROJAS\\Desktop\\Consolidado Incidencias\\Resultados\\Coecillo\\GE\"\n\n# Get the list of files in the folder\narchivos = os.listdir(carpeta)\ni = 1\nfor archivo in archivos:\n    if archivo.endswith(\".xlsx\"):\n        # Full path of the input file\n        path_entrada = os.path.join(carpeta, archivo)\n\n        # Work with the Excel file\n        archivo_excel = openpyxl.load_workbook(path_entrada)\n        hoja_trabajo = archivo_excel.active\n\n        fecha = hoja_trabajo['G5'].value\n        coincidencias = re.findall(patron_mes, fecha.lower())\n        if coincidencias:\n            fecha = coincidencias[0].capitalize()\n        else:\n            fecha = 'Error'\n\n        compañia = hoja_trabajo['C9'].value\n\n        archivo_excel.close()\n\n        # Create a dataframe from the Excel file\n        df = pd.read_excel(path_entrada, skiprows=10, usecols=\"C:O\")\n\n        # Add columns to the dataframe\n        df['Compañia'] = compañia\n        df['Mes'] = fecha\n        df['Cede'] = 'Coecillo'\n\n        # Full path of the output file\n        path_salida = os.path.join(salida, f\"{fecha + '_' + compañia}.xlsx\")\n\n        # Save the dataframe to a new Excel file\n        df.to_excel(path_salida, index=False)\n\n        # Print information about the processed file\n        print(\"File \" + str(i) + \" of \" + str(len(archivos)) + \" \"+ fecha + '_' + compañia)\n        print(\"Processing:\", archivo)\n        print()\n        i += 1\n\nprint(\"Process complete.\")\n","repo_name":"rojasfuentes/Incidencias","sub_path":"Project/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38684245729","text":"\"\"\"added artist\n\nRevision ID: 93f890c6a745\nRevises: 9a0dd5340383\nCreate Date: 2023-08-07 17:46:34.958656\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '93f890c6a745'\ndown_revision = '9a0dd5340383'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    with op.batch_alter_table('artists', schema=None) as batch_op:\n        batch_op.add_column(sa.Column('user_id', sa.Integer(), nullable=True))\n        batch_op.create_foreign_key(\n            \"fk_artist_user\", 'users', ['user_id'], ['id'])\n\n    with op.batch_alter_table('songs', schema=None) as batch_op:\n        batch_op.add_column(\n            sa.Column('artist_id', sa.Integer(), nullable=True))\n        batch_op.create_foreign_key(\n            'fk_song_artist', 'artists', ['artist_id'], ['id'])\n\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('songs', schema=None) as batch_op:\n batch_op.drop_constraint(\"fk_song_artist\", type_='foreignkey')\n batch_op.drop_column('artist_id')\n\n with op.batch_alter_table('artists', schema=None) as batch_op:\n batch_op.drop_constraint(\"fk_artist_user\", type_='foreignkey')\n batch_op.drop_column('user_id')\n\n # ### end Alembic commands ###\n","repo_name":"Enoch2k2/flask-association-examples","sub_path":"server/migrations/versions/93f890c6a745_added_artist.py","file_name":"93f890c6a745_added_artist.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24468620748","text":"\"\"\"\nA SLEAP dataset collects labeled video frames, together with required metadata.\n\nThis contains labeled frame data (user annotations and/or predictions),\ntogether with all the other data that is saved for a SLEAP project\n(videos, skeletons, etc.).\n\nThe most convenient way to load SLEAP labels files is to use the high level loader: ::\n\n > import sleap\n > labels = sleap.load_file(filename)\n\nThe Labels class provides additional functionality for loading SLEAP labels files. To\nload a labels dataset file from disk: ::\n\n > labels = Labels.load_file(filename)\n\nIf you're opening a dataset file created on a different computer (or if you've\nmoved the video files), it's likely that the paths to the original videos will\nnot work. We automatically check for the videos in the same directory as the\nlabels file, but if the videos aren't there, you can tell `load_file` where\nto search for the videos. There are various ways to do this: ::\n\n > Labels.load_file(filename, single_path_to_search)\n > Labels.load_file(filename, [path_a, path_b])\n > Labels.load_file(filename, callback_function)\n > Labels.load_file(filename, video_search=...)\n\nThe callback_function can be created via `make_video_callback()` and has the\noption to make a callback with a GUI window so the user can locate the videos.\n\nTo save a labels dataset file, run: ::\n\n > Labels.save_file(labels, filename)\n\nIf the filename has a supported extension (e.g., \".slp\", \".h5\", \".json\") then\nthe file will be saved in the corresponding format. 
You can also specify the\ndefault extension to use if none is provided in the filename.\n\"\"\"\nimport itertools\nimport os\nfrom collections.abc import MutableSequence\nfrom pathlib import Path\nfrom typing import (\n Callable,\n List,\n Union,\n Dict,\n Optional,\n Tuple,\n Text,\n Iterable,\n Any,\n Set,\n cast,\n)\n\nimport attr\nimport cattr\nimport h5py as h5\nimport numpy as np\nimport datetime\nfrom sklearn.model_selection import train_test_split\n\ntry:\n from typing import ForwardRef\nexcept ImportError:\n from typing import _ForwardRef as ForwardRef\n\nfrom sleap.skeleton import Skeleton, Node\nfrom sleap.instance import (\n Instance,\n LabeledFrame,\n Track,\n make_instance_cattr,\n PredictedInstance,\n)\n\nfrom sleap.io import pathutils\nfrom sleap.io.video import Video, ImgStoreVideo, HDF5Video\nfrom sleap.gui.suggestions import SuggestionFrame\nfrom sleap.gui.dialogs.missingfiles import MissingFilesDialog\nfrom sleap.rangelist import RangeList\nfrom sleap.util import uniquify, json_dumps\n\n\"\"\"\nThe version number to put in the Labels JSON format.\n\"\"\"\nLABELS_JSON_FILE_VERSION = \"2.0.0\"\n\n# For debugging, we can replace missing video files with a \"dummy\" video\nUSE_DUMMY_FOR_MISSING_VIDEOS = os.getenv(\"SLEAP_USE_DUMMY_VIDEOS\", default=\"\")\n\n\n@attr.s(auto_attribs=True)\nclass LabelsDataCache:\n \"\"\"Class for maintaining cache of data in labels dataset.\"\"\"\n\n labels: \"Labels\"\n\n def __attrs_post_init__(self):\n self.update()\n\n def update(self, new_frame: Optional[LabeledFrame] = None):\n \"\"\"Build (or rebuild) various caches.\"\"\"\n # Data structures for caching\n if new_frame is None:\n self._lf_by_video = {video: [] for video in self.labels.videos}\n self._frame_idx_map = dict()\n self._track_occupancy = dict()\n self._frame_count_cache = dict()\n\n # Loop through labeled frames only once\n for lf in self.labels:\n self._lf_by_video[lf.video].append(lf)\n\n # Loop through videos a second time after _lf_by_video is created\n for video in self.labels.videos:\n self._frame_idx_map[video] = {\n lf.frame_idx: lf for lf in self._lf_by_video[video]\n }\n self._track_occupancy[video] = self._make_track_occupancy(video)\n else:\n new_vid = new_frame.video\n\n if new_vid not in self._lf_by_video:\n self._lf_by_video[new_vid] = []\n if new_vid not in self._frame_idx_map:\n self._frame_idx_map[new_vid] = dict()\n self._lf_by_video[new_vid].append(new_frame)\n self._frame_idx_map[new_vid][new_frame.frame_idx] = new_frame\n\n def find_frames(\n self, video: Video, frame_idx: Optional[Union[int, Iterable[int]]] = None\n ) -> Optional[List[LabeledFrame]]:\n \"\"\"Return list of LabeledFrames matching video/frame_idx, or None.\"\"\"\n if frame_idx is not None:\n if video not in self._frame_idx_map:\n return None\n\n if isinstance(frame_idx, Iterable):\n return [\n self._frame_idx_map[video][idx]\n for idx in frame_idx\n if idx in self._frame_idx_map[video]\n ]\n\n if frame_idx not in self._frame_idx_map[video]:\n return None\n\n return [self._frame_idx_map[video][frame_idx]]\n else:\n if video not in self._lf_by_video:\n return None\n return self._lf_by_video[video]\n\n def find_fancy_frame_idxs(self, video, from_frame_idx, reverse):\n \"\"\"Return a list of frame idxs, with optional start position/order.\"\"\"\n if video not in self._frame_idx_map:\n return None\n\n # Get sorted list of frame indexes for this video\n frame_idxs = sorted(self._frame_idx_map[video].keys())\n\n # Find the next frame index after (before) the specified frame\n if not reverse:\n 
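# The filtered min() picks the smallest labeled frame index strictly greater\n # than from_frame_idx; the `default` argument makes the seek wrap around to\n # the first labeled frame when from_frame_idx is at or past the last one.\n # The reverse branch below mirrors this with max() and the last index.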
next_frame_idx = min(\n filter(lambda x: x > from_frame_idx, frame_idxs), default=frame_idxs[0]\n )\n else:\n next_frame_idx = max(\n filter(lambda x: x < from_frame_idx, frame_idxs), default=frame_idxs[-1]\n )\n cut_list_idx = frame_idxs.index(next_frame_idx)\n\n # Shift list of frame indices to start with specified frame\n frame_idxs = frame_idxs[cut_list_idx:] + frame_idxs[:cut_list_idx]\n\n return frame_idxs\n\n def _make_track_occupancy(self, video: Video) -> Dict[Video, RangeList]:\n \"\"\"Build cached track occupancy data.\"\"\"\n frame_idx_map = self._frame_idx_map[video]\n\n tracks = dict()\n frame_idxs = sorted(frame_idx_map.keys())\n for frame_idx in frame_idxs:\n instances = frame_idx_map[frame_idx]\n for instance in instances:\n if instance.track not in tracks:\n tracks[instance.track] = RangeList()\n tracks[instance.track].add(frame_idx)\n return tracks\n\n def get_track_occupancy(self, video: Video, track: Track) -> RangeList:\n \"\"\"Access track occupancy cache that adds video/track as needed.\"\"\"\n if track not in self.get_video_track_occupancy(video=video):\n self._track_occupancy[video][track] = RangeList()\n return self._track_occupancy[video][track]\n\n def get_video_track_occupancy(self, video: Video) -> Dict[Track, RangeList]:\n \"\"\"Return track occupancy information for specified video.\"\"\"\n if video not in self._track_occupancy:\n self._track_occupancy[video] = dict()\n\n return self._track_occupancy[video]\n\n def remove_frame(self, frame: LabeledFrame):\n \"\"\"Remove frame and update cache as needed.\"\"\"\n self._lf_by_video[frame.video].remove(frame)\n # We'll assume that there's only a single LabeledFrame for this video and\n # frame_idx, and remove the frame_idx from the cache.\n if frame.video in self._frame_idx_map:\n if frame.frame_idx in self._frame_idx_map[frame.video]:\n del self._frame_idx_map[frame.video][frame.frame_idx]\n\n def remove_video(self, video: Video):\n \"\"\"Remove video and update cache as needed.\"\"\"\n if video in self._lf_by_video:\n del self._lf_by_video[video]\n if video in self._frame_idx_map:\n del self._frame_idx_map[video]\n\n def track_swap(\n self,\n video: Video,\n new_track: Track,\n old_track: Optional[Track],\n frame_range: tuple,\n ):\n \"\"\"Swap tracks and update cache as needed.\"\"\"\n # Get ranges in track occupancy cache\n _, within_old, _ = self.get_track_occupancy(video, old_track).cut_range(\n frame_range\n )\n _, within_new, _ = self.get_track_occupancy(video, new_track).cut_range(\n frame_range\n )\n\n if old_track is not None:\n # Instances that didn't already have track can't be handled here.\n # See track_set_instance for this case.\n self._track_occupancy[video][old_track].remove(frame_range)\n\n self._track_occupancy[video][new_track].remove(frame_range)\n self._track_occupancy[video][old_track].insert_list(within_new)\n self._track_occupancy[video][new_track].insert_list(within_old)\n\n def add_track(self, video: Video, track: Track):\n \"\"\"Add a track to the labels.\"\"\"\n self.get_track_occupancy(video=video, track=track)\n\n def add_instance(self, frame: LabeledFrame, instance: Instance):\n \"\"\"Add an instance to the labels.\"\"\"\n\n # Add track in its not already present in labels\n track_occupancy = self.get_track_occupancy(\n video=frame.video, track=instance.track\n )\n\n track_occupancy.insert((frame.frame_idx, frame.frame_idx + 1))\n\n self.update_counts_for_frame(frame)\n\n def remove_instance(self, frame: LabeledFrame, instance: Instance):\n \"\"\"Remove an instance and update 
the cache as needed.\"\"\"\n if instance.track not in self._track_occupancy[frame.video]:\n return\n\n # If this is only instance in track in frame, then remove frame from track.\n if len(frame.find(track=instance.track)) == 1:\n self._track_occupancy[frame.video][instance.track].remove(\n (frame.frame_idx, frame.frame_idx + 1)\n )\n\n self.update_counts_for_frame(frame)\n\n def get_frame_count(self, video: Optional[Video] = None, filter: Text = \"\") -> int:\n \"\"\"Return (possibly cached) count of frames matching video/filter.\"\"\"\n if filter not in (\"\", \"user\", \"predicted\"):\n raise ValueError(\n f\"Labels.get_labeled_frame_count() invalid filter: {filter}\"\n )\n\n if video not in self._frame_count_cache:\n self._frame_count_cache[video] = dict()\n if self._frame_count_cache[video].get(filter, None) is None:\n self._frame_count_cache[video][filter] = self.get_filtered_frame_idxs(\n video, filter\n )\n\n return len(self._frame_count_cache[video][filter])\n\n def get_filtered_frame_idxs(\n self, video: Optional[Video] = None, filter: Text = \"\"\n ) -> Set[Tuple[int, int]]:\n \"\"\"Return list of (video_idx, frame_idx) tuples matching video/filter.\"\"\"\n if video not in self.labels.videos:\n # Set value of video to None if not present in the videos list.\n video = None\n\n if filter == \"\":\n filter_func = lambda lf: video is None or lf.video == video\n elif filter == \"user\":\n filter_func = (\n lambda lf: (video is None or lf.video == video)\n and lf.has_user_instances\n )\n elif filter == \"predicted\":\n filter_func = (\n lambda lf: (video is None or lf.video == video)\n and lf.has_predicted_instances\n )\n else:\n raise ValueError(f\"Invalid filter: {filter}\")\n\n # Make a set of (video_idx, frame_idx) tuples.\n # We'll use a set since it's faster to remove items, and we need the\n # video_idx so that we count frames from distinct videos with the same\n # frame index.\n\n if video is not None:\n video_idx = self.labels.videos.index(video)\n return {(video_idx, lf.frame_idx) for lf in self.labels if filter_func(lf)}\n\n return {\n (self.labels.videos.index(lf.video), lf.frame_idx)\n for lf in self.labels\n if filter_func(lf)\n }\n\n def update_counts_for_frame(self, frame: LabeledFrame):\n \"\"\"\n Updated the cached count. 
Should be called after frame is modified.\n \"\"\"\n video = frame.video\n\n if video is None or video not in self._frame_count_cache:\n return\n\n frame_idx = frame.frame_idx\n video_idx = self.labels.videos.index(video)\n\n # Update count of frames with user instances\n if frame.has_user_instances:\n self._add_count_cache(video, video_idx, frame_idx, \"user\")\n else:\n self._del_count_cache(video, video_idx, frame_idx, \"user\")\n\n # Update count of frames with predicted instances\n if frame.has_predicted_instances:\n self._add_count_cache(video, video_idx, frame_idx, \"predicted\")\n else:\n self._del_count_cache(video, video_idx, frame_idx, \"predicted\")\n\n # Update count of all labeled frames\n if len(frame.instances):\n self._add_count_cache(video, video_idx, frame_idx, \"\")\n else:\n self._del_count_cache(video, video_idx, frame_idx, \"\")\n\n def _add_count_cache(self, video, video_idx, frame_idx, type_key: str):\n idx_pair = (video_idx, frame_idx)\n\n # Update count for this specific video\n if type_key in self._frame_count_cache[video]:\n self._frame_count_cache[video][type_key].add(idx_pair)\n\n # Update total for all videos\n if None in self._frame_count_cache:\n if type_key in self._frame_count_cache[None]:\n self._frame_count_cache[None][type_key].add(idx_pair)\n\n def _del_count_cache(self, video, video_idx, frame_idx, type_key: str):\n idx_pair = (video_idx, frame_idx)\n\n # Update count for this specific video\n if type_key in self._frame_count_cache[video]:\n self._frame_count_cache[video][type_key].discard(idx_pair)\n\n # Update total for all videos\n if None in self._frame_count_cache:\n if type_key in self._frame_count_cache[None]:\n self._frame_count_cache[None][type_key].discard(idx_pair)\n\n\n@attr.s(auto_attribs=True, repr=False, str=False)\nclass Labels(MutableSequence):\n \"\"\"\n The :class:`Labels` class collects the data for a SLEAP project.\n\n This class is front-end for all interactions with loading, writing,\n and modifying these labels. The actual storage backend for the data\n is mostly abstracted away from the main interface.\n\n Attributes:\n labeled_frames: A list of :class:`LabeledFrame` objects\n videos: A list of :class:`Video` objects that these labels may or may\n not reference. The video for every `LabeledFrame` will be\n stored in `videos` attribute, but some videos in\n this list may not have any associated labeled frames.\n skeletons: A list of :class:`Skeleton` objects (again, that may or may\n not be referenced by an :class:`Instance` in labeled frame).\n tracks: A list of :class:`Track` that instances can belong to.\n suggestions: List that stores \"suggested\" frames for\n videos in project. 
These can be suggested frames for user\n to label or suggested frames for user to review.\n negative_anchors: Dictionary that stores center-points around\n which to crop as negative samples when training.\n Dictionary key is :class:`Video`, value is list of\n (frame index, x, y) tuples.\n provenance: Dictionary that denotes the origin of the :py:class:`Labels`.\n \"\"\"\n\n labeled_frames: List[LabeledFrame] = attr.ib(default=attr.Factory(list))\n videos: List[Video] = attr.ib(default=attr.Factory(list))\n skeletons: List[Skeleton] = attr.ib(default=attr.Factory(list))\n nodes: List[Node] = attr.ib(default=attr.Factory(list))\n tracks: List[Track] = attr.ib(default=attr.Factory(list))\n suggestions: List[SuggestionFrame] = attr.ib(default=attr.Factory(list))\n negative_anchors: Dict[Video, list] = attr.ib(default=attr.Factory(dict))\n provenance: Dict[Text, Union[str, int, float, bool]] = attr.ib(\n default=attr.Factory(dict)\n )\n\n def __attrs_post_init__(self):\n \"\"\"\n Called by attrs after the class is instantiated.\n\n This updates the top level contains (videos, skeletons, etc)\n from data in the labeled frames, as well as various caches.\n \"\"\"\n\n # Add any videos/skeletons/nodes/tracks that are in labeled\n # frames but not in the lists on our object\n self._update_from_labels()\n\n # Update caches used to find frames by frame index\n self._cache = LabelsDataCache(self)\n\n # Create a variable to store a temporary storage directory\n # used when we unzip\n self.__temp_dir = None\n\n def _update_from_labels(self, merge: bool = False):\n \"\"\"Updates top level attributes with data from labeled frames.\n\n Args:\n merge: If True, then update even if there's already data.\n\n Returns:\n None.\n \"\"\"\n\n # Add any videos that are present in the labels but\n # missing from the video list\n if merge or len(self.videos) == 0:\n # find videos in labeled frames or suggestions\n # that aren't yet in top level videos\n lf_videos = {label.video for label in self.labels}\n suggestion_videos = {sug.video for sug in self.suggestions}\n new_videos = lf_videos.union(suggestion_videos) - set(self.videos)\n # just add the new videos so we don't re-order current list\n if len(new_videos):\n self.videos.extend(list(new_videos))\n\n # Ditto for skeletons\n if merge or len(self.skeletons) == 0:\n self.skeletons = list(\n set(self.skeletons).union(\n {\n instance.skeleton\n for label in self.labels\n for instance in label.instances\n }\n )\n )\n\n # Ditto for nodes\n if merge or len(self.nodes) == 0:\n self.nodes = list(\n set(self.nodes).union(\n {node for skeleton in self.skeletons for node in skeleton.nodes}\n )\n )\n\n # Ditto for tracks, a pattern is emerging here\n if merge or len(self.tracks) == 0:\n # Get tracks from any Instances or PredictedInstances\n other_tracks = {\n instance.track\n for frame in self.labels\n for instance in frame.instances\n if instance.track\n }\n\n # Add tracks from any PredictedInstance referenced by instance\n # This fixes things when there's a referenced PredictionInstance\n # which is no longer in the frame.\n other_tracks = other_tracks.union(\n {\n instance.from_predicted.track\n for frame in self.labels\n for instance in frame.instances\n if instance.from_predicted and instance.from_predicted.track\n }\n )\n\n # Get list of other tracks not already in track list\n new_tracks = list(other_tracks - set(self.tracks))\n\n # Sort the new tracks by spawned on and then name\n new_tracks.sort(key=lambda t: (t.spawned_on, t.name))\n\n 
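# Extending (rather than rebuilding) the track list keeps the positions of\n # existing tracks stable, mirroring how new videos are appended above so\n # the current ordering is not disturbed.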
self.tracks.extend(new_tracks)\n\n def _update_containers(self, new_label: LabeledFrame):\n \"\"\"Ensure that top-level containers are kept updated with new\n instances of objects that come along with new labels.\"\"\"\n\n if new_label.video not in self.videos:\n self.videos.append(new_label.video)\n\n for skeleton in {instance.skeleton for instance in new_label}:\n if skeleton not in self.skeletons:\n self.skeletons.append(skeleton)\n for node in skeleton.nodes:\n if node not in self.nodes:\n self.nodes.append(node)\n\n # Add any new Tracks as well\n for instance in new_label.instances:\n if instance.track and instance.track not in self.tracks:\n self.tracks.append(instance.track)\n\n # Sort the tracks again\n self.tracks.sort(key=lambda t: (t.spawned_on, t.name))\n\n # Update cache data structures\n self._cache.update(new_label)\n\n def update_cache(self):\n \"\"\"Rebuild the internal data caches.\"\"\"\n self._cache.update()\n\n # Below are convenience methods for working with Labels as a list.\n # Maybe we should just inherit from list? Maybe this class shouldn't\n # exist, since it is really just a list with some class methods. I\n # think more stuff might appear in this class later down the line,\n # though.\n\n @property\n def labels(self):\n \"\"\"Alias for labeled_frames.\"\"\"\n return self.labeled_frames\n\n @property\n def skeleton(self) -> Skeleton:\n \"\"\"Return the skeleton if there is only a single skeleton in the labels.\"\"\"\n if len(self.skeletons) == 1:\n return self.skeletons[0]\n else:\n raise ValueError(\n \"Labels.skeleton can only be used when there is only a single skeleton \"\n \"saved in the labels. Use Labels.skeletons instead.\"\n )\n\n @property\n def video(self) -> Video:\n \"\"\"Return the video if there is only a single video in the labels.\"\"\"\n if len(self.videos) == 0:\n raise ValueError(\"There are no videos in the labels.\")\n elif len(self.videos) == 1:\n return self.videos[0]\n else:\n raise ValueError(\n \"Labels.video can only be used when there is only a single video saved \"\n \"in the labels. 
Use Labels.videos instead.\"\n )\n\n @property\n def has_missing_videos(self) -> bool:\n \"\"\"Return True if any of the video files in the labels are missing.\"\"\"\n return any(video.is_missing for video in self.videos)\n\n def __len__(self) -> int:\n \"\"\"Return number of labeled frames.\"\"\"\n return len(self.labeled_frames)\n\n def index(self, value) -> int:\n \"\"\"Return index of labeled frame in list of labeled frames.\"\"\"\n return self.labeled_frames.index(value)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return (\n \"Labels(\"\n f\"labeled_frames={len(self.labeled_frames)}, \"\n f\"videos={len(self.videos)}, \"\n f\"skeletons={len(self.skeletons)}, \"\n f\"tracks={len(self.tracks)}\"\n \")\"\n )\n\n def __str__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return self.__repr__()\n\n def __contains__(self, item) -> bool:\n \"\"\"Check if object contains the given item.\n\n Args:\n item: The item to look for within `Labels`.\n This can be :class:`LabeledFrame`,\n :class:`Video`, :class:`Skeleton`,\n :class:`Node`, or (:class:`Video`, frame idx) tuple.\n\n Returns:\n True if item is found.\n \"\"\"\n if isinstance(item, LabeledFrame):\n return item in self.labeled_frames\n elif isinstance(item, Video):\n return item in self.videos\n elif isinstance(item, Skeleton):\n return item in self.skeletons\n elif isinstance(item, Node):\n return item in self.nodes\n elif isinstance(item, tuple) and len(item) == 2 and isinstance(item[0], Video):\n if isinstance(item[1], int):\n return self.find_first(*item) is not None\n elif isinstance(item[1], np.integer):\n return self.find_first(item[0], item[1].tolist()) is not None\n raise ValueError(\"Item is not an object type contained in labels.\")\n\n def __getitem__(\n self,\n key: Union[\n int,\n slice,\n np.integer,\n np.ndarray,\n list,\n range,\n Video,\n Tuple[Video, Union[np.integer, np.ndarray, int, list, range]],\n ],\n *secondary_key: Union[\n int,\n slice,\n np.integer,\n np.ndarray,\n list,\n range,\n ],\n ) -> Union[LabeledFrame, List[LabeledFrame]]:\n \"\"\"Return labeled frames matching key or return `None` if not found.\n\n This makes `labels[...]` safe and will not raise an exception if the\n item is not found.\n\n Do not call __getitem__ directly, use get instead (get allows kwargs for logic).\n If you happen to call __getitem__ directly, get will be called but without any\n keyword arguments.\n\n Args:\n key: Indexing argument to match against. If `key` is a `Video` or tuple of\n `(Video, frame_index)`, frames that match the criteria will be searched\n for. If a scalar, list, range or array of integers are provided, the\n labels with those linear indices will be returned.\n secondary_key: Numerical indexing argument(s) which supplement `key`. Only\n used when `key` is a `Video`.\n \"\"\"\n return self.get(key, *secondary_key)\n\n def get(\n self,\n key: Union[\n int,\n slice,\n np.integer,\n np.ndarray,\n list,\n range,\n Video,\n Tuple[Video, Union[np.integer, np.ndarray, int, list, range]],\n ],\n *secondary_key: Union[\n int,\n slice,\n np.integer,\n np.ndarray,\n list,\n range,\n ],\n use_cache: bool = False,\n raise_errors: bool = False,\n ) -> Union[LabeledFrame, List[LabeledFrame]]:\n \"\"\"Return labeled frames matching key or return `None` if not found.\n\n This is a safe version of `labels[...]` that will not raise an exception if the\n item is not found.\n\n Args:\n key: Indexing argument to match against. 
If `key` is a `Video` or tuple of\n `(Video, frame_index)`, frames that match the criteria will be searched\n for. If a scalar, list, range or array of integers is provided, the\n labels with those linear indices will be returned.\n secondary_key: Numerical indexing argument(s) which supplement `key`. Only\n used when `key` is of type `Video`.\n use_cache: Boolean that determines whether Labels.find_first() should\n instead call Labels.find() which uses the labels data cache. If\n True, use the labels data cache, else loop through all labels to search.\n raise_errors: Boolean that determines whether KeyErrors should be raised. If\n True, raises KeyErrors, else catches KeyErrors and returns None instead\n of raising KeyError.\n\n Raises:\n KeyError: If the specified key could not be found.\n\n Returns:\n A list with the matching `LabeledFrame`s, or a single `LabeledFrame` if a\n scalar key was provided, or `None` if not found.\n \"\"\"\n try:\n if len(secondary_key) > 0:\n if type(key) != tuple:\n key = (key,)\n key = key + tuple(secondary_key)\n\n # Do any conversions first.\n if isinstance(key, slice):\n start, stop, step = key.indices(len(self))\n key = range(start, stop, step)\n elif isinstance(key, (np.integer, np.ndarray)):\n key = key.tolist()\n\n if isinstance(key, int):\n return self.labels.__getitem__(key)\n\n elif isinstance(key, Video):\n if key not in self.videos:\n raise KeyError(\"Video not found in labels.\")\n return self.find(video=key)\n\n elif isinstance(key, tuple) and len(key) == 2 and isinstance(key[0], Video):\n if key[0] not in self.videos:\n raise KeyError(\"Video not found in labels.\")\n\n # Do any conversions first.\n if isinstance(key[1], (np.integer, np.ndarray)):\n key = (key[0], key[1].tolist())\n\n if isinstance(key[1], int):\n _hit = self.find_first(\n video=key[0], frame_idx=key[1], use_cache=use_cache\n )\n if _hit is None:\n raise KeyError(\n f\"No label found for specified video at frame {key[1]}.\"\n )\n return _hit\n elif isinstance(key[1], (list, range)):\n return self.find(video=key[0], frame_idx=key[1])\n else:\n raise KeyError(\"Invalid label indexing arguments.\")\n\n elif isinstance(key, (list, range)):\n return [self.__getitem__(i) for i in key]\n\n else:\n raise KeyError(\"Invalid label indexing arguments.\")\n\n except KeyError as e:\n if raise_errors:\n raise e\n return None\n\n def extract(self, inds, copy: bool = False) -> \"Labels\":\n \"\"\"Extract labeled frames from indices and return a new `Labels` object.\n Args:\n inds: Any valid indexing keys, e.g., a range, slice, list of label indices,\n numpy array, `Video`, etc. See `__getitem__` for full list.\n copy: If `True`, create a new copy of all of the extracted labeled frames\n and associated labels. 
If `False` (the default), a shallow copy with\n references to the original labeled frames and other objects will be\n returned.\n Returns:\n A new `Labels` object with the specified labeled frames.\n This will preserve the other data structures even if they are not found in\n the extracted labels, including:\n - `Labels.videos`\n - `Labels.skeletons`\n - `Labels.tracks`\n - `Labels.suggestions`\n - `Labels.provenance`\n \"\"\"\n lfs = self.__getitem__(inds)\n new_labels = type(self)(\n labeled_frames=lfs,\n videos=self.videos,\n skeletons=self.skeletons,\n tracks=self.tracks,\n suggestions=self.suggestions,\n provenance=self.provenance,\n )\n if copy:\n new_labels = new_labels.copy()\n return new_labels\n\n def copy(self) -> \"Labels\":\n \"\"\"Return a full deep copy of the labels.\n Notes:\n All objects will be re-created by serializing and then deserializing the\n labels. This may be slow and will create new instances of all data\n structures.\n \"\"\"\n return type(self).from_json(self.to_json())\n\n def split(\n self, n: Union[float, int], copy: bool = True\n ) -> Tuple[\"Labels\", \"Labels\"]:\n \"\"\"Split labels randomly.\n\n Args:\n n: Number or fraction of elements in the first split.\n copy: If `True` (the default), return copies of the labels.\n\n Returns:\n A tuple of `(labels_a, labels_b)` where both are `sleap.Labels` instances\n subsampled from these labels.\n\n Notes:\n If there is only 1 labeled frame, this will return two copies of the same\n labels. For `len(labels) > 1`, splits are guaranteed to be mutually\n exclusive.\n\n Example:\n You can generate multiple splits by calling this repeatedly:\n\n ```py\n # Generate a 0.8/0.1/0.1 train/val/test split.\n labels_train, labels_val_test = labels.split(n=0.8)\n labels_val, labels_test = labels_val_test.split(n=0.5)\n ```\n \"\"\"\n if len(self) == 1:\n if copy:\n return self.copy(), self.copy()\n else:\n return self, self\n\n # Split indices.\n if type(n) != int:\n n = round(len(self) * n)\n n = max(min(n, len(self) - 1), 1)\n idx_a, idx_b = train_test_split(list(range(len(self))), train_size=n)\n\n return self.extract(idx_a, copy=copy), self.extract(idx_b, copy=copy)\n\n def __setitem__(self, index, value: LabeledFrame):\n \"\"\"Set labeled frame at given index.\"\"\"\n # TODO: Maybe we should remove this method altogether?\n self.labeled_frames.__setitem__(index, value)\n self._update_containers(value)\n\n def insert(self, index, value: LabeledFrame):\n \"\"\"Insert labeled frame at given index.\"\"\"\n if value in self or (value.video, value.frame_idx) in self:\n return\n\n self.labeled_frames.insert(index, value)\n self._update_containers(value)\n\n def append(self, value: LabeledFrame):\n \"\"\"Add labeled frame to list of labeled frames.\"\"\"\n self.insert(len(self) + 1, value)\n\n def __delitem__(self, key):\n \"\"\"Remove labeled frame with given index.\"\"\"\n self.labeled_frames.remove(self.labeled_frames[key])\n\n def remove(self, value: LabeledFrame):\n \"\"\"Remove given labeled frame.\"\"\"\n self.remove_frame(value)\n\n def remove_frame(self, lf: LabeledFrame, update_cache: bool = True):\n \"\"\"Remove a given labeled frame.\n\n Args:\n lf: Labeled frame instance to remove.\n update_cache: If True, update the internal frame cache. 
If False, cache\n update can be postponed (useful when removing many frames).\n \"\"\"\n self.labeled_frames.remove(lf)\n if update_cache:\n self._cache.remove_frame(lf)\n\n def remove_frames(self, lfs: List[LabeledFrame]):\n \"\"\"Remove a list of frames from the labels.\n\n Args:\n lfs: A sequence of labeled frames to remove.\n \"\"\"\n to_remove = set(lfs)\n self.labeled_frames = [lf for lf in self.labeled_frames if lf not in to_remove]\n self.update_cache()\n\n def remove_empty_instances(self, keep_empty_frames: bool = True):\n \"\"\"Remove instances with no visible points.\n\n Args:\n keep_empty_frames: If True (the default), frames with no remaining instances\n will not be removed.\n\n Notes:\n This will modify the labels in place. If a copy is desired, call\n `labels.copy()` before this.\n \"\"\"\n for lf in self.labeled_frames:\n lf.remove_empty_instances()\n self.update_cache()\n if not keep_empty_frames:\n self.remove_empty_frames()\n\n def remove_empty_frames(self):\n \"\"\"Remove frames with no instances.\"\"\"\n self.labeled_frames = [\n lf for lf in self.labeled_frames if len(lf.instances) > 0\n ]\n self.update_cache()\n\n def find(\n self,\n video: Video,\n frame_idx: Optional[Union[int, Iterable[int]]] = None,\n return_new: bool = False,\n ) -> List[LabeledFrame]:\n \"\"\"Search for labeled frames given video and/or frame index.\n\n Args:\n video: A :class:`Video` that is associated with the project.\n frame_idx: The frame index (or indices) which we want to\n find in the video. If a range is specified, we'll return\n all frames with indices in that range. If not specified,\n then we'll return all labeled frames for the video.\n return_new: Whether to return a singleton list with a new, empty\n :class:`LabeledFrame` if none is found in the project.\n\n Returns:\n List of `LabeledFrame` objects that match the criteria.\n Empty if no matches found, unless return_new is True,\n in which case it contains a new `LabeledFrame` with\n `video` and `frame_index` set.\n \"\"\"\n null_result = (\n [LabeledFrame(video=video, frame_idx=frame_idx)] if return_new else []\n )\n\n result = self._cache.find_frames(video, frame_idx)\n return null_result if result is None else result\n\n def frames(self, video: Video, from_frame_idx: int = -1, reverse=False):\n \"\"\"Return an iterator over all labeled frames in a video.\n\n Args:\n video: A :class:`Video` that is associated with the project.\n from_frame_idx: The frame index from which we want to start.\n Defaults to the first frame of video.\n reverse: Whether to iterate over frames in reverse order.\n\n Yields:\n :class:`LabeledFrame`\n \"\"\"\n frame_idxs = self._cache.find_fancy_frame_idxs(video, from_frame_idx, reverse)\n\n # Yield the frames\n for idx in frame_idxs:\n yield self._cache._frame_idx_map[video][idx]\n\n def find_first(\n self, video: Video, frame_idx: Optional[int] = None, use_cache: bool = False\n ) -> Optional[LabeledFrame]:\n \"\"\"Find the first occurrence of a matching labeled frame.\n\n Matches on frames for the given video and/or frame index.\n\n Args:\n video: A `Video` instance that is associated with the\n labeled frames\n frame_idx: An integer specifying the frame index within\n the video\n use_cache: Boolean that determines whether Labels.find_first() should\n instead call Labels.find() which uses the labels data cache. 
If\n True, use the labels data cache, else loop through all labels to search.\n\n Returns:\n First `LabeledFrame` that matches the criteria\n or None if none were found.\n \"\"\"\n if use_cache:\n label = self.find(video=video, frame_idx=frame_idx)\n return None if len(label) == 0 else label[0]\n else:\n if video in self.videos:\n for label in self.labels:\n if label.video == video and (\n frame_idx is None or (label.frame_idx == frame_idx)\n ):\n return label\n\n def find_last(\n self, video: Video, frame_idx: Optional[int] = None\n ) -> Optional[LabeledFrame]:\n \"\"\"Find the last occurrence of a matching labeled frame.\n\n Matches on frames for the given video and/or frame index.\n\n Args:\n video: a `Video` instance that is associated with the\n labeled frames\n frame_idx: an integer specifying the frame index within\n the video\n\n Returns:\n Last `LabeledFrame` that matches the criteria\n or None if none were found.\n \"\"\"\n if video in self.videos:\n for label in reversed(self.labels):\n if label.video == video and (\n frame_idx is None or (label.frame_idx == frame_idx)\n ):\n return label\n\n @property\n def user_labeled_frames(self) -> List[LabeledFrame]:\n \"\"\"Return all labeled frames with user (non-predicted) instances.\"\"\"\n return [lf for lf in self.labeled_frames if lf.has_user_instances]\n\n @property\n def user_labeled_frame_inds(self) -> List[int]:\n \"\"\"Return a list of indices of frames with user-labeled instances.\"\"\"\n return [i for i, lf in enumerate(self.labeled_frames) if lf.has_user_instances]\n\n def with_user_labels_only(\n self,\n user_instances_only: bool = True,\n with_track_only: bool = False,\n copy: bool = True,\n ) -> \"Labels\":\n \"\"\"Return a new `Labels` containing only user labels.\n\n This is useful as a preprocessing step to train on only user-labeled data.\n\n Args:\n user_instances_only: If `True` (the default), predicted instances will be\n removed from frames that also have user instances.\n with_track_only: If `True`, remove instances without a track.\n copy: If `True` (the default), create a new copy of all of the extracted\n labeled frames and associated labels. If `False`, a shallow copy with\n references to the original labeled frames and other objects will be\n returned. 
Warning: If returning a shallow copy, predicted and untracked\n instances will be removed from the original labels as well!\n\n Returns:\n A new `Labels` with only the specified subset of frames and instances.\n \"\"\"\n new_labels = self.extract(self.user_labeled_frame_inds, copy=copy)\n if user_instances_only:\n new_labels.remove_predictions()\n if with_track_only:\n new_labels.remove_untracked_instances()\n new_labels.remove_empty_frames()\n return new_labels\n\n def get_labeled_frame_count(self, video: Optional[Video] = None, filter: Text = \"\"):\n return self._cache.get_frame_count(video, filter)\n\n def instance_count(self, video: Video, frame_idx: int) -> int:\n \"\"\"Return number of instances matching video/frame index.\"\"\"\n count = 0\n labeled_frame = self.find_first(video, frame_idx)\n if labeled_frame is not None:\n count = len(\n [inst for inst in labeled_frame.instances if isinstance(inst, Instance)]\n )\n return count\n\n @property\n def all_instances(self) -> List[Instance]:\n \"\"\"Return list of all instances.\"\"\"\n return list(self.instances())\n\n @property\n def user_instances(self) -> List[Instance]:\n \"\"\"Return list of all user (non-predicted) instances.\"\"\"\n return [inst for inst in self.all_instances if type(inst) == Instance]\n\n @property\n def predicted_instances(self) -> List[PredictedInstance]:\n \"\"\"Return list of all predicted instances.\"\"\"\n return [inst for inst in self.all_instances if type(inst) == PredictedInstance]\n\n @property\n def has_user_instances(self) -> bool:\n return any(lf.has_user_instances for lf in self.labeled_frames)\n\n @property\n def has_predicted_instances(self) -> bool:\n return any(lf.has_predicted_instances for lf in self.labeled_frames)\n\n @property\n def max_user_instances(self) -> int:\n n = 0\n for lf in self.labeled_frames:\n n = max(n, lf.n_user_instances)\n return n\n\n @property\n def min_user_instances(self) -> int:\n n = None\n for lf in self.labeled_frames:\n if n is not None:\n n = min(n, lf.n_user_instances)\n else:\n n = lf.n_user_instances\n return n\n\n @property\n def is_multi_instance(self) -> bool:\n \"\"\"Returns `True` if there are multiple user instances in any frame.\"\"\"\n return self.max_user_instances > 1\n\n def describe(self):\n \"\"\"Print basic statistics about the labels dataset.\"\"\"\n print(f\"Skeleton: {self.skeleton}\")\n print(f\"Videos: {[v.filename for v in self.videos]}\")\n n_user = 0\n n_pred = 0\n n_user_inst = 0\n n_pred_inst = 0\n for lf in self.labeled_frames:\n if lf.has_user_instances:\n n_user += 1\n n_user_inst += len(lf.user_instances)\n if lf.has_predicted_instances:\n n_pred += 1\n n_pred_inst += len(lf.predicted_instances)\n print(f\"Frames (user/predicted): {n_user:,}/{n_pred:,}\")\n print(f\"Instances (user/predicted): {n_user_inst:,}/{n_pred_inst:,}\")\n print(\"Tracks:\", self.tracks)\n print(f\"Suggestions: {len(self.suggestions):,}\")\n print(\"Provenance:\", self.provenance)\n\n def instances(\n self, video: Optional[Video] = None, skeleton: Optional[Skeleton] = None\n ):\n \"\"\"Iterate over instances in the labels, optionally with filters.\n\n Args:\n video: Only iterate through instances in this video\n skeleton: Only iterate through instances with this skeleton\n\n Yields:\n Instance: The next labeled instance\n \"\"\"\n for label in self.labels:\n if video is None or label.video == video:\n for instance in label.instances:\n if skeleton is None or instance.skeleton == skeleton:\n yield instance\n\n def get_template_instance_points(self, skeleton: 
Skeleton):\n if not hasattr(self, \"_template_instance_points\"):\n self._template_instance_points = dict()\n\n # Use cache unless there are a small number of labeled frames so far, or\n # we don't have a cached template instance yet or the skeleton has changed.\n\n rebuild_template = False\n if len(self.labeled_frames) < 100:\n rebuild_template = True\n elif skeleton not in self._template_instance_points:\n rebuild_template = True\n elif skeleton.nodes != self._template_instance_points[skeleton][\"nodes\"]:\n rebuild_template = True\n\n if rebuild_template:\n # Make sure there are some labeled frames\n if self.labeled_frames and any(self.instances()):\n from sleap.info import align\n\n first_n_instances = itertools.islice(\n self.instances(skeleton=skeleton), 1000\n )\n template_points = align.get_template_points_array(first_n_instances)\n self._template_instance_points[skeleton] = dict(\n points=template_points, nodes=skeleton.nodes\n )\n else:\n # No labeled frames so use force-directed graph layout\n import networkx as nx\n\n node_positions = nx.spring_layout(G=skeleton.graph, scale=50)\n\n template_points = np.stack(\n [\n node_positions[node]\n if node in node_positions\n else np.random.randint(0, 50, size=2)\n for node in skeleton.nodes\n ]\n )\n self._template_instance_points[skeleton] = dict(\n points=template_points, nodes=skeleton.nodes\n )\n\n return self._template_instance_points[skeleton][\"points\"]\n\n def get_track_count(self, video: Video) -> int:\n \"\"\"Return the number of occupied tracks for a given video.\"\"\"\n return len(self.get_track_occupancy(video))\n\n def get_track_occupancy(self, video: Video) -> List:\n \"\"\"Return track occupancy list for given video.\"\"\"\n return self._cache.get_video_track_occupancy(video=video)\n\n def add_track(self, video: Video, track: Track):\n \"\"\"Add track to labels, updating occupancy.\"\"\"\n self.tracks.append(track)\n self._cache.add_track(video, track)\n\n def remove_track(self, track: Track):\n \"\"\"Remove a track from the labels, updating (but not removing) instances.\"\"\"\n for inst in self.instances():\n if inst.track == track:\n inst.track = None\n self.tracks.remove(track)\n\n def remove_all_tracks(self):\n \"\"\"Remove all tracks from labels, updating (but not removing) instances.\"\"\"\n for inst in self.instances():\n inst.track = None\n self.tracks = []\n\n def remove_unused_tracks(self):\n \"\"\"Remove tracks that are not used by any instances.\"\"\"\n if len(self.tracks) == 0:\n return\n\n # Check which tracks are used by instances\n all_tracks = set(self.tracks)\n used_tracks = set()\n for inst in self.instances():\n used_tracks.add(inst.track)\n\n # Remove set difference from tracks in Labels\n tracks_to_remove = all_tracks - used_tracks\n for track in tracks_to_remove:\n self.tracks.remove(track)\n\n def track_set_instance(\n self, frame: LabeledFrame, instance: Instance, new_track: Track\n ):\n \"\"\"Set track on given instance, updating occupancy.\"\"\"\n self.track_swap(\n frame.video,\n new_track,\n instance.track,\n (frame.frame_idx, frame.frame_idx + 1),\n )\n if instance.track is None:\n self._cache.remove_instance(frame, instance) # FIXME\n instance.track = new_track\n\n def track_swap(\n self,\n video: Video,\n new_track: Track,\n old_track: Optional[Track],\n frame_range: tuple,\n ):\n \"\"\"Swap track assignment for instances in two tracks.\n\n If you need to change the track to or from None, you'll need\n to use :meth:`track_set_instance` for each specific\n instance you want to modify.\n\n 
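For example, to swap all instances between two existing tracks over\n frames 10-19 (an illustrative call; `track_a` and `track_b` stand for\n `Track` objects already present in `video`): ::\n\n labels.track_swap(video, track_a, track_b, (10, 20))\n\n 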
Args:\n video: The :class:`Video` for which we want to swap tracks.\n new_track: A :class:`Track` for which we want to swap\n instances with another track.\n old_track: The other :class:`Track` for swapping.\n frame_range: Tuple of (start, end) frame indexes.\n If you want to swap tracks on a single frame, use\n (frame index, frame index + 1).\n \"\"\"\n self._cache.track_swap(video, new_track, old_track, frame_range)\n\n # Update tracks set on instances\n\n # Get all instances in old/new tracks\n # Note that this won't match on None track.\n old_track_instances = self.find_track_occupancy(video, old_track, frame_range)\n new_track_instances = self.find_track_occupancy(video, new_track, frame_range)\n\n # swap new to old tracks on all instances\n for instance in old_track_instances:\n instance.track = new_track\n # old_track can be `Track` or int\n # If int, it's index in instance list which we'll use as a pseudo-track,\n # but we won't set instances currently on new_track to old_track.\n if type(old_track) == Track:\n for instance in new_track_instances:\n instance.track = old_track\n\n def remove_instance(\n self, frame: LabeledFrame, instance: Instance, in_transaction: bool = False\n ):\n \"\"\"Remove instance from frame, updating track occupancy.\"\"\"\n frame.instances.remove(instance)\n if not in_transaction:\n self._cache.remove_instance(frame, instance)\n\n def add_instance(self, frame: LabeledFrame, instance: Instance):\n \"\"\"Add instance to frame, updating track occupancy.\"\"\"\n # Ensure that there isn't already an Instance with this track\n tracks_in_frame = [\n inst.track\n for inst in frame\n if type(inst) == Instance and inst.track is not None\n ]\n if instance.track in tracks_in_frame:\n instance.track = None\n\n # Add instance and track to labels\n frame.instances.append(instance)\n if (instance.track is not None) and (instance.track not in self.tracks):\n self.add_track(video=frame.video, track=instance.track)\n\n # Update cache\n self._cache.add_instance(frame, instance)\n\n def find_track_occupancy(\n self, video: Video, track: Union[Track, int], frame_range=None\n ) -> List[Instance]:\n \"\"\"Get instances for a given video, track, and range of frames.\n\n Args:\n video: the `Video`\n track: the `Track` or int (\"pseudo-track\" index to instance list)\n frame_range (optional):\n If specified, only return instances on frames in range.\n If None, return all instances for given track.\n\n Returns:\n List of :class:`Instance` objects.\n \"\"\"\n frame_range = range(*frame_range) if type(frame_range) == tuple else frame_range\n\n def does_track_match(inst, tr, labeled_frame):\n match = False\n if type(tr) == Track and inst.track is tr:\n match = True\n elif (\n type(tr) == int\n and labeled_frame.instances.index(inst) == tr\n and inst.track is None\n ):\n match = True\n return match\n\n track_frame_inst = [\n instance\n for lf in self.find(video)\n for instance in lf.instances\n if does_track_match(instance, track, lf)\n and (frame_range is None or lf.frame_idx in frame_range)\n ]\n return track_frame_inst\n\n def add_suggestion(self, video: Video, frame_idx: int):\n \"\"\"Add a suggested frame to the labels.\n\n Args:\n video: `sleap.Video` instance of the suggestion.\n frame_idx: Index of the frame of the suggestion.\n \"\"\"\n for suggestion in self.suggestions:\n if suggestion.video == video and suggestion.frame_idx == frame_idx:\n return\n self.suggestions.append(SuggestionFrame(video=video, frame_idx=frame_idx))\n\n def remove_suggestion(self, video: Video, 
frame_idx: int):\n \"\"\"Remove a suggestion from the list by video and frame index.\n\n Args:\n video: `sleap.Video` instance of the suggestion.\n frame_idx: Index of the frame of the suggestion.\n \"\"\"\n for suggestion in self.suggestions:\n if suggestion.video == video and suggestion.frame_idx == frame_idx:\n self.suggestions.remove(suggestion)\n return\n\n def get_video_suggestions(\n self, video: Video, user_labeled: bool = True\n ) -> List[int]:\n \"\"\"Return a list of suggested frame indices.\n\n Args:\n video: Video to get suggestions for.\n user_labeled: If `True` (the default), return frame indices for suggestions\n that already have user labels. If `False`, only suggestions with no user\n labeled instances will be returned.\n\n Returns:\n Indices of the suggested frames for the specified video.\n \"\"\"\n frame_indices = []\n for suggestion in self.suggestions:\n if suggestion.video == video:\n fidx = suggestion.frame_idx\n if not user_labeled:\n lf = self.get((video, fidx))\n if lf is not None and lf.has_user_instances:\n continue\n frame_indices.append(fidx)\n return frame_indices\n\n def get_suggestions(self) -> List[SuggestionFrame]:\n \"\"\"Return all suggestions as a list of SuggestionFrame items.\"\"\"\n return self.suggestions\n\n def find_suggestion(self, video, frame_idx):\n \"\"\"Find SuggestionFrame by video and frame index.\"\"\"\n matches = [\n item\n for item in self.suggestions\n if item.video == video and item.frame_idx == frame_idx\n ]\n\n if matches:\n return matches[0]\n\n return None\n\n def get_next_suggestion(self, video, frame_idx, seek_direction=1):\n \"\"\"Return the next `SuggestionFrame`, seeking from the given frame.\"\"\"\n # make sure we have valid seek_direction\n if seek_direction not in (-1, 1):\n raise ValueError(\"seek_direction should be -1 or 1.\")\n # make sure the video belongs to this Labels object\n if video not in self.videos:\n return None\n\n all_suggestions = self.get_suggestions()\n\n # If we're currently on a suggestion, then follow order of list\n match = self.find_suggestion(video, frame_idx)\n if match is not None:\n suggestion_idx = all_suggestions.index(match)\n new_idx = (suggestion_idx + seek_direction) % len(all_suggestions)\n return all_suggestions[new_idx]\n\n # Otherwise, find the prev/next suggestion sorted by frame order...\n\n # Look for next (or previous) suggestion in current video.\n if seek_direction == 1:\n frame_suggestion = min(\n (i for i in self.get_video_suggestions(video) if i > frame_idx),\n default=None,\n )\n else:\n frame_suggestion = max(\n (i for i in self.get_video_suggestions(video) if i < frame_idx),\n default=None,\n )\n if frame_suggestion is not None:\n return self.find_suggestion(video, frame_suggestion)\n\n # If we didn't find suggestion in current video, then we want earliest\n # frame in next video with suggestions.\n next_video_idx = (self.videos.index(video) + seek_direction) % len(self.videos)\n video = self.videos[next_video_idx]\n if seek_direction == 1:\n frame_suggestion = min(\n (i for i in self.get_video_suggestions(video)), default=None\n )\n else:\n frame_suggestion = max(\n (i for i in self.get_video_suggestions(video)), default=None\n )\n return self.find_suggestion(video, frame_suggestion)\n\n def append_suggestions(self, suggestions: List[SuggestionFrame]):\n \"\"\"Append the suggested frames.\"\"\"\n self.suggestions.extend(suggestions)\n\n def set_suggestions(self, suggestions: List[SuggestionFrame]):\n \"\"\"Set the suggested frames.\"\"\"\n self.suggestions = 
suggestions\n\n def delete_suggestions(self, video):\n \"\"\"Delete suggestions for specified video.\"\"\"\n self.suggestions = [item for item in self.suggestions if item.video != video]\n\n def clear_suggestions(self):\n \"\"\"Delete all suggestions.\"\"\"\n self.suggestions = []\n\n @property\n def unlabeled_suggestions(self) -> List[SuggestionFrame]:\n \"\"\"Return suggestions without user labels.\"\"\"\n unlabeled_suggestions = []\n for suggestion in self.suggestions:\n lf = self.get(suggestion.video, suggestion.frame_idx)\n if lf is None or not lf.has_user_instances:\n unlabeled_suggestions.append(suggestion)\n return unlabeled_suggestions\n\n def get_unlabeled_suggestion_inds(self) -> List[int]:\n \"\"\"Find labeled frames for unlabeled suggestions and return their indices.\n\n This is useful for generating a list of example indices for inference on\n unlabeled suggestions.\n\n Returns:\n List of indices of the labeled frames that correspond to the suggestions\n that do not have user instances.\n\n If a labeled frame corresponding to a suggestion does not exist, an empty\n one will be created.\n\n See also: `Labels.remove_empty_frames`\n \"\"\"\n inds = []\n for suggestion in self.unlabeled_suggestions:\n lf = self.get((suggestion.video, suggestion.frame_idx))\n if lf is None:\n self.append(\n LabeledFrame(video=suggestion.video, frame_idx=suggestion.frame_idx)\n )\n inds.append(len(self.labeled_frames) - 1)\n else:\n inds.append(self.index(lf))\n return inds\n\n def add_video(self, video: Video):\n \"\"\"Add a video to the labels if it is not already in it.\n\n Video instances are added automatically when adding labeled frames,\n but this function allows for adding videos to the labels before any\n labeled frames are added.\n\n Args:\n video: `Video` instance\n\n \"\"\"\n if video not in self.videos:\n self.videos.append(video)\n\n def remove_video(self, video: Video):\n \"\"\"Remove a video from the labels and all associated labeled frames.\n\n Args:\n video: `Video` instance to be removed.\n \"\"\"\n if video not in self.videos:\n raise KeyError(\"Video is not in labels.\")\n\n # Delete all associated labeled frames\n for label in reversed(self.labeled_frames):\n if label.video == video:\n self.labeled_frames.remove(label)\n\n # Delete data that's indexed by video\n self.delete_suggestions(video)\n if video in self.negative_anchors:\n del self.negative_anchors[video]\n\n # Delete video\n self.videos.remove(video)\n self._cache.remove_video(video)\n\n @classmethod\n def from_json(cls, *args, **kwargs):\n from sleap.io.format.labels_json import LabelsJsonAdaptor\n\n return LabelsJsonAdaptor.from_json_data(*args, **kwargs)\n\n def extend_from(\n self, new_frames: Union[\"Labels\", List[LabeledFrame]], unify: bool = False\n ):\n \"\"\"Merge data from another `Labels` object or `LabeledFrame` list.\n\n Arg:\n new_frames: the object from which to copy data\n unify: whether to replace objects in new frames with\n corresponding objects from current `Labels` data\n\n Returns:\n bool, True if we added frames, False otherwise\n \"\"\"\n # allow either Labels or list of LabeledFrames\n if isinstance(new_frames, Labels):\n new_frames = new_frames.labeled_frames\n\n # return if this isn't non-empty list of labeled frames\n if not isinstance(new_frames, list) or len(new_frames) == 0:\n return False\n if not isinstance(new_frames[0], LabeledFrame):\n return False\n\n # If unify, we want to replace objects in the frames with\n # corresponding objects from the current labels.\n # We do this by 
deserializing/serializing with match_to.\n if unify:\n new_json = Labels(labeled_frames=new_frames).to_dict()\n new_labels = Labels.from_json(new_json, match_to=self)\n new_frames = new_labels.labeled_frames\n\n # copy the labeled frames\n self.labeled_frames.extend(new_frames)\n\n # merge labeled frames for the same video/frame idx\n self.merge_matching_frames()\n\n # update top level videos/nodes/skeletons/tracks\n self._update_from_labels(merge=True)\n self._cache.update()\n\n return True\n\n def has_frame(\n self,\n lf: Optional[LabeledFrame] = None,\n video: Optional[Video] = None,\n frame_idx: Optional[int] = None,\n use_cache: bool = True,\n ) -> bool:\n \"\"\"Check if the labels contain a specified frame.\n\n Args:\n lf: `LabeledFrame` to search for. If not provided, the `video` and\n `frame_idx` must not be `None`.\n video: `Video` of the frame. Not necessary if `lf` is given.\n frame_idx: Integer frame index of the frame. Not necessary if `lf` is given.\n use_cache: If `True` (the default), use label lookup cache for faster\n searching. If `False`, check every frame without the cache.\n\n Returns:\n A `bool` indicating whether the specified `LabeledFrame` is contained in the\n labels.\n\n This will return `True` if there is a matching frame with the same video and\n frame index, even if they contain different instances.\n\n Notes:\n The `Video` instance must be the same as the ones in these labels, so if\n comparing to `Video`s loaded from another file, be sure to load those labels\n with matching, i.e.: `sleap.Labels.load_file(..., match_to=labels)`.\n \"\"\"\n if lf is not None:\n video = lf.video\n frame_idx = lf.frame_idx\n if video is None or frame_idx is None:\n raise ValueError(\"Either lf or video and frame_idx must be provided.\")\n\n if use_cache:\n return len(self.find(video, frame_idx=frame_idx, return_new=False)) > 0\n\n else:\n if video not in self.videos:\n return False\n for lf in self.labeled_frames:\n if lf.video == video and lf.frame_idx == frame_idx:\n return True\n return False\n\n def remove_user_instances(self, new_labels: Optional[\"Labels\"] = None):\n \"\"\"Clear user instances from the labels.\n\n Useful prior to merging operations to prevent overlapping instances from new\n labels.\n\n Args:\n new_labels: If not `None`, only user instances in frames that also contain\n user instances in the new labels will be removed. 
If not provided\n (the default), all user instances will be removed.\n\n Notes:\n If providing `new_labels`, it must have been loaded using\n `sleap.Labels.load_file(..., match_to=labels)` to ensure that conflicting\n frames can be detected.\n\n Labeled frames without any instances after clearing will also be removed\n from the dataset.\n \"\"\"\n keep_lfs = []\n for lf in self.labeled_frames:\n if new_labels is not None:\n if not new_labels.has_frame(lf):\n # Base frame is not in new labels, so just keep it without\n # modification.\n keep_lfs.append(lf)\n continue\n\n if lf.has_predicted_instances:\n # Remove predictions from base frame.\n lf.instances = lf.predicted_instances\n keep_lfs.append(lf)\n\n # Keep only labeled frames with no conflicting predictions.\n self.labeled_frames = keep_lfs\n\n def remove_predictions(self, new_labels: Optional[\"Labels\"] = None):\n \"\"\"Clear predicted instances from the labels.\n\n Useful prior to merging operations to prevent overlapping instances from new\n predictions.\n\n Args:\n new_labels: If not `None`, only predicted instances in frames that also\n contain predictions in the new labels will be removed. If not provided\n (the default), all predicted instances will be removed.\n\n Notes:\n If providing `new_labels`, it must have been loaded using\n `sleap.Labels.load_file(..., match_to=labels)` to ensure that conflicting\n frames can be detected.\n\n Labeled frames without any instances after clearing will also be removed\n from the dataset.\n \"\"\"\n keep_lfs = []\n for lf in self.labeled_frames:\n if new_labels is not None:\n if not new_labels.has_frame(lf):\n # Base frame is not in new labels, so just keep it without\n # modification.\n keep_lfs.append(lf)\n continue\n\n if lf.has_user_instances:\n # Remove predictions from base frame.\n lf.instances = lf.user_instances\n keep_lfs.append(lf)\n\n # Keep only labeled frames with no conflicting predictions.\n self.labeled_frames = keep_lfs\n\n def remove_untracked_instances(self, remove_empty_frames: bool = True):\n \"\"\"Remove instances that do not have a track assignment.\n\n Args:\n remove_empty_frames: If `True` (the default), removes frames that do not\n contain any instances after removing untracked ones.\n \"\"\"\n for lf in self.labeled_frames:\n lf.remove_untracked()\n if remove_empty_frames:\n self.remove_empty_frames()\n\n @classmethod\n def complex_merge_between(\n cls, base_labels: \"Labels\", new_labels: \"Labels\", unify: bool = True\n ) -> tuple:\n \"\"\"Merge frames and other data from one dataset into another.\n\n Anything that can be merged cleanly is merged into base_labels.\n\n Frames conflict just in case each labels object has a matching\n frame (same video and frame idx) with instances not in other.\n\n Frames can be merged cleanly if:\n\n * the frame is in only one of the labels, or\n * the frame is in both labels, but all instances perfectly match\n (which means they are redundant), or\n * the frame is in both labels, maybe there are some redundant\n instances, but only one version of the frame has additional\n instances not in the other.\n\n Args:\n base_labels: the `Labels` that we're merging into\n new_labels: the `Labels` that we're merging from\n unify: whether to replace objects (e.g., `Video`) in\n new_labels with *matching* objects from base\n\n Returns:\n tuple of three items:\n\n * Dictionary, keys are :class:`Video`, values are\n dictionary in which keys are frame index (int)\n and value is list of :class:`Instance` objects\n * list of conflicting 
:class:`Instance` objects from base\n * list of conflicting :class:`Instance` objects from new\n\n \"\"\"\n # If unify, we want to replace objects in the frames with\n # corresponding objects from the current labels.\n # We do this by deserializing/serializing with match_to.\n if unify:\n new_json = new_labels.to_dict()\n new_labels = cls.from_json(new_json, match_to=base_labels)\n\n # Merge anything that can be merged cleanly and get conflicts\n merged, extra_base, extra_new = LabeledFrame.complex_merge_between(\n base_labels=base_labels, new_frames=new_labels.labeled_frames\n )\n\n # For clean merge, finish merge now by cleaning up base object\n if not extra_base and not extra_new:\n # Add any new videos (etc) into top level lists in base\n base_labels._update_from_labels(merge=True)\n # Update caches\n base_labels.update_cache()\n\n # Merge suggestions and negative anchors\n base_labels.suggestions.extend(new_labels.suggestions)\n cls.merge_container_dicts(\n base_labels.negative_anchors, new_labels.negative_anchors\n )\n\n return merged, extra_base, extra_new\n\n @staticmethod\n def finish_complex_merge(\n base_labels: \"Labels\", resolved_frames: List[LabeledFrame]\n ):\n \"\"\"Finish conflicted merge from complex_merge_between.\n\n Args:\n base_labels: the `Labels` that we're merging into\n resolved_frames: the list of frames to add into base_labels\n \"\"\"\n # Add all the resolved frames to base\n base_labels.labeled_frames.extend(resolved_frames)\n\n # Combine instances when there are two LabeledFrames for same\n # video and frame index\n base_labels.merge_matching_frames()\n\n # Add any new videos (etc) into top level lists in base\n base_labels._update_from_labels(merge=True)\n # Update caches\n base_labels.update_cache()\n\n @staticmethod\n def merge_container_dicts(dict_a: Dict, dict_b: Dict) -> Dict:\n \"\"\"Merge data from dict_b into dict_a.\"\"\"\n for key in dict_b.keys():\n if key in dict_a:\n dict_a[key].extend(dict_b[key])\n uniquify(dict_a[key])\n else:\n dict_a[key] = dict_b[key]\n\n def merge_matching_frames(self, video: Optional[Video] = None):\n \"\"\"Merge `LabeledFrame` objects that are for the same video frame.\n\n Args:\n video: combine for this video; if None, do all videos\n \"\"\"\n if video is None:\n for vid in {lf.video for lf in self.labeled_frames}:\n self.merge_matching_frames(video=vid)\n else:\n self.labeled_frames = LabeledFrame.merge_frames(\n self.labeled_frames, video=video\n )\n\n def to_dict(self, skip_labels: bool = False) -> Dict[str, Any]:\n \"\"\"Serialize all labels to dicts.\n\n Serializes the labels in the underling list of LabeledFrames to a dict\n structure. This function returns a nested dict structure composed entirely of\n primitive python types. 
It is used to create JSON and HDF5 serialized datasets.\n\n Args:\n skip_labels: If True, skip labels serialization and just do the metadata.\n\n Returns:\n A dict containing the following top-level keys:\n * version - The version of the dict/json serialization format.\n * skeletons - The skeletons associated with these underlying\n instances.\n * nodes - The nodes that the skeletons represent.\n * videos - The videos that the instances occur on.\n * labels - The labeled frames.\n * tracks - The tracks associated with each instance.\n * suggestions - The suggested frames.\n * negative_anchors - The negative training sample anchors.\n \"\"\"\n # FIXME: Update list of nodes\n # We shouldn't have to do this here, but for some reason we're missing nodes\n # which are in the skeleton but don't have points (in the first instance?).\n self.nodes = list(\n set(self.nodes).union(\n {node for skeleton in self.skeletons for node in skeleton.nodes}\n )\n )\n\n # Register some unstructure hooks since we don't want complete serialization\n # of video and skeleton objects present in the labels. We will serialize these\n # as references to the above constructed lists to limit redundant data in the\n # JSON.\n label_cattr = make_instance_cattr()\n label_cattr.register_unstructure_hook(\n Skeleton, lambda x: str(self.skeletons.index(x))\n )\n label_cattr.register_unstructure_hook(\n Video, lambda x: str(self.videos.index(x))\n )\n label_cattr.register_unstructure_hook(Node, lambda x: str(self.nodes.index(x)))\n label_cattr.register_unstructure_hook(\n Track, lambda x: str(self.tracks.index(x))\n )\n\n # Make a converter for the top level skeletons list.\n idx_to_node = {i: self.nodes[i] for i in range(len(self.nodes))}\n\n skeleton_cattr = Skeleton.make_cattr(idx_to_node)\n\n # Make attr for tracks so that we save as tuples rather than dicts;\n # this can save a lot of space when there are lots of tracks.\n track_cattr = cattr.Converter(unstruct_strat=cattr.UnstructureStrategy.AS_TUPLE)\n\n # Serialize the skeletons, videos, and labels\n dicts = {\n \"version\": LABELS_JSON_FILE_VERSION,\n \"skeletons\": skeleton_cattr.unstructure(self.skeletons),\n \"nodes\": cattr.unstructure(self.nodes),\n \"videos\": Video.cattr().unstructure(self.videos),\n \"tracks\": track_cattr.unstructure(self.tracks),\n \"suggestions\": label_cattr.unstructure(self.suggestions),\n \"negative_anchors\": label_cattr.unstructure(self.negative_anchors),\n \"provenance\": label_cattr.unstructure(self.provenance),\n }\n\n if not skip_labels:\n dicts[\"labels\"] = label_cattr.unstructure(self.labeled_frames)\n\n return dicts\n\n def to_json(self):\n \"\"\"Serialize all labels in the underlying list of LabeledFrame(s) to JSON.\n\n Returns:\n The JSON representation of the labels.\n \"\"\"\n # Unstructure the data into dicts and dump to JSON.\n return json_dumps(self.to_dict())\n\n @classmethod\n def load_file(\n cls,\n filename: str,\n video_search: Union[Callable, List[Text], None] = None,\n *args,\n **kwargs,\n ):\n \"\"\"Load file, detecting format from filename.\"\"\"\n from .format import read\n\n return read(\n filename, for_object=\"labels\", video_search=video_search, *args, **kwargs\n )\n\n @classmethod\n def save_file(\n cls, labels: \"Labels\", filename: str, default_suffix: str = \"\", *args, **kwargs\n ):\n \"\"\"Save file, detecting format from filename.\n\n Args:\n labels: The dataset to save.\n filename: Path where we'll save it. 
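For orientation, a short usage sketch of the metadata serialization described above; the path is hypothetical and `sleap` is assumed installed:

import sleap

labels = sleap.load_file("project.slp")
meta = labels.to_dict(skip_labels=True)  # metadata only; omits the "labels" key
print(sorted(meta.keys()))
# ['negative_anchors', 'nodes', 'provenance', 'skeletons',
#  'suggestions', 'tracks', 'version', 'videos']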
We attempt to detect format\n from the suffix (e.g., \".json\").\n default_suffix: If we can't detect valid suffix on filename,\n we can add default suffix to filename (and use corresponding\n format). Doesn't need to have \".\" before file extension.\n\n Raises:\n ValueError: If cannot detect valid filetype.\n \"\"\"\n # Convert to full (absolute) path\n filename = os.path.abspath(filename)\n\n # Make sure that all directories for path exist\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n from .format import write\n\n write(filename, labels, *args, **kwargs)\n\n def save(\n self,\n filename: Text,\n with_images: bool = False,\n embed_all_labeled: bool = False,\n embed_suggested: bool = False,\n ):\n \"\"\"Save the labels to a file.\n\n Args:\n filename: Path to save the labels to ending in `.slp`. If the filename does\n not end in `.slp`, the extension will be automatically appended.\n with_images: If `True`, the image data for frames with labels will be\n embedded in the saved labels. This is useful for generating a single\n file to be used when training remotely. Defaults to `False`.\n embed_all_labeled: If `True`, save image data for labeled frames without\n user-labeled instances (defaults to `False`). This is useful for\n selecting arbitrary frames to save by adding empty `LabeledFrame`s to\n the dataset. Labeled frame metadata will be saved regardless.\n embed_suggested: If `True`, save image data for frames in the suggestions\n (defaults to `False`). Useful for predicting on remaining suggestions\n after training. Suggestions metadata will be saved regardless.\n\n Notes:\n This is an instance-level wrapper for the `Labels.save_file` class method.\n \"\"\"\n if os.path.splitext(filename)[1].lower() != \".slp\":\n filename = filename + \".slp\"\n Labels.save_file(\n self,\n filename,\n save_frame_data=with_images,\n all_labeled=embed_all_labeled,\n suggested=embed_suggested,\n )\n\n def export(self, filename: str):\n \"\"\"Export labels to analysis HDF5 format.\n\n This expects the labels to contain data for a single video (e.g., predictions).\n\n Args:\n filename: Path to output HDF5 file.\n\n Notes:\n This will write the contents of the labels out as a HDF5 file without\n complete metadata.\n\n The resulting file will have datasets:\n - `/node_names`: List of skeleton node names.\n - `/track_names`: List of track names.\n - `/tracks`: All coordinates of the instances in the labels.\n - `/track_occupancy`: Mask denoting which instances are present in each\n frame.\n \"\"\"\n from sleap.io.format.sleap_analysis import SleapAnalysisAdaptor\n\n SleapAnalysisAdaptor.write(filename, self)\n\n def export_nwb(\n self,\n filename: str,\n overwrite: bool = False,\n session_description: str = \"Processed SLEAP pose data\",\n identifier: Optional[str] = None,\n session_start_time: Optional[datetime.datetime] = None,\n ):\n \"\"\"Export all `PredictedInstance` objects in a `Labels` object to an NWB file.\n\n Use `Labels.numpy` to create a `pynwb.NWBFile` with a separate\n `pynwb.ProcessingModule` for each `Video` in the `Labels` object.\n\n To access the `pynwb.ProcessingModule` for a specific `Video`, use the key\n 'SLEAP_VIDEO_{video_idx:03}_{video_fn.stem}' where\n `isinstance(video_fn, pathlib.PurePath)`. 
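A small worked example of the processing-module key format stated just above; the video path and index are illustrative values only:

import pathlib

video_fn = pathlib.PurePath("path_to_video/my_video.mp4")
video_idx = 3
key = f"SLEAP_VIDEO_{video_idx:03}_{video_fn.stem}"
assert key == "SLEAP_VIDEO_003_my_video"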
Ex:\n video: 'path_to_video/my_video.mp4'\n video index: 3/5\n key: 'SLEAP_VIDEO_003_my_video'\n\n Within each `pynwb.ProcessingModule` is a `ndx_pose.PoseEstimation` for\n each unique track in the `Video`.\n\n The `ndx_pose.PoseEstimation` for each unique `Track` is stored under the\n key 'track{track_idx:03}' if tracks are set or 'untrack{track_idx:03}' if\n untracked where `track_idx` ranges from\n 0 to (number of tracks) - 1. Ex:\n track_idx: 1\n key: 'track001'\n\n Each `ndx_pose.PoseEstimation` has a `ndx_pose.PoseEstimationSeries` for\n every `Node` in the `Skeleton`.\n\n The `ndx_pose.PoseEstimationSeries` for a specific `Node` is stored under\n the key '`Node.name`'. Ex:\n node name: 'head'\n key: 'head'\n\n Args:\n filename: Output path for the NWB format file.\n labels: The `Labels` object to convert to a NWB format file.\n overwrite: Boolean that overwrites existing NWB file if True. If False, data\n will be appended to existing NWB file.\n session_description: Description for entire project. Stored under\n NWBFile \"session_description\" key. If appending data to a preexisting\n file, then the session_description will not be used.\n identifier: Unique identifier for project. If no identifier is\n specified, then will generate a GUID. If appending data to a\n preexisting file, then the identifier will not be used.\n session_start_time: The datetime associated with the project. If no\n session_start_time is given, then the current datetime will be used. If\n appending data to a preexisting file, then the session_start_time will\n not be used.\n\n Returns:\n A `pynwb.NWBFile` with a separate `pynwb.ProcessingModule` for each\n `Video` in the `Labels` object.\n \"\"\"\n from sleap.io.format.ndx_pose import NDXPoseAdaptor\n\n NDXPoseAdaptor.write(\n NDXPoseAdaptor,\n filename=filename,\n labels=self,\n overwrite=overwrite,\n session_description=session_description,\n identifier=identifier,\n session_start_time=session_start_time,\n )\n\n @classmethod\n def load_json(cls, filename: str, *args, **kwargs) -> \"Labels\":\n from .format import read\n\n return read(filename, for_object=\"labels\", as_format=\"json\", *args, **kwargs)\n\n @classmethod\n def save_json(cls, labels: \"Labels\", filename: str, *args, **kwargs):\n from .format import write\n\n write(filename, labels, as_format=\"json\", *args, **kwargs)\n\n @classmethod\n def load_hdf5(cls, filename, *args, **kwargs):\n from .format import read\n\n return read(filename, for_object=\"labels\", as_format=\"hdf5_v1\", *args, **kwargs)\n\n @classmethod\n def save_hdf5(cls, labels, filename, *args, **kwargs):\n from .format import write\n\n write(filename, labels, as_format=\"hdf5_v1\", *args, **kwargs)\n\n @classmethod\n def load_leap_matlab(cls, filename, *args, **kwargs):\n from .format import read\n\n return read(filename, for_object=\"labels\", as_format=\"leap\", *args, **kwargs)\n\n @classmethod\n def load_alphatracker(\n cls,\n filename: str,\n skeleton: Optional[Skeleton] = None,\n full_video: Optional[Video] = None,\n ) -> \"Labels\":\n from .format import read\n\n return read(\n filename,\n for_object=\"labels\",\n as_format=\"alphatracker\",\n skeleton=skeleton,\n full_video=full_video,\n )\n\n @classmethod\n def load_nwb(\n cls,\n filename: str,\n ) -> \"Labels\":\n from .format import read\n\n return read(\n filename,\n for_object=\"labels\",\n as_format=\"nwb\",\n )\n\n @classmethod\n def load_deeplabcut(cls, filename: str) -> \"Labels\":\n from .format import read\n\n return read(filename, for_object=\"labels\", 
as_format=\"deeplabcut\")\n\n @classmethod\n def load_deeplabcut_folder(cls, filename: str) -> \"Labels\":\n csv_files = glob(f\"{filename}/*/*.csv\")\n merged_labels = None\n for csv_file in csv_files:\n labels = cls.load_file(csv_file, as_format=\"deeplabcut\")\n if merged_labels is None:\n merged_labels = labels\n else:\n merged_labels.extend_from(labels, unify=True)\n return merged_labels\n\n @classmethod\n def load_coco(\n cls, filename: str, img_dir: str, use_missing_gui: bool = False\n ) -> \"Labels\":\n from sleap.io.format.coco import LabelsCocoAdaptor\n from sleap.io.format.filehandle import FileHandle\n\n return LabelsCocoAdaptor.read(FileHandle(filename), img_dir, use_missing_gui)\n\n @classmethod\n def from_deepposekit(\n cls, filename: str, video_path: str, skeleton_path: str\n ) -> \"Labels\":\n from sleap.io.format.deepposekit import LabelsDeepPoseKitAdaptor\n from sleap.io.format.filehandle import FileHandle\n\n return LabelsDeepPoseKitAdaptor.read(\n FileHandle(filename), video_path, skeleton_path\n )\n\n def save_frame_data_imgstore(\n self,\n output_dir: str = \"./\",\n format: str = \"png\",\n all_labeled: bool = False,\n suggested: bool = False,\n progress_callback: Optional[Callable[[int, int], None]] = None,\n ) -> List[ImgStoreVideo]:\n \"\"\"Write images for labeled frames from all videos to imgstore datasets.\n\n This only writes frames that have been labeled. Videos without\n any labeled frames will be included as empty imgstores.\n\n Args:\n output_dir: Path to directory which will contain imgstores.\n format: The image format to use for the data.\n Use \"png\" for lossless, \"jpg\" for lossy.\n Other imgstore formats will probably work as well but\n have not been tested.\n all_labeled: Include any labeled frames, not just the frames\n we'll use for training (i.e., those with `Instance` objects ).\n suggested: Include suggested frames even if they do not have instances.\n Useful for inference after training. Defaults to `False`.\n progress_callback: If provided, this function will be called to report the\n progress of the frame data saving. This function should be a callable\n of the form: `fn(n, n_total)` where `n` is the number of frames saved so\n far and `n_total` is the total number of frames that will be saved. This\n is called after each video is processed. 
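To make the progress-callback contract above concrete, a hypothetical reporter matching the documented signature; `labels` stands for an already-loaded `Labels` object:

def report_progress(n, n_total):
    # Called after each video is processed; returning False cancels
    # saving and deletes the output.
    print(f"saved {n}/{n_total}")
    return True

# stores = labels.save_frame_data_imgstore(
#     output_dir="./imgstores", format="png", progress_callback=report_progress
# )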
If the function has a return\n value and it returns `False`, saving will be canceled and the output\n deleted.\n\n Returns:\n A list of :class:`ImgStoreVideo` objects with the stored\n frames.\n \"\"\"\n\n # Lets gather all the suggestions by video\n suggestion_frames_by_video = {video: [] for video in self.videos}\n if suggested:\n for suggestion in self.suggestions:\n suggestion_frames_by_video[suggestion.video].append(\n suggestion.frame_idx\n )\n\n # For each label\n imgstore_vids = []\n total_vids = len(self.videos)\n for v_idx, video in enumerate(self.videos):\n lfs_v = self.find(video)\n frame_nums = {\n lf.frame_idx for lf in lfs_v if all_labeled or lf.has_user_instances\n }\n\n if suggested:\n frame_nums.update(suggestion_frames_by_video[video])\n\n # Join with \"/\" instead of os.path.join() since we want\n # path to work on Windows and Posix systems\n frames_fn = Path(output_dir, f\"frame_data_vid{v_idx}\")\n vid = video.to_imgstore(\n path=frames_fn.as_posix(), frame_numbers=frame_nums, format=format\n )\n if progress_callback is not None:\n # Notify update callback.\n ret = progress_callback(v_idx, total_vids)\n if ret == False:\n vid.close()\n return []\n\n # Close the video for now\n vid.close()\n\n imgstore_vids.append(vid)\n\n return imgstore_vids\n\n def save_frame_data_hdf5(\n self,\n output_path: str,\n format: str = \"png\",\n user_labeled: bool = True,\n all_labeled: bool = False,\n suggested: bool = False,\n progress_callback: Optional[Callable[[int, int], None]] = None,\n ) -> List[HDF5Video]:\n \"\"\"Write images for labeled frames from all videos to hdf5 file.\n\n Note that this will make an HDF5 video, not an HDF5 labels dataset.\n\n Args:\n output_path: Path to HDF5 file.\n format: The image format to use for the data. Defaults to png.\n user_labeled: Include labeled frames with user instances. Defaults to\n `True`.\n all_labeled: Include all labeled frames, including those with user-labeled\n instances, predicted instances or labeled frames with no instances.\n Defaults to `False`.\n suggested: Include suggested frames even if they do not have instances.\n Useful for inference after training. Defaults to `False`.\n progress_callback: If provided, this function will be called to report the\n progress of the frame data saving. This function should be a callable\n of the form: `fn(n, n_total)` where `n` is the number of frames saved so\n far and `n_total` is the total number of frames that will be saved. This\n is called after each video is processed. 
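A standalone note on the `as_posix()` trick used in the imgstore writer above: `pathlib` renders the same path with forward slashes on any platform, which is why the code avoids `os.path.join()` there:

from pathlib import PureWindowsPath

p = PureWindowsPath("out", "frame_data_vid0")
assert str(p) == "out\\frame_data_vid0"      # native Windows form
assert p.as_posix() == "out/frame_data_vid0"  # portable forward-slash form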
If the function has a return\n value and it returns `False`, saving will be canceled and the output\n deleted.\n\n Returns:\n A list of :class:`HDF5Video` objects with the stored frames.\n \"\"\"\n\n # Lets gather all the suggestions by video\n suggestion_frames_by_video = {video: [] for video in self.videos}\n if suggested:\n for suggestion in self.suggestions:\n suggestion_frames_by_video[suggestion.video].append(\n suggestion.frame_idx\n )\n\n # Build list of frames to save.\n vids = []\n frame_idxs = []\n for video in self.videos:\n lfs_v = self.find(video)\n frame_nums = {\n lf.frame_idx\n for lf in lfs_v\n if all_labeled or (user_labeled and lf.has_user_instances)\n }\n\n if suggested:\n frame_nums.update(suggestion_frames_by_video[video])\n\n frame_nums = sorted(list(frame_nums))\n vids.append(video)\n frame_idxs.append(frame_nums)\n\n n_total = sum([len(x) for x in frame_idxs])\n n = 0\n\n # Save images for each video.\n new_vids = []\n for v_idx, (video, frame_nums) in enumerate(zip(vids, frame_idxs)):\n vid = video.to_hdf5(\n path=output_path,\n dataset=f\"video{v_idx}\",\n format=format,\n frame_numbers=frame_nums,\n )\n n += len(frame_nums)\n if progress_callback is not None:\n # Notify update callback.\n ret = progress_callback(n, n_total)\n if ret == False:\n vid.close()\n return []\n\n vid.close()\n new_vids.append(vid)\n\n return new_vids\n\n def to_pipeline(\n self,\n batch_size: Optional[int] = None,\n prefetch: bool = True,\n frame_indices: Optional[List[int]] = None,\n user_labeled_only: bool = True,\n ) -> \"sleap.pipelines.Pipeline\":\n \"\"\"Create a pipeline for reading the dataset.\n\n Args:\n batch_size: If not `None`, the video frames will be batched into rank-4\n tensors. Otherwise, single rank-3 images will be returned.\n prefetch: If `True`, pipeline will include prefetching.\n frame_indices: Labeled frame indices to limit the pipeline reader to. If not\n specified (default), pipeline will read all the labeled frames in the\n dataset.\n user_labeled_only: If `True` (default), will only read frames with user\n labeled instances.\n\n Returns:\n A `sleap.pipelines.Pipeline` that builds `tf.data.Dataset` for high\n throughput I/O during inference.\n\n See also: sleap.pipelines.LabelsReader\n \"\"\"\n from sleap.nn.data import pipelines\n\n if user_labeled_only:\n reader = pipelines.LabelsReader.from_user_instances(self)\n reader.example_indices = frame_indices\n else:\n reader = pipelines.LabelsReader(self, example_indices=frame_indices)\n pipeline = pipelines.Pipeline(reader)\n if batch_size is not None:\n pipeline += pipelines.Batcher(\n batch_size=batch_size, drop_remainder=False, unrag=False\n )\n\n pipeline += pipelines.Prefetcher()\n return pipeline\n\n def numpy(\n self,\n video: Optional[Union[Video, int]] = None,\n all_frames: bool = True,\n untracked: bool = False,\n return_confidence: bool = False,\n ) -> np.ndarray:\n \"\"\"Construct a numpy array from instance points.\n\n Args:\n video: Video or video index to convert to numpy arrays. If `None` (the\n default), uses the first video.\n all_frames: If `True` (the default), allocate array of the same number of\n frames as the video. If `False`, only return data between the first and\n last frame with data.\n untracked: If `False` (the default), include only instances that have a\n track assignment. If `True`, includes all instances in each frame in\n arbitrary order.\n return_confidence: If `False` (the default), only return points of nodes. 
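A sketch of reading a labels file through the pipeline API above; this assumes SLEAP with TensorFlow available and that the returned `Pipeline` is materialized via `make_dataset()` (the path is hypothetical):

import sleap

labels = sleap.load_file("predictions.slp")
pipeline = labels.to_pipeline(batch_size=4, prefetch=True, user_labeled_only=False)
ds = pipeline.make_dataset()  # assumed entry point yielding a tf.data.Dataset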
If\n `True`, return the points and scores of nodes.\n\n Returns:\n An array of tracks of shape `(n_frames, n_tracks, n_nodes, 2)` if\n `return_confidence` is `False`. Otherwise returned shape is\n `(n_frames, n_tracks, n_nodes, 3)` if `return_confidence` is `True`.\n\n Missing data will be replaced with `np.nan`.\n\n If this is a single instance project, a track does not need to be assigned.\n\n Only predicted instances (NOT user instances) will be returned.\n\n Notes:\n This method assumes that instances have tracks assigned and is intended to\n function primarily for single-video prediction results.\n \"\"\"\n\n def set_track(\n inst: Union[Instance, PredictedInstance],\n track: np.ndarray,\n return_confidence: bool,\n ):\n if return_confidence:\n if isinstance(inst, PredictedInstance):\n track = inst.points_and_scores_array\n else:\n track[:, :-1] = inst.numpy()\n else:\n track = inst.numpy()\n return track\n\n # Get labeled frames for specified video.\n try:\n if video is None:\n video = self.videos[0]\n if type(video) == int:\n video = self.videos[video]\n video = cast(Video, video) # video should now be of type Video\n except IndexError as e:\n raise IndexError(\n f\"There are no videos in this project. No points matrix to return.\"\n )\n\n lfs: List[LabeledFrame] = self.find(video=video)\n\n # Figure out frame index range.\n frame_idxs = [lf.frame_idx for lf in lfs]\n frame_idxs.sort()\n first_frame = 0 if all_frames else frame_idxs[0]\n last_frame = len(video) - 1 if all_frames else frame_idxs[-1]\n\n # Figure out the number of tracks based on number of instances in each frame.\n #\n # First, let's check the max number of instances (regardless of\n # whether they're tracked.\n n_insts = max(\n [\n lf.n_user_instances\n if lf.n_user_instances > 0 # take user instances over predicted\n else lf.n_predicted_instances\n for lf in lfs\n ]\n )\n\n untracked = untracked or n_insts == 1\n if untracked:\n # Case 1: We don't care about order because there's only 1 instance per\n # frame, or we're considering untracked instances.\n n_tracks = n_insts\n else:\n # Case 2: We're considering only tracked instances.\n n_tracks = len(self.tracks)\n\n n_frames = last_frame - first_frame + 1\n n_nodes = len(self.skeleton.nodes)\n\n if return_confidence:\n tracks = np.full((n_frames, n_tracks, n_nodes, 3), np.nan, dtype=\"float32\")\n else:\n tracks = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, dtype=\"float32\")\n for lf in lfs:\n i = lf.frame_idx - first_frame\n lf_insts: Union[List[Instance], List[PredictedInstance]] = (\n lf.user_instances if lf.n_user_instances > 0 else lf.predicted_instances\n ) # Prefer user labeled instances over predicted\n if untracked:\n # Add instances in any order if untracked\n for j, inst in enumerate(lf_insts):\n tracks[i, j] = set_track(inst, tracks[i, j], return_confidence)\n else:\n # Add instances in track-specific order, ignoring instances w/o a track\n for inst in lf_insts:\n if inst.track is None:\n continue\n j = self.tracks.index(inst.track)\n tracks[i, j] = set_track(inst, tracks[i, j], return_confidence)\n\n return tracks\n\n def merge_nodes(self, base_node: str, merge_node: str):\n \"\"\"Merge two nodes and update data accordingly.\n\n Args:\n base_node: Name of skeleton node that will remain after merging.\n merge_node: Name of skeleton node that will be merged into the base node.\n\n Notes:\n This method can be used to merge two nodes that might have been named\n differently but that should be associated with the same node.\n\n This is useful, for 
example, when merging a different set of labels where\n a node was named differently.\n\n If the `base_node` is visible and has data, it will not be updated.\n Otherwise, it will be updated with the data from the `merge_node` on the\n same instance.\n \"\"\"\n # Update data on all instances.\n for inst in self.instances():\n inst._merge_nodes_data(base_node, merge_node)\n\n # Remove merge node from skeleton.\n self.skeleton.delete_node(merge_node)\n\n # Fix instances.\n for inst in self.instances():\n inst._fix_array()\n\n @classmethod\n def make_gui_video_callback(\n cls,\n search_paths: Optional[List] = None,\n context: Optional[Dict[str, bool]] = None,\n ) -> Callable:\n return cls.make_video_callback(\n search_paths=search_paths, use_gui=True, context=context\n )\n\n @classmethod\n def make_video_callback(\n cls,\n search_paths: Optional[List] = None,\n use_gui: bool = False,\n context: Optional[Dict[str, bool]] = None,\n ) -> Callable:\n \"\"\"Create a callback for finding missing videos.\n\n The callback can be used while loading a saved project and\n allows the user to find videos which have been moved (or have\n paths from a different system).\n\n The callback function returns True to signal \"abort\".\n\n Args:\n search_paths: If specified, this is a list of paths where\n we'll automatically try to find the missing videos.\n context: A dictionary containing a \"changed_on_load\" key with a boolean\n value. Used externally to determine if any filenames were updated.\n\n Returns:\n The callback function.\n \"\"\"\n search_paths = search_paths or []\n context = context or {}\n\n def video_callback(\n video_list: List[dict],\n new_paths: List[str] = search_paths,\n context: Optional[Dict[str, bool]] = context,\n ):\n \"\"\"Callback to find videos which have been moved (or moved across systems).\n\n Args:\n video_list: A list of serialized `Video` objects stored as nested\n dictionaries.\n new_paths: A list of paths where we'll automatically try to find the\n missing videos.\n context: A dictionary containing a \"changed_on_load\" key with a boolean\n value. 
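A usage sketch for the callback factory above, with hypothetical paths; the callback plugs into `Labels.load_file` via the `video_search` argument:

context = {"changed_on_load": False}
callback = Labels.make_video_callback(search_paths=["/data/videos"], context=context)
labels = Labels.load_file("project.slp", video_search=callback)
if context["changed_on_load"]:
    # Persist the corrected video paths back to the project file.
    Labels.save_file(labels, "project.slp")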
Used externally to determine if any filenames were updated.\n \"\"\"\n filenames = [item[\"backend\"][\"filename\"] for item in video_list]\n context = context or {\"changed_on_load\": False}\n missing = pathutils.list_file_missing(filenames)\n\n # Try changing the prefix using saved patterns\n if sum(missing):\n pathutils.fix_paths_with_saved_prefix(filenames, missing)\n\n # Check for file in search_path directories\n if sum(missing) and new_paths:\n for i, filename in enumerate(filenames):\n fixed_path = find_path_using_paths(filename, new_paths)\n if fixed_path != filename:\n filenames[i] = fixed_path\n missing[i] = False\n context[\"changed_on_load\"] = True\n\n if use_gui:\n # If there are still missing paths, prompt user\n if sum(missing):\n # If we are using dummy for any video not found by user\n # then don't require user to find everything.\n allow_incomplete = USE_DUMMY_FOR_MISSING_VIDEOS\n\n okay = MissingFilesDialog(\n filenames, missing, allow_incomplete=allow_incomplete\n ).exec_()\n\n if not okay:\n return True # True for stop\n\n context[\"changed_on_load\"] = True\n\n if not use_gui and sum(missing):\n # If we got the same number of paths as there are videos\n if len(filenames) == len(new_paths):\n # and the file extensions match\n exts_match = all(\n (\n old.split(\".\")[-1] == new.split(\".\")[-1]\n for old, new in zip(filenames, new_paths)\n )\n )\n\n if exts_match:\n # then the search paths should be a list of all the\n # video paths, so we can get the new path for the missing\n # old path.\n for i, filename in enumerate(filenames):\n if missing[i]:\n filenames[i] = new_paths[i]\n missing[i] = False\n\n # Solely for testing since only gui will have a `CommandContext`\n context[\"changed_on_load\"] = True\n\n # Replace the video filenames with changes by user\n for i, item in enumerate(video_list):\n item[\"backend\"][\"filename\"] = filenames[i]\n\n if USE_DUMMY_FOR_MISSING_VIDEOS and sum(missing):\n # Replace any video still missing with \"dummy\" video\n for is_missing, item in zip(missing, video_list):\n from sleap.io.video import DummyVideo\n\n vid = DummyVideo(filename=item[\"backend\"][\"filename\"])\n item[\"backend\"] = cattr.unstructure(vid)\n\n return video_callback\n\n\ndef find_path_using_paths(missing_path: Text, search_paths: List[Text]) -> Text:\n \"\"\"Find a path to a missing file given a set of paths to search in.\n\n Args:\n missing_path: Path to the missing filename.\n search_paths: List of paths to search in.\n\n Returns:\n The corrected path if it was found, or the original missing path if it was not.\n \"\"\"\n # Get basename (filename with directories) using current os path format\n current_basename = os.path.basename(missing_path)\n\n # Handle unix, windows, or mixed paths\n if current_basename.find(\"/\") > -1:\n current_basename = current_basename.split(\"/\")[-1]\n if current_basename.find(\"\\\\\") > -1:\n current_basename = current_basename.split(\"\\\\\")[-1]\n\n # Look for file with that name in each of the search path directories\n for search_path in search_paths:\n\n if os.path.isfile(search_path):\n path_dir = os.path.dirname(search_path)\n else:\n path_dir = search_path\n\n check_path = os.path.join(path_dir, current_basename)\n if os.path.exists(check_path):\n return check_path\n\n return missing_path\n\n\ndef load_file(\n filename: Text,\n detect_videos: bool = True,\n search_paths: Optional[Union[List[Text], Text]] = None,\n match_to: Optional[Labels] = None,\n) -> Labels:\n \"\"\"Load a SLEAP labels file.\n\n SLEAP labels files 
(`.slp`) contain all the metadata for a labeling project or the\n predicted labels from a video. This includes the skeleton, videos, labeled frames,\n user-labeled and predicted instances, suggestions and tracks.\n\n See `sleap.io.dataset.Labels` for more detailed information.\n\n Args:\n filename: Path to a SLEAP labels (.slp) file.\n detect_videos: If True, will attempt to detect missing videos by searching for\n their filenames in the search paths. This is useful when loading SLEAP\n labels files that were generated on another computer with different paths.\n search_paths: A path or list of paths to search for the missing videos. This can\n be the direct path to the video file or its containing folder. If not\n specified, defaults to searching for the videos in the same folder as the\n labels.\n match_to: If a `sleap.Labels` object is provided, attempt to match and reuse\n video and skeleton objects when loading. This is useful when comparing the\n contents across sets of labels.\n\n Returns:\n The loaded `Labels` instance.\n\n Notes:\n This is a convenience method to call `sleap.Labels.load_file`. See that class\n method for more functionality in the loading process.\n\n The video files do not need to be accessible in order to load the labels, for\n example, when only the predicted instances or user labels are required.\n \"\"\"\n if detect_videos:\n if search_paths is None:\n search_paths = os.path.dirname(filename)\n return Labels.load_file(filename, search_paths, match_to=match_to)\n else:\n return Labels.load_file(filename, match_to=match_to)\n","repo_name":"talmolab/sleap","sub_path":"sleap/io/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":107724,"program_lang":"python","lang":"en","doc_type":"code","stars":340,"dataset":"github-code","pt":"54"} +{"seq_id":"71359286241","text":"#!/usr/bin/python3\n\"\"\"\n\tDemo variable scope w/ global var modified in a function\n\"\"\"\ndef spam():\n\teggs = \"spam local\" \n\tprint(eggs) \n\t\ndef bacon(): \n\tglobal eggs \n\teggs = \"bacon local overwrite\" \n\tprint(eggs) \n\tspam() \n\tprint(eggs) \n\t\n# global scope \neggs = \"global\" \nbacon() \nprint(eggs) \n","repo_name":"mazerlodge/AutoBoring","sub_path":"Scraps/scopeExample2.py","file_name":"scopeExample2.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32262075299","text":"from __future__ import annotations\nimport re\n\nfrom dataclasses import dataclass\nfrom datetime import datetime\n\n\nACTIONS = [\n \"created group\",\n \"added\",\n \"changed the subject to\",\n \"changed this group's icon\",\n \"removed\",\n \"changed their phone number to a new number\",\n \"left\",\n]\n\n\n@dataclass\nclass Message:\n timestamp: datetime\n user: str\n message: str\n auto: bool\n\n\ndef split_line_to_date_and_body(line: str):\n datestr, body = re.match(r'^\\[(.*?)\\] (.*?)$', line.replace('\\u200e', '')).groups()\n return datetime.strptime(datestr, '%d/%m/%Y, %H:%M:%S'), body\n\n\ndef split_body_to_user_and_message(body: str):\n if ':' in body:\n user, message = re.match(r'^(.*?): (.*?)$', body).groups()\n return user.strip(), message\n else:\n for action in ACTIONS:\n idx = body.find(action)\n if idx > 0:\n return body[:idx].strip(), body[idx:]\n return None\n\n\ndef get_message(line: str):\n date, body = split_line_to_date_and_body(line)\n user, message = split_body_to_user_and_message(body)\n return Message(date, user, message, '\\u200e' in line)\n\n\ndef 
is_new_message(line: str):\n try:\n split_line_to_date_and_body(line)\n return True\n except Exception:\n return False\n\n\ndef iterlines(filepath, rows=None):\n with open(filepath, mode='r') as f:\n count = 1\n line = f.readline()\n while line:\n yield line\n if rows and count > rows:\n break\n line = f.readline()\n count += 1\n\n\ndef itermessages(filepath, rows=None):\n message = None\n for line in iterlines(filepath, rows):\n if is_new_message(line):\n if message is None:\n message = get_message(line)\n else:\n yield message\n message = get_message(line)\n else:\n if message is None:\n raise ValueError('Invalid start of data.')\n else:\n message.message += line\n","repo_name":"dimistsaousis/wa-stats","sub_path":"message_parser.py","file_name":"message_parser.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41882857484","text":"# [Form 3]\r\n## import package_name.module_name as alias\r\n\r\nimport pack.ex as e\r\n\r\nprint('value >>', e.value)\r\n\r\n# Call the show function\r\ne.show()\r\n\r\n# Create an object from the Increment class\r\nic = e.Increment()\r\n\r\n# Call the printNum() method\r\nic.printNum(15)\r\n","repo_name":"MilKim0818/Beginning-Python","sub_path":"moduleEx04.py","file_name":"moduleEx04.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1676672950","text":"a=[]\nb=[]\nc=[]\nd=[]\n# get the number of cells in each section\n# input 16 integers with space in between per each list (section)\na = list(map(int, input(\"Enter the cell counts for section A, separated by single spaces: \").split()))\na_sum = sum(a)\nprint(\"Sum of cell counts in section A: \"+str(a_sum)+\"\\nNumber of entries in section A: \"+str(len(a)))\nb = list(map(int, input(\"Enter the cell counts for section B, separated by single spaces: \").split()))\nb_sum = sum(b)\nprint(\"Sum of cell counts in section B: \"+str(b_sum)+\"\\nNumber of entries in section B: \"+str(len(b)))\nc = list(map(int, input(\"Enter the cell counts for section C, separated by single spaces: \").split()))\nc_sum = sum(c)\nprint(\"Sum of cell counts in section C: \"+str(c_sum)+\"\\nNumber of entries in section C: \"+str(len(c)))\nd = list(map(int, input(\"Enter the cell counts for section D, separated by single spaces: \").split()))\nd_sum = sum(d)\nprint(\"Sum of cell counts in section D: \"+str(d_sum)+\"\\nNumber of entries in section D: \"+str(len(d)))\n# find the mean across the four sections\nmean = (a_sum+b_sum+c_sum+d_sum)/4\n\nmult = int(input(\"Enter the dilution factor: \"))\nif (mult==1):\n volume = int(input(\"Enter the final volume: \"))\n print(\"--------------------\\nResult\\nAverage of the 4 sections: \"+str(mean)+\"\\nCell count: \"+str(mean*mult*volume*(10**4))+\" cells/\"+str(volume)+\"mL\")\nelse:\n volume = int(input(\"Enter the final suspension volume: \"))\n print(\"--------------------\\nResult\\nAverage of the 4 sections: \"+str(mean)+\"\\n\"+\"Cell count in the suspension (dilution factor: \"+str(mult)+\"x): \"+str(mean*volume*(10**4))+\"\\nCell count in the stock solution: \"+str(mean*mult*volume*(10**4))+\" cells/\"+str(volume)+\"mL\")\n","repo_name":"hoony6134/project","sub_path":"bio/hemocytometer.py","file_name":"hemocytometer.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"41763297872","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import JsonResponse, HttpResponse, FileResponse\nfrom django import conf\nfrom repository import models\nimport datetime, re, os, time, requests, json, urllib\nfrom utils import encryption\n\n\ndef DownloadSourceResume(request):\n\t\n\tret = {\"status\": \"failed\", \"status_code\": \"403\", \"description\": \"There is no such permission.\"}\n\tFilePath = request.GET.get(\"file\", 
None)\n\tobj_id = request.GET.get(\"uid\", None)\n\n\tif request.method == \"GET\":\n\t\tif FilePath and obj_id:\n\t\t\t# Look up the resume object so we can record the user's download count\n\t\t\trecord_download_resume = models.ResumeInfo.objects.filter(id=int(obj_id))\n\n\t\t\tLOCAL_FilePath = os.path.join(conf.settings.BASE_DIR, \"static\", FilePath)\n\n\t\t\tisExists = os.path.exists(os.path.dirname(LOCAL_FilePath))\n\t\t\tif not isExists: os.makedirs(os.path.dirname(LOCAL_FilePath))\n\n\t\t\turllib.request.urlretrieve(os.path.join(conf.settings.NGINX_MIRROR_ADDRESS, FilePath), LOCAL_FilePath)\n\t\t\tfile=open(LOCAL_FilePath,'rb') \n\t\t\tresponse =FileResponse(file) \n\t\t\tresponse['Content-Type']='application/octet-stream' \n\t\t\tresponse['Content-Disposition']='attachment;filename=\"{}\"'.format(os.path.basename(LOCAL_FilePath)) \n\n\t\t\t# Record the user's download event\n\t\t\tmodels.StatisticalDownloadResume.objects.create(user=request.user, resume=record_download_resume.last())\n\t\t\treturn response \n\treturn JsonResponse(ret)\n\ndef DownloadWordFile(request):\n\n\t# Recording of user download counts has not been added yet\n\tret = {\"status\": \"failed\", \"status_code\": \"403\", \"description\": \"There is no such permission.\"}\n\tif request.method == \"GET\":\n\t\tFilePath = request.GET.get(\"file\", None)\n\t\tToken = request.GET.get(\"token\", None)\n\t\tif FilePath and Token:\n\t\t\tattachmentName, currentTime = Token.split(\"|\")\n\t\t\t_token = encryption.md5(FilePath + \"|\" + currentTime) + \"|\" + currentTime\n\t\t\tif not int(time.time()) - int(currentTime) > 10:\n\t\t\t\tif Token == _token:\n\t\t\t\t\tfile=open(os.path.join(conf.settings.BASE_DIR, FilePath),'rb') \n\t\t\t\t\tresponse =FileResponse(file) \n\t\t\t\t\tresponse['Content-Type']='application/octet-stream' \n\t\t\t\t\tresponse['Content-Disposition']='attachment;filename=\"{}\"'.format(os.path.basename(FilePath)) \n\t\t\t\t\treturn response \n\treturn JsonResponse(ret)","repo_name":"slzcc/ResumeCRM","sub_path":"Documents/views/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"33700555428","text":"from sklearn.linear_model import LinearRegression, Ridge, Lasso, LogisticRegression\nfrom sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, BaggingRegressor, GradientBoostingRegressor\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier, GradientBoostingClassifier, ExtraTreesClassifier\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nimport pickle\nimport pandas as pd\n\n#---------------------------------------------------------------------------------------------------------------------#\n\nmodels = [LinearRegression, Ridge, Lasso, DecisionTreeRegressor, RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor, BaggingRegressor, GradientBoostingRegressor, LogisticRegression, 
RandomForestClassifier, AdaBoostClassifier, BaggingClassifier, GradientBoostingClassifier, ExtraTreesClassifier, DecisionTreeClassifier]\n\n#---------------------------------------------------------------------------------------------------------------------#\n\ndef get_features(dataset,label):\n '''\n Returns the list of strings containing the names of features in the data set\n\n Parameters:\n dataset (dataframe) : contains data for model training\n label (string) : the column name which is the target of machine learning \n\n Returns:\n features (string)\n '''\n try:\n features = list(dataset.columns)\n features.remove(label)\n except Exception as e:\n raise type(e)(\"Check the label name\")\n return features\n\n\nmap_model = {model.__name__: model for model in models}\n\n#---------------------------------------------------------------------------------------------------------------------#\n\ndef get_model(name):\n '''\n Returns the model corresponding to the model name\n\n Parameters:\n name (string) : name of the model required\n\n Returns:\n model (model class reference) \n '''\n try:\n return map_model.get(name)\n except Exception as e:\n raise type(e)(\"Check the model name\")\n\n#---------------------------------------------------------------------------------------------------------------------#\n\ndef pickle_model(model,file_name='pickled_model'):\n '''\n Saves the model in a pickle format for later use\n\n Parameters:\n model (model object) \n\n\n \n '''\n \n pickle.dump(model, open(file_name+'.sav', 'wb'))\n print('\\n'+'Model Downloaded')\n#---------------------------------------------------------------------------------------------------------------------#\n\ndef get_csv(pd_stats,filename='excel_file'):\n pd_stats.to_excel(filename+'.xlsx')\n print('\\n'+'Excel File Generated')\n\n#---------------------------------------------------------------------------------------------------------------------#\n\ndef download_dataset(dataset_path):\n try:\n return pd.read_csv(dataset_path)\n except Exception as e:\n raise type(e)(\" Check the dataset_path \")\n\n#---------------------------------------------------------------------------------------------------------------------#\n\ndef check(func, *args, **kw):\n try:\n func(*args, **kw)\n return True\n except Exception:\n return False\n\n#---------------------------------------------------------------------------------------------------------------------#\n\n","repo_name":"mihir2510/SuperML","sub_path":"auto_machine_learning/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"20122732006","text":"\"\"\"\nNOTE: This module has sideeffects on import. 
This is by intention.\n\nIt collects information about the tool (main script file) and dumps it\nin the current directory in a file called \"versioninfo\".\n\nThe info is intended to stay mostly fixed, if you keep using the same tool\nwith the same version.\nSo, it is intended to keep the file also under local version control\nfor documentation, so that you can later know what tool version you have used.\nThis is why we don't include information like the current system time.\nFor such purpose, use some local history script.\n\"\"\"\n\nfrom .utils import *\nimport os\n\ndef findMainScript():\n\timport inspect\n\tfirst = inspect.stack()[-1]\n\tfilepath = first[1]\n\t# Check if filepath is valid.\n\t# I'm not sure what to do if that fails.\n\t# Some fallback code would be good.\n\ttest(os.path.exists(filepath))\n\treturn filepath\n\ndef gitTopLevelOrNone(d):\n\ttry:\n\t\treturn git_topLevelDir(gitdir=d)\n\texcept ShellError:\n\t\treturn None\n\ndef collectInfo():\n\tinfo = {}\n\t\n\tscriptfile = findMainScript()\n\tscriptname = os.path.basename(scriptfile)\n\tif scriptname.endswith(\".py\"): scriptname = scriptname[:-3]\n\tscriptfile = os.path.realpath(scriptfile)\n\tscriptfile = os.path.abspath(scriptfile)\n\ttest(os.path.isfile(scriptfile))\n\tinfo[\"file\"] = filenameRepr(scriptfile)\n\tinfo[\"name\"] = scriptname\n\tscriptdir = os.path.dirname(scriptfile)\n\t\n\tgitdir = gitTopLevelOrNone(scriptdir)\n\tgitIsDirty = None\n\tif gitdir:\n\t\tgitCommit = git_headCommit(gitdir=gitdir)\n\t\tgitIsDirty = git_isDirty(gitdir=gitdir)\n\t\tgitDate = git_commitDate(gitdir=gitdir)\n\t\tinfo[\"git-commit\"] = gitCommit\n\t\tinfo[\"git-isDirty\"] = gitIsDirty\n\t\tinfo[\"git-date\"] = gitDate\n\t\tinfo[\"git-dir\"] = filenameRepr(gitdir)\n\t\n\tif not gitdir or gitIsDirty:\n\t\tinfo[\"file-changeDate\"] = sysexecOut(\"ls\", \"-la\", os.path.basename(scriptfile), cwd=scriptdir).strip()\n\n\treturn info\n\ndef myFileRepr():\n\tfilename = os.path.realpath(__file__)\n\tif filename.endswith(\".pyc\"):\n\t\tfilename = filename[:-1]\n\treturn filenameRepr(filename)\n\t\ndef dump():\n\tinfo = collectInfo()\n\tcwd = os.getcwd()\n\tfilename = cwd + \"/versioninfo\"\n\tt = \"\"\n\ttry:\n\t\tt = open(filename).read()\n\texcept IOError:\n\t\t# File does not exist yet or so.\n\t\tpass\n\tif t:\n\t\ttools = eval(t)\n\t\ttest(type(tools) is dict)\n\telse:\n\t\ttools = {}\n\ttools[info[\"name\"]] = info\n\tr = \"# Version info of tools called from this dir.\\n\"\n\tr += \"# Ref: \" + myFileRepr() + \"\\n\"\n\tr += betterRepr(tools)\n\tr += \"\\n\"\n\ttmpfilename = filename + \".tmp\" + tmp_filename()\n\tf = open(tmpfilename, \"w\")\n\tf.write(r)\n\tf.close()\n\tos.rename(tmpfilename, filename)\n\t\ntry:\n\tif os.path.exists(os.path.expanduser(\"~/.version_info_dump.enabled\")):\n\t\tdump()\nexcept IOError:\n\t# Permission denied or so.\n\t# Silently ignore.\n\tpass\n","repo_name":"albertz/system-tools","sub_path":"lib/version_info_dump.py","file_name":"version_info_dump.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"13193859591","text":"import os\nfrom sqlite3 import connect\n\nclass Repository:\n\n def createConection():\n\n dataBasePath = os.path.join(os.getcwd(), 'src', 'database', 'file', 'Banco_Dados_Ressarcimento_Icms.db')\n connection = connect(dataBasePath)\n\n return connection\n\n def FindOne(conn, item, CompanyName):\n\n DBCreated = os.path.join(os.getcwd(), 'src', 'database', 'file', 
'Banco_Dados_Ressarcimento_Icms.db')\n\n if os.path.exists(DBCreated):\n\n connection = conn.cursor()\n try:\n arrDatas = connection.execute(\n f\"SELECT * FROM _produto{CompanyName} WHERE COD_ITEM ='{item}'\")\n conn.commit()\n return arrDatas.fetchall()\n\n except Exception as Ex:\n print(Ex)\n\n else:\n mensagem = 'Database does not exist'\n return mensagem\n \n def save(conn, companyName, info):\n DBCreated = os.path.join(os.getcwd(), 'src', 'database', 'file', 'Banco_Dados_Ressarcimento_Icms.db')\n\n if os.path.exists(DBCreated):\n try:\n c = conn.cursor()\n c.execute(\n f'INSERT INTO _produto{companyName} VALUES (?, ?, ?, ?, ?, ?, ?)', info)\n\n conn.commit()\n\n return True\n\n except Exception as Ex:\n print(Ex)\n return False\n\n else:\n print('Database does not exist')\n return False","repo_name":"Ismael-Oliv/Calculate-previous-balance","sub_path":"src/pages/Importar/cadastro_produtos/repository/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6874788605","text":"import pandas as pd\nimport numpy as np\n# sompy taken from https://github.com/sevamoo/SOMPY\nimport sompy as sp\n# minisom taken from https://github.com/JustGlowing/minisom\nimport minisom as ms\nfrom hyperopt import fmin, tpe, hp, STATUS_OK, Trials\nimport io\nimport pickle\nimport sklearn.metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import SelectKBest, mutual_info_regression, RFE\n\n# ******************** Train on synthetic dataset ******************************\ninputDataFrame = pd.read_csv('./real_data.csv')\n\n# split train/test\nfraud_targets = pd.Series(inputDataFrame[\"isFraud\"])\ninputDataFrame.drop(columns=[\"isFraud\"], inplace=True)\nfraud_features = pd.DataFrame(inputDataFrame)\nX_train, X_test, y_train, y_test = train_test_split(fraud_features, fraud_targets, test_size=0.2, random_state=0)\n\nX_train = X_train.values.tolist()\nX_test = X_test.values.tolist()\n\n# just a heuristic for x and y as it needs to be constant to tune the other hyperparameters\nx = int(np.sqrt(5 * np.sqrt(fraud_features.shape[0])))\ny = x\n# number of features\ninput_len = fraud_features.shape[1]\n\nspace = {'sig': hp.uniform('sig', 0.001, x / 2.01), 'learning_rate': hp.uniform('learning_rate', 0.001, 5)}\n\n# taken from example on https://github.com/JustGlowing/minisom/blob/master/examples/Classification.ipynb\ndef classify(som, data, class_assignments):\n \"\"\"Classifies each sample in data in one of the classes defined\n using the method labels_map.\n Returns a list of the same length as data where the i-th element\n is the class assigned to data[i].\n \"\"\"\n winmap = class_assignments\n default_class = np.sum(list(winmap.values())).most_common()[0][0]\n result = []\n for d in data:\n win_position = som.winner(d)\n if win_position in winmap:\n result.append(winmap[win_position].most_common()[0][0])\n else:\n result.append(default_class)\n return result\n\ndef som_quantization_error(space):\n sig = space['sig']\n learning_rate = space['learning_rate']\n error = ms.MiniSom(x=x, y=y, input_len=input_len, sigma=sig, learning_rate=learning_rate).quantization_error(X_train)\n return {'loss': error, 'status': STATUS_OK}\n\n# hyperparameter tuning to obtain sigma and learning rate\ntrials = Trials()\nbest = fmin(fn=som_quantization_error, space=space, algo=tpe.suggest, max_evals=100, trials=trials)\nprint(best)\n\nsom = ms.MiniSom(x=x, y=y, input_len=input_len, 
sigma=8.007684739287342, learning_rate=4.486348532872689)\nsom.pca_weights_init(X_train)\nsom.train_batch(X_train, 100)\nclass_assignments = som.labels_map(X_train, y_train)\n\nprint(sklearn.metrics.classification_report(y_test, classify(som, X_test, class_assignments)))\n\n# saving the som in the file som.p\nwith open('synthetic_som.p', 'wb') as outfile:\n pickle.dump(som, outfile)\n\n#feature selection\n# k=5\n# mi_transformer = SelectKBest(mutual_info_regression,k=k).fit(X_train, y_train)\n# mi_X_train,mi_X_test = mi_transformer.transform(X_train), mi_transformer.transform(X_test)\n#\n# for feature, importance in zip(fraud_features.columns, mi_transformer.scores_):\n# print(f\"The MI score for {feature} is {importance}\")\n\n# train_data = np.column_stack((X_train, y_train))\n#\n# # Train SOM (Self-organizing-maps)\n# sm = sp.SOMFactory().build(train_data, normalization=\"var\", initialization=\"random\")\n# sm.train(n_job=1, verbose=False, train_rough_len=2, train_finetune_len=5)\n#\n# predicted = sm.predict(X_test, k=5, wt=\"distance\")\n# print(predicted)\n#\n# print(f\"The accuracy score is {sklearn.metrics.accuracy_score(y_test, predicted)}\")\n# print(f\"The f1 score is {sklearn.metrics.f1_score(y_test, predicted)}\")\n","repo_name":"gMorshed/fraudDetection","sub_path":"carl_fnn_old/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2438472945","text":"from pandas import Series,DataFrame\nimport pandas as pd\n\n# numpy, matplotlib, seaborn\nimport numpy as np\n\n# machine learning\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.preprocessing import maxabs_scale\nimport xgboost as xgb\n\n# get homesite & test csv files as a DataFrame\nhomesite_df = pd.read_csv(\"../input/train.csv\")\ntest_df = pd.read_csv(\"../input/test.csv\")\n\n# preview the data\n#homesite_df.head()\n\n# drop unnecessary columns, these columns won't be useful in analysis and prediction\nhomesite_df = homesite_df.drop(['QuoteNumber'], axis=1)\n# date\n\n# Convert Date to Year, Month, and Week\nhomesite_df['Year'] = homesite_df['Original_Quote_Date'].apply(lambda x: int(str(x)[:4]))\nhomesite_df['Month'] = homesite_df['Original_Quote_Date'].apply(lambda x: int(str(x)[5:7]))\nhomesite_df['Week'] = homesite_df['Original_Quote_Date'].apply(lambda x: int(str(x)[8:10]))\n\ntest_df['Year'] = test_df['Original_Quote_Date'].apply(lambda x: int(str(x)[:4]))\ntest_df['Month'] = test_df['Original_Quote_Date'].apply(lambda x: int(str(x)[5:7]))\ntest_df['Week'] = test_df['Original_Quote_Date'].apply(lambda x: int(str(x)[8:10]))\n\nhomesite_df.drop(['Original_Quote_Date'], axis=1,inplace=True)\ntest_df.drop(['Original_Quote_Date'], axis=1,inplace=True)\n\n# fill NaN values\nhomesite_df.fillna(-1, inplace=True)\ntest_df.fillna(-1, inplace=True)\n\n# There are some columns with non-numerical values(i.e. 
dtype='object'),\n# So, we will create a corresponding unique numerical value for each non-numerical value in a column of training and testing set.\n\nfrom sklearn import preprocessing\n\nfor f in homesite_df.columns:\n if homesite_df[f].dtype=='object':\n lbl = preprocessing.LabelEncoder()\n lbl.fit(np.unique(list(homesite_df[f].values) + list(test_df[f].values)))\n homesite_df[f] = lbl.transform(list(homesite_df[f].values))\n test_df[f] = lbl.transform(list(test_df[f].values))\n \n# define training and testing sets\nX_train = homesite_df.drop(\"QuoteConversion_Flag\",axis=1)\nY_train = homesite_df[\"QuoteConversion_Flag\"]\nX_test = test_df.drop(\"QuoteNumber\",axis=1).copy()\nnames = [x for x in X_train.columns]\n\nfrom sklearn.linear_model import RandomizedLasso\nrlasso = RandomizedLasso(alpha=0.025)\nrlasso.fit(X_train, Y_train)\nres = pd.DataFrame(\n sorted(zip(map(lambda x: round(x, 4), rlasso.scores_), names), reverse=True),\n columns=[\"score\", \"feature\"])\nres.to_csv('res.csv', index=False)\n\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/homesite-quote-conversion/jack gong/fease2.py","file_name":"fease2.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"16785655135","text":"from app.page_objects.page_object import PageObject\n\n\nclass LinkedInSearchResultsPageObject(PageObject):\n def __init__(self, page_browser):\n super().__init__(page_browser)\n\n def scrape_contact_links(self):\n self.wait_by_class_name(\"search-results__total\")\n\n search_results = self.browser.find_elements_by_class_name(\"search-result--person\")\n filtered_search_results = [search_result for search_result in search_results if \"1st\" in search_result.text]\n\n contact_links = set()\n\n for result in filtered_search_results:\n contacts = result.find_elements_by_class_name(\"search-result__result-link\")\n contact_links.update(contact.get_property('href') for contact in contacts if contact.get_property('href'))\n\n return list(contact_links)\n","repo_name":"DEV3L/agile-scraper","sub_path":"app/page_objects/linkedin_search_results_page_object.py","file_name":"linkedin_search_results_page_object.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15688232037","text":"import asyncio\nfrom struct import pack, unpack\nimport tty\nimport termios\nimport signal\nimport os\nimport argparse\nimport getpass\nimport uuid\nimport secrets\nimport time\nimport sys\nimport encryption\nimport elliptic_curves\n\nBUF_SIZE = 1024\nPACKET_HEADER_LEN = 0x16\n\nSYS_VERSION = 1\nSYS_CLIENT_TYPE = 0x15\n\nSYS_START_SESSION = 0\nSYS_DATA = 1\nSYS_ACKNOWLEDGE = 2\nSYS_END_SESSION = 255\n\nCP_MAGIC = b'\\x56\\x34\\x12\\xff'\n\nCP_BEGIN_AUTHENICATION = 0\nCP_ENCRYPTION_KEY = 1\nCP_PASSWORD = 2\nCP_USERNAME = 3\nCP_TERMINAL_TYPE = 4\nCP_TERMINAL_WIDTH = 5\nCP_TERMINAL_HEIGHT = 6\nCP_END_AUTHENTICATION = 9\n\nclass ControlPacket():\n\n    def __init__(self, cp_type: int = 0, data: bytes = b''):\n        self.magic = CP_MAGIC\n        self.packet_type = cp_type\n        self.data = data\n        self.data_length = len(self.data)\n\n    # for debugging\n    def __str__(self):\n        if self.packet_type == CP_BEGIN_AUTHENICATION:\n            output = \"CP_BEGIN_AUTHENTICATION:\"\n        elif self.packet_type == CP_ENCRYPTION_KEY:\n            output = \"CP_ENCRYPTION_KEY:\"\n        elif self.packet_type == CP_PASSWORD:\n            output = \"CP_PASSWORD:\"\n        elif self.packet_type == CP_USERNAME:\n            output = \"CP_USERNAME:\"\n        elif 
self.packet_type == CP_TERMINAL_TYPE:\n output = \"CP_TERMINAL_TYPE:\"\n elif self.packet_type == CP_TERMINAL_WIDTH:\n output = \"CP_TERMINAL_WIDTH:\"\n elif self.packet_type == CP_TERMINAL_HEIGHT:\n output = \"CP_TERMINAL_HEIGHT:\"\n elif self.packet_type == CP_END_AUTHENTICATION:\n output = \"CP_END_AUTHENTICATION:\"\n else:\n output = \"CP_UNKNOWN_TYPE:\"\n\n output += \" len=\" + str(self.data_length)\n output += \" data=\" + self.data.hex()\n return output\n\n def to_bytes(self):\n header = pack(\">4sbI\",\n CP_MAGIC,\n self.packet_type,\n self.data_length\n )\n return header + self.data\n\n @classmethod\n def from_bytes(cls, data):\n cpacket = cls()\n magic, cpacket.packet_type, cpacket.data_length = unpack(\">4sbI\", data[:9])\n if magic != CP_MAGIC:\n print(\"error: parsing control packet with incorrect magic bytes\", data)\n cp_end = 9+cpacket.data_length\n cpacket.data = data[9:cp_end]\n return data[cp_end:], cpacket\n\nclass Packet():\n\n def __init__(self):\n self.version = 1\n self.message_type = -1\n self.dst_mac = b''\n self.src_mac = b''\n self.client_type = 21\n self.session_id = -1\n self.message_type = -1\n self.byte_counter = -1\n self.data = b''\n self.control_packets = []\n\n # for debugging\n def __str__(self):\n output = \"MACTelnet Packet v\" + str(self.version) + \" \"\n if self.message_type == SYS_START_SESSION:\n output += \"SYS_START_SESSION\"\n elif self.message_type == SYS_DATA:\n output += \"SYS_DATA\"\n elif self.message_type == SYS_ACKNOWLEDGE:\n output += \"SYS_ACKNOWLEDGE\"\n elif self.message_type == SYS_END_SESSION:\n output += \"SYS_END_SESSION\"\n else:\n output += \"SYS_UNKNOWN_TYPE\"\n output += \"\\nsrc_mac=\"\n output += \":\".join(self.src_mac.hex()[i:i+2] for i in range(0, 12, 2))\n output += \" dst_mac=\"\n output += \":\".join(self.dst_mac.hex()[i:i+2] for i in range(0, 12, 2))\n output += \"\\nsession_id=\" + str(self.session_id)\n output += \" byte_counter=\" + str(self.byte_counter)\n if self.control_packets != []:\n output += \"\\nControl Packets:\\n\"\n for cp in self.control_packets:\n output += \"\\t\" + str(cp)\n else:\n output += \"\\nPacket Data=\" + self.data.hex()\n return output\n\n # length of packet data\n def __len__(self):\n return len(self.to_bytes()) - PACKET_HEADER_LEN\n\n def to_bytes(self):\n header = pack(\">BB6s6sHHI\",\n self.version,\n self.message_type,\n self.src_mac,\n self.dst_mac,\n self.session_id,\n self.client_type,\n self.byte_counter)\n # if there's control packets, add them and return\n if self.control_packets != []:\n cp_bytes = b''.join([cp.to_bytes() for cp in self.control_packets])\n return header + cp_bytes\n # otherwise, just append data\n return header + self.data\n\n @classmethod\n def from_bytes(cls, data):\n packet = cls()\n (packet.version,\n packet.message_type,\n packet.src_mac,\n packet.dst_mac,\n packet.session_id,\n packet.client_type,\n packet.byte_counter) = unpack(\">BB6s6sHHI\", data[:PACKET_HEADER_LEN])\n\n # if there's nothing remaining, or no control packets, just return\n remaining = data[PACKET_HEADER_LEN:]\n if remaining == b'':\n return packet\n if remaining[:4] != CP_MAGIC:\n packet.data = remaining\n return packet\n\n while remaining != b'':\n remaining, cpacket = ControlPacket.from_bytes(remaining)\n packet.control_packets.append(cpacket)\n return packet\n\nclass MACTelnetProtocol(asyncio.Protocol):\n\n def __init__(self, mac, username, password, on_session_end):\n self.username = username\n self.password = password\n self.my_ip = None\n self.src_mac = 
uuid.getnode().to_bytes(6, \"big\")\n self.dst_mac = bytes.fromhex(mac.replace(\":\", \"\"))\n self.port = 20561\n self.session_id = secrets.randbits(16)\n self.transport = None\n\n self.unacked_packet = None\n self.acked_counter = 0\n self.send_counter = 0\n self.receive_counter = 0\n self.last_msg_time = time.time()\n\n self.w = elliptic_curves.WCurve()\n self.client_private = b''\n self.client_public = b''\n self.server_public = b''\n self.client_parity = -1\n self.server_parity = -1\n self.salt = b''\n\n self.old_tty = termios.tcgetattr(sys.stdin.fileno())\n self.on_session_end = on_session_end\n self.keepalive_task = asyncio.create_task(self.keepalive())\n\n def make_packet(self, message_type):\n packet = Packet()\n packet.message_type = message_type\n packet.src_mac = self.src_mac\n packet.dst_mac = self.dst_mac\n packet.session_id = self.session_id\n packet.byte_counter = self.send_counter\n return packet\n\n async def keepalive(self):\n while True:\n if time.time() > self.last_msg_time + 10:\n self.send_ack(None)\n self.last_msg_time = time.time()\n elif self.on_session_end.done():\n return\n else:\n await asyncio.sleep(0.001)\n\n def send(self, packet):\n # resend last packet if not acked\n if self.unacked_packet is not None and (self.acked_counter < self.send_counter or\n self.acked_counter + self.send_counter > 65535):\n self.transport.sendto(self.unacked_packet.to_bytes(),\n (\"255.255.255.255\", self.port))\n\n self.send_counter += len(packet)\n\n if self.send_counter > 65535:\n self.send_counter -= 65536\n\n self.transport.sendto(packet.to_bytes(),\n (\"255.255.255.255\", self.port))\n self.unacked_packet = packet\n\n def send_ack(self, packet):\n ack = self.make_packet(SYS_ACKNOWLEDGE)\n if packet is not None:\n ack.byte_counter = packet.byte_counter + len(packet)\n else:\n # for keep alives:\n ack.byte_counter = self.receive_counter\n self.transport.sendto(ack.to_bytes(), (\"255.255.255.255\", self.port))\n\n def gen_confirmation_code(self):\n validator = self.w.gen_password_validator_priv(self.username, self.password, self.salt)\n validator_point = self.w.redp1(self.w.gen_public_key(validator)[0], 1)\n server_public_point = self.w.lift_x(int.from_bytes(self.server_public, \"big\"),\n self.server_parity)\n server_public_point += validator_point\n pubkeys_hashed = encryption.get_sha2_digest(self.client_public + self.server_public)\n vh = int.from_bytes(validator, \"big\") * int.from_bytes(pubkeys_hashed, \"big\")\n vh += int.from_bytes(self.client_private, \"big\")\n vh = self.w.finite_field_value(vh)\n pt = vh * server_public_point\n z_input, _ = self.w.to_montgomery(pt)\n return encryption.get_sha2_digest(pubkeys_hashed + z_input)\n\n def handle_control_packet(self, packet):\n # assume len(packet.control_packets) > 1\n if packet.control_packets[0].packet_type == CP_ENCRYPTION_KEY:\n self.server_public = packet.control_packets[0].data[:0x20]\n self.server_parity = packet.control_packets[0].data[0x20]\n self.salt = packet.control_packets[0].data[0x21:]\n\n if len(self.salt) != 16:\n # server error retrieving user, exit\n print(\"error: user not registered on server\")\n self.on_session_end.set_result(True)\n return\n\n confirmation_code = self.gen_confirmation_code()\n\n confirmation = self.make_packet(SYS_DATA)\n confirmation.control_packets.append(\n ControlPacket(CP_PASSWORD, confirmation_code))\n confirmation.control_packets.append(\n ControlPacket(CP_USERNAME, self.username.encode()))\n\n term_type = os.getenv(\"TERM\").encode()\n term_size = os.get_terminal_size()\n 
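`send()` above keeps `send_counter` inside 16 bits by subtracting 65536 once it overflows, and the receive/ack counters follow the same rule. The wraparound in isolation, as a hedged helper:

```python
def advance_counter(counter: int, nbytes: int) -> int:
    """Advance a MACTelnet byte counter, wrapping at the 16-bit boundary."""
    counter += nbytes
    if counter > 65535:
        counter -= 65536
    return counter

assert advance_counter(65530, 10) == 4  # 65540 wraps around to 4
```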
term_width = term_size[0].to_bytes(2, \"little\")\n term_height = term_size[1].to_bytes(2, \"little\")\n\n confirmation.control_packets.append(\n ControlPacket(CP_TERMINAL_TYPE, term_type))\n confirmation.control_packets.append(\n ControlPacket(CP_TERMINAL_WIDTH, term_width))\n confirmation.control_packets.append(\n ControlPacket(CP_TERMINAL_HEIGHT, term_height))\n self.send(confirmation)\n\n elif packet.control_packets[0].packet_type == CP_END_AUTHENTICATION:\n # not exactly \"logged in\" because auth can fail,\n # but we receieve auth failure as terminal data so we set it up here\n tty.setraw(sys.stdin)\n # helps with reliability to reset window after auth\n loop = asyncio.get_running_loop()\n loop.call_later(1, handle_sigwinch, self)\n\n # asyncio.Protocol methods\n def connection_made(self, transport):\n self.transport = transport\n self.send(self.make_packet(SYS_START_SESSION))\n\n def datagram_received(self, data, addr):\n if self.my_ip is None: # assume first packet is from me\n self.my_ip = addr[0]\n return\n if self.my_ip == addr[0]: # and ignore all future packets from myself\n return\n\n self.last_msg_time = time.time()\n packet = Packet.from_bytes(data)\n\n if packet.message_type == SYS_START_SESSION:\n print(\"error: client recieved SYS_START_SESSION\")\n elif packet.message_type == SYS_DATA:\n self.send_ack(packet)\n\n if packet.byte_counter + len(packet) > self.receive_counter or \\\n self.receive_counter + len(packet) > 65535:\n # new data, set receive_counter\n self.receive_counter += len(packet)\n else:\n # repeat packet, don't handle\n return\n\n if packet.control_packets != []:\n self.handle_control_packet(packet)\n else:\n # terminal data, print to stdout\n sys.stdout.buffer.write(packet.data)\n sys.stdout.flush()\n\n elif packet.message_type == SYS_ACKNOWLEDGE:\n if self.acked_counter <= packet.byte_counter or \\\n self.acked_counter + packet.byte_counter > 65535:\n self.acked_counter = packet.byte_counter\n\n # initial ack from server, start handshake\n if self.client_private == b'':\n self.client_private = secrets.token_bytes(32)\n self.client_public, self.client_parity = self.w.gen_public_key(self.client_private)\n key_data = self.username.encode('utf-8') + b'\\x00'\n key_data += self.client_public\n key_data += int(self.client_parity).to_bytes(1, \"big\")\n\n pubkey_packet = self.make_packet(SYS_DATA)\n pubkey_packet.control_packets = [\n ControlPacket(CP_BEGIN_AUTHENICATION),\n ControlPacket(CP_ENCRYPTION_KEY, key_data)\n ]\n self.send(pubkey_packet)\n\n elif packet.message_type == SYS_END_SESSION:\n self.send_ack(packet)\n self.send(self.make_packet(SYS_END_SESSION))\n termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, self.old_tty)\n self.on_session_end.set_result(True)\n\n else:\n print(\"error: unknown message type\")\n\ndef handle_sigwinch(mactelnet):\n packet = mactelnet.make_packet(SYS_DATA)\n term_size = os.get_terminal_size()\n term_width = term_size[0].to_bytes(2, \"little\")\n term_height = term_size[1].to_bytes(2, \"little\")\n\n packet.control_packets.append(\n ControlPacket(CP_TERMINAL_WIDTH, term_width))\n packet.control_packets.append(\n ControlPacket(CP_TERMINAL_HEIGHT, term_height))\n\n mactelnet.send(packet)\n\ndef send_raw_byte(mactelnet):\n char = sys.stdin.buffer.raw.read(1)\n packet = mactelnet.make_packet(SYS_DATA)\n packet.data = char\n mactelnet.send(packet)\n\nasync def main(mac: str = \"00:00:00:00:00:00\", username: str = \"admin\", password: str = \"\"):\n loop = asyncio.get_running_loop()\n\n on_session_end = 
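Terminal dimensions travel as 2-byte little-endian values in `CP_TERMINAL_WIDTH`/`CP_TERMINAL_HEIGHT` control packets, both at authentication time and on `SIGWINCH` as shown above. The encoding on its own — using `shutil.get_terminal_size`, which unlike `os.get_terminal_size` falls back to 80x24 when stdout is not a tty:

```python
import shutil

cols, rows = shutil.get_terminal_size()
width_bytes = cols.to_bytes(2, "little")
height_bytes = rows.to_bytes(2, "little")
assert int.from_bytes(width_bytes, "little") == cols
```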
loop.create_future()\n\n transport, protocol = await loop.create_datagram_endpoint(\n lambda: MACTelnetProtocol(mac=mac,\n username=username,\n password=password,\n on_session_end=on_session_end),\n allow_broadcast=True, local_addr=('0.0.0.0', 20561))\n\n loop.add_reader(sys.stdin, lambda: send_raw_byte(protocol))\n\n loop.add_signal_handler(\n getattr(signal, \"SIGWINCH\"),\n lambda: handle_sigwinch(protocol))\n\n try:\n await asyncio.gather(\n on_session_end,\n protocol.keepalive_task\n )\n finally:\n transport.close()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='MAC Telnet Client')\n parser.add_argument('mac')\n parser.add_argument('-u', '--username')\n parser.add_argument('-p', '--password')\n\n args = parser.parse_args()\n\n username = args.username if args.username is not None else input(\"Username: \")\n password = args.password if args.password is not None else getpass.getpass(\"Password: \")\n\n asyncio.run(main(args.mac, username, password))","repo_name":"MarginResearch/mikrotik_authentication","sub_path":"src/mactelnet.py","file_name":"mactelnet.py","file_ext":"py","file_size_in_byte":15249,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"54"} +{"seq_id":"17594513917","text":"import os\n\nfrom telegram.ext import (Updater, CommandHandler)\n\nfrom handlers.new_members_handler import new_members_handler\nfrom handlers.start_handler import start_handler\nfrom handlers.sticker_document_handler import sticker_document_handler\n\ndef error_handler(bot, update, error):\n print('Update \"%s\" caused error \"%s\"' % (update, error))\n\ndef run():\n token = os.getenv('TELEGRAM_TOKEN')\n\n updater = Updater(token)\n dispatcher = updater.dispatcher\n\n dispatcher.add_handler(start_handler)\n dispatcher.add_handler(new_members_handler)\n dispatcher.add_handler(sticker_document_handler)\n dispatcher.add_error_handler(error_handler)\n\n updater.start_polling()\n updater.idle()\n\nif __name__ == '__main__':\n run()","repo_name":"tualatrix/powerbot","sub_path":"powerbot/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"54"} +{"seq_id":"5915932785","text":"#%% Init\n\nimport boto3\nimport time \n\n# Bucket/Prefix constants, bring your own\nBUCKET_NAME = \"austin-schaffer\"\nDATA_PREFIX = \"egress-test/\"\n\n# Creates a test CSV with a single column, and 500 rows of \"content\"\nTEST_DATA = b\"column_name\\n\" + (b\"test data please ignore just some bytes totalling a few KB\\n\" * 500)\n\n# synchronous AWS S3 clients/resources\ns3 = boto3.client(\"s3\")\ns3_resource = boto3.resource(\"s3\")\n\n#%% Load Data\n\nprint(\"Loading data into S3...\")\nprint(\"You can comment out the `s3.put_object` call after the first run.\")\n\nkeys = []\nfor i in range(101):\n key = f\"{DATA_PREFIX}test_data.{i}.csv\"\n keys.append(key)\n # s3.put_object(Body=TEST_DATA, Bucket=BUCKET_NAME, Key=key)\n\n#%% Synchronous Download (Prefix)\n\nimport io\n\ntime.sleep(2)\nprint(\"Initiating synchronous S3 object download (by prefix) in 2 seconds...\")\ntime.sleep(2)\n\ndata = {}\nbucket = s3_resource.Bucket(BUCKET_NAME)\nfor obj_handle in bucket.objects.filter(Prefix=DATA_PREFIX):\n print(\"Downloading:\", obj_handle.key)\n obj = s3.get_object(Bucket=BUCKET_NAME, Key=obj_handle.key)\n data[obj_handle.key] = obj[\"Body\"].read()\n\nprint(\"Objects Downloaded (synchronous prefix method):\", len(data))\n\n#%% Synchronous Download 
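The synchronous "by prefix" download above iterates `bucket.objects.filter(Prefix=...)`, which paginates transparently. The equivalent client-level loop makes the pagination explicit; bucket and prefix are the same placeholders used in the script:

```python
import boto3

s3 = boto3.client("s3")
paginator = s3.get_paginator("list_objects_v2")
data = {}
for page in paginator.paginate(Bucket="austin-schaffer", Prefix="egress-test/"):
    for obj in page.get("Contents", []):
        data[obj["Key"]] = s3.get_object(
            Bucket="austin-schaffer", Key=obj["Key"])["Body"].read()
```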
(Keys)\n\nimport io\nimport time\n\ntime.sleep(2)\nprint(\"Initiating synchronous S3 object download (list of keys) in 2 seconds...\")\ntime.sleep(2)\n\ndata = {}\nbucket = s3_resource.Bucket(BUCKET_NAME)\nfor key in keys:\n print(\"Downloading:\", key)\n obj = s3.get_object(Bucket=BUCKET_NAME, Key=key)\n data[key] = obj[\"Body\"].read()\n\nprint(\"Objects Downloaded (synchronous keys method):\", len(data))\n\n#%% Async Method (Prefix)\n\nimport aiobotocore\nimport asyncio\n\ntime.sleep(2)\nprint(\"Initiating async S3 object download (by prefix) in 2 seconds...\")\ntime.sleep(2)\n\nasync def get_objects_by_prefix(bucket, prefix):\n data = {}\n session = aiobotocore.get_session()\n async with session.create_client('s3') as s3_client:\n async def _get_object(key):\n if isinstance(key, dict):\n key = key[\"Key\"]\n return (key, await s3_client.get_object(Bucket=bucket, Key=key))\n\n paginator = s3_client.get_paginator('list_objects')\n async for result in paginator.paginate(Bucket=bucket, Prefix=prefix):\n contents = result.get('Contents', [])\n for future in asyncio.as_completed(map(_get_object, contents)):\n key, s3_object = await future\n print(\"Downloaded:\", key)\n data[key] = await s3_object[\"Body\"].read()\n\n return data\n\ndata = asyncio.run(get_objects_by_prefix(BUCKET_NAME, DATA_PREFIX))\nprint(\"Objects Downloaded (async prefix method):\", len(data))\n\n#%% Async Method (Keys)\n\ntime.sleep(2)\nprint(\"Initiating async S3 object download (list of keys) in 2 seconds...\")\ntime.sleep(2)\n\nasync def get_objects(bucket, keys):\n data = {}\n session = aiobotocore.get_session()\n async with session.create_client('s3') as s3_client:\n\n async def _get_object(key):\n if isinstance(key, dict):\n key = key[\"Key\"]\n return (key, await s3_client.get_object(Bucket=bucket, Key=key))\n\n for future in asyncio.as_completed(map(_get_object, keys)):\n key, s3_object = await future\n print(\"Downloaded:\", key)\n data[key] = await s3_object[\"Body\"].read()\n\n return data\n\ndata = asyncio.run(get_objects(BUCKET_NAME, keys))\nprint(\"Objects Downloaded (async keys method):\", len(data))\n","repo_name":"AustinTSchaffer/DailyProgrammer","sub_path":"Python/ParallelS3Download/sync_vs_async_s3_download.py","file_name":"sync_vs_async_s3_download.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"73623063522","text":"from Telugucoders.core.database import db\n\nlangdb = db.language\n\nlangm = {}\n\nasync def get_lang(chat_id: int) -> str:\n mode = langm.get(chat_id)\n if not mode:\n lang = await langdb.find_one({\"chat_id\": chat_id})\n if not lang:\n langm[chat_id] = \"en\"\n return \"en\"\n langm[chat_id] = lang[\"lang\"]\n return lang[\"lang\"]\n return mode\n\n\nasync def set_lang(chat_id: int, lang: str):\n langm[chat_id] = lang\n await langdb.update_one(\n {\"chat_id\": chat_id}, {\"$set\": {\"lang\": lang}}, upsert=True\n )\n","repo_name":"STKR2/all","sub_path":"Telugucoders/core/database/language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"15901683537","text":"\"extract ca-certificates and converts to version 2 JKS from PEM-encoded x509 certs\"\n\nload(\"@rules_pkg//:providers.bzl\", \"PackageFilesInfo\")\nload(\"@rules_pkg//:pkg.bzl\", \"pkg_tar\")\n\nCMD = \"\"\"\\\n#!/usr/bin/env bash\nset -o pipefail -o errexit\ntar -xOf \"$1\" 
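Both async variants above launch one `get_object` per key with no cap on concurrency. A hedged refinement bounds in-flight requests with a semaphore so very large key lists don't open hundreds of connections at once; the limit of 16 is an arbitrary illustrative choice, and `s3_client` is an aiobotocore client as created above:

```python
import asyncio

async def fetch_all(s3_client, bucket, keys, limit=16):
    sem = asyncio.Semaphore(limit)

    async def fetch(key):
        async with sem:  # at most `limit` GETs in flight
            obj = await s3_client.get_object(Bucket=bucket, Key=key)
            return key, await obj["Body"].read()

    return dict(await asyncio.gather(*(fetch(k) for k in keys)))
```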
etc/ssl/certs/ca-certificates.crt | $3 > $2\n\"\"\"\n\ndef _impl(ctx):\n cacerts = ctx.actions.declare_file(ctx.label.name)\n ctx.actions.run_shell(\n outputs = [cacerts],\n inputs = [ctx.file.cacerts_tar],\n tools = [ctx.file._jksutil],\n arguments = [ctx.file.cacerts_tar.path, cacerts.path, ctx.executable._jksutil.path],\n command = CMD,\n )\n return [\n DefaultInfo(files = depset([cacerts])),\n PackageFilesInfo(dest_src_map = {\"/etc/ssl/certs/java/cacerts\": cacerts}),\n ]\n\n_cacerts_java = rule(\n doc = \"\"\"\nRule for converting the PEM formatted ca-certs in to JKS format. Output is a tar\nfile with the JKS file at etc/ssl/certs/java/cacerts.\n\"\"\",\n attrs = {\n \"cacerts_tar\": attr.label(\n allow_single_file = [\".tar\"],\n mandatory = True,\n ),\n \"_jksutil\": attr.label(\n default = Label(\"//cacerts/jksutil:jksutil\"),\n cfg = \"host\",\n executable = True,\n allow_single_file = True,\n ),\n },\n implementation = _impl,\n)\n\ndef cacerts_java(name, cacerts_tar, **kwargs):\n _cacerts_java(name = \"%s_extract\" % name, cacerts_tar = cacerts_tar, **kwargs)\n pkg_tar(name = name, srcs = [\"%s_extract\" % name], **kwargs)\n","repo_name":"GoogleContainerTools/distroless","sub_path":"cacerts/java.bzl","file_name":"java.bzl","file_ext":"bzl","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":16714,"dataset":"github-code","pt":"54"} +{"seq_id":"390721144","text":"from __future__ import annotations\n\nimport datetime\nimport logging\nimport os\nimport re\nimport socket\nimport sys\nimport typing\nimport warnings\nfrom http.client import HTTPConnection as _HTTPConnection\nfrom http.client import HTTPException as HTTPException # noqa: F401\nfrom http.client import ResponseNotReady\nfrom socket import timeout as SocketTimeout\n\nif typing.TYPE_CHECKING:\n from typing import Literal\n\n from .response import HTTPResponse\n from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT\n from .util.ssltransport import SSLTransport\n\nfrom ._collections import HTTPHeaderDict\nfrom .util.response import assert_header_parsing\nfrom .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout\nfrom .util.util import to_str\nfrom .util.wait import wait_for_read\n\ntry: # Compiled with SSL?\n import ssl\n\n BaseSSLError = ssl.SSLError\nexcept (ImportError, AttributeError):\n ssl = None # type: ignore[assignment]\n\n class BaseSSLError(BaseException): # type: ignore[no-redef]\n pass\n\n\nfrom ._base_connection import _TYPE_BODY\nfrom ._base_connection import ProxyConfig as ProxyConfig\nfrom ._base_connection import _ResponseOptions as _ResponseOptions\nfrom ._version import __version__\nfrom .exceptions import (\n ConnectTimeoutError,\n HeaderParsingError,\n NameResolutionError,\n NewConnectionError,\n ProxyError,\n SystemTimeWarning,\n)\nfrom .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_\nfrom .util.request import body_to_chunks\nfrom .util.ssl_ import assert_fingerprint as _assert_fingerprint\nfrom .util.ssl_ import (\n create_urllib3_context,\n is_ipaddress,\n resolve_cert_reqs,\n resolve_ssl_version,\n ssl_wrap_socket,\n)\nfrom .util.ssl_match_hostname import CertificateError, match_hostname\nfrom .util.url import Url\n\n# Not a no-op, we're adding this to the namespace so it can be imported.\nConnectionError = ConnectionError\nBrokenPipeError = BrokenPipeError\n\n\nlog = logging.getLogger(__name__)\n\nport_by_scheme = {\"http\": 80, \"https\": 443}\n\n# When it comes time to update this value as a part of regular maintenance\n# (ie test_recent_date is 
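A hedged example of wiring the `cacerts_java` macro above into a `BUILD.bazel` file; only the macro name and the `cacerts_tar` attribute come from `java.bzl` itself, and the label for the Debian ca-certificates tarball is hypothetical:

```python
load("//cacerts:java.bzl", "cacerts_java")

cacerts_java(
    name = "cacerts_java",
    # Hypothetical external repository providing the PEM bundle tar.
    cacerts_tar = "@debian_ca_certificates//:data.tar",
)
```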
failing) update it to ~6 months before the current date.\nRECENT_DATE = datetime.date(2022, 1, 1)\n\n_CONTAINS_CONTROL_CHAR_RE = re.compile(r\"[^-!#$%&'*+.^_`|~0-9a-zA-Z]\")\n\n_HAS_SYS_AUDIT = hasattr(sys, \"audit\")\n\n\nclass HTTPConnection(_HTTPConnection):\n \"\"\"\n Based on :class:`http.client.HTTPConnection` but provides an extra constructor\n backwards-compatibility layer between older and newer Pythons.\n\n Additional keyword parameters are used to configure attributes of the connection.\n Accepted parameters include:\n\n - ``source_address``: Set the source address for the current connection.\n - ``socket_options``: Set specific options on the underlying socket. If not specified, then\n defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling\n Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.\n\n For example, if you wish to enable TCP Keep Alive in addition to the defaults,\n you might pass:\n\n .. code-block:: python\n\n HTTPConnection.default_socket_options + [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n ]\n\n Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).\n \"\"\"\n\n default_port: typing.ClassVar[int] = port_by_scheme[\"http\"] # type: ignore[misc]\n\n #: Disable Nagle's algorithm by default.\n #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``\n default_socket_options: typing.ClassVar[connection._TYPE_SOCKET_OPTIONS] = [\n (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n ]\n\n #: Whether this connection verifies the host's certificate.\n is_verified: bool = False\n\n #: Whether this proxy connection verified the proxy host's certificate.\n # If no proxy is currently connected to the value will be ``None``.\n proxy_is_verified: bool | None = None\n\n blocksize: int\n source_address: tuple[str, int] | None\n socket_options: connection._TYPE_SOCKET_OPTIONS | None\n\n _has_connected_to_proxy: bool\n _response_options: _ResponseOptions | None\n _tunnel_host: str | None\n _tunnel_port: int | None\n _tunnel_scheme: str | None\n\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n ) -> None:\n super().__init__(\n host=host,\n port=port,\n timeout=Timeout.resolve_default_timeout(timeout),\n source_address=source_address,\n blocksize=blocksize,\n )\n self.socket_options = socket_options\n self.proxy = proxy\n self.proxy_config = proxy_config\n\n self._has_connected_to_proxy = False\n self._response_options = None\n self._tunnel_host: str | None = None\n self._tunnel_port: int | None = None\n self._tunnel_scheme: str | None = None\n\n # https://github.com/python/mypy/issues/4125\n # Mypy treats this as LSP violation, which is considered a bug.\n # If `host` is made a property it violates LSP, because a writeable attribute is overridden with a read-only one.\n # However, there is also a `host` setter so LSP is not violated.\n # Potentially, a `@host.deleter` might be needed depending on how this issue will be fixed.\n @property\n def host(self) -> str:\n \"\"\"\n Getter method to remove any trailing dots that indicate the hostname is an FQDN.\n\n In general, SSL certificates don't include the trailing dot indicating a\n fully-qualified domain name, and thus, they don't validate properly 
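Following the constructor docstring above, a connection that keeps the Nagle-disabling default and additionally enables TCP keep-alive — a sketch with a placeholder host:

```python
import socket
from urllib3.connection import HTTPConnection

conn = HTTPConnection(
    "example.com",
    80,
    socket_options=HTTPConnection.default_socket_options
    + [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)],
)
```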
when\n checked against a domain name that includes the dot. In addition, some\n servers may not expect to receive the trailing dot when provided.\n\n However, the hostname with trailing dot is critical to DNS resolution; doing a\n lookup with the trailing dot will properly only resolve the appropriate FQDN,\n whereas a lookup without a trailing dot will search the system's search domain\n list. Thus, it's important to keep the original host around for use only in\n those cases where it's appropriate (i.e., when doing DNS lookup to establish the\n actual TCP connection across which we're going to send HTTP requests).\n \"\"\"\n return self._dns_host.rstrip(\".\")\n\n @host.setter\n def host(self, value: str) -> None:\n \"\"\"\n Setter for the `host` property.\n\n We assume that only urllib3 uses the _dns_host attribute; httplib itself\n only uses `host`, and it seems reasonable that other libraries follow suit.\n \"\"\"\n self._dns_host = value\n\n def _new_conn(self) -> socket.socket:\n \"\"\"Establish a socket connection and set nodelay settings on it.\n\n :return: New socket connection.\n \"\"\"\n try:\n sock = connection.create_connection(\n (self._dns_host, self.port),\n self.timeout,\n source_address=self.source_address,\n socket_options=self.socket_options,\n )\n except socket.gaierror as e:\n raise NameResolutionError(self.host, self, e) from e\n except SocketTimeout as e:\n raise ConnectTimeoutError(\n self,\n f\"Connection to {self.host} timed out. (connect timeout={self.timeout})\",\n ) from e\n\n except OSError as e:\n raise NewConnectionError(\n self, f\"Failed to establish a new connection: {e}\"\n ) from e\n\n # Audit hooks are only available in Python 3.8+\n if _HAS_SYS_AUDIT:\n sys.audit(\"http.client.connect\", self, self.host, self.port)\n\n return sock\n\n def set_tunnel(\n self,\n host: str,\n port: int | None = None,\n headers: typing.Mapping[str, str] | None = None,\n scheme: str = \"http\",\n ) -> None:\n if scheme not in (\"http\", \"https\"):\n raise ValueError(\n f\"Invalid proxy scheme for tunneling: {scheme!r}, must be either 'http' or 'https'\"\n )\n super().set_tunnel(host, port=port, headers=headers)\n self._tunnel_scheme = scheme\n\n def connect(self) -> None:\n self.sock = self._new_conn()\n if self._tunnel_host:\n # If we're tunneling it means we're connected to our proxy.\n self._has_connected_to_proxy = True\n\n # TODO: Fix tunnel so it doesn't depend on self.sock state.\n self._tunnel() # type: ignore[attr-defined]\n\n # If there's a proxy to be connected to we are fully connected.\n # This is set twice (once above and here) due to forwarding proxies\n # not using tunnelling.\n self._has_connected_to_proxy = bool(self.proxy)\n\n @property\n def is_closed(self) -> bool:\n return self.sock is None\n\n @property\n def is_connected(self) -> bool:\n if self.sock is None:\n return False\n return not wait_for_read(self.sock, timeout=0.0)\n\n @property\n def has_connected_to_proxy(self) -> bool:\n return self._has_connected_to_proxy\n\n def close(self) -> None:\n try:\n super().close()\n finally:\n # Reset all stateful properties so connection\n # can be re-used without leaking prior configs.\n self.sock = None\n self.is_verified = False\n self.proxy_is_verified = None\n self._has_connected_to_proxy = False\n self._response_options = None\n self._tunnel_host = None\n self._tunnel_port = None\n self._tunnel_scheme = None\n\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n 
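The `host` property above keeps the FQDN form (with trailing dot) in `_dns_host` for resolution while exposing the stripped form for certificate checks. The split in miniature:

```python
dns_host = "example.com."           # what the resolver should see
cert_host = dns_host.rstrip(".")    # what certificate matching should see
assert cert_host == "example.com"
```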
\"\"\"\"\"\"\n # Empty docstring because the indentation of CPython's implementation\n # is broken but we don't want this method in our documentation.\n match = _CONTAINS_CONTROL_CHAR_RE.search(method)\n if match:\n raise ValueError(\n f\"Method cannot contain non-token characters {method!r} (found at least {match.group()!r})\"\n )\n\n return super().putrequest(\n method, url, skip_host=skip_host, skip_accept_encoding=skip_accept_encoding\n )\n\n def putheader(self, header: str, *values: str) -> None:\n \"\"\"\"\"\"\n if not any(isinstance(v, str) and v == SKIP_HEADER for v in values):\n super().putheader(header, *values)\n elif to_str(header.lower()) not in SKIPPABLE_HEADERS:\n skippable_headers = \"', '\".join(\n [str.title(header) for header in sorted(SKIPPABLE_HEADERS)]\n )\n raise ValueError(\n f\"urllib3.util.SKIP_HEADER only supports '{skippable_headers}'\"\n )\n\n # `request` method's signature intentionally violates LSP.\n # urllib3's API is different from `http.client.HTTPConnection` and the subclassing is only incidental.\n def request( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n *,\n chunked: bool = False,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> None:\n # Update the inner socket's timeout value to send the request.\n # This only triggers if the connection is re-used.\n if self.sock is not None:\n self.sock.settimeout(self.timeout)\n\n # Store these values to be fed into the HTTPResponse\n # object later. TODO: Remove this in favor of a real\n # HTTP lifecycle mechanism.\n\n # We have to store these before we call .request()\n # because sometimes we can still salvage a response\n # off the wire even if we aren't able to completely\n # send the request body.\n self._response_options = _ResponseOptions(\n request_method=method,\n request_url=url,\n preload_content=preload_content,\n decode_content=decode_content,\n enforce_content_length=enforce_content_length,\n )\n\n if headers is None:\n headers = {}\n header_keys = frozenset(to_str(k.lower()) for k in headers)\n skip_accept_encoding = \"accept-encoding\" in header_keys\n skip_host = \"host\" in header_keys\n self.putrequest(\n method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host\n )\n\n # Transform the body into an iterable of sendall()-able chunks\n # and detect if an explicit Content-Length is doable.\n chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize)\n chunks = chunks_and_cl.chunks\n content_length = chunks_and_cl.content_length\n\n # When chunked is explicit set to 'True' we respect that.\n if chunked:\n if \"transfer-encoding\" not in header_keys:\n self.putheader(\"Transfer-Encoding\", \"chunked\")\n else:\n # Detect whether a framing mechanism is already in use. 
If so\n # we respect that value, otherwise we pick chunked vs content-length\n # depending on the type of 'body'.\n if \"content-length\" in header_keys:\n chunked = False\n elif \"transfer-encoding\" in header_keys:\n chunked = True\n\n # Otherwise we go off the recommendation of 'body_to_chunks()'.\n else:\n chunked = False\n if content_length is None:\n if chunks is not None:\n chunked = True\n self.putheader(\"Transfer-Encoding\", \"chunked\")\n else:\n self.putheader(\"Content-Length\", str(content_length))\n\n # Now that framing headers are out of the way we send all the other headers.\n if \"user-agent\" not in header_keys:\n self.putheader(\"User-Agent\", _get_default_user_agent())\n for header, value in headers.items():\n self.putheader(header, value)\n self.endheaders()\n\n # If we're given a body we start sending that in chunks.\n if chunks is not None:\n for chunk in chunks:\n # Sending empty chunks isn't allowed for TE: chunked\n # as it indicates the end of the body.\n if not chunk:\n continue\n if isinstance(chunk, str):\n chunk = chunk.encode(\"utf-8\")\n if chunked:\n self.send(b\"%x\\r\\n%b\\r\\n\" % (len(chunk), chunk))\n else:\n self.send(chunk)\n\n # Regardless of whether we have a body or not, if we're in\n # chunked mode we want to send an explicit empty chunk.\n if chunked:\n self.send(b\"0\\r\\n\\r\\n\")\n\n def request_chunked(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n ) -> None:\n \"\"\"\n Alternative to the common request method, which sends the\n body with chunked encoding and not as one block\n \"\"\"\n warnings.warn(\n \"HTTPConnection.request_chunked() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n self.request(method, url, body=body, headers=headers, chunked=True)\n\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTPResponse:\n \"\"\"\n Get the response from the server.\n\n If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable.\n\n If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. 
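The framing logic above falls back to `Transfer-Encoding: chunked` whenever no Content-Length can be computed, and honors an explicit `chunked=True`. A sketch of forcing chunked framing with a generator body, assuming the request routes through a pool as usual:

```python
import urllib3

def body():
    yield b"part-1"
    yield b"part-2"

http = urllib3.PoolManager()
resp = http.request("POST", "https://example.com/upload",
                    body=body(), chunked=True)
```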
When the connection is closed, the underlying socket is closed.\n \"\"\"\n # Raise the same error as http.client.HTTPConnection\n if self._response_options is None:\n raise ResponseNotReady()\n\n # Reset this attribute for being used again.\n resp_options = self._response_options\n self._response_options = None\n\n # Since the connection's timeout value may have been updated\n # we need to set the timeout on the socket.\n self.sock.settimeout(self.timeout)\n\n # This is needed here to avoid circular import errors\n from .response import HTTPResponse\n\n # Get the response from http.client.HTTPConnection\n httplib_response = super().getresponse()\n\n try:\n assert_header_parsing(httplib_response.msg)\n except (HeaderParsingError, TypeError) as hpe:\n log.warning(\n \"Failed to parse headers (url=%s): %s\",\n _url_from_connection(self, resp_options.request_url),\n hpe,\n exc_info=True,\n )\n\n headers = HTTPHeaderDict(httplib_response.msg.items())\n\n response = HTTPResponse(\n body=httplib_response,\n headers=headers,\n status=httplib_response.status,\n version=httplib_response.version,\n reason=httplib_response.reason,\n preload_content=resp_options.preload_content,\n decode_content=resp_options.decode_content,\n original_response=httplib_response,\n enforce_content_length=resp_options.enforce_content_length,\n request_method=resp_options.request_method,\n request_url=resp_options.request_url,\n )\n return response\n\n\nclass HTTPSConnection(HTTPConnection):\n \"\"\"\n Many of the parameters to this constructor are passed to the underlying SSL\n socket by means of :py:func:`urllib3.util.ssl_wrap_socket`.\n \"\"\"\n\n default_port = port_by_scheme[\"https\"] # type: ignore[misc]\n\n cert_reqs: int | str | None = None\n ca_certs: str | None = None\n ca_cert_dir: str | None = None\n ca_cert_data: None | str | bytes = None\n ssl_version: int | str | None = None\n ssl_minimum_version: int | None = None\n ssl_maximum_version: int | None = None\n assert_fingerprint: str | None = None\n\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n cert_reqs: int | str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n server_hostname: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_certs: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n ssl_version: int | str | None = None, # Deprecated\n cert_file: str | None = None,\n key_file: str | None = None,\n key_password: str | None = None,\n ) -> None:\n super().__init__(\n host,\n port=port,\n timeout=timeout,\n source_address=source_address,\n blocksize=blocksize,\n socket_options=socket_options,\n proxy=proxy,\n proxy_config=proxy_config,\n )\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.key_password = key_password\n self.ssl_context = ssl_context\n self.server_hostname = server_hostname\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n self.ssl_version = ssl_version\n self.ssl_minimum_version = ssl_minimum_version\n self.ssl_maximum_version = ssl_maximum_version\n self.ca_certs = 
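The request/response lifecycle implemented above, driven by hand: `request()` stashes `_response_options`, and `getresponse()` consumes them while wrapping the `http.client` response in urllib3's `HTTPResponse` (host is a placeholder):

```python
from urllib3.connection import HTTPConnection

conn = HTTPConnection("example.com", 80)
conn.request("GET", "/")
resp = conn.getresponse()
print(resp.status, resp.headers.get("Content-Type"))
conn.close()
```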
ca_certs and os.path.expanduser(ca_certs)\n self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)\n self.ca_cert_data = ca_cert_data\n\n # cert_reqs depends on ssl_context so calculate last.\n if cert_reqs is None:\n if self.ssl_context is not None:\n cert_reqs = self.ssl_context.verify_mode\n else:\n cert_reqs = resolve_cert_reqs(None)\n self.cert_reqs = cert_reqs\n\n def set_cert(\n self,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ) -> None:\n \"\"\"\n This method should only be called once, before the connection is used.\n \"\"\"\n warnings.warn(\n \"HTTPSConnection.set_cert() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead provide the parameters to the \"\n \"HTTPSConnection constructor.\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n\n # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also\n # have an SSLContext object in which case we'll use its verify_mode.\n if cert_reqs is None:\n if self.ssl_context is not None:\n cert_reqs = self.ssl_context.verify_mode\n else:\n cert_reqs = resolve_cert_reqs(None)\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.key_password = key_password\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n self.ca_certs = ca_certs and os.path.expanduser(ca_certs)\n self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)\n self.ca_cert_data = ca_cert_data\n\n def connect(self) -> None:\n sock: socket.socket | ssl.SSLSocket\n self.sock = sock = self._new_conn()\n server_hostname: str = self.host\n tls_in_tls = False\n\n # Do we need to establish a tunnel?\n if self._tunnel_host is not None:\n # We're tunneling to an HTTPS origin so need to do TLS-in-TLS.\n if self._tunnel_scheme == \"https\":\n self.sock = sock = self._connect_tls_proxy(self.host, sock)\n tls_in_tls = True\n\n # If we're tunneling it means we're connected to our proxy.\n self._has_connected_to_proxy = True\n\n self._tunnel() # type: ignore[attr-defined]\n # Override the host with the one we're requesting data from.\n server_hostname = self._tunnel_host\n\n if self.server_hostname is not None:\n server_hostname = self.server_hostname\n\n is_time_off = datetime.date.today() < RECENT_DATE\n if is_time_off:\n warnings.warn(\n (\n f\"System time is way off (before {RECENT_DATE}). 
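Since `set_cert()` above is deprecated, new code passes TLS settings straight to the constructor; the CA bundle path here is a placeholder:

```python
import ssl
from urllib3.connection import HTTPSConnection

conn = HTTPSConnection(
    "example.com",
    443,
    cert_reqs=ssl.CERT_REQUIRED,
    ca_certs="/etc/ssl/certs/ca-certificates.crt",
)
```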
This will probably \"\n \"lead to SSL verification errors\"\n ),\n SystemTimeWarning,\n )\n\n sock_and_verified = _ssl_wrap_socket_and_match_hostname(\n sock=sock,\n cert_reqs=self.cert_reqs,\n ssl_version=self.ssl_version,\n ssl_minimum_version=self.ssl_minimum_version,\n ssl_maximum_version=self.ssl_maximum_version,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n ca_cert_data=self.ca_cert_data,\n cert_file=self.cert_file,\n key_file=self.key_file,\n key_password=self.key_password,\n server_hostname=server_hostname,\n ssl_context=self.ssl_context,\n tls_in_tls=tls_in_tls,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint,\n )\n self.sock = sock_and_verified.socket\n self.is_verified = sock_and_verified.is_verified\n\n # If there's a proxy to be connected to we are fully connected.\n # This is set twice (once above and here) due to forwarding proxies\n # not using tunnelling.\n self._has_connected_to_proxy = bool(self.proxy)\n\n def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket:\n \"\"\"\n Establish a TLS connection to the proxy using the provided SSL context.\n \"\"\"\n # `_connect_tls_proxy` is called when self._tunnel_host is truthy.\n proxy_config = typing.cast(ProxyConfig, self.proxy_config)\n ssl_context = proxy_config.ssl_context\n sock_and_verified = _ssl_wrap_socket_and_match_hostname(\n sock,\n cert_reqs=self.cert_reqs,\n ssl_version=self.ssl_version,\n ssl_minimum_version=self.ssl_minimum_version,\n ssl_maximum_version=self.ssl_maximum_version,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n ca_cert_data=self.ca_cert_data,\n server_hostname=hostname,\n ssl_context=ssl_context,\n assert_hostname=proxy_config.assert_hostname,\n assert_fingerprint=proxy_config.assert_fingerprint,\n # Features that aren't implemented for proxies yet:\n cert_file=None,\n key_file=None,\n key_password=None,\n tls_in_tls=False,\n )\n self.proxy_is_verified = sock_and_verified.is_verified\n return sock_and_verified.socket # type: ignore[return-value]\n\n\nclass _WrappedAndVerifiedSocket(typing.NamedTuple):\n \"\"\"\n Wrapped socket and whether the connection is\n verified after the TLS handshake\n \"\"\"\n\n socket: ssl.SSLSocket | SSLTransport\n is_verified: bool\n\n\ndef _ssl_wrap_socket_and_match_hostname(\n sock: socket.socket,\n *,\n cert_reqs: None | str | int,\n ssl_version: None | str | int,\n ssl_minimum_version: int | None,\n ssl_maximum_version: int | None,\n cert_file: str | None,\n key_file: str | None,\n key_password: str | None,\n ca_certs: str | None,\n ca_cert_dir: str | None,\n ca_cert_data: None | str | bytes,\n assert_hostname: None | str | Literal[False],\n assert_fingerprint: str | None,\n server_hostname: str | None,\n ssl_context: ssl.SSLContext | None,\n tls_in_tls: bool = False,\n) -> _WrappedAndVerifiedSocket:\n \"\"\"Logic for constructing an SSLContext from all TLS parameters, passing\n that down into ssl_wrap_socket, and then doing certificate verification\n either via hostname or fingerprint. 
This function exists to guarantee\n that both proxies and targets have the same behavior when connecting via TLS.\n \"\"\"\n default_ssl_context = False\n if ssl_context is None:\n default_ssl_context = True\n context = create_urllib3_context(\n ssl_version=resolve_ssl_version(ssl_version),\n ssl_minimum_version=ssl_minimum_version,\n ssl_maximum_version=ssl_maximum_version,\n cert_reqs=resolve_cert_reqs(cert_reqs),\n )\n else:\n context = ssl_context\n\n context.verify_mode = resolve_cert_reqs(cert_reqs)\n\n # In some cases, we want to verify hostnames ourselves\n if (\n # `ssl` can't verify fingerprints or alternate hostnames\n assert_fingerprint\n or assert_hostname\n # assert_hostname can be set to False to disable hostname checking\n or assert_hostname is False\n # We still support OpenSSL 1.0.2, which prevents us from verifying\n # hostnames easily: https://github.com/pyca/pyopenssl/pull/933\n or ssl_.IS_PYOPENSSL\n or not ssl_.HAS_NEVER_CHECK_COMMON_NAME\n ):\n context.check_hostname = False\n\n # Try to load OS default certs if none are given. We need to do the hasattr() check\n # for custom pyOpenSSL SSLContext objects because they don't support\n # load_default_certs().\n if (\n not ca_certs\n and not ca_cert_dir\n and not ca_cert_data\n and default_ssl_context\n and hasattr(context, \"load_default_certs\")\n ):\n context.load_default_certs()\n\n # Ensure that IPv6 addresses are in the proper format and don't have a\n # scope ID. Python's SSL module fails to recognize scoped IPv6 addresses\n # and interprets them as DNS hostnames.\n if server_hostname is not None:\n normalized = server_hostname.strip(\"[]\")\n if \"%\" in normalized:\n normalized = normalized[: normalized.rfind(\"%\")]\n if is_ipaddress(normalized):\n server_hostname = normalized\n\n ssl_sock = ssl_wrap_socket(\n sock=sock,\n keyfile=key_file,\n certfile=cert_file,\n key_password=key_password,\n ca_certs=ca_certs,\n ca_cert_dir=ca_cert_dir,\n ca_cert_data=ca_cert_data,\n server_hostname=server_hostname,\n ssl_context=context,\n tls_in_tls=tls_in_tls,\n )\n\n try:\n if assert_fingerprint:\n _assert_fingerprint(\n ssl_sock.getpeercert(binary_form=True), assert_fingerprint\n )\n elif (\n context.verify_mode != ssl.CERT_NONE\n and not context.check_hostname\n and assert_hostname is not False\n ):\n cert: _TYPE_PEER_CERT_RET_DICT = ssl_sock.getpeercert() # type: ignore[assignment]\n\n # Need to signal to our match_hostname whether to use 'commonName' or not.\n # If we're using our own constructed SSLContext we explicitly set 'False'\n # because PyPy hard-codes 'True' from SSLContext.hostname_checks_common_name.\n if default_ssl_context:\n hostname_checks_common_name = False\n else:\n hostname_checks_common_name = (\n getattr(context, \"hostname_checks_common_name\", False) or False\n )\n\n _match_hostname(\n cert,\n assert_hostname or server_hostname, # type: ignore[arg-type]\n hostname_checks_common_name,\n )\n\n return _WrappedAndVerifiedSocket(\n socket=ssl_sock,\n is_verified=context.verify_mode == ssl.CERT_REQUIRED\n or bool(assert_fingerprint),\n )\n except BaseException:\n ssl_sock.close()\n raise\n\n\ndef _match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n asserted_hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\n # Our upstream implementation of ssl.match_hostname()\n # only applies this normalization to IP addresses so it doesn't\n # match DNS SANs so we do the same thing!\n stripped_hostname = asserted_hostname.strip(\"[]\")\n if is_ipaddress(stripped_hostname):\n 
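The normalization above strips brackets and any `%scope` suffix before an IPv6 literal reaches the SSL layer, which would otherwise treat the scoped address as a DNS name. The same steps in isolation:

```python
server_hostname = "[fe80::1%eth0]"
normalized = server_hostname.strip("[]")
if "%" in normalized:
    normalized = normalized[: normalized.rfind("%")]
assert normalized == "fe80::1"
```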
asserted_hostname = stripped_hostname\n\n try:\n match_hostname(cert, asserted_hostname, hostname_checks_common_name)\n except CertificateError as e:\n log.warning(\n \"Certificate did not match expected hostname: %s. Certificate: %s\",\n asserted_hostname,\n cert,\n )\n # Add cert to exception and reraise so client code can inspect\n # the cert when catching the exception, if they want to\n e._peer_cert = cert # type: ignore[attr-defined]\n raise\n\n\ndef _wrap_proxy_error(err: Exception, proxy_scheme: str | None) -> ProxyError:\n # Look for the phrase 'wrong version number', if found\n # then we should warn the user that we're very sure that\n # this proxy is HTTP-only and they have a configuration issue.\n error_normalized = \" \".join(re.split(\"[^a-z]\", str(err).lower()))\n is_likely_http_proxy = (\n \"wrong version number\" in error_normalized\n or \"unknown protocol\" in error_normalized\n )\n http_proxy_warning = (\n \". Your proxy appears to only use HTTP and not HTTPS, \"\n \"try changing your proxy URL to be HTTP. See: \"\n \"https://urllib3.readthedocs.io/en/latest/advanced-usage.html\"\n \"#https-proxy-error-http-proxy\"\n )\n new_err = ProxyError(\n f\"Unable to connect to proxy\"\n f\"{http_proxy_warning if is_likely_http_proxy and proxy_scheme == 'https' else ''}\",\n err,\n )\n new_err.__cause__ = err\n return new_err\n\n\ndef _get_default_user_agent() -> str:\n return f\"python-urllib3/{__version__}\"\n\n\nclass DummyConnection:\n \"\"\"Used to detect a failed ConnectionCls import.\"\"\"\n\n\nif not ssl:\n HTTPSConnection = DummyConnection # type: ignore[misc, assignment] # noqa: F811\n\n\nVerifiedHTTPSConnection = HTTPSConnection\n\n\ndef _url_from_connection(\n conn: HTTPConnection | HTTPSConnection, path: str | None = None\n) -> str:\n \"\"\"Returns the URL from a given connection. 
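`_wrap_proxy_error()` above lower-cases the OpenSSL message and splits on non-letters so that phrases like "wrong version number" survive underscores and punctuation. The detection step on its own:

```python
import re

err = "[SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:1007)"
normalized = " ".join(re.split("[^a-z]", err.lower()))
assert "wrong version number" in normalized  # flags an HTTP-only proxy
```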
This is mainly used for testing and logging.\"\"\"\n\n scheme = \"https\" if isinstance(conn, HTTPSConnection) else \"http\"\n\n return Url(scheme=scheme, host=conn.host, port=conn.port, path=path).url\n","repo_name":"urllib3/urllib3","sub_path":"src/urllib3/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":33794,"program_lang":"python","lang":"en","doc_type":"code","stars":3526,"dataset":"github-code","pt":"54"} +{"seq_id":"39447944866","text":"import sys\n\nlocation = \"C:\\Windows\\System32\\drivers\\etc\\hosts\"\n#location = \"demo_host.txt\"\n\n\ndef block():\n\n\twith open(location,\"a\") as host:\n\t\thost.write(\"127.0.0.1\twww.youtube.com\\n\")\n\t\thost.write(\"127.0.0.1\twww.facebook.com\\n\")\n\t\thost.write(\"127.0.0.1\twww.primevideo.com\\n\")\n\ndef unblock():\n\n\twith open(location,'r') as host:\n\t\tlines = host.readlines()\n\n\twith open(location,'w') as host:\n\t\tfor line in lines:\n\t\t\tif line[:1] == '#':\n\t\t\t\thost.write(line)\n\t\t\telse:\n\t\t\t\tcontinue\n\ndef main():\n\tif len(sys.argv) == 2:\n\t\t#then do some thing\n\t\tif sys.argv[1] == \"-b\":\n\t\t\tblock()\n\t\telif sys.argv[1] == \"-ub\":\n\t\t\tunblock()\n\telse:\n\t\tprint(\"1 argument expected \\n\")\n\t\tprint(\"-b : block\\n\")\n\t\tprint(\"-ub : unblock\\n\")\n\t\texit(1)\n\n\nif __name__ == '__main__':\n main()","repo_name":"rial99/website-blocker","sub_path":"host_disable.py","file_name":"host_disable.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18497660030","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Python implementation of the spline routines in Elmer's GeneralUtils.F90.\n\nThis module provides compatibility with the finite element software Elmer.\nYou can provide your own (x,y) data points, and get cubic spline coefficients\nin the format Elmer uses.\n\nMapping of routines:\n\n Elmer name Python name\n\n CubicSpline solve_coeffs\n CubicSplineVal evaluate_cubic_spline\n CubicSplinedVal evaluate_cubic_spline_derivative\n\nThis is especially useful for e.g. 
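A small hardening note on host_disable.py above: its Windows path is written with bare backslashes, so sequences like `\W` and `\d` only work because they happen not to be recognized escapes (and they trigger a `DeprecationWarning`/`SyntaxWarning` on recent Pythons). A raw string is the robust spelling:

```python
# Raw string: backslashes are literal, no escape-sequence pitfalls.
location = r"C:\Windows\System32\drivers\etc\hosts"
```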
custom material models, which can then\nuse Elmer's spline routines to evaluate function values in the solver code.\n\nIn this use case, solve_coeffs() is the one you mainly need; the other two\nroutines can be used for testing on the Python side to see how the spline\nfit looks.\n\nSee main() for a usage example.\n\nCreated on Sun Oct 15 00:00:00 2017\n\n@author: Juha Jeronen \n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Internal function specific to Elmer's spline implementation.\n# AFAIK, this is NOT a generic tridiagonal solver.\n#\ndef _solve_tridiag(y, h):\n y = np.atleast_1d(y)\n h = np.atleast_1d(h)\n if y.ndim > 1:\n raise ValueError(\"y must be rank-1 array, got rank-%d\" % y.ndim)\n if h.ndim > 1:\n raise ValueError(\"h must be rank-1 array, got rank-%d\" % h.ndim)\n if y.shape != h.shape:\n raise ValueError(\"y and h must have same shape; got y = %s, h = %s\" % (y.shape, h.shape))\n\n b = np.empty_like(y)\n r = np.empty_like(y)\n\n # Unlike Elmer's implementation, we compute r[0] and r[-1] here, not in the caller.\n #\n # This is so that no extra arguments need to be passed in, since we *return* r\n # instead of writing into a caller-given array.\n #\n r[0] = ( y[1] - y[0] ) / h[0]\n r[-1] = ( y[-1] - y[-2] ) / h[-1]\n\n# DO i=2,n-1\n# b(i) = 2 * ( h(i-1) + h(i) )\n# r(i) = 3 * ( h(i) * ( y(i)-y(i-1) ) / h(i-1) + &\n# h(i-1) * ( y(i+1)-y(i) ) / h(i) )\n# END DO\n b[1:-1] = 2 * ( h[:-2] + h[1:-1] )\n r[1:-1] = 3 * ( h[1:-1] * ( y[1:-1] - y[:-2] ) / h[:-2] +\n h[:-2] * ( y[2:] - y[1:-1] ) / h[1:-1] )\n\n r[1] = r[1] - h[1]*r[0]\n n = y.shape[0]\n for i in range(1, n-1):\n s = -h[i+1] / b[i]\n r[i+1] += s*r[i]\n b[i+1] += s*h[i-1]\n\n for i in range(n-2, 1, -1):\n r[i] = (r[i] - h[i-1]*r[i+1]) / b[i]\n\n return r\n\ndef solve_coeffs(x, y, aeps=1e-8):\n \"\"\"Create a cubic spline that passes through given points in the xy plane.\n\n Parameters:\n x: rank-1 array of np.float64\n data x coordinates. Recommended to be monotone i.e. 
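The slice assignments in `_solve_tridiag()` above replace the Fortran `DO i=2,n-1` loop one-to-one; a quick check that the vectorized form and the explicit loop agree on a small system:

```python
import numpy as np

y = np.array([0.0, 1.0, 0.5, 2.0])
h = np.array([1.0, 2.0, 1.0, 1.0])

b = np.empty_like(y)
b[1:-1] = 2 * (h[:-2] + h[1:-1])        # vectorized form from the module
for i in range(1, len(y) - 1):          # Fortran-style reference loop
    assert b[i] == 2 * (h[i - 1] + h[i])
```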
(x[1:] - x[:-1] > 0).all().\n\n Strictly, x can be non-monotone, but that will cause problems in\n evaluate_cubic_spline(), as we use a simplistic x-coordinate based\n approach for the detection of the correct piecewise segment.\n y: rank-1 array of np.float64\n data y coordinates (function values) so that y[k] = f(x[k]) for your function f.\n aeps: float\n Minimum allowed length of an x-interval (see the implementation).\n\n Differs from Elmer's in that Elmer has a global AEPS, and there\n CubicSpline uses 10*AEPS; whereas we use the given aeps as-is.\n\n Returns:\n r: rank-1 array of np.float64\n Cubic spline coefficients in Elmer format.\n\"\"\"\n x = np.atleast_1d(x)\n y = np.atleast_1d(y)\n if x.ndim > 1:\n raise ValueError(\"x must be rank-1 array, got rank-%d\" % x.ndim)\n if y.ndim > 1:\n raise ValueError(\"y must be rank-1 array, got rank-%d\" % y.ndim)\n if x.shape != y.shape:\n raise ValueError(\"x and y must have same shape; got x = %s, y = %s\" % (x.shape, y.shape))\n\n x_is_monotone = (x[1:] - x[:-1] > 0).all()\n h = x[1:] - x[:-1]\n\n if x_is_monotone:\n r = np.empty_like(y)\n r[0] = ( y[1] - y[0] ) / h[0]\n r[-1] = ( y[-1] - y[-2] ) / h[-1]\n\n h = ( y[1:] - y[:-1] ) / h\n r[1:-1] = ( h[:-1] + h[1:] ) / 2\n\n n = y.shape[0]\n for i in range(n-1):\n if abs(h[i]) < aeps: # Elmer uses 10*aeps\n r[i] = 0\n r[i+1] = 0\n continue\n\n alpha = r[i] / h[i]\n beta = r[i+1] / h[i]\n if alpha < 0 or beta < 0:\n r[i] = 0\n continue\n\n tau = np.sqrt(alpha**2 + beta**2)\n if tau > 3:\n tau = 3 / tau\n r[i] = alpha * tau * h[i]\n r[i+1] = beta * tau * h[i]\n else:\n r = _solve_tridiag(y,h)\n\n return r\n\ndef _evaluate_cubic_spline_one(x, y, r, t):\n \"\"\"Evaluate one point on the cubic spline.\n\n Parameters:\n x : rank-1 np.array of np.float64, length 2\n data x coordinates\n y : rank-1 np.array of np.float64, length 2\n data y coordinates\n r : rank-1 np.array of np.float64, length 2\n corresponding elements of output of solve_coeffs() for your data\n t : float\n point where to evaluate. Must be between the given x values.\n\n Returns:\n s : float\n Value of the spline at the point t.\n\"\"\"\n h = x[1] - x[0]\n a = -2 * ( y[1] - y[0] ) + ( r[0] + r[1] ) * h\n b = 3 * ( y[1] - y[0] ) - ( 2*r[0] + r[1] ) * h\n c = r[0] * h\n d = y[0]\n\n lt = (t - x[0]) / h # 0..1\n return ((a*lt + b) * lt + c) * lt + d\n\ndef evaluate_cubic_spline(x, y, r, t):\n \"\"\"Evaluate cubic spline at points.\n\n Parameters:\n x : rank-1 np.array of np.float64\n data x coordinates\n y : rank-1 np.array of np.float64\n data y coordinates\n r : rank-1 np.array of np.float64\n output of solve_coeffs() for your data\n t : rank-1 np.array of np.float64\n points where to evaluate. Must satisfy (x[0] <= t <= x[-1]).all().\n\n Returns:\n s : rank-1 np.array of np.float64\n Value of the spline at the points t.\n\"\"\"\n return _evaluate_generic(x,y,r,t, _evaluate_cubic_spline_one)\n\ndef _evaluate_cubic_spline_derivative_one(x, y, r, t):\n \"\"\"Evaluate one point on the first derivative of the cubic spline.\n\n Parameters:\n x : rank-1 np.array of np.float64, length 2\n data x coordinates\n y : rank-1 np.array of np.float64, length 2\n data y coordinates\n r : rank-1 np.array of np.float64, length 2\n corresponding elements of output of solve_coeffs() for your data\n t : float\n point where to evaluate. 
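The monotone branch of `solve_coeffs()` above zeroes tangents at sign changes and rescales them when `sqrt(alpha**2 + beta**2) > 3` — the Fritsch–Carlson construction, which keeps the spline monotone wherever the data are. A property-check sketch, assuming the module's functions are importable:

```python
import numpy as np

x = np.linspace(0.0, 1.0, 6)
y = x**3                                  # strictly increasing data
r = solve_coeffs(x, y)
s = evaluate_cubic_spline(x, y, r, np.linspace(0.0, 1.0, 50))
assert (np.diff(s) >= -1e-12).all()       # the fit is monotone too
```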
Must be between the given x values.\n\n Returns:\n s : float\n Value of the derivative at the point t.\n\"\"\"\n h = x[1] - x[0]\n a = -2 * ( y[1] - y[0] ) + ( r[0] + r[1] ) * h\n b = 3 * ( y[1] - y[0] ) - ( 2*r[0] + r[1] ) * h\n c = r[0] * h\n\n lt = (t - x[0]) / h # 0..1\n return ((3*a*lt + 2*b) * lt + c)/h\n\ndef evaluate_cubic_spline_derivative(x, y, r, t):\n \"\"\"Evaluate first derivative of cubic spline at points.\n\n Parameters:\n x : rank-1 np.array of np.float64\n data x coordinates\n y : rank-1 np.array of np.float64\n data y coordinates\n r : rank-1 np.array of np.float64\n output of solve_coeffs() for your data\n t : rank-1 np.array of np.float64\n points where to evaluate. Must satisfy (x[0] <= t <= x[-1]).all().\n\n Returns:\n s : rank-1 np.array of np.float64\n Value of the derivative at the points t.\n\"\"\"\n return _evaluate_generic(x,y,r,t, _evaluate_cubic_spline_derivative_one)\n\n# For each t, find the corresponding piecewise interval, and call func.\n#\ndef _evaluate_generic(x, y, r, t, func):\n if (t < x[0]).any() or (t > x[-1]).any():\n raise ValueError(\"at least one t is out of the allowed range (%g, %g)\" % (x[0], x[-1]))\n\n # FIXME: extremely inefficient, a loop with searches\n s = np.empty_like(t)\n for k,tau in enumerate(t):\n # find which interval this tau belongs to\n #\n # TODO: add support for non-monotone x:\n # - generate all possible solutions\n # - pick the one where the point on the spline curve is the closest to the previous point\n #\n i1 = np.where(x <= tau)[0][-1] # this is always non-empty, so we may [-1] right here\n i2 = np.where(x > tau)[0]\n\n # If tau == x[-1], then i2 will be empty, so we special-case that.\n #\n # Safer to switch on whether i2 is empty or not than use floating-point equality.\n #\n if len(i2): # general case\n i2 = i2[0]\n I = np.r_[i1:i2+1]\n else: # tau == x[-1]\n i1 -= 1 # i1 points to the last element of x, but we want the one before that\n # to get the last *interval*.\n I = np.r_[i1:i1+2] # i1+2 is now one-past-end, so x[I] refers to x[-2:].\n s[k] = func(x[I], y[I], r[I], tau)\n\n return s\n\ndef main():\n \"\"\"Usage example.\"\"\"\n f = lambda x: np.sin(np.pi*x) # function which will be approximated by a spline\n x1 = 0\n x2 = 1\n ndata = 11 # how many data points in [x1,x2] to use to create the spline fit\n nvis = 101 # how many points used for plotting results\n\n # create data\n #\n xx = np.linspace(x1,x2, ndata)\n yy = f(xx)\n\n # create spline fit\n #\n rr = solve_coeffs(xx,yy)\n tt = np.linspace(x1,x2, nvis)\n ss = evaluate_cubic_spline(xx, yy, rr, tt)\n\n # plot\n #\n yr = f(tt) # reference\n plt.figure(1)\n plt.clf()\n plt.plot(tt, yr, 'k--') # reference\n plt.plot(tt, ss, 'r-') # spline fit\n plt.plot(xx, yy, 'rx') # data points used to create the fit\n plt.grid(b=True, which='both')\n\nif __name__ == '__main__':\n main()\n plt.show()\n\n","repo_name":"TUTElectromechanics/mm-codegen","sub_path":"extras/elmerspline.py","file_name":"elmerspline.py","file_ext":"py","file_size_in_byte":9806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"42654731460","text":"import argparse\nfrom intelhex import IntelHex\nimport multiprocessing\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport os\nimport sys\n\nfrom pynrfjprog import MultiAPI as API\n\n\n# Module multiprocessing is organized differently in Python 3.4+\ntry:\n # Python 3.4+\n if sys.platform.startswith('win'):\n import multiprocessing.popen_spawn_win32 as forking\n else:\n import 
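A finite-difference check that the derivative in `_evaluate_cubic_spline_derivative_one()` above really is the analytic d/dt of the cubic in `_evaluate_cubic_spline_one()` — a sketch that assumes both module-level functions are in scope:

```python
import numpy as np

x = np.array([0.0, 1.0])
y = np.array([0.0, 1.0])
r = np.array([1.0, 1.0])          # unit slopes: the spline is the identity
t, eps = 0.4, 1e-6

num = (_evaluate_cubic_spline_one(x, y, r, t + eps)
       - _evaluate_cubic_spline_one(x, y, r, t - eps)) / (2 * eps)
ana = _evaluate_cubic_spline_derivative_one(x, y, r, t)
assert abs(num - ana) < 1e-5
```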
multiprocessing.popen_fork as forking\nexcept ImportError:\n import multiprocessing.forking as forking\n\nif sys.platform.startswith('win'):\n # First define a modified version of Popen.\n class _Popen(forking.Popen):\n def __init__(self, *args, **kw):\n if hasattr(sys, 'frozen'):\n # We have to set original _MEIPASS2 value from sys._MEIPASS\n # to get --onefile mode working.\n os.putenv('_MEIPASS2', sys._MEIPASS)\n try:\n super(_Popen, self).__init__(*args, **kw)\n finally:\n if hasattr(sys, 'frozen'):\n # On some platforms (e.g. AIX) 'os.unsetenv()' is not\n # available. In those cases we cannot delete the variable\n # but only set it to the empty string. The bootloader\n # can handle this case.\n if hasattr(os, 'unsetenv'):\n os.unsetenv('_MEIPASS2')\n else:\n os.putenv('_MEIPASS2', '')\n\n # Second override 'Popen' class with our modified version.\n forking.Popen = _Popen\n\n\nclass CLI(object):\n def __init__(self):\n self.parser = argparse.ArgumentParser(description='Program multiple nRF5 devices concurrently with this nrfjprog inspired python module/exe', epilog='https://github.com/NordicPlayground/nRF5-multi-prog')\n self.subparsers = self.parser.add_subparsers(dest='command')\n self.args = None\n\n self._add_recover_command()\n self._add_program_command()\n\n def run(self):\n return self.parser.parse_args()\n\n # Top level commands.\n\n def _add_recover_command(self):\n erase_parser = self.subparsers.add_parser('recover', help='Erase all user FLASH including UICR and disables any enabled readback protection/locking.')\n\n self._add_family_argument(erase_parser)\n self._add_snrs_argument(erase_parser)\n\n def _add_program_command(self):\n program_parser = self.subparsers.add_parser('program', help='Programs the device.')\n\n self._add_erase_before_flash_group(program_parser)\n self._add_family_argument(program_parser)\n self._add_file_argument(program_parser)\n self._add_reset_group(program_parser)\n self._add_snrs_argument(program_parser)\n self._add_verify_argument(program_parser)\n\n # Mutually exclusive groups of arguments.\n\n def _add_erase_before_flash_group(self, parser):\n erase_before_flash_group = parser.add_mutually_exclusive_group()\n self._add_eraseall_argument(erase_before_flash_group)\n self._add_sectors_erase_argument(erase_before_flash_group)\n self._add_sectorsuicr_erase_argument(erase_before_flash_group)\n\n def _add_reset_group(self, parser): # TODO: add other reset options.\n reset_group = parser.add_mutually_exclusive_group()\n self._add_sysreset_argument(reset_group)\n\n # Arguments.\n\n def _add_eraseall_argument(self, parser):\n parser.add_argument('-e', '--eraseall', action='store_true', help='Erase all user FLASH including UICR.')\n\n def _add_family_argument(self, parser):\n parser.add_argument('--family', type=str, help='The family of the target device. 
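An illustrative invocation of the CLI defined above — program a hex file to two boards, erasing all user flash first and verifying the write; the serial numbers and file name are made up:

```python
# python nrf5_multi_prog.py program --family NRF52 -f app.hex -e -v \
#     -s 682000001 682000002
```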
Defaults to NRF51.', required=False, choices=['NRF51', 'NRF52'])\n\n    def _add_file_argument(self, parser):\n        parser.add_argument('-f', '--file', help='The hex file to be programmed to all devices.', required=True)\n\n    def _add_sectors_erase_argument(self, parser):\n        parser.add_argument('-se', '--sectorserase', action='store_true', help='Erase all sectors that FILE contains data in before programming.')\n\n    def _add_sectorsuicr_erase_argument(self, parser):\n        parser.add_argument('-u', '--sectorsanduicrerase', action='store_true', help='Erase all sectors that FILE contains data in and the UICR (unconditionally) before programming.')\n\n    def _add_snrs_argument(self, parser):\n        parser.add_argument('-s', '--snrs', type=int, nargs='+', help='Selects the debuggers with the given serial numbers among all those connected to the PC for the operation. Defaults to all connected snrs being selected.')\n\n    def _add_sysreset_argument(self, parser):\n        parser.add_argument('-r', '--systemreset', action='store_true', help='Executes a system reset.')\n\n    def _add_verify_argument(self, parser):\n        parser.add_argument('-v', '--verify', action='store_true', help='Read back memory and verify that it matches FILE.')\n\n\nclass nRF5MultiFlash(object):\n    def __init__(self, args):\n        self.nRF5_instances = {}\n        self.args = args\n\n        self.family = args.family\n        self.snrs = args.snrs\n\n        if not self.args.family:\n            self.family = 'NRF51'\n\n        if not self.args.snrs:\n            with API.MultiAPI('NRF51') as nrf:\n                self.snrs = nrf.enum_emu_snr()\n\n        if self.family == 'NRF51':\n            self.PAGE_SIZE = 0x400\n        else:\n            self.PAGE_SIZE = 0x1000\n\n        if args.command == 'program':\n            self.hex_file = IntelHex(args.file)\n\n    def _byte_lists_equal(self, data, read_data):\n        for i in range(len(data)):\n            if data[i] != read_data[i]:\n                return False\n        return True\n\n    def _connect_to_device(self, device):\n        self.nRF5_instances[device] = API.MultiAPI(self.family)\n        self.nRF5_instances[device].open()\n        self.nRF5_instances[device].connect_to_emu_with_snr(device)\n\n    def _recover_device(self, device):\n        self.nRF5_instances[device].recover()\n\n    def _program_device(self, device):\n        if self.args.eraseall:\n            self.nRF5_instances[device].erase_all()\n        if self.args.sectorsanduicrerase:\n            self.nRF5_instances[device].erase_uicr()\n\n        for segment in self.hex_file.segments():\n            start_addr, end_addr = segment\n            size = end_addr - start_addr\n\n            if self.args.sectorserase or self.args.sectorsanduicrerase:\n                start_page = int(start_addr / self.PAGE_SIZE)\n                end_page = int(end_addr / self.PAGE_SIZE)\n                for page in range(start_page, end_page + 1):\n                    self.nRF5_instances[device].erase_page(page * self.PAGE_SIZE)\n\n            data = self.hex_file.tobinarray(start=start_addr, size=(size)) # TODO: this can be optimized.\n            self.nRF5_instances[device].write(start_addr, data.tolist(), True)\n\n            if self.args.verify:\n                read_data = self.nRF5_instances[device].read(start_addr, len(data))\n                assert (self._byte_lists_equal(data, read_data)), 'Verify failed. 
Data readback from memory does not match data written.'\n\n if self.args.systemreset:\n self.nRF5_instances[device].sys_reset()\n self.nRF5_instances[device].go()\n\n\n def _cleanup(self, device):\n self.nRF5_instances[device].disconnect_from_emu()\n self.nRF5_instances[device].close()\n\n # Public methods.\n\n def perform_command(self, device):\n self._connect_to_device(device)\n\n if self.args.command == 'recover':\n self._recover_device(device)\n elif self.args.command == 'program':\n self._program_device(device)\n\n self._cleanup(device)\n\n\ndef main():\n cli = CLI()\n args = cli.run()\n\n nRF = nRF5MultiFlash(args)\n\n pool = ThreadPool(len(nRF.snrs))\n pool.map(nRF.perform_command, nRF.snrs)\n\nif __name__ == '__main__':\n\n if sys.platform.lower().startswith('win'):\n os.environ['PATH'] = 'C:\\\\Program Files (x86)\\\\Nordic Semiconductor\\\\nrf5x\\\\bin\\\\' + ';' + os.environ['PATH']\n elif sys.platform.lower().startswith('linux'):\n pass\n elif sys.platform.startswith('dar'):\n os.environ['PATH'] = '/usr/local/Caskroom/nrf5x-command-line-tools/8.3.0/nrfjprog/' + ';' + os.environ['PATH']\n\n multiprocessing.freeze_support()\n main()\n","repo_name":"NordicPlayground/nRF5-multi-prog","sub_path":"nrf5_multi_prog/nrf5_multi_prog.py","file_name":"nrf5_multi_prog.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"24777930402","text":"#!/data/data/com.termux/files/usr/bin/env python3\nimport subprocess\n\nprint (\" ____ _ __ __ _ \")\nprint (\"/ ___|| |__ __ _ __________ _ \\ \\ / /__ _ __| | _____ \")\nprint (\"\\___ \\| '_ \\ / _` |_ /_ / _` |____\\ \\ /\\ / / _ \\| '__| |/ / __|\")\nprint (\" ___) | | | | (_| |/ / / / (_| |_____\\ V V / (_) | | | <\\__ \\\\\")\nprint (\"|____/|_| |_|\\__,_/___/___\\__,_| \\_/\\_/ \\___/|_| |_|\\_\\___/\")\nprint (\"\\nSlasher\\n\")\n\ntry:\n\tdef copy2clip(txt):\n\t cmd='echo \\\"'+ str(txt) +'\\\"| termux-clipboard-set ; termux-vibrate'\n\t return subprocess.check_call(cmd, shell=True)\n\twhile 1:\n\t msg = input(\"Enter Msg ;\")\n\t my_list = msg.split()\n\t string = '/'\n\t my_new_list = [string + x for x in my_list]\n\t string = ' '.join(my_new_list)\n\t copy2clip(string)\n\t print (\"Text Copied ^\" + \"\\n\")\nexcept KeyboardInterrupt:\n exit()\n","repo_name":"shazza-works/rainbow-fun","sub_path":"color/slash.py","file_name":"slash.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"16327080137","text":"import os\nimport imageio\n\ndef clear_graph_folder():\n folder = './graphs'\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n os.remove(file_path)\n\ndef create_gifs():\n # Combine all images in graphs folder into a gif\n images = []\n for filename in os.listdir('./graphs'):\n file_path = os.path.join('./graphs', filename)\n images.append(imageio.imread(file_path))\n imageio.mimsave('./animation.gif', images, duration=0.5)\n","repo_name":"ttnhathuy313/gradient-descent-viz","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"16444549683","text":"import dns.dnssec\nimport dns.resolver\nimport dns.name\nimport time\n\nfrom ipatests.test_integration.base import IntegrationTest\nfrom ipatests.test_integration import tasks\nfrom ipaplatform.paths import 
paths\n\ntest_zone = \"dnssec.test.\"\ntest_zone_repl = \"dnssec-replica.test.\"\nroot_zone = \".\"\nexample_test_zone = \"example.test.\"\n\n\ndef resolve_with_dnssec(nameserver, query, log, rtype=\"SOA\"):\n res = dns.resolver.Resolver()\n res.nameservers = [nameserver]\n res.lifetime = 10 # wait max 10 seconds for reply\n # enable Authenticated Data + Checking Disabled flags\n res.set_flags(dns.flags.AD | dns.flags.CD)\n\n # enable EDNS v0 + enable DNSSEC-Ok flag\n res.use_edns(0, dns.flags.DO, 0)\n\n ans = res.query(query, rtype)\n return ans\n\n\ndef is_record_signed(nameserver, query, log, rtype=\"SOA\"):\n try:\n ans = resolve_with_dnssec(nameserver, query, log, rtype=rtype)\n ans.response.find_rrset(ans.response.answer, dns.name.from_text(query),\n dns.rdataclass.IN, dns.rdatatype.RRSIG,\n dns.rdatatype.from_text(rtype))\n except KeyError:\n return False\n except dns.exception.DNSException:\n return False\n return True\n\n\ndef wait_until_record_is_signed(nameserver, record, log, rtype=\"SOA\",\n timeout=100):\n \"\"\"\n Returns True if record is signed, or False on timeout\n :param nameserver: nameserver to query\n :param record: query\n :param log: logger\n :param rtype: record type\n :param timeout:\n :return: True if records is signed, False if timeout\n \"\"\"\n log.info(\"Waiting for signed %s record of %s from server %s (timeout %s \"\n \"sec)\", rtype, record, nameserver, timeout)\n wait_until = time.time() + timeout\n while time.time() < wait_until:\n if is_record_signed(nameserver, record, log, rtype=rtype):\n return True\n time.sleep(1)\n return False\n\n\nclass TestInstallDNSSECLast(IntegrationTest):\n \"\"\"Simple DNSSEC test\n\n Install a server and a replica with DNS, then reinstall server\n as DNSSEC master\n \"\"\"\n num_replicas = 1\n topology = 'star'\n\n @classmethod\n def install(cls, mh):\n tasks.install_master(cls.master, setup_dns=True)\n tasks.install_replica(cls.master, cls.replicas[0], setup_dns=True)\n\n def test_install_dnssec_master(self):\n \"\"\"Both master and replica have DNS installed\"\"\"\n args = [\n \"ipa-dns-install\",\n \"--dnssec-master\",\n \"--forwarder\", self.master.config.dns_forwarder,\n \"-p\", self.master.config.dirman_password,\n \"-U\",\n ]\n self.master.run_command(args)\n\n def test_if_zone_is_signed_master(self):\n # add zone with enabled DNSSEC signing on master\n args = [\n \"ipa\",\n \"dnszone-add\", test_zone,\n \"--dnssec\", \"true\",\n ]\n self.master.run_command(args)\n\n # test master\n assert wait_until_record_is_signed(\n self.master.ip, test_zone, self.log, timeout=100\n ), \"Zone %s is not signed (master)\" % test_zone\n\n # test replica\n assert wait_until_record_is_signed(\n self.replicas[0].ip, test_zone, self.log, timeout=200\n ), \"DNS zone %s is not signed (replica)\" % test_zone\n\n def test_if_zone_is_signed_replica(self):\n # add zone with enabled DNSSEC signing on replica\n args = [\n \"ipa\",\n \"dnszone-add\", test_zone_repl,\n \"--dnssec\", \"true\",\n ]\n self.replicas[0].run_command(args)\n\n # test replica\n assert wait_until_record_is_signed(\n self.replicas[0].ip, test_zone_repl, self.log, timeout=300\n ), \"Zone %s is not signed (replica)\" % test_zone_repl\n\n # we do not need to wait, on master zones should be singed faster\n # than on replicas\n\n assert wait_until_record_is_signed(\n self.master.ip, test_zone_repl, self.log, timeout=5\n ), \"DNS zone %s is not signed (master)\" % test_zone\n\n\nclass TestInstallDNSSECFirst(IntegrationTest):\n \"\"\"Simple DNSSEC test\n\n Install the server 
with DNSSEC and then install the replica with DNS\n \"\"\"\n num_replicas = 1\n topology = 'star'\n\n @classmethod\n def install(cls, mh):\n tasks.install_master(cls.master, setup_dns=False)\n args = [\n \"ipa-dns-install\",\n \"--dnssec-master\",\n \"--forwarder\", cls.master.config.dns_forwarder,\n \"-p\", cls.master.config.dirman_password,\n \"-U\",\n ]\n cls.master.run_command(args)\n\n tasks.install_replica(cls.master, cls.replicas[0], setup_dns=True)\n\n # backup trusted key\n tasks.backup_file(cls.master, paths.DNSSEC_TRUSTED_KEY)\n tasks.backup_file(cls.replicas[0], paths.DNSSEC_TRUSTED_KEY)\n\n @classmethod\n def uninstall(cls, mh):\n # restore trusted key\n tasks.restore_files(cls.master)\n tasks.restore_files(cls.replicas[0])\n\n super(TestInstallDNSSECFirst, cls).uninstall(mh)\n\n def test_sign_root_zone(self):\n args = [\n \"ipa\", \"dnszone-add\", root_zone, \"--dnssec\", \"true\"\n ]\n self.master.run_command(args)\n\n # make BIND happy, and delegate zone which contains A record of master\n args = [\n \"ipa\", \"dnsrecord-add\", root_zone, self.master.domain.name,\n \"--ns-rec=\" + self.master.hostname\n ]\n self.master.run_command(args)\n\n # test master\n assert wait_until_record_is_signed(\n self.master.ip, root_zone, self.log, timeout=100\n ), \"Zone %s is not signed (master)\" % root_zone\n\n # test replica\n assert wait_until_record_is_signed(\n self.replicas[0].ip, root_zone, self.log, timeout=300\n ), \"Zone %s is not signed (replica)\" % root_zone\n\n def test_chain_of_trust(self):\n \"\"\"\n Validate signed DNS records, using our own signed root zone\n :return:\n \"\"\"\n\n # add test zone\n args = [\n \"ipa\", \"dnszone-add\", example_test_zone, \"--dnssec\", \"true\"\n ]\n\n self.master.run_command(args)\n\n # wait until zone is signed\n assert wait_until_record_is_signed(\n self.master.ip, example_test_zone, self.log, timeout=100\n ), \"Zone %s is not signed (master)\" % example_test_zone\n\n # GET DNSKEY records from zone\n ans = resolve_with_dnssec(self.master.ip, example_test_zone, self.log,\n rtype=\"DNSKEY\")\n dnskey_rrset = ans.response.get_rrset(\n ans.response.answer,\n dns.name.from_text(example_test_zone),\n dns.rdataclass.IN,\n dns.rdatatype.DNSKEY)\n assert dnskey_rrset, \"No DNSKEY records received\"\n\n self.log.debug(\"DNSKEY records returned: %s\", dnskey_rrset.to_text())\n\n # generate DS records\n ds_records = []\n for key_rdata in dnskey_rrset:\n if key_rdata.flags != 257:\n continue # it is not KSK\n ds_records.append(dns.dnssec.make_ds(example_test_zone, key_rdata,\n 'sha256'))\n assert ds_records, (\"No KSK returned from the %s zone\" %\n example_test_zone)\n\n self.log.debug(\"DS records for %s created: %r\", example_test_zone,\n ds_records)\n\n # add DS records to root zone\n args = [\n \"ipa\", \"dnsrecord-add\", root_zone, example_test_zone,\n # DS record requires to coexists with NS\n \"--ns-rec\", self.master.hostname,\n ]\n for ds in ds_records:\n args.append(\"--ds-rec\")\n args.append(ds.to_text())\n\n self.master.run_command(args)\n\n # extract DSKEY from root zone\n ans = resolve_with_dnssec(self.master.ip, root_zone, self.log,\n rtype=\"DNSKEY\")\n dnskey_rrset = ans.response.get_rrset(ans.response.answer,\n dns.name.from_text(root_zone),\n dns.rdataclass.IN,\n dns.rdatatype.DNSKEY)\n assert dnskey_rrset, \"No DNSKEY records received\"\n\n self.log.debug(\"DNSKEY records returned: %s\", dnskey_rrset.to_text())\n\n # export trust keys for root zone\n root_key_rdatas = []\n for key_rdata in dnskey_rrset:\n if key_rdata.flags != 
257:\n continue # it is not KSK\n root_key_rdatas.append(key_rdata)\n\n assert root_key_rdatas, \"No KSK returned from the root zone\"\n\n root_keys_rrset = dns.rrset.from_rdata_list(dnskey_rrset.name,\n dnskey_rrset.ttl,\n root_key_rdatas)\n self.log.debug(\"Root zone trusted key: %s\", root_keys_rrset.to_text())\n\n # set trusted key for our root zone\n self.master.put_file_contents(paths.DNSSEC_TRUSTED_KEY,\n root_keys_rrset.to_text() + '\\n')\n self.replicas[0].put_file_contents(paths.DNSSEC_TRUSTED_KEY,\n root_keys_rrset.to_text() + '\\n')\n\n # verify signatures\n args = [\n \"drill\", \"@localhost\", \"-k\",\n paths.DNSSEC_TRUSTED_KEY, \"-S\",\n example_test_zone, \"SOA\"\n ]\n\n # test if signature chains are valid\n self.master.run_command(args)\n self.replicas[0].run_command(args)\n","repo_name":"vgol/freeipa-rosa","sub_path":"ipatests/test_integration/test_dnssec.py","file_name":"test_dnssec.py","file_ext":"py","file_size_in_byte":9645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14775170648","text":"from PyQt4 import QtCore, QtGui\nfrom ui_calculator import Ui_CalDialog\nimport sys\n\nclass CalDialog(QtGui.QDialog, Ui_CalDialog):\n def __init__(self):\n QtGui.QDialog.__init__(self)\n # Set up the user interface from Designer.\n self.setupUi(self)\n \n \n @QtCore.pyqtSignature(\"QString\")\n def on_txtInput_textChanged(self, input):\n (num, b) = input.toInt()\n if not b:\n ans='Error'\n else:\n if num>40 or num<=0:\n ans='Plz input correctly'\n elif num<23:\n ans='Small than 6'\n elif num<30:\n ans='6'\n elif num<35:\n ans='7'\n elif num<40:\n ans='8'\n else:\n ans='9'\n self.txtOutput.setText(ans)\n\n\nif __name__ == \"__main__\":\n app = QtGui.QApplication(sys.argv)\n calculator = CalDialog()\n calculator.show()\n sys.exit(app.exec_())\n","repo_name":"kylewu/wenbin","sub_path":"py_calculator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44354488721","text":"from tkinter.messagebox import NO\nfrom genpy import Time\nimport rospy\nfrom tf2_ros import TransformListener, Buffer\nfrom tf import transformations, TransformBroadcaster\nfrom threading import Thread\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nfrom actionlib import SimpleActionClient\nfrom std_msgs.msg import String\nimport numpy\n\n\nclass DockTfPub():\n def __init__(self):\n self.tf2_buffer = Buffer()\n self.tf2_listener = TransformListener(self.tf2_buffer)\n self.broadcaster = TransformBroadcaster()\n self.move_base_client = SimpleActionClient('move_base', MoveBaseAction)\n self.camera_link = \"camera_color_optical_frame\"\n self.april_tag_link = \"ID0\"\n\n self.subscriber = rospy.Subscriber(\"dock\", String, self.dock_cb)\n\n self.map_frame = \"map\"\n self.robot_frame = \"base_link\"\n\n self.april_tag_to_cam_tf = None\n self.map_to_robot_tf = None\n\n def dock_cb(self, msg):\n rospy.loginfo(f\"got message {msg.data}\")\n if (msg.data == \"dock\"):\n dock_tf = self.get_dock_tf()\n if (dock_tf is not None):\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = self.map_frame\n\n goal.target_pose.pose.position.x = dock_tf.transform.translation.x\n goal.target_pose.pose.position.y = dock_tf.transform.translation.y\n goal.target_pose.pose.position.z = dock_tf.transform.translation.z\n\n quat = []\n quat.append(dock_tf.transform.rotation.x)\n quat.append(dock_tf.transform.rotation.y)\n 
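# keep the [x, y, z, w] component order expected by transformations.euler_from_quaternion() below\n                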
quat.append(dock_tf.transform.rotation.z)\n                quat.append(dock_tf.transform.rotation.w)\n\n                euler = transformations.euler_from_quaternion(quat)\n                # rospy.loginfo(f\"numpy.allclose: {numpy.allclose(euler)} \")\n                rospy.loginfo(f\"target euler: {euler[2]}\")\n                target_quat = transformations.quaternion_from_euler(0.0, 0.0, (euler[2] + numpy.pi/2))\n\n                goal.target_pose.pose.orientation.x = target_quat[0]\n                goal.target_pose.pose.orientation.y = target_quat[1]\n                goal.target_pose.pose.orientation.z = target_quat[2]\n                goal.target_pose.pose.orientation.w = target_quat[3]\n                rospy.loginfo(\"sending goal\")\n                self.move_base_client.send_goal(goal)\n\n    def get_dock_tf(self):\n        try:\n            transform = self.tf2_buffer.lookup_transform(\n                self.map_frame,\n                \"dock\",\n                rospy.Time(0)\n            )\n            return transform\n\n        except Exception as e:\n            rospy.logwarn(e)\n            return None\n\n    def get_april_tag_tf(self):\n        try:\n            transform = self.tf2_buffer.lookup_transform(\n                self.camera_link,\n                self.april_tag_link,\n                rospy.Time(0)\n            )\n\n            self.april_tag_to_cam_tf = transform\n        except Exception as e:\n            rospy.logwarn(e)\n\n    def get_robot_transform(self):\n        try:\n            transform = self.tf2_buffer.lookup_transform(\n                self.map_frame,\n                self.robot_frame,\n                rospy.Time(0)\n            )\n            self.map_to_robot_tf = transform\n\n        except Exception as e:\n            rospy.logwarn(e)\n\n    def get_map_to_tag_tf(self):\n        try:\n            transform = self.tf2_buffer.lookup_transform(\n                self.map_frame,\n                self.april_tag_link,\n                rospy.Time(0)\n            )\n            return transform\n\n        except Exception as e:\n            rospy.logwarn(e)\n\n\n\n    def get_april_tag_rotation(self):\n        # +x should point to the right in the image\n        # +y should point down in the image\n        # +z should point into the plane of the image\n        # because of the conventional orientation stated above\n        # we are interested in the roll\n        tag_quaternion = []\n        tag_quaternion.append(self.april_tag_to_cam_tf.transform.rotation.x)\n        tag_quaternion.append(self.april_tag_to_cam_tf.transform.rotation.y)\n        tag_quaternion.append(self.april_tag_to_cam_tf.transform.rotation.z)\n        tag_quaternion.append(self.april_tag_to_cam_tf.transform.rotation.w)\n\n        yaw = transformations.euler_from_quaternion(\n            tag_quaternion)[0]\n        return yaw\n\n    def get_robot_rotation(self):\n        robot_quaternion = []\n        robot_quaternion.append(self.map_to_robot_tf.transform.rotation.x)\n        robot_quaternion.append(self.map_to_robot_tf.transform.rotation.y)\n        robot_quaternion.append(self.map_to_robot_tf.transform.rotation.z)\n        robot_quaternion.append(self.map_to_robot_tf.transform.rotation.w)\n\n        yaw = transformations.euler_from_quaternion(\n            robot_quaternion)[2]\n        return yaw\n\n\n    def combine_yaw_to_quaternion(self):\n        tag_roll = self.get_april_tag_rotation()\n        robot_yaw = self.get_robot_rotation()\n        combined_rotation = tag_roll + robot_yaw\n        quaternion = transformations.quaternion_from_euler(\n            0.0, 0.0, combined_rotation\n        )\n        return quaternion\n\n    def get_map_to_dock_tf(self):\n        # self.april_tag_to_cam_tf.transform.translation.z\n        return\n\n    def loop(self):\n        while not rospy.is_shutdown():\n            rospy.loginfo(\"loop\")\n            tf = self.get_map_to_tag_tf()\n\n            quat = []\n            coord = {}\n            if tf is not None:\n\n                quat.append(tf.transform.rotation.x)\n                quat.append(tf.transform.rotation.y)\n                quat.append(tf.transform.rotation.z)\n                quat.append(tf.transform.rotation.w)\n\n\n                coord['x']= (tf.transform.translation.x)\n                coord['y']= (tf.transform.translation.y)\n                coord['z']= (tf.transform.translation.z)\n\n                self.broadcaster.sendTransform(\n                    (0.0, 0.0, 1.5),\n                    transformations.quaternion_from_euler(0.0, 0.0, 0.0),\n                    rospy.Time.now(),\n                    \"dock\",\n                    
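# child frame \"dock\" is placed 1.5 m along the tag's z-axis; the parent frame follows\n                    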
self.april_tag_link\n )\n\n euler_angles = transformations.euler_from_quaternion(quat)\n info = \"\"\n for i in range(len(euler_angles)):\n info += f\"euler[{i}]: {euler_angles[i]}\"\n\n for key in coord:\n rospy.loginfo(f\"coordinates {key}: {coord[key]}\")\n\n\n rospy.loginfo(info)\n rospy.Rate(0.5).sleep()\n\n\n\ndef main():\n rospy.init_node('dock_tf_node')\n node = DockTfPub()\n rate = rospy.Rate(0.5)\n loop_thread = Thread(target=node.loop, args=())\n rospy.loginfo(\"starting loop\")\n loop_thread.start()\n rospy.loginfo(\"starting spin\")\n rospy.spin()\n\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n","repo_name":"Capstone-S13/dock_test","sub_path":"scripts/dock_tf.py","file_name":"dock_tf.py","file_ext":"py","file_size_in_byte":6864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70634632161","text":"import tempfile\nimport os\nfrom django.core.management.base import BaseCommand, CommandError\nfrom minio.error import ResponseError, NoSuchKey\n\nfrom exodus.core.static_analysis import StaticAnalysis\nfrom exodus.core.storage import RemoteStorageHelper\nfrom reports.models import Report\n\n\nclass Command(BaseCommand):\n help = 'Refresh all reports'\n\n def add_arguments(self, parser):\n parser.add_argument('report_id', nargs='*', type=int)\n\n parser.add_argument(\n '--all',\n action='store_true',\n dest='all',\n help='Update all reports',\n )\n\n parser.add_argument(\n '--icons',\n action='store_true',\n dest='icons',\n help='Update icons',\n )\n\n parser.add_argument(\n '--trackers',\n action='store_true',\n dest='trackers',\n help='Update found trackers',\n )\n\n parser.add_argument(\n '--clist',\n action='store_true',\n dest='clist',\n help='Update clist file',\n )\n\n def handle(self, *args, **options):\n if options['all']:\n try:\n reports = Report.objects.order_by('-creation_date')\n except Report.DoesNotExist:\n raise CommandError('No reports found')\n elif options['report_id']:\n try:\n reports = Report.objects.filter(pk__in=options['report_id'])\n except Report.DoesNotExist:\n raise CommandError('No reports found')\n else:\n raise CommandError('Please specify a report id or --all option')\n\n trackers_changed = 0\n count = 1\n for report in reports:\n self.stdout.write('Start updating report \"{}\" - {}/{}'.format(report.id, count, len(reports)))\n\n # report.application could fail with malformed reports\n try:\n handle = report.application.handle\n except Exception as e:\n self.stdout.write(self.style.WARNING(str(e)))\n continue\n\n count += 1\n with tempfile.TemporaryDirectory() as tmpdir:\n storage_helper = RemoteStorageHelper(report.bucket)\n\n if options['clist'] or options['icons']:\n apk_name = report.apk_file\n apk_tmp = os.path.join(tmpdir, apk_name)\n try:\n storage_helper.get_file(apk_name, apk_tmp)\n except ResponseError:\n raise CommandError('Unable to get APK')\n static_analysis = StaticAnalysis(apk_path=apk_tmp)\n\n if options['clist']:\n with tempfile.NamedTemporaryFile(delete=True) as fp:\n static_analysis.save_embedded_classes_in_file(fp.name)\n storage_helper.put_file(fp.name, report.class_list_file)\n self.stdout.write(\n self.style.SUCCESS('Successfully updated classes list of \"{}\"'.format(handle)))\n\n if options['icons']:\n icon_name = '{}_{}.png'.format(report.bucket, handle)\n source = report.application.source\n icon_phash = static_analysis.get_icon_and_phash(storage_helper, icon_name, source)\n if icon_phash:\n report.application.icon_path = icon_name\n 
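# icon_path lives on the related Application row, so persist that model\n                            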
report.application.save()\n self.stdout.write(\n self.style.SUCCESS('Successfully updated icon of \"{}\"'.format(handle)))\n\n if options['trackers']:\n # Download class list file\n static_analysis = StaticAnalysis(None)\n clist_tmp = os.path.join(tmpdir, report.class_list_file)\n try:\n storage_helper.get_file(report.class_list_file, clist_tmp)\n except (ResponseError, NoSuchKey):\n raise CommandError('Unable to get clist file')\n\n trackers = static_analysis.detect_trackers(clist_tmp)\n if report.found_trackers.count() != len(trackers):\n trackers_changed += 1\n self.stdout.write(\n self.style.WARNING(\n 'Previous: {} - New: {} trackers'.format(report.found_trackers.count(), len(trackers))))\n report.found_trackers.set(trackers)\n report.save()\n self.stdout.write(\n self.style.SUCCESS('Successfully updated trackers list of \"{}\"'.format(handle)))\n\n self.stdout.write('=====')\n\n self.stdout.write(self.style.SUCCESS('Update complete !'))\n if options['trackers']:\n self.stdout.write('Reports updated (trackers): {}'.format(trackers_changed))\n","repo_name":"Exodus-Privacy/exodus","sub_path":"exodus/reports/management/commands/refreshstaticanalysis.py","file_name":"refreshstaticanalysis.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","stars":551,"dataset":"github-code","pt":"54"} +{"seq_id":"31981927873","text":"#!/usr/bin/python3\n\nfrom fastapi import APIRouter, HTTPException, Response\nfrom typing import Optional\nfrom schemas import GroupResponseSchema, GroupCreateSchema\nfrom sqlalchemy.orm import Session\nfrom fastapi.params import Depends\nfrom models.db import get_db\nfrom repositories import GroupRepository, UserRepository\nfrom models import Group, User\nfrom common import MemberValidationError\n\ngroup_router = APIRouter()\n\n\n@group_router.get(\"/{name:str}\", response_model=GroupResponseSchema)\ndef get_group_by_name(name: str, db: Session = Depends(get_db)) -> Optional[Group]: # type: ignore # noqa\n group = GroupRepository(db).get_group_by_name(name=name)\n if not group:\n raise HTTPException(status_code=404, detail=\"No Group with this name.\")\n return group\n\n\n@group_router.post(\"\", response_model=GroupResponseSchema)\ndef create_group(\n group_data: GroupCreateSchema,\n current_user: User = Depends(UserRepository(db=Depends(get_db)).get_current_user), # type: ignore # noqa\n db: Session = Depends(get_db), # type: ignore\n):\n group = GroupRepository(db).get_group_by_name(name=group_data.name)\n if group:\n raise HTTPException(\n status_code=409, detail=\"Group with this name exists.\"\n ) # noqa\n try:\n new_group = GroupRepository(db).create_group(\n group_data=group_data, current_user=current_user\n )\n except MemberValidationError:\n raise HTTPException(\n status_code=400, detail=\"Some members are not on Splitwise.\"\n )\n except Exception:\n raise HTTPException(status_code=500, detail=\"Something went wrong.\")\n print(new_group.id)\n return new_group\n\n\n@group_router.delete(\"\", response_model=None)\ndef delete_group(group_data: GroupCreateSchema, db: Session = Depends(get_db)): # type: ignore # noqa\n group = GroupRepository(db).get_group_by_name(name=group_data.name)\n if not group:\n raise HTTPException(\n status_code=404, detail=\"Group with this name does not exist.\"\n )\n GroupRepository(db).delete_group(group_name=group_data.name)\n return Response(status_code=204, content=\"Group deleted 
successfully\")\n","repo_name":"iangfernandes96/splitwise","sub_path":"routes/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13565128081","text":"from collections import deque, defaultdict\n\nN, K = map(int, input().split())\ng_a = defaultdict(lambda: [])\nfor i in range(N):\n a_input = list(map(int, input().split()))\n for j in range(N):\n if a_input[j] != 0:\n g_a[i].append(j)\nQ = int(input())\nst = [list(map(int, input().split())) for _ in range(Q)]\n\nfor s, t in st:\n start = (s-1)%N\n end = (t-1)%N\n queue = deque([start])\n trace = defaultdict(lambda: -1)\n trace[start] = 0\n flag = False\n while len(queue) > 0:\n val = queue.popleft()\n adj_i = g_a[val]\n # i の隣接ノード\n for j in adj_i:\n if trace[j] == -1:\n if j == end:\n print(trace[val] + 1)\n flag = True\n break\n queue.appendleft(j)\n trace[j] = trace[val] + 1\n if flag:\n break\n if not flag:\n print(-1)","repo_name":"sumugit/atcoder","sub_path":"arc/arc159/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2618963871","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport pytest\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\n#进入testerhome,访问MTSC2020置顶帖,点击目录,点击议题征集范围。把代码贴到回复里\n\nclass TestTestsele():\n\n def setup_method(self):\n self.driver = webdriver.Chrome()\n self.driver.get(\"https://testerhome.com/\")\n self.driver.maximize_window()\n self.vars = {}\n\n def wait(self, timeout, method):\n WebDriverWait(self.driver, timeout).until(method)\n\n def teardown_method(self, method):\n self.driver.quit()\n\n def test_page(self):\n element1 = (By.CSS_SELECTOR, '.topic:nth-child(2) [title=\"MTSC2020 中国互联网测试开发大会议题征集\"]')\n # self.driver.find_element(By.LINK_TEXT, \"MTSC2020 中国互联网测试开发大会议题征集\").click()\n # self.driver.find_element(By.CSS_SELECTOR, '.topic:nth-child(2)[title=\"MTSC2020 中国互联网测试开发大会议题征集\"]').click()\n self.wait(10, expected_conditions.element_to_be_clickable(element1))\n self.driver.find_element(*element1).click()\n\n element2 = (By.CSS_SELECTOR, '.toc-container:nth-child(1) button')\n self.wait(10, expected_conditions.element_to_be_clickable(element2))\n self.driver.find_element(*element2).click()\n\n # WebDriverWait(self.driver, 10).until(lambda x: self.driver.find_element(element2) > 1)\n #使用lambda表达式\n # WebDriverWait(self.driver, 10).until(lambda x: self.driver.find_element(By.CSS_SELECTOR, '.toc-container:nth-child(1) button')).click()\n\n element3 = (By.LINK_TEXT, '提交议题格式')\n self.wait(10, expected_conditions.element_to_be_clickable(element3))\n self.driver.find_element(*element3).click()\n","repo_name":"litebin/pythonTest","sub_path":"test_selenium/test_testerhome.py","file_name":"test_testerhome.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72088287520","text":"import logging\nimport time\nfrom unittest.mock import MagicMock, PropertyMock\n\nfrom freqtrade.data.dataprovider import DataProvider\nfrom freqtrade.enums import State\nfrom freqtrade.worker import Worker\nfrom tests.conftest import get_patched_worker, log_has, log_has_re\n\n\ndef test_worker_state(mocker, 
default_conf, markets) -> None:\n mocker.patch('freqtrade.exchange.Exchange.markets', PropertyMock(return_value=markets))\n worker = get_patched_worker(mocker, default_conf)\n assert worker.freqtrade.state is State.RUNNING\n\n default_conf.pop('initial_state')\n worker = Worker(args=None, config=default_conf)\n assert worker.freqtrade.state is State.STOPPED\n\n\ndef test_worker_running(mocker, default_conf, caplog) -> None:\n mock_throttle = MagicMock()\n mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle)\n mocker.patch('freqtrade.persistence.Trade.stoploss_reinitialization', MagicMock())\n\n worker = get_patched_worker(mocker, default_conf)\n\n state = worker._worker(old_state=None)\n assert state is State.RUNNING\n assert log_has('Changing state to: RUNNING', caplog)\n assert mock_throttle.call_count == 1\n # Check strategy is loaded, and received a dataprovider object\n assert worker.freqtrade.strategy\n assert worker.freqtrade.strategy.dp\n assert isinstance(worker.freqtrade.strategy.dp, DataProvider)\n\n\ndef test_worker_stopped(mocker, default_conf, caplog) -> None:\n mock_throttle = MagicMock()\n mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle)\n\n worker = get_patched_worker(mocker, default_conf)\n worker.freqtrade.state = State.STOPPED\n state = worker._worker(old_state=State.RUNNING)\n assert state is State.STOPPED\n assert log_has('Changing state to: STOPPED', caplog)\n assert mock_throttle.call_count == 1\n\n\ndef test_throttle(mocker, default_conf, caplog) -> None:\n def throttled_func():\n return 42\n\n caplog.set_level(logging.DEBUG)\n worker = get_patched_worker(mocker, default_conf)\n\n start = time.time()\n result = worker._throttle(throttled_func, throttle_secs=0.1)\n end = time.time()\n\n assert result == 42\n assert end - start > 0.1\n assert log_has_re(r\"Throttling with 'throttled_func\\(\\)': sleep for \\d\\.\\d{2} s.*\", caplog)\n\n result = worker._throttle(throttled_func, throttle_secs=-1)\n assert result == 42\n\n\ndef test_throttle_with_assets(mocker, default_conf) -> None:\n def throttled_func(nb_assets=-1):\n return nb_assets\n\n worker = get_patched_worker(mocker, default_conf)\n\n result = worker._throttle(throttled_func, throttle_secs=0.1, nb_assets=666)\n assert result == 666\n\n result = worker._throttle(throttled_func, throttle_secs=0.1)\n assert result == -1\n\n\ndef test_worker_heartbeat_running(default_conf, mocker, caplog):\n message = r\"Bot heartbeat\\. PID=.*state='RUNNING'\"\n\n mock_throttle = MagicMock()\n mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle)\n worker = get_patched_worker(mocker, default_conf)\n\n worker.freqtrade.state = State.RUNNING\n worker._worker(old_state=State.STOPPED)\n assert log_has_re(message, caplog)\n\n caplog.clear()\n # Message is not shown before interval is up\n worker._worker(old_state=State.RUNNING)\n assert not log_has_re(message, caplog)\n\n caplog.clear()\n # Set clock - 70 seconds\n worker._heartbeat_msg -= 70\n worker._worker(old_state=State.RUNNING)\n assert log_has_re(message, caplog)\n\n\ndef test_worker_heartbeat_stopped(default_conf, mocker, caplog):\n message = r\"Bot heartbeat\\. 
PID=.*state='STOPPED'\"\n\n mock_throttle = MagicMock()\n mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle)\n worker = get_patched_worker(mocker, default_conf)\n\n worker.freqtrade.state = State.STOPPED\n worker._worker(old_state=State.RUNNING)\n assert log_has_re(message, caplog)\n\n caplog.clear()\n # Message is not shown before interval is up\n worker._worker(old_state=State.STOPPED)\n assert not log_has_re(message, caplog)\n\n caplog.clear()\n # Set clock - 70 seconds\n worker._heartbeat_msg -= 70\n worker._worker(old_state=State.STOPPED)\n assert log_has_re(message, caplog)\n","repo_name":"kazunetakeda25/bit-trade-bot","sub_path":"tests/test_worker.py","file_name":"test_worker.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"33629940295","text":"from .defines import SCRIPT, TERRITORY, VARIANT, NONE\n\n\ndef pprint(o):\n from json import dumps\n print(dumps(o, indent=4))\n\n\nclass ISOGuesser:\n def __init__(self):\n DLikelySubtags = self.DLikelySubtags = self.get_D_likely_subtags()\n\n # TODO: Check there aren't any collisions (somehow)! ======================================================================\n D = self.DRevLikelySubtags = {}\n for k, v in list(DLikelySubtags.items()):\n for i in self.get_L_removed(\n v,\n [\n NONE,\n TERRITORY\n ]\n ):\n if not self.split(k)[0]:\n # Only use in reverse if a language to reverse to!\n continue\n D[i] = k\n\n #print sorted(self.DRevLikelySubtags.items())\n #self.DLangData = self.get_D_sup_languages()\n\n def pack_iso(self, iso):\n \"\"\"\n Removes unneeded info from the ISO, e.g. \"ja_Jpan\" -> \"ja\",\n assuming the territory (e.g. \"...-JP\") isn't needed.\n\n This makes sure territory info isn't removed when shortening as\n it's meant to be reversible.\n \"\"\"\n has_territory = self.removed(iso, TERRITORY) != iso\n if has_territory:\n return iso\n else:\n return self.remove_unneeded_info(iso)\n\n def unpack_iso(self, iso):\n \"\"\"\n The opposite of pack_iso(), above.\n \"\"\"\n return self.removed(\n self.guess_omitted_info(iso),\n TERRITORY\n )\n\n def remove_unneeded_info(self, s):\n \"\"\"\n e.g. for \"en_Latn\", try remove the Latn part if obvious\n \"\"\"\n if s == 'zh_Hant':\n return 'zh_Hant'\n\n if s in self.DRevLikelySubtags:\n r = self.DRevLikelySubtags[s]\n if r == 'zh_Hani':\n return 'zh' # HACK!\n elif r == 'cmn':\n return 'zh' # HACK!\n elif r == 'zh_Hant':\n return 'zh_Hant'\n return r\n\n if s == 'cmn':\n return 'zh'\n return s\n\n def guess_omitted_info(self, s):\n if s == 'zh_Hant':\n return self.join('zh', 'Hant', None, None)\n\n lang, script, territory, variant = self.split(s)\n assert lang or territory, \\\n \"iso or territory is required to guess omitted info: %s\" % s\n\n #print iso, script, territory, variant, s in self.DLikelySubtags, s in self.DLangData\n\n # Look in the likely subtags\n if s in self.DLikelySubtags:\n return self.DLikelySubtags[s]\n else:\n # Try with various keys removed as needed\n # e.g. 
\"en_Latn\" doesn't have a key, but \"en\" does (value\n # \"en_Latn-US\"), so look for without the script etc as well.\n\n for i in self.get_L_removed(\n s,\n [\n SCRIPT,\n TERRITORY,\n VARIANT,\n SCRIPT|TERRITORY,\n SCRIPT|VARIANT,\n VARIANT|TERRITORY,\n SCRIPT|TERRITORY|VARIANT\n ],\n rem_dupes=True\n ):\n if i in self.DLikelySubtags:\n i_split = self.split(self.DLikelySubtags[i])\n\n # Don't allow if information (aside from territory)\n # differs from one to the other!\n allow = True\n for x, y in zip(\n (i_split.lang, i_split.script, i_split.variant),\n (lang, script, variant)\n ):\n if x and y and x != y:\n allow = False\n break\n\n if not allow:\n continue\n\n return self.join(*[\n y or x for x, y in zip(\n i_split,\n (lang, script, territory, variant)\n )\n ])\n\n # Look in the CLDR supplemental data to add missing scripts\n # maybe deriving script from a main locale if only territory but\n # not script provided if it's a secondary locale\n #if s in self.DLangData:\n # D = self.DLangData[s]\n\n # lang = D['@type']\n # script = D['@scripts'] if '@scripts' in D \\\n # and not ' ' in D['@scripts'] else script\n # territory = D['@territories'] if '@territories' in D \\\n # and not ' ' in D['@territories'] else territory\n\n return self.join(\n lang, script, territory, variant\n )\n\n\nif __name__ == '__main__':\n from iso_tools.ISOTools import ISOTools as i\n from cProfile import run\n\n print(i.get_L_removed('nl_Latn-NL', [\n NONE,\n SCRIPT,\n TERRITORY,\n SCRIPT|TERRITORY\n ],\n #rem_dupes=True\n ))\n\n print(i.guess_omitted_info('hy'))\n print(i.guess_omitted_info('ko'))\n print(i.guess_omitted_info('zh'))\n print(i.guess_omitted_info('en_Latn|MINE!'))\n print(i.guess_omitted_info('en_Shaw'))\n\n #run(\"for x in xrange(50000): i.guess_omitted_info('ja')\")\n #for x in xrange(5000):\n # print i.guess_omitted_info('ja')\n","repo_name":"mcyph/iso_tools","sub_path":"iso_tools/isotools_classes/ISOGuesser.py","file_name":"ISOGuesser.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6903657743","text":"def get_child_queryset2(obj, hasParent=True):\n '''\n 获取所有子集\n obj实例\n 数据表需包含parent字段\n 是否包含父默认True\n '''\n cls = type(obj)\n queryset = cls.objects.none()\n fatherQueryset = cls.objects.filter(pk=obj.id)\n if hasParent:\n queryset = queryset | fatherQueryset\n child_queryset = cls.objects.filter(parent=obj)\n while child_queryset:\n queryset = queryset | child_queryset\n child_queryset = cls.objects.filter(parent__in=child_queryset)\n return queryset\n","repo_name":"kidword/blog-admin","sub_path":"blog_admin/utils/queryset.py","file_name":"queryset.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21538557076","text":"from PIL import Image, ImageFont, ImageDraw\nfrom pathlib import Path\n\nBLANK = Path(\"./img/template_blank.png\")\n\n# https://fonts.google.com/noto/specimen/Noto+Sans+JP\n\nFONT = ImageFont.truetype(\"./NotoSansJP-Medium.otf\", size=22)\n\nCONTENTS = [\"メインストーリー\", \"イベントクエスト\", \"コロシアム\", \"心深圏\", \"キャラストーリー\", \"ガチャ(召喚)\", \"共襲\", \"星間の塔\"]\n\n\ndef main():\n with Image.open(BLANK) as im:\n draw = ImageDraw.Draw(im)\n top = 436\n pos = [900, top]\n color = (0xB7, 0xC0, 0xE7)\n for i, txt in enumerate(CONTENTS):\n draw.text(tuple(pos), txt, font=FONT, fill=color)\n if i == 3:\n pos[1] = top\n pos[0] += 250\n else:\n pos[1] += 34\n 
im.save(\"template_new.png\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"megido72-resume/megido72-resume-maker","sub_path":"script/favorite_content.py","file_name":"favorite_content.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4624192160","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 14 20:43:28 2020\n\n@author: Santiago Caro\n\"\"\"\n\n#%% Carga imagen 4D\n#fileName = \"C:/Users/CarlosJoseMunoz/Desktop/semestres/2020-2/PDI/Proyecto_2/luimarcarcar/4D.hdr\"\nfileName = \"C:/Users/Santiago Caro/Documents/Santiago Consultas/Bioingenieria/Procesamiento Digital de Imagenes/Slicer/luimarcarcar/4D.hdr\"\n\n\"\"\"Try to read a 4D nifti file as a multivolume\"\"\"\nprint('trying to read %s' % fileName)\n\n# use the vtk reader which seems to handle most nifti variants well\nreader = vtk.vtkNIFTIImageReader()\nreader.SetFileName(fileName)\nreader.SetTimeAsVector(True)\nreader.Update()\nheader = reader.GetNIFTIHeader()\nqFormMatrix = reader.GetQFormMatrix()\nif not qFormMatrix:\n print('Warning: %s does not have a QFormMatrix - using Identity')\n qFormMatrix = vtk.vtkMatrix4x4()\n\nspacing = reader.GetOutputDataObject(0).GetSpacing()\ntimeSpacing = reader.GetTimeSpacing()\nnFrames = reader.GetTimeDimension()\nprint(nFrames)\nif header.GetIntentCode() != header.IntentTimeSeries:\n intentName = header.GetIntentName()\n if not intentName:\n intentName = 'Nothing'\n print('Warning: %s does not have TimeSeries intent, instead it has \\\"%s\\\"' % (fileName,intentName))\n print('Trying to read as TimeSeries anyway')\n\nunits = header.GetXYZTUnits()\n\n# try to account for some of the unit options\n# (Note: no test data available but we hope these are right)\nif units & header.UnitsMSec == header.UnitsMSec:\n timeSpacing /= 1000.\n\nif units & header.UnitsUSec == header.UnitsUSec:\n timeSpacing /= 1000. 
/ 1000.\n\nspaceScaling = 1.\nif units & header.UnitsMeter == header.UnitsMeter:\n spaceScaling *= 1000.\n\nif units & header.UnitsMicron == header.UnitsMicron:\n spaceScaling /= 1000.\n\nspacing = [e * spaceScaling for e in spacing]\n\n# create frame labels using the timing info from the file\n# but use the advanced info so user can specify offset and scale\nvolumeLabels = vtk.vtkDoubleArray()\nvolumeLabels.SetNumberOfTuples(nFrames)\nframeLabelsAttr = ''\nfor i in range(nFrames):\n frameId = 0 + timeSpacing * 0.1 * i\n volumeLabels.SetComponent(i, 0, frameId)\n frameLabelsAttr += str(frameId)+','\n\nframeLabelsAttr = frameLabelsAttr[:-1]\n\n# create the display node\nmvDisplayNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLMultiVolumeDisplayNode')\nmvDisplayNode.SetScene(slicer.mrmlScene)\nslicer.mrmlScene.AddNode(mvDisplayNode)\nmvDisplayNode.SetReferenceCount(mvDisplayNode.GetReferenceCount()-1)\nmvDisplayNode.SetDefaultColorMap()\n\n# spacing and origin are in the ijkToRAS, so clear them from image data\nimageChangeInformation = vtk.vtkImageChangeInformation()\nimageChangeInformation.SetInputConnection(reader.GetOutputPort())\nimageChangeInformation.SetOutputSpacing( 1, 1, 1 )\nimageChangeInformation.SetOutputOrigin( 0, 0, 0 )\nimageChangeInformation.Update()\n\n# QForm includes directions and origin, but not spacing so add that\n# here by multiplying by a diagonal matrix with the spacing\nscaleMatrix = vtk.vtkMatrix4x4()\nfor diag in range(3):\n scaleMatrix.SetElement(diag, diag, spacing[diag])\n\nijkToRAS = vtk.vtkMatrix4x4()\nijkToRAS.DeepCopy(qFormMatrix)\nvtk.vtkMatrix4x4.Multiply4x4(ijkToRAS, scaleMatrix, ijkToRAS)\n\nmvNode = slicer.vtkMRMLMultiVolumeNode()\n\nmvNode.SetIJKToRASMatrix(ijkToRAS)\nmvNode.SetAndObserveDisplayNodeID(mvDisplayNode.GetID())\nmvNode.SetAndObserveImageData(imageChangeInformation.GetOutputDataObject(0))\nmvNode.SetNumberOfFrames(nFrames)\n\n# set the labels and other attributes, then display the volume\nmvNode.SetLabelArray(volumeLabels)\nmvNode.SetLabelName(\"MultiVolumen\")\n\nmvNode.SetAttribute('MultiVolume.FrameLabels',frameLabelsAttr)\nmvNode.SetAttribute('MultiVolume.NumberOfFrames',str(nFrames))\nmvNode.SetAttribute('MultiVolume.FrameIdentifyingDICOMTagName','')\nmvNode.SetAttribute('MultiVolume.FrameIdentifyingDICOMTagUnits','')\n\nmvNode.SetName(str(nFrames)+' frames NIfTI MultiVolume')\n\n#the node is inserted in the scene\nslicer.mrmlScene.AddNode(mvNode)\n\n#%% Extracción de frames y filtrado\n\nescena = slicer.mrmlScene;\nvolumen4D = escena.GetNodeByID('vtkMRMLMultiVolumeNode1')\nimagenvtk4D = volumen4D.GetImageData()\nnumero_imagenes = volumen4D.GetNumberOfFrames()\n\n\nextract1 = vtk.vtkImageExtractComponents()\nextract1.SetInputData(imagenvtk4D)\n\nras2ijk = vtk.vtkMatrix4x4()\nijk2ras = vtk.vtkMatrix4x4()\nvolumen4D.GetRASToIJKMatrix(ras2ijk)\nvolumen4D.GetIJKToRASMatrix(ijk2ras)\n\nfor i in range(numero_imagenes):\n volumenFijo = slicer.vtkMRMLScalarVolumeNode(); \n imagen_fija = extract1.SetComponents(i)\n extract1.Update()\n volumenFijo.SetAndObserveImageData(extract1.GetOutput())\n extract1.Update()\n volumenFijo.SetName(\"frame\"+str(i))\n volumenFijo.SetRASToIJKMatrix(ras2ijk)\n volumenFijo.SetIJKToRASMatrix(ijk2ras)\n escena.AddNode(volumenFijo)\n \nfor i in range(numero_imagenes):\n cliModule = slicer.modules.gradientanisotropicdiffusion\n n = cliModule.cliModuleLogic().CreateNode()\n parameters = {}\n parameters['conductance'] = 1.0 \n parameters['numberOfIterations'] = 5\n parameters['timeStep'] = 0.05\n volumen_entrada = 
slicer.mrmlScene.GetNodeByID(\"vtkMRMLScalarVolumeNode\"+str(i+1))\n #volumen_entrada = volumenFijo\n volumen_salida = slicer.vtkMRMLScalarVolumeNode()\n slicer.mrmlScene.AddNode(volumen_salida)\n parameters['inputVolume'] = volumen_entrada.GetID()\n parameters['outputVolume'] = volumen_salida.GetID()\n cliModule = slicer.modules.gradientanisotropicdiffusion\n cliNode = slicer.cli.run(cliModule,None,parameters,wait_for_completion=True)\n\n#%% Registro\n\nvolumenFijo = slicer.vtkMRMLScalarVolumeNode();\n#le asigno las transformaciones\nvolumenFijo.SetRASToIJKMatrix(ras2ijk)\nvolumenFijo.SetIJKToRASMatrix(ijk2ras)\n\n#le asigno el volumen 3D fijo\nimagen_fija = extract1.SetComponents(0)\nextract1.Update()\n\nvolumenFijo.SetName('fijo')\nvolumenFijo.SetAndObserveImageData(extract1.GetOutput())\nextract1.Update()\n\n#anado el nuevo volumen a la escena\\\nescena.AddNode(volumenFijo)\n\nfor i in range(numero_imagenes-1):\n imagen_movil = slicer.mrmlScene.GetNodeByID(\"vtkMRMLScalarVolumeNode\"+str(i+62))#Seleccionamos un volumen lejano\n \n volumenMovil = slicer.vtkMRMLScalarVolumeNode();\n volumenMovil.SetRASToIJKMatrix(ras2ijk)\n volumenMovil.SetIJKToRASMatrix(ijk2ras)\n volumenMovil.SetAndObserveImageData(imagen_movil.GetImageData())\n volumenMovil.SetName('movil'+str(i+1))\n escena.AddNode(volumenMovil)\n transformadaSalida = slicer.vtkMRMLLinearTransformNode()\n transformadaSalida.SetName('Transformada de registro')\n slicer.mrmlScene.AddNode(transformadaSalida) \n parameters = {}\n parameters['fixedVolume'] = volumenFijo.GetID()\n parameters['movingVolume'] = volumenMovil.GetID()\n parameters['transformType'] = 'Rigid'\n parameters['outputTransform'] = transformadaSalida.GetID() \n cliNode = slicer.cli.run(slicer.modules.brainsfit,None,parameters, wait_for_completion=True)\n\n#%% Segmentación\n#parametros para la operacion de segmentado para un solo volumen \nparameters = {}\nparameters['smoothingIterations'] = 5.0 \nparameters['timestep'] = 0.0625\n\nparameters['iterations'] = 5\nparameters['multiplier'] = 2.5\nparameters['neighborhood'] = 1\nparameters['labelvalue'] = 2\n\nfiducials = slicer.mrmlScene.GetNodeByID('vtkMRMLMarkupsFiducialNode2')#Se especifíca el fiducial que se va a usar\nparameters['seed'] = fiducials.GetID()\n\nvolumen_entrada = slicer.mrmlScene.GetNodeByID('vtkMRMLScalarVolumeNode182')#se especficia el volumen que se va a usar \nparameters['inputVolume'] = volumen_entrada.GetID()\n\nvolumen_salida = slicer.vtkMRMLLabelMapVolumeNode()\nslicer.mrmlScene.AddNode(volumen_salida)\nparameters['outputVolume'] = volumen_salida.GetID()\n\ncliModule = slicer.modules.simpleregiongrowingsegmentation\ncliNode = slicer.cli.run(cliModule,None,parameters,wait_for_completion=True)\n\n#%% Grafica\n# para encontrar el promedio de la intensidad es necesario hacer la segmentación de un volumen primero para obtener el lebelmapvolume, \nimport numpy\nlabel = array('LabelMapVolume')#se especifica la region segmentada\npoints = numpy.where( label == 2 ) # or use another label number depending on what you segmented\n\n\nescena = slicer.mrmlScene;\nvolumen4D = escena.GetNodeByID('vtkMRMLMultiVolumeNode1')\n\nimagenvtk4D = volumen4D.GetImageData()\nnumero_imagenes = volumen4D.GetNumberOfFrames()\neje_x=numpy.array(range(numero_imagenes))\ndata=numpy.zeros(())\nfor i in range(numero_imagenes): #obtiene todos los volumenes \n volumenFijo = slicer.vtkMRMLScalarVolumeNode(); \n imagen_fija = extract1.SetComponents(i)\n extract1.Update()\n volumenFijo.SetAndObserveImageData(extract1.GetOutput())\n 
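# update once more so the node observes the freshly extracted frame i\n    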
extract1.Update()\n volumenFijo.SetName(\"frame\"+str(i))\n volumenFijo.SetRASToIJKMatrix(ras2ijk)\n volumenFijo.SetIJKToRASMatrix(ijk2ras)\n escena.AddNode(volumenFijo)\n\nprom=numpy.array([])\n\nfor i in range(numero_imagenes):\n volume=array(\"frame\"+str(i))\n values = volume[points]\n prom=numpy.append(prom,values.mean())\n\nchartNode = slicer.util.plot((eje_x,prom), xColumnIndex=0, columnNames=['X', 'X^2'])\nchartNode.SetXAxisTitle('X')\nchartNode.SetYAxisTitle('Y')\nchartNode.LegendVisibilityOff()\nchartNode.SetTitle('Prueba')\n\n\n#%% Prueba para un solo frame\n\nescena = slicer.mrmlScene;\nvolumen4D = escena.GetNodeByID('vtkMRMLMultiVolumeNode1')\nimagenvtk4D = volumen4D.GetImageData()\nnumero_imagenes = volumen4D.GetNumberOfFrames()\n\n\nextract1 = vtk.vtkImageExtractComponents()\nextract1.SetInputData(imagenvtk4D)\n\nras2ijk = vtk.vtkMatrix4x4()\nijk2ras = vtk.vtkMatrix4x4()\nvolumen4D.GetRASToIJKMatrix(ras2ijk)\nvolumen4D.GetIJKToRASMatrix(ijk2ras)\n\nvolumenFijo = slicer.vtkMRMLScalarVolumeNode(); \nimagen_fija = extract1.SetComponents(50)\nextract1.Update()\nvolumenFijo.SetAndObserveImageData(extract1.GetOutput())\nextract1.Update()\nvolumenFijo.SetName(\"frame\"+str(50))\nvolumenFijo.SetRASToIJKMatrix(ras2ijk)\nvolumenFijo.SetIJKToRASMatrix(ijk2ras)\nescena.AddNode(volumenFijo)\n\n\ncliModule = slicer.modules.gradientanisotropicdiffusion\nn = cliModule.cliModuleLogic().CreateNode()\nparameters = {}\nparameters['conductance'] = 1.5\nparameters['numberOfIterations'] = 10\nparameters['timeStep'] = 0.05\nvolumen_entrada = slicer.mrmlScene.GetNodeByID(\"vtkMRMLScalarVolumeNode1\")\n#volumen_entrada = volumenFijo\nvolumen_salida = slicer.vtkMRMLScalarVolumeNode()\nslicer.mrmlScene.AddNode(volumen_salida)\nparameters['inputVolume'] = volumen_entrada.GetID()\nparameters['outputVolume'] = volumen_salida.GetID()\ncliModule = slicer.modules.gradientanisotropicdiffusion\ncliNode = slicer.cli.run(cliModule,None,parameters,wait_for_completion=True)\n\nvolumenFijo = slicer.vtkMRMLScalarVolumeNode();\n#le asigno las transformaciones\nvolumenFijo.SetRASToIJKMatrix(ras2ijk)\nvolumenFijo.SetIJKToRASMatrix(ijk2ras)\n\n#le asigno el volumen 3D fijo\nimagen_fija = extract1.SetComponents(0)\nextract1.Update()\n\nvolumenFijo.SetName('fijo')\nvolumenFijo.SetAndObserveImageData(extract1.GetOutput())\nextract1.Update()\n\n#anado el nuevo volumen a la escena\\\nescena.AddNode(volumenFijo)\n\n\nimagen_movil = slicer.mrmlScene.GetNodeByID(\"vtkMRMLScalarVolumeNode2\")#Seleccionamos un volumen lejano\n \nvolumenMovil = slicer.vtkMRMLScalarVolumeNode();\nvolumenMovil.SetRASToIJKMatrix(ras2ijk)\nvolumenMovil.SetIJKToRASMatrix(ijk2ras)\nvolumenMovil.SetAndObserveImageData(imagen_movil.GetImageData())\nvolumenMovil.SetName('movil'+str(i+1))\nescena.AddNode(volumenMovil)\ntransformadaSalida = slicer.vtkMRMLLinearTransformNode()\ntransformadaSalida.SetName('Transformada de registro')\nslicer.mrmlScene.AddNode(transformadaSalida) \nparameters = {}\nparameters['fixedVolume'] = volumenFijo.GetID()\nparameters['movingVolume'] = volumenMovil.GetID()\nparameters['transformType'] = 'BSpline'\nparameters['outputTransform'] = transformadaSalida.GetID() \ncliNode = slicer.cli.run(slicer.modules.brainsfit,None,parameters, wait_for_completion=True)\n\n \nparameters = {}\nparameters['smoothingIterations'] = 5.0 \nparameters['timestep'] = 0.0625\n\nparameters['iterations'] = 5\nparameters['multiplier'] = 2.5\nparameters['neighborhood'] = 1\nparameters['labelvalue'] = 2\n\nfiducials = 
slicer.mrmlScene.GetNodeByID('vtkMRMLMarkupsFiducialNode1')#Se especifíca el fiducial que se va a usar\nparameters['seed'] = fiducials.GetID()\n\nvolumen_entrada = slicer.mrmlScene.GetNodeByID('vtkMRMLScalarVolumeNode4')#se especficia el volumen que se va a usar \nparameters['inputVolume'] = volumen_entrada.GetID()\n\nvolumen_salida = slicer.vtkMRMLLabelMapVolumeNode()\nslicer.mrmlScene.AddNode(volumen_salida)\nparameters['outputVolume'] = volumen_salida.GetID()\n\ncliModule = slicer.modules.simpleregiongrowingsegmentation\n\n","repo_name":"santiagocaroz/Proyecto_2_PDI","sub_path":"rutinaCompleta.py","file_name":"rutinaCompleta.py","file_ext":"py","file_size_in_byte":12545,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1657334601","text":"# -*- coding: utf-8 -*-\n\nimport enum\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom keras.layers import * \nimport uuid\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# 프론트에서 이미지 파일을 받아서 로컬에 저장을 한 뒤\n# 저장된 path 를 받아서 아래 함수 구동\n# 고양이 사진에서 얼굴 검출 후, 점 찍어서 result.png 로 저장\n\n# 모델 불러오기\nfrom keras.models import load_model\nglobal model\nmodel = load_model('cat-detection.h5')\n\ndef catFaceRecog(image):\n global left_ear_x, left_ear_y, right_ear_x, right_ear_y, left_eye_x, left_eye_y, right_eye_x, right_eye_y\n \n class CatFeatures(enum.Enum):\n # below are the cat facial features (⊙o⊙)\n # eyes\n LEFT_EYE = 0\n RIGHT_EYE = 1\n # mouth\n MOUTH = 2\n # left ear\n LEFT_EAR_1 = 3\n LEFT_EAR_2 = 4\n LEFT_EAR_3 = 5\n # right ear\n RIGHT_EAR_1 = 6\n RIGHT_EAR_2 = 7\n RIGHT_EAR_3 = 8\n\n def map_labels(labels):\n x = labels[0:18:2]\n y = labels[1:18:2]\n\n features ={\n CatFeatures.LEFT_EYE : (),\n CatFeatures.RIGHT_EYE : (),\n CatFeatures.MOUTH : (),\n CatFeatures.LEFT_EAR_1 : (),\n CatFeatures.LEFT_EAR_2 : (),\n CatFeatures.LEFT_EAR_3 : (),\n CatFeatures.RIGHT_EAR_1 : (),\n CatFeatures.RIGHT_EAR_2 : (),\n CatFeatures.RIGHT_EAR_3 : (),\n }\n for key,xpoint,ypoint in zip(features.keys(),x,y):\n features[key] = (xpoint,ypoint)\n\n return features\n\n def preprocess_image(image):\n x = image / 255.0\n x = cv2.resize(x,(224,224))\n x = np.asarray(x).astype('float32')\n return x\n\n def decode_labels(labels,width,heigth):\n labels[0:18:2] = labels[0:18:2] * width\n labels[1:18:2] = labels[1:18:2] * heigth\n return labels\n\n def predict_image(image, model):\n img = image\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n (w, h) = img.shape[:2]\n\n # predictions\n image_preprocessed = preprocess_image(img)\n y = model.predict(np.expand_dims(image_preprocessed, axis=0)).flatten()\n y = decode_labels(y, w, h)\n return show_cat(img,y)\n\n def show_cat(image,labels):\n features = map_labels(labels)\n\n plt.imshow(image)\n plt.axis('off')\n\n x,y = [],[]\n points = [CatFeatures.LEFT_EAR_1,\n CatFeatures.LEFT_EYE,\n CatFeatures.RIGHT_EYE,\n CatFeatures.RIGHT_EAR_1,\n CatFeatures.LEFT_EAR_1\n ]\n\n for p in points:\n x.append(features[p][0])\n y.append(features[p][1])\n\n lines = plt.plot(x,y,marker='*')\n plt.setp(lines, color='c',)\n\n # 점 찍은 상태의 사진 저장 -> 프론트에 전송\n filepath = \"./static/\" + str(uuid.uuid4()) + \"result.jpg\"\n plt.savefig(filepath)\n plt.close()\n\n # 왼쪽 귀 좌표\n left_ear_x = features[CatFeatures.LEFT_EAR_1][0]\n left_ear_y = features[CatFeatures.LEFT_EAR_1][1]\n\n # 오른쪽 귀 좌표\n right_ear_x = features[CatFeatures.RIGHT_EAR_1][0]\n right_ear_y = features[CatFeatures.RIGHT_EAR_1][1]\n\n # 왼쪽 눈 좌표\n left_eye_x = features[CatFeatures.LEFT_EYE][0]\n left_eye_y = 
\n    def decode_labels(labels,width,heigth):\n        labels[0:18:2] = labels[0:18:2] * width\n        labels[1:18:2] = labels[1:18:2] * heigth\n        return labels\n\n    def predict_image(image, model):\n        img = image\n        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n        (w, h) = img.shape[:2]  # note: numpy shape[:2] is (rows, cols)\n\n        # predictions\n        image_preprocessed = preprocess_image(img)\n        y = model.predict(np.expand_dims(image_preprocessed, axis=0)).flatten()\n        y = decode_labels(y, w, h)\n        return show_cat(img,y)\n\n    def show_cat(image,labels):\n        features = map_labels(labels)\n\n        plt.imshow(image)\n        plt.axis('off')\n\n        x,y = [],[]\n        points = [CatFeatures.LEFT_EAR_1,\n                  CatFeatures.LEFT_EYE,\n                  CatFeatures.RIGHT_EYE,\n                  CatFeatures.RIGHT_EAR_1,\n                  CatFeatures.LEFT_EAR_1\n                  ]\n\n        for p in points:\n            x.append(features[p][0])\n            y.append(features[p][1])\n\n        lines = plt.plot(x,y,marker='*')\n        plt.setp(lines, color='c',)\n\n        # save the photo with the points drawn on it -> sent back to the front end\n        filepath = \"./static/\" + str(uuid.uuid4()) + \"result.jpg\"\n        plt.savefig(filepath)\n        plt.close()\n\n        # left ear coordinates\n        left_ear_x = features[CatFeatures.LEFT_EAR_1][0]\n        left_ear_y = features[CatFeatures.LEFT_EAR_1][1]\n\n        # right ear coordinates\n        right_ear_x = features[CatFeatures.RIGHT_EAR_1][0]\n        right_ear_y = features[CatFeatures.RIGHT_EAR_1][1]\n\n        # left eye coordinates\n        left_eye_x = features[CatFeatures.LEFT_EYE][0]\n        left_eye_y = features[CatFeatures.LEFT_EYE][1]\n\n        # right eye coordinates\n        right_eye_x = features[CatFeatures.RIGHT_EYE][0]\n        right_eye_y = features[CatFeatures.RIGHT_EYE][1]\n        \n        return left_ear_x, left_ear_y, right_ear_x, right_ear_y, left_eye_x, left_eye_y, right_eye_x, right_eye_y, filepath\n    \n    return predict_image(image, model)","repo_name":"streetnyangfighter/takealook_ai","sub_path":"ai_func.py","file_name":"ai_func.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"4376782095","text":"class Node:\r\n    def __init__(self, value=0):\r\n        self.value = value\r\n        self.next = None\r\n\r\n\r\nclass Stack:\r\n    def __init__(self, value=0):\r\n        self.top = None\r\n        self.height = 0\r\n\r\n    def push(self,value):\r\n        if self.top==None:\r\n            self.top=Node(value)\r\n            self.height+=1  # count the first node as well\r\n            return True\r\n        newNode=Node(value)\r\n        newNode.next=self.top\r\n        self.top=newNode\r\n        self.height+=1\r\n        return True\r\n\r\n    def pop(self):\r\n        if self.top==None:\r\n            return None\r\n        temp=self.top\r\n        self.top=self.top.next\r\n        self.height-=1\r\n        temp.next=None\r\n        return temp\r\n
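\r\n# Usage sketch (illustrative only, not part of the original file):\r\n#   books = Stack()\r\n#   books.push(10); books.push(20)\r\n#   books.pop().value  # -> 20 (LIFO order); height is back to 1\r\n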
\r\ndef main():\r\n    books=Stack()\r\n    while 1:\r\n        n=int(input(\"1.Push into Stack\\n2.Pop from Stack.\\n3.Exit....\\n\"))\r\n        if n==1:\r\n            books.push(int(input()))\r\n        if n==2:\r\n            poppedValue=books.pop()\r\n            print(\"Popped Value is \"+str(poppedValue.value) if poppedValue else \"Nothing in Stack\")\r\n        if n==3:\r\n            print(\"------------Exiting------------\")\r\n            return 0\r\n\r\nif __name__=='__main__':\r\n    main()\r\n","repo_name":"Manikandan312003/Stack-Operations","sub_path":"Stack-Using-Linked-List.py","file_name":"Stack-Using-Linked-List.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"4320270512","text":"import unittest\nfrom src.common.patient import Patient\nfrom src.simulator.generators.gauss_generator import GaussGenerator\nfrom src.simulator.generators.exponential_generator import ExponentialGenerator\n\n\nclass TestPatient(unittest.TestCase):\n\n    def setUp(self):\n\n        self.patient1 = Patient('0000', GaussGenerator(\n            1, 1), ExponentialGenerator(0.5), 2, 3600)\n        self.patient2 = Patient('0001', GaussGenerator(\n            2, 2), ExponentialGenerator(3), 1, 10800)\n        self.patient3 = Patient('0002', GaussGenerator(\n            1.5, 1), ExponentialGenerator(2), 2, 9000)\n\n    def test_generate_leave(self):\n\n        self.patient1.generate_leave()\n        self.assertIsNotNone(self.patient1.leave_time)\n\n    def test_generate_therapy(self):\n\n        self.patient1.generate_therapy()\n        self.assertIsNotNone(self.patient1.therapy_time)\n\n    def test_generate_all(self):\n\n        self.patient1.generate_all()\n        self.assertIsNotNone(self.patient1.leave_time)\n        self.assertIsNotNone(self.patient1.therapy_time)\n\n    def test_decrement_leave_time(self):\n\n        self.patient1.generate_leave()\n        temp = self.patient1.leave_time\n        self.patient1.decrement_leave_time(1)\n        self.assertEqual(self.patient1.leave_time, temp - 1)\n\n    def test_decrement_therapy_time(self):\n\n        self.patient1.generate_therapy()\n        temp = self.patient1.therapy_time\n        self.patient1.decrement_therapy_time(1)\n        self.assertEqual(self.patient1.therapy_time, temp - 1)\n\n    def test_it(self):\n\n        self.assertFalse(self.patient1.__lt__(self.patient2))\n        self.assertTrue(self.patient2.__lt__(self.patient3))\n\n        if self.patient1.arrival_time < self.patient3.arrival_time:\n            self.assertTrue(self.patient1.__lt__(self.patient3))\n        else:\n            self.assertFalse(self.patient1.__lt__(self.patient3))\n\n    def test_clone_and_generate(self):\n\n        patient4 = self.patient1.clone_and_generate()\n        self.assertNotEqual(id(patient4), id(self.patient1))\n        self.assertTrue(isinstance(patient4, Patient))\n        self.assertEqual(patient4.id, self.patient1.id)\n        self.assertEqual(patient4.emergency_code, self.patient1.emergency_code)\n        self.assertEqual(patient4.arrival_time, self.patient1.arrival_time)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"thegoldgoat/stima-pronto-soccorso","sub_path":"simulator/test/simulator_tests/src_tests/common_tests/test_patient.py","file_name":"test_patient.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"39613933943","text":"from yargy import Parser, rule, and_, not_, or_\nfrom yargy.interpretation import fact\nfrom yargy.relations import gnc_relation\nfrom yargy.predicates import gram, is_capitalized, type, caseless, eq, custom, normalized, length_eq\nfrom yargy.tokenizer import Tokenizer\n\n\ndef is_excep(value):\n    return_value = value\n    if value == 'по':\n        return_value = None\n    return return_value\n\n\nName = fact(\n    'Name',\n    ['first', 'last', 'middle']\n)\n\nNAME = gram('Name')\nSURN = gram('Surn')\nPATR = gram('Patr')\n\nFIRST = NAME.interpretation(Name.first)\nLAST = SURN.interpretation(Name.last.custom(is_excep))\nMIDDLE = PATR.interpretation(Name.middle)\n\nFIRST_LAST = rule(FIRST, LAST)\nLAST_FIRST = rule(LAST, FIRST)\n\nFIRST_MIDDLE = rule(FIRST, MIDDLE)\nFIRST_MIDDLE_LAST = rule(FIRST, MIDDLE, LAST)\nLAST_FIRST_MIDDLE = rule(LAST, FIRST, MIDDLE)\n\nSINGLE_FIRST = FIRST\nSINGLE_LAST = LAST\nSINGLE_MIDDLE = MIDDLE\n\nNAME = or_(\n    LAST_FIRST_MIDDLE,\n    FIRST_MIDDLE_LAST,\n    FIRST_MIDDLE,\n    FIRST_LAST,\n    LAST_FIRST,\n\n    SINGLE_LAST,\n    SINGLE_MIDDLE,\n    SINGLE_FIRST\n).interpretation(Name)\n","repo_name":"mc-off/NLP","sub_path":"lab2/src/rules/name/name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"8630570492","text":"# encoding:utf-8\n# ------------------------------\n# FileName: map\n# Description: conversion between different longitude/latitude coordinate systems\n# ------------------------------\nimport csv\nimport json\nimport math\nfrom math import sqrt, atan2, cos, sin, radians, fabs, asin\n# convert Tencent Maps coordinates to Baidu coordinates\nimport requests as requests\nimport pandas as pd\n\nx_pi = 3.14159265358979324 * 3000.0 / 180.0\npi = 3.1415926535897932384626 # π\n\n\ndef map_tx2bd(lng, lat):\n    '''\n    convert Tencent coordinates to Baidu (BD-09) coordinates\n    :param lng:\n    :param lat:\n    :return:\n    '''\n    x = lng\n    y = lat\n    z = sqrt(x * x + y * y) + 0.00002 * sin(y * pi)\n    theta = atan2(y, x) + 0.000003 * cos(x * pi)\n    bd_lng = z * cos(theta) + 0.0065\n    bd_lat = z * sin(theta) + 0.006\n\n    return round(bd_lng, 5), round(bd_lat, 5)\n\n\ndef map_gps2bd(lng, lat):\n    '''\n    convert GPS (WGS-84) coordinates to Baidu coordinates via the Baidu geoconv API\n    :param lng:\n    :param lat:\n    :return:\n    '''\n    ak = 'your_ak'  # placeholder for your Baidu API key\n    url = 'http://api.map.baidu.com/geoconv/v1/?coords=' + str(lng) + ',' + str(lat) + '&from=1&to=5&ak='+ak\n\n    content = requests.get(url).content\n    data = json.loads(content)\n    result = data['result']\n    lng = result[0]['x']\n    lat = result[0]['y']\n\n    return round(lng, 5), round(lat, 5)\n
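\n\n# Usage sketch (illustrative only, not part of the original file):\n#   map_tx2bd(113.33, 23.12)   # -> BD-09 (lng, lat), rounded to 5 decimal places\n#   map_gps2bd(113.33, 23.12)  # -> BD-09 via the Baidu geoconv web API\n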
\n\ndef bd09_to_gcj02(bd_lon, bd_lat):\n    \"\"\"\n    Convert Baidu coordinates (BD-09) to Mars coordinates (GCJ-02)\n    Baidu --> Google, AMap (Gaode)\n    :param bd_lat: latitude in Baidu coordinates\n    :param bd_lon: longitude in Baidu coordinates\n    :return: the converted coordinates as a (lng, lat) pair\n    \"\"\"\n    x = bd_lon - 0.0065\n    y = bd_lat - 0.006\n    z = math.sqrt(x * x + y * y) - 0.00002 * math.sin(y * x_pi)\n    theta = math.atan2(y, x) - 0.000003 * math.cos(x * x_pi)\n    lng = z * math.cos(theta)\n    lat = z * math.sin(theta)\n\n    return round(lng, 5), round(lat, 5)\n\n\ndef hav(theta):\n    s = sin(theta / 2)\n    return s * s\n\n\ndef get_distance(lnglat1, lnglat2):\n    '''\n    compute the distance between two (lng, lat) points\n    :param lnglat1:\n    :param lnglat2:\n    :return:\n    '''\n    EARTH_RADIUS = 6371 # mean Earth radius, 6371 km\n\n    \"Great-circle distance between two points on a sphere via the haversine formula.\"\n    lng1 = lnglat1[0]\n    lat1 = lnglat1[1]\n    lng2 = lnglat2[0]\n    lat2 = lnglat2[1]\n    # convert degrees to radians\n    dlng = fabs(radians(lng1) - radians(lng2))\n    dlat = fabs(radians(lat1) - radians(lat2))\n\n    h = hav(dlat) + cos(radians(lat1)) * cos(radians(lat2)) * hav(dlng)\n    distance = 2 * EARTH_RADIUS * asin(sqrt(h))\n\n    return round(distance, 5)\n\n\nif __name__ == '__main__':\n    lng, lat = bd09_to_gcj02(113.331248,23.121341)\n    print(lng, lat)\n","repo_name":"chenyibelive/craw_lianjia","sub_path":"craw_tools/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"29423815952","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 17 22:01:00 2018\n\n@author: Dean\nBinary search over a monotonically increasing sequence\n\"\"\"\n\n# find the index of the element equal to k\ndef Binary_Search(arr,k):\n    l = 0\n    r = len(arr)-1\n    mid = 0\n    while(l<=r):\n        mid = (l+r) // 2\n        if arr[mid] == k:\n            return mid\n        elif arr[mid] < k:\n            l = mid + 1\n        else:\n            r = mid - 1\n    return mid\n\n# find the leftmost element greater than x\ndef Binary_Search_left_max(arr,x):\n    l,r = 0, len(arr)-1\n    mid = 0\n    while(l<=r):\n        mid = (l+r)//2\n        if arr[mid] < x:\n            l = mid + 1\n        else:\n            r = mid - 1\n    return max(l,r)\n\n# find the rightmost element smaller than x\ndef Binary_Search_right_min(arr,x):\n    l,r = 0, len(arr)-1\n    mid = 0\n    while(l<=r):\n        mid = (l+r)//2\n        if arr[mid] < x:\n            l = mid + 1\n        else:\n            r = mid - 1\n    return min(l,r)\n
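\n# Worked example (illustrative only, not part of the original file):\n#   arr = [1, 3, 7, 8, 9, 13]\n#   Binary_Search(arr, 7)            -> 2\n#   Binary_Search_left_max(arr, 6)   -> 2  (arr[2] == 7 is the leftmost value > 6)\n#   Binary_Search_right_min(arr, 10) -> 4  (arr[4] == 9 is the rightmost value < 10)\n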
\n\nif __name__ == \"__main__\":\n    data = list(range(10))\n    print(Binary_Search(data,5))\n    \n    data2 = [1,3,7,8,9,13]\n    print(Binary_Search_left_max(data2,6))\n    \n    data2 = [1,3,7,8,9,13]\n    print(Binary_Search_right_min(data2,10))","repo_name":"Deanhz/normal_works","sub_path":"算法题/程序员面试指南/python/查找/二分查找.py","file_name":"二分查找.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"37651678473","text":"import os\nimport yaml\nimport argparse\nimport copy\nimport numpy as np\nfrom easydict import EasyDict as edict\nfrom tensorboardX import SummaryWriter\nimport sys\nimport director\nfrom datetime import datetime\nimport utils.logger as logger\nfrom tensorboardX import SummaryWriter\nfrom pprint import pprint\n\ndef config_pytorch():\n    config = edict()\n    config.exp_id = 'Respose' # Experiment ID\n    config.task = 'class' # 'class-bbox | Position |'\n    config.gpu = 1 # use GPU if set to >-1, else run the model on CPU\n    config.threads_num = 12 # 'nThreads' for the DataLoader\n    config.save_mode = 'all' # 'all' | 'best', save all models or only save the best model\n    config.load_model = '' # path to a previously trained model\n    config.test = False # run in test mode or not\n    config.gt=True # test the model with ground-truth bounding boxes |True| or predicted ones |False|\n    return config\n\n\n\ndef train_config():\n    config = edict()\n    config.begin_epoch = 1 # default begin epoch\n    config.end_epoch = 20 # default end epoch\n    config.test_interval = 1 # default test interval\n    config.train_batch_size = 8 # default test/train batch size\n    config.lr = 1e-4 # default learning rate for adamw\n    config.lr_epoch_step = [10, 20, 30] # default epochs for lr reduction (lr = lr*lr_factor)\n    config.lr_factor = 0.1 # lr-reduction factor\n    config.optimizer_name = 'adamw' # optimizer\n    config.momentum = 0.0\n    config.weightDecay = 0.0\n    config.alpha = 0.99\n    config.epsilon = 1e-8\n    config.Beta=0\n    return config\n\ndef loss_config():\n    config = edict()\n    config.class_loss_type='FocalLoss' # default loss for the classification head\n    config.class_loss_weight=1 # default weight for the classification error \n    config.reg_loss_weight=1 # default weight for the bounding-box error\n    config.rot_loss_type = 'quatloss' # default loss for rotation\n    config.rot_loss_weight = 1 # default weight for rotation\n    config.trans_loss_type = 'L1' # default loss for translation\n    config.trans_loss_weight = 1 # default loss weight for translation\n    return config\n\ndef network_config():\n    config = edict()\n    # ------ backbone -------- #\n    config.arch = 'resnet' # backbone: resnet \n    config.back_freeze=False # whether to freeze the backbone (default False)\n    config.back_input_channel = 3 # input channels of the backbone \n    config.back_layers_num=34 # number of layers for the backbone\n    # -------regression-------#\n    config.class_head_freeze=False # whether to freeze the classification head (default False)\n    # ------ rotation head -------- #\n    config.rot_head_freeze = False # whether to freeze the rotation head (default False)\n    config.rot_representation='quat' # default rotation representation; quaternion head: |quat|, 6D representation: |rot|\n    # ------ translation head -------- #\n    config.trans_head_freeze = False # whether to freeze the translation head (default False)\n    return config\ndef get_default_dataset_config():\n    config = edict()\n    config.name = 'YCB' # default dataset name\n    return config\ndef get_base_config():\n    \"\"\"\n    Here all default configurations are loaded\n    \"\"\"\n    base_config = edict()\n    base_config.dataset=get_default_dataset_config()\n    base_config.pytorch = config_pytorch()\n    base_config.train = train_config()\n    base_config.network = network_config()\n    base_config.loss = loss_config()\n    return base_config\n\ndef update_config_from_file(_config, config_file, check_necessity=True):\n    \"\"\"\n    Update the default configuration from a config file\n\n    Args:\n        _config : config dictionary containing the default config params\n        config_file: YAML file containing configurations \n        check_necessity (bool, optional): if True, raise an error when a key in the config file does not exist in the default config. 
Defaults to True.\n \n\n\n Returns:\n config : dict containing updataed configuaration \n \"\"\"\n config = copy.deepcopy(_config)\n with open(config_file) as f:\n exp_config = edict(yaml.load(f, Loader=yaml.FullLoader))\n for k, v in exp_config.items():\n if k in config:\n if isinstance(v, dict):\n for vk, vv in v.items():\n if vk in config[k]:\n if isinstance(vv, list) and not isinstance(vv[0], str):\n config[k][vk] = np.asarray(vv)\n else:\n config[k][vk] = vv\n else:\n if check_necessity:\n raise ValueError(\"{}.{} not exist in config\".format(k, vk))\n else:\n raise ValueError(\"{} is not dict type\".format(v))\n else:\n if check_necessity:\n raise ValueError(\"{} not exist in config\".format(k))\n return config\n\nclass config():\n def __init__(self):\n self.parser = argparse.ArgumentParser(description='pose experiment')\n self.parser.add_argument('--cfg', type=str,default='../Config-Files/Quat-Head/Config-Test.yaml', help='path/to/configure_file') ## Put here config-file \n self.parser.add_argument('--test', action='store_true', help='')\n\n def parse(self):\n config = get_base_config() # get default arguments\n args, rest = self.parser.parse_known_args() # get arguments from command line\n for k, v in vars(args).items():\n config.pytorch[k] = v \n config_file = config.pytorch.cfg\n config = update_config_from_file(config, config_file, check_necessity=False) # update arguments from config file\n # complement config regarding dataset\n # automatically correct config\n if config.network.back_freeze == True:\n config.loss.backbone_loss_weight = 0\n if config.network.rot_head_freeze == True:\n config.loss.rot_loss_weight = 0\n if config.network.trans_head_freeze == True:\n config.loss.trans_loss_weight = 0\n\n if config.pytorch.test:\n config.pytorch.exp_id = config.pytorch.exp_id + 'TEST'\n\n # complement config regarding paths\n now = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n # save path\n config.pytorch['save_path'] = os.path.join(director.exp_dir, config.pytorch.exp_id, now)\n if not os.path.exists(config.pytorch.save_path):\n os.makedirs(config.pytorch.save_path, exist_ok=True)\n # logger path\n logger.set_logger_dir(config.pytorch.save_path, action='k')\n\n pprint(config)\n # copy and save current config file\n os.system('cp {} {}'.format(config_file, os.path.join(config.pytorch.save_path, 'config_copy.yaml')))\n # save all config infos\n args = dict((name, getattr(config, name)) for name in dir(config) if not name.startswith('_'))\n refs = dict((name, getattr(director, name)) for name in dir(director) if not name.startswith('_'))\n file_name = os.path.join(config.pytorch.save_path, 'config.txt')\n with open(file_name, 'wt') as cfg_file:\n cfg_file.write('==> Cmd:\\n')\n cfg_file.write(str(sys.argv))\n cfg_file.write('\\n==> Opt:\\n')\n for k, v in sorted(args.items()):\n cfg_file.write(' %s: %s\\n' % (str(k), str(v)))\n cfg_file.write('==> Ref:\\n')\n for k, v in sorted(refs.items()):\n cfg_file.write(' %s: %s\\n' % (str(k), str(v)))\n\n return config\n","repo_name":"Mohamedaminhamdad/Regpose","sub_path":"lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"22652208880","text":"import copy\nimport random\nimport torch\nimport string\nimport nltk\nfrom transformers import BertTokenizer, pipeline\nfrom nltk.tokenize.treebank import TreebankWordTokenizer, TreebankWordDetokenizer\nfrom typing import List\nfrom .abstract_method import 
AbstractMethods\n\n\ndef contains_only_characters(input_string: str):\n if len(input_string) == 0:\n return False\n for char in input_string:\n if not char.isalpha():\n return False\n return True\n\n\nclass _DescriptionMutation(AbstractMethods):\n def __init__(self, prompt, test, entry_point):\n super().__init__(prompt, test, entry_point)\n\n # self.tree_tokenizer = TreebankWordTokenizer()\n # self.detokenizer = TreebankWordDetokenizer()\n bert_model_name = 'bert-base-uncased'\n self.device = torch.device('cuda')\n self.berttokenizer = BertTokenizer.from_pretrained(bert_model_name)\n self.unmasker = pipeline('fill-mask', model='bert-base-uncased')\n\n def mutate(self, language):\n func_entry, comments, demo = self.split_desc_testcases(language)\n assert self.combine_desc_testcases(language, func_entry, comments, demo) == self.prompt\n if language == 'py' or 'cs':\n new_comments = self.mutate_py(comments)\n else:\n raise NotImplementedError\n return self.combine_desc_testcases(language, func_entry, new_comments, demo)\n\n def mutate_py(self, desc):\n raise NotImplementedError\n\n\nclass CharacterMutation(_DescriptionMutation):\n def __init__(self, prompt, test, entry_point):\n super().__init__(prompt, test, entry_point)\n self.num_of_perturb = 3 # delat is the number of token/words allowed to be modified\n self.letters_to_insert = string.ascii_letters\n\n def mutate_py(self, comments):\n tokens = comments.split(\" \")\n\n possible_mutate_indexL = []\n # collect the token index for substitution\n for idx, word in enumerate(tokens):\n if contains_only_characters(word) == False:\n continue\n possible_mutate_indexL.append((idx, word))\n bert_new_sentences = list()\n\n # generate similar setences using Bert\n if possible_mutate_indexL:\n bert_new_sentences = self.character_mutation(tokens, possible_mutate_indexL)\n if len(bert_new_sentences) == 0:\n new_comments = comments\n else:\n new_comments = random.choice(bert_new_sentences)\n return new_comments\n\n def _get_random_letter(self):\n \"\"\"Helper function that returns a random single letter from the English\n alphabet that could be lowercase or uppercase.\"\"\"\n return random.choice(self.letters_to_insert)\n\n def _get_neighbor_swap_words(self, word):\n \"\"\"Returns a list containing all possible words with 1 pair of\n neighboring characters swapped.\"\"\"\n\n if len(word) <= 1:\n return []\n\n candidate_words = []\n\n start_idx = 0\n end_idx = (len(word) - 1)\n\n if start_idx >= end_idx:\n return []\n\n for i in range(start_idx, end_idx):\n candidate_word = word[:i] + word[i + 1] + word[i] + word[i + 2 :]\n candidate_words.append(candidate_word)\n\n return candidate_words\n\n def _get_character_insert_words(self, word):\n \"\"\"Returns returns a list containing all possible words with 1 random\n character inserted.\"\"\"\n if len(word) <= 1:\n return []\n\n candidate_words = []\n\n start_idx = 0\n end_idx = len(word)\n\n if start_idx >= end_idx:\n return []\n\n for i in range(start_idx, end_idx):\n candidate_word = word[:i] + self._get_random_letter() + word[i:]\n candidate_words.append(candidate_word)\n\n return candidate_words\n\n def _get_character_delete_words(self, word):\n \"\"\"Returns returns a list containing all possible words with 1 letter\n deleted.\"\"\"\n if len(word) <= 1:\n return []\n\n candidate_words = []\n\n start_idx = 0\n end_idx = len(word)\n\n if start_idx >= end_idx:\n return []\n for i in range(start_idx, end_idx):\n candidate_word = word[:i] + word[i + 1 :]\n candidate_words.append(candidate_word)\n\n return 
candidate_words\n\n def _get_homoglyph_replace_words(self, word):\n \"\"\"Returns a list containing all possible words with 1 character\n replaced by a homoglyph.\"\"\"\n candidate_words = []\n homos = {\n \"-\": \"˗\",\n \"9\": \"৭\",\n \"8\": \"Ȣ\",\n \"7\": \"𝟕\",\n \"6\": \"б\",\n \"5\": \"Ƽ\",\n \"4\": \"Ꮞ\",\n \"3\": \"Ʒ\",\n \"2\": \"ᒿ\",\n \"1\": \"l\",\n \"0\": \"O\",\n \"'\": \"`\",\n \"a\": \"ɑ\",\n \"b\": \"Ь\",\n \"c\": \"ϲ\",\n \"d\": \"ԁ\",\n \"e\": \"е\",\n \"f\": \"𝚏\",\n \"g\": \"ɡ\",\n \"h\": \"հ\",\n \"i\": \"і\",\n \"j\": \"ϳ\",\n \"k\": \"𝒌\",\n \"l\": \"ⅼ\",\n \"m\": \"m\",\n \"n\": \"ո\",\n \"o\": \"о\",\n \"p\": \"р\",\n \"q\": \"ԛ\",\n \"r\": \"ⲅ\",\n \"s\": \"ѕ\",\n \"t\": \"𝚝\",\n \"u\": \"ս\",\n \"v\": \"ѵ\",\n \"w\": \"ԝ\",\n \"x\": \"×\",\n \"y\": \"у\",\n \"z\": \"ᴢ\",\n }\n for i in range(len(word)):\n if word[i] in homos:\n repl_letter = homos[word[i]]\n candidate_word = word[:i] + repl_letter + word[i + 1 :]\n candidate_words.append(candidate_word)\n\n return candidate_words\n\n def character_mutation(self, tokens, possible_mutate_indexL):\n base_tokens = copy.deepcopy(tokens)\n new_sentences = []\n for _ in range(50):\n sampled_num = min(len(possible_mutate_indexL), self.num_of_perturb)\n samples = random.sample(possible_mutate_indexL, sampled_num)\n mask_indices = [k[0] for k in samples]\n for masked_index in mask_indices:\n current_token = tokens[masked_index]\n tokens[masked_index] = self._mutate(current_token)\n new_sentence = \" \".join(tokens)\n new_sentences.append(new_sentence)\n\n tokens = copy.deepcopy(base_tokens)\n return new_sentences\n\n def _mutate(self, word):\n func_list = [\n self._get_neighbor_swap_words,\n self._get_character_insert_words,\n self._get_character_delete_words,\n self._get_homoglyph_replace_words\n ]\n mutants = []\n for func in func_list:\n candidates = func(word)\n mutants.extend(candidates)\n if len(mutants) == 0:\n return word\n return random.choice(mutants)\n\n\nclass TokenMutation(_DescriptionMutation):\n def __init__(self, prompt, test, entry_point):\n super().__init__(prompt, test, entry_point)\n self.num_of_perturb = 3\n\n def mutate_py(self, comments: str) -> str:\n tokens = comments.split(\" \")\n pos_inf = nltk.tag.pos_tag(tokens)\n\n # the elements in the lists are tuples \n bert_masked_indexL = list()\n\n # collect the token index for substitution\n for idx, (word, tag) in enumerate(pos_inf):\n if contains_only_characters(word) == False:\n continue\n # substitute the nouns and adjectives; you could easily substitue more words by modifying the code here\n if tag.startswith('NN') or tag.startswith('JJ'):\n tagFlag = tag[:2]\n # we do not perturb the first and the last token because BERT's performance drops on for those positions\n if idx != 0 and idx != len(tokens) - 1:\n bert_masked_indexL.append((idx, tagFlag))\n\n bert_new_sentences = list()\n\n # generate similar setences using Bert\n if bert_masked_indexL:\n bert_new_sentences = self.perturbBert(tokens, bert_masked_indexL)\n\n if len(bert_new_sentences) == 0:\n new_desc = comments\n else:\n new_desc = random.choice(bert_new_sentences)\n return new_desc\n\n def perturbBert(self, tokens: List[str], masked_indexL: List):\n base_tokens = copy.deepcopy(tokens)\n # self.bertmodel, self.num_of_perturb,\n new_sentences = list()\n\n for _ in range(10):\n sampled_num = min(len(masked_indexL), self.num_of_perturb)\n samples = random.sample(masked_indexL, sampled_num)\n mask_indices = [k[0] for k in samples]\n low_tokens = [x.lower() for x in tokens]\n for 
masked_index in mask_indices:\n low_tokens[masked_index] = '[MASK]'\n new_str = \" \".join(low_tokens)\n unmask_str = self.unmasker(new_str)\n try:\n filed_token = [d[0]['token_str'] for d in unmask_str]\n except:\n filed_token = [d['token_str'] for d in unmask_str]\n for index, token in zip(mask_indices, filed_token):\n tokens[index] = token\n new_sentence = \" \".join(tokens)\n tokens = copy.deepcopy(base_tokens)\n new_sentences.append(new_sentence)\n return new_sentences\n\n # # for each idx, use Bert to generate k (i.e., num) candidate tokens\n # for (masked_index, tagFlag) in masked_indexL:\n # original_word = tokens[masked_index]\n #\n # low_tokens = [x.lower() for x in tokens]\n # low_tokens[masked_index] = '[MASK]'\n # try:\n # indexed_tokens = self.berttokenizer.convert_tokens_to_ids(low_tokens)\n # tokens_tensor = torch.tensor([indexed_tokens])\n # tokens_tensor = tokens_tensor.to(self.device)\n # prediction = self.bertmodel(tokens_tensor)\n # except KeyError as error:\n # print('skip a sentence. unknown token is %s' % error)\n # break\n #\n #\n #\n # # try whether all the tokens are in the vocabulary\n # try:\n # indexed_tokens = self.berttokenizer.convert_tokens_to_ids(low_tokens)\n # tokens_tensor = torch.tensor([indexed_tokens])\n # tokens_tensor = tokens_tensor.to(self.device)\n # prediction = self.bertmodel(tokens_tensor)\n #\n # # skip the sentences that contain unknown words\n # # another option is to mark the unknow words as [MASK]; we skip sentences to reduce fp caused by BERT\n # except KeyError as error:\n # print('skip a sentence. unknown token is %s' % error)\n # break\n #\n # # get the similar words\n # topk_Idx = torch.topk(prediction[0][0, masked_index], self.num_of_perturb)[1].tolist()\n # topk_tokens = self.berttokenizer.convert_ids_to_tokens(topk_Idx)\n #\n # # remove the tokens that only contains 0 or 1 char (e.g., i, a, s)\n # # this step could be further optimized by filtering more tokens (e.g., non-english tokens)\n # topk_tokens = list(filter(lambda x: len(x) > 1, topk_tokens))\n #\n # # generate similar sentences\n # for t in topk_tokens:\n # if any(char in invalidChars for char in t):\n # continue\n # tokens[masked_index] = t\n # new_pos_inf = nltk.tag.pos_tag(tokens)\n #\n # # only use the similar sentences whose similar token's tag is still NN or JJ\n # if (new_pos_inf[masked_index][1].startswith(tagFlag)):\n # # new_sentence = self.detokenizer.detokenize(tokens)\n # new_sentence = \" \".join(tokens)\n # new_sentences.append(new_sentence)\n #\n # tokens[masked_index] = original_word\n","repo_name":"Cap-Ning/codeModel","sub_path":"src/methods/description_mute.py","file_name":"description_mute.py","file_ext":"py","file_size_in_byte":12067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29104259957","text":"from typing import Iterator\nfrom unittest import IsolatedAsyncioTestCase\nfrom unittest.mock import Mock\n\nfrom aiontai.models import (\n Doujin,\n Image,\n)\n\n\nclass TestDoujin(IsolatedAsyncioTestCase):\n async def test__iterable(self) -> None:\n model = Doujin(\n id=Mock(),\n media_id=Mock(),\n title=Mock(),\n cover=Mock(),\n thumbnail=Mock(),\n images=[\n Mock(spec=Image), Mock(spec=Image), Mock(spec=Image),\n ],\n tags=Mock(),\n pages_count=Mock(),\n favorites_count=Mock(),\n scanlator=Mock(),\n upload_date=Mock(),\n )\n\n self.assertIsInstance(\n iter(model),\n Iterator,\n )\n self.assertIsInstance(\n next(iter(model)),\n Image,\n )\n self.assertIsInstance(\n len(model),\n int,\n 
)\n self.assertIsInstance(\n model[0],\n Image,\n )\n","repo_name":"LEv145/aiontai","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"54"} +{"seq_id":"3370235140","text":"files = [\"ddr_Transact.vhd\",\n \"Interrupts.vhd\",\n \"rx_CplD_Channel.vhd\",\n \"rx_MRd_Channel.vhd\",\n \"rx_usDMA_Channel.vhd\",\n \"Tx_Output_Arbitor.vhd\",\n \"wb_transact.vhd\",\n \"DMA_Calculate.vhd\",\n \"rx_dsDMA_Channel.vhd\",\n \"rx_MWr_Channel.vhd\",\n \"tlpControl.vhd\",\n \"tx_Transact.vhd\",\n \"DMA_FSM.vhd\",\n \"Registers.vhd\",\n \"RxIn_Delays.vhd\",\n \"rx_Transact.vhd\",\n \"tx_Mem_Reader.vhd\",\n \"wb_mem.vhd\"]\n","repo_name":"lnls-dig/infra-cores","sub_path":"modules/generic/pcie_cntr/common/Manifest.py","file_name":"Manifest.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"la","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73268019681","text":"get_ipython().magic('matplotlib inline')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sqlite3\n\n\ngoal_keeper_query = \"SELECT DATE(MAX(date_stat)), Player_name, overall_rating, gk_diving, gk_handling, gk_kicking, gk_positioning, gk_reflexes FROM Player_Stats ps JOIN Player p ON p.player_api_id = ps.player_api_id GROUP BY ps.player_api_id\"\n\nwith sqlite3.connect('E:/Abhay MBA Docs/TERM 1/Programming for analysts/Group Project/soccer/database.sqlite') as con:\n\tgoal_keeper_stats = pd.read_sql_query(goal_keeper_query, con)\n \ngoal_keeper_stats.shape\n\nfig, axs = plt.subplots(1, 5, sharey=True)\ngoal_keeper_stats.plot(kind='scatter', x='gk_diving', y='overall_rating', ax=axs[0], figsize=(16, 8))\ngoal_keeper_stats.plot(kind='scatter', x='gk_handling', y='overall_rating', ax=axs[1])\ngoal_keeper_stats.plot(kind='scatter', x='gk_kicking', y='overall_rating', ax=axs[2])\ngoal_keeper_stats.plot(kind='scatter', x='gk_positioning', y='overall_rating', ax=axs[3])\ngoal_keeper_stats.plot(kind='scatter', x='gk_reflexes', y='overall_rating', ax=axs[4])\n\n\nfrom sklearn.cluster import KMeans\n\nkmeans_model = KMeans(n_clusters=5, random_state=1)\ngood_columns = goal_keeper_stats._get_numeric_data().dropna(axis=1)\nkmeans_model.fit(good_columns)\nlabels = kmeans_model.labels_\n\nfrom sklearn.decomposition import PCA\npca_2 = PCA(2)\nplot_columns = pca_2.fit_transform(good_columns)\nplt.scatter(x=plot_columns[:,0], y=plot_columns[:,1], c=labels)\nplt.show()\n\ngoal_keeper_query = \"SELECT DATE(MAX(date_stat)), Player_name, overall_rating, gk_diving, gk_handling, gk_kicking, gk_positioning, gk_reflexes FROM Player_Stats ps JOIN Player p ON p.player_api_id = ps.player_api_id WHERE gk_kicking > 69 and gk_reflexes > 69 GROUP BY ps.player_api_id\"\n\nwith sqlite3.connect('E:/Abhay MBA Docs/TERM 1/Programming for analysts/Group Project/soccer/database.sqlite') as con:\n\tgoal_keeper_stats = pd.read_sql_query(goal_keeper_query, con)\n \nimport statsmodels.formula.api as smf\n\n# create a fitted model with all the features\nlm = smf.ols(formula='overall_rating ~ gk_diving + gk_handling + gk_kicking + gk_positioning + gk_reflexes', data=goal_keeper_stats).fit()\n\n# print the coefficients\nlm.params\n\nlm.summary()\n\n","repo_name":"AbhayPadda/python-notebooks","sub_path":"Soccer Dataset 
Practice/Code/GK_Python.py","file_name":"GK_Python.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"27421167440","text":"fname = input('Enter file name ')\r\n\r\ntry:\r\n    fh = open(fname)\r\nexcept:\r\n    print('No Such File!')\r\n    quit()\r\n\r\nlst = list()\r\ndc = dict()\r\n\r\nfor line in fh:\r\n    if not line.startswith('From') or line.startswith('From:'): continue\r\n    lst = line.split()\r\n    dc[lst[1]] = dc.get(lst[1],0) + 1 # important idiom: if the key already exists, increment its value; otherwise insert the new key with the default value 0\r\n\r\nbc = None\r\nbw = None\r\n\r\nfor key,value in dc.items():\r\n    if bc is None or value>bc:\r\n        bw = key\r\n        bc = value\r\n\r\nprint(bw,bc)\r\n","repo_name":"Shridhar2025/Python_for_everybody_my_codes","sub_path":"08_dict.py","file_name":"08_dict.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"41177692807","text":"from sys import stdin\n\n\ndef solution(string):\n    result = 0\n    for i, s in enumerate(string.split(\"-\")):\n        tmp = 0\n        for t in s.split(\"+\"):\n            tmp += int(t)\n        if i == 0:\n            result += tmp\n        else:\n            result -= tmp\n\n    return result\n
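\n# Worked example (illustrative only, not part of the original file): the input is\n# split on '-', and every '+'-group after the first is subtracted as a whole,\n# which minimizes the expression, e.g. solution(\"55-50+40\") == 55 - (50 + 40) == -35\n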
\n\nif __name__ == \"__main__\":\n    print(solution(stdin.readline().strip()))\n","repo_name":"cda2/BJ","sub_path":"python/src/bj1541.py","file_name":"bj1541.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"36354929000","text":"\"\"\"\r\n\r\n\"\"\"\r\n\r\nimport maya.cmds as cmds\r\n\r\nimport aligerTool.mayalib.nodeLib as nodeLib\r\nimport aligerTool.mayalib.attrLib as attrLib\r\nfrom aligerTool.mayalib.ui_cmds.baseWindow import BaseWindow\r\n\r\nimport spaceController\r\nreload(spaceController)\r\nfrom spaceController import SpaceController\r\n\r\nclass SpaceSwitcher(BaseWindow):\r\n    WINDOW_NAME = 'SpaceSwitcher'\r\n    WINDOW_TITLE = 'Space Switcher'\r\n    WIDTH = 300\r\n    HEIGHT = 700\r\n    SCROLLABLE = False\r\n\r\n    CONSTRAINT_NODE_SUFFIX = '_SpaceSwitcher'\r\n    DRIVEN_OB_ATTR = 'drivenObject'\r\n\r\n    def __init__(self):\r\n        super(SpaceSwitcher, self).__init__()\r\n        # Key: object name, Value: SpaceController instance\r\n        self.__spaceControllers = {}\r\n\r\n    def CreateCustomUI(self):\r\n        \"\"\"\r\n\r\n        Returns:\r\n\r\n        \"\"\"\r\n        cmds.frameLayout(label='Dynamic Spaces', width=self.WIDTH, marginHeight=5, marginWidth=5, collapsable=False)\r\n\r\n        cmds.separator()\r\n        self.searchSpaceButton = cmds.button('Search Dynamic Spaces', command=self.SearchSpaceControllers)\r\n        self.addSpaceButton = cmds.button('Add Dynamic Space', backgroundColor=[0, 0.5, 0],\r\n                                          command=self.AddSpaceController )\r\n\r\n        self.spaceScroll = cmds.scrollLayout(width=self.WIDTH, height=self.HEIGHT - 100, parent=self.contentLayout)\r\n        self.spaceContent = cmds.columnLayout(width=self.WIDTH - 20, adjustableColumn=True, parent=self.spaceScroll)\r\n\r\n    def SearchSpaceControllers(self, *args):\r\n\r\n        for spaceController in self.__spaceControllers.values():\r\n            spaceController.DeleteUI()\r\n        self.__spaceControllers.clear()\r\n\r\n        nodeList = cmds.ls('*%s' % self.CONSTRAINT_NODE_SUFFIX)\r\n        for node in nodeList:\r\n            if attrLib.ExistAttr(node, self.DRIVEN_OB_ATTR):\r\n                drivenNode = cmds.getAttr('%s.%s' % (node, self.DRIVEN_OB_ATTR))\r\n                if drivenNode in self.__spaceControllers.keys():\r\n                    cmds.warning('Object %s is already configured with a SpaceSwitcher' % drivenNode)\r\n                    continue\r\n                self.__spaceControllers[drivenNode] = SpaceController(drivenNode, node, self.spaceContent)\r\n\r\n    def AddSpaceController(self, *arg):\r\n\r\n        selection = cmds.ls(sl=True)\r\n        if not selection or len(selection) == 0:\r\n            cmds.warning('Select an object to configure the SpaceSwitcher')\r\n            return\r\n        drivenNode = selection[0]\r\n        if drivenNode in self.__spaceControllers.keys():\r\n            cmds.warning('Object %s is already configured with a SpaceSwitcher' % drivenNode)\r\n            return\r\n        constraintNode = self.GetConstraintNode(drivenNode)\r\n        if not constraintNode:\r\n            constraintNode = nodeLib.CreateRootGroup(drivenNode, suffix=self.CONSTRAINT_NODE_SUFFIX)\r\n            attrLib.AddAttrStr(constraintNode, self.DRIVEN_OB_ATTR, value=drivenNode) # create the attribute\r\n        self.__spaceControllers[drivenNode] = SpaceController(drivenNode, constraintNode, self.spaceContent)\r\n\r\n\r\n\r\n    def GetConstraintNode(self, node):\r\n        \"\"\" \"\"\"\r\n        expected_name = node + self.CONSTRAINT_NODE_SUFFIX\r\n        parent = cmds.listRelatives(node, parent=True)\r\n        if parent and parent[0] == expected_name and attrLib.ExistAttr(parent, self.DRIVEN_OB_ATTR):\r\n            return parent[0]\r\n        return None\r\n","repo_name":"Arbustus/Aligner-SpacerSwitcher","sub_path":"aligerTool/spaceSwitcher/spaceSwitcher.py","file_name":"spaceSwitcher.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"1715344392","text":"from django.shortcuts import render , redirect \nfrom teacher import models\nfrom django.contrib.auth import authenticate, login as login_request, logout\nimport json\nfrom django.http import JsonResponse\nfrom django.contrib.auth.models import User \nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\n\n# default page (all teachers)\ndef index(request):\n    teachers = models.Teacher.objects.filter(statut=True)\n    datas = {\n        'teachers':teachers,\n    }\n    return render(request, 'pages/teacher.html', datas)\n\n# user login \ndef login(request):\n    datas = {\n\n    }\n    return render(request, 'pages/login.html', datas)\n\n\n# show all subjects\ndef subjects(request):\n    subjects = models.Subject.objects.filter(statut=True)\n    datas = {\n        'subjects':subjects,\n    }\n    return render(request, 'pages/subjects.html', datas)\n\n\n# teacher profile view\ndef profil(request, slug):\n    teacher = models.Teacher.objects.get(slug=slug)\n    datas = {\n        'teacher':teacher, \n    }\n    return render(request, 'pages/profil.html', datas)\n\n# add teacher \n@login_required(login_url = 'login')\ndef add_teacher(request):\n    subjects = models.Subject.objects.filter(statut=True)\n\n    datas = {\n        'subjects':subjects,\n    }\n    return render(request, 'pages/add-teacher.html', datas)\n\n\n# edit teacher \n@login_required(login_url = 'login')\ndef edit_teacher(request, slug):\n    subjects = models.Subject.objects.filter(statut=True)\n    teacher = models.Teacher.objects.get(slug=slug)\n    datas = {\n        'subjects':subjects,\n        'teacher':teacher,\n    }\n    return render(request, 'pages/add-teacher.html', datas)\n\n# add subject \n@login_required(login_url = 'login')\ndef add_subject(request):\n    datas = {\n        \n    }\n    return render(request, 'pages/add-subjects.html', datas)\n\n\n# page for editing a subject\n@login_required(login_url = 'login')\ndef edit_subject(request, slug):\n    try: \n        subject = models.Subject.objects.get(slug=slug)\n    except Exception as e:\n        print(e)\n        return redirect('subjects')\n    datas = {\n        
'subject':subject,\n }\n return render(request, 'pages/add-subjects.html', datas)\n\n\n\n# LOGOUT VIEW\ndef deconnexion(request):\n logout(request)\n return redirect('login')\n\n#######################################################################\n#######################################################################\n############################ POST VIEWS ###############################\n#######################################################################\n#######################################################################\n\n@csrf_exempt\ndef islogin(request):\n\n postdata = json.loads(request.body.decode('utf-8'))\n \n # name = postdata['name']\n\n username = postdata['username']\n password = postdata['password']\n\n isSuccess = False\n u_type = ''\n try:\n if '@' in username:\n user = authenticate(email=username, password=password)\n utilisateur = User.objects.get(email=username)\n print(username)\n else:\n user = authenticate(username=username, password=password)\n utilisateur = User.objects.get(username=username)\n \n if user is not None and user.is_active:\n print(\"user is login\")\n isSuccess = True\n login_request(request, user)\n datas = {\n 'success':True,\n 'message':'Vous êtes connectés!!!',\n }\n return JsonResponse(datas,safe=False) \n \n else:\n data = {\n 'success':False,\n 'message':'Vos identifiants ne sont pas correcte',\n }\n return JsonResponse(data, safe=False)\n except Exception as e:\n print(e)\n data = {\n 'success':False,\n 'message':\"Merci de verifier vos informations d'authentification\",\n }\n return JsonResponse(data, safe=False)\n\n\n@csrf_exempt\ndef post_subject(request):\n\n postdata = json.loads(request.body.decode('utf-8'))\n \n # name = postdata['name']\n\n subject_name = postdata['subject_name']\n description = postdata['description']\n\n\n # for update\n subject_id = postdata['id']\n message = \"\"\n success = False\n try:\n try:\n # update\n subject = models.Subject.objects.get(id=int(subject_id))\n message = \"Subject updated\"\n except:\n # create\n subject = models.Subject()\n message = \"Subject created\"\n subject.subject_name = subject_name\n subject.description = description\n subject.save()\n success = True\n\n\n except Exception as e:\n print(e)\n success = False\n message = str(e)\n data = {\n 'success':success,\n 'message':message,\n }\n return JsonResponse(data, safe=False)\n\n\n@csrf_exempt\ndef delete_subject(request):\n\n postdata = json.loads(request.body.decode('utf-8'))\n\n # for delete\n subject_id = postdata['id']\n message = \"\"\n success = False\n try:\n try:\n # delete\n subject = models.Subject.objects.get(id=int(subject_id))\n subject.delete()\n success = True\n except Exception as e:\n print(e)\n message = \"error\"\n except Exception as e:\n print(e)\n success = False\n message = str(e)\n data = {\n 'success':success,\n 'message':message,\n }\n return JsonResponse(data, safe=False)\n\n\n@csrf_exempt\ndef delete_teacher(request):\n\n postdata = json.loads(request.body.decode('utf-8'))\n\n # for delete\n teacher_id = postdata['id']\n message = \"\"\n success = False\n try:\n try:\n # delete\n teacher = models.Teacher.objects.get(id=int(teacher_id))\n teacher.delete()\n success = True\n except Exception as e:\n print(e)\n message = \"error\"\n except Exception as e:\n print(e)\n success = False\n message = str(e)\n data = {\n 'success':success,\n 'message':message,\n }\n return JsonResponse(data, safe=False)\n\n@csrf_exempt\ndef post_form(request):\n try:\n last_name = request.POST.get(\"last_name\")\n 
first_name = request.POST.get(\"first_name\")\n email = request.POST.get(\"email\")\n subjects_taught = request.POST.get(\"subjects_taught\").rsplit(',')\n print(subjects_taught, \"333333333333333333333\")\n phone_number = request.POST.get(\"phone_number\")\n room_number = request.POST.get(\"room_number\")\n teacher_id = request.POST.get(\"id\")\n print(\"################################\", subjects_taught)\n\n # check if teacher have more than 5 subjects\n if len(subjects_taught) > 5:\n success = False\n message = \"Teacher can't have more than five(5) subjects\"\n \n else:\n try:\n # update\n teacher = models.Teacher.objects.get(id=int(teacher_id))\n message = \"Teacher updated\"\n except Exception as e:\n # create\n teacher = models.Teacher()\n message = \"Teacher created\"\n teacher.last_name = last_name\n teacher.first_name = first_name\n teacher.email = email\n\n teacher.phone_number = phone_number\n teacher.room_number = room_number\n teacher.save()\n teacher.subjects_taught.clear()\n\n # save picture\n try:\n profile_picture = request.FILES[\"profile_picture\"]\n teacher.profile_picture = profile_picture\n teacher.save()\n except:\n pass\n\n # save subjectt taught\n for subject in subjects_taught:\n print(subject, \"&&&&&&&&&&&&&&&&&&&&&&&\")\n teacher.subjects_taught.add(subject.rsplit(\"|\")[1])\n teacher.save()\n success = True\n\n except Exception as e :\n print(e)\n success = False\n message = \"An error occurred\"\n data = {\n 'success':success,\n 'message':message,\n }\n return JsonResponse(data, safe=False)\n\n\n\n\n\n\n@csrf_exempt\ndef upload_file(request):\n error = False\n success =False\n message = \"\"\n try:\n teachers_csv = request.FILES[\"teachers_csv\"]\n uploaded_file = models.UploadFile()\n uploaded_file.upload_file = teachers_csv\n uploaded_file.save()\n\n # open file in read\n with open(uploaded_file.upload_file.path, 'r' ) as reader:\n all_teachers_in_file = reader.readlines()\n\n for i in range(1, len(all_teachers_in_file)):\n if all_teachers_in_file[i].rsplit(',')[3] is not None and all_teachers_in_file[i].rsplit(',')[3] != \"\" and all_teachers_in_file[i].rsplit(',')[0] is not None and all_teachers_in_file[i].rsplit(',')[0] != \"\" and all_teachers_in_file[i].rsplit(',')[2] is not None and all_teachers_in_file[i].rsplit(',')[2] != \"\":\n \n try:\n # check if mail is used\n teacher = models.Teacher.objects.get(email=all_teachers_in_file[i].rsplit(',')[3])\n error = True\n except:\n # create teacher\n teacher = models.Teacher()\n teacher.last_name = all_teachers_in_file[i].rsplit(',')[1]\n teacher.first_name = all_teachers_in_file[i].rsplit(',')[0]\n teacher.email = all_teachers_in_file[i].rsplit(',')[3]\n\n teacher.phone_number = all_teachers_in_file[i].rsplit(',')[4]\n teacher.room_number = all_teachers_in_file[i].rsplit(',')[5]\n teacher.save()\n\n # save subject \n if '\"' in all_teachers_in_file[i]:\n subjects = all_teachers_in_file[i].rsplit('\"')[1]\n else:\n subjects = all_teachers_in_file[i].rsplit(',')[6]\n if \", \" in subjects:\n subjects = subjects.replace(\", \", \",\")\n for subject in subjects.rsplit(','):\n try:\n subject_e = models.Subject.objects.get(subject_name__icontains=subject)\n except:\n subject_e = models.Subject()\n subject_e.subject_name = subject\n subject_e.save()\n # add subject to teacher\n if teacher.subjects_taught.count() < 5 :\n teacher.subjects_taught.add(subject_e.id)\n teacher.save()\n else:\n error = True\n teacher.save()\n # save profile picture\n try:\n profile_picture = \"Teachers/\" + 
all_teachers_in_file[i].rsplit(',')[2]\n teacher.profile_picture = profile_picture\n teacher.save()\n except:\n pass\n success = True\n message = \"Success\"\n else:\n error = True\n except:\n success = False\n message = \"An error occurred\"\n if error:\n message = \"Datas loaded with error\"\n data = {\n 'success':success,\n 'errorr':error,\n 'message':message,\n }\n return JsonResponse(data, safe=False)\n","repo_name":"Sedrickgael/directory","sub_path":"directory/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74279044962","text":"\"\"\"\"\"\"\nfrom functools import partial\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python import debug as tf_debug\nfrom cleverhans.attacks import FastGradientMethod, ProjectedGradientDescent\nfrom cleverhans.attacks import optimize_linear\nfrom cleverhans import utils_tf\nfrom cleverhans.utils_tf import clip_eta\n\nEPS = np.finfo(float).eps\n\nclass KernelSubTf(object):\n \"\"\"Kernel substitution attack.\"\"\"\n def __init__(self, sess, c=1.0, attack:str=None, ord=np.inf):\n self.sess = sess\n self.attack = attack\n self.ord = ord\n if isinstance(c, float):\n self.c = tf.constant(c, dtype=tf.float32)\n else:\n self.c = tf.Variable(1.0, name='c')\n self.c = tf.clip_by_value(self.c, 0, np.infty)\n\n def fit(self, X, y):\n pass\n\n def _get_adv_X(self, X, y, eps:float):\n with tf.Session() as sess:\n #x = tf.placeholder(tf.float32, shape=(None, X.shape[1]))\n x = tf.constant(X, tf.float32)\n if self.attack == 'fgsm':\n adv_x = fgm_perturb(x, y=tf.constant(y, tf.float32), eps=eps, ord=self.ord,\n loss_fn=partial(self._loss_fn, y=y, c=self.c))\n elif self.attack == 'pgd':\n adv_x = pgd_perturb(x, y=tf.constant(y, tf.float32), eps=eps, ord=self.ord,\n loss_fn=partial(self._loss_fn, y=y, c=self.c))\n else:\n raise ValueError(\"not supported attack method %s\", self.attack)\n #return sess.run(adv_x, feed_dict={x: X})\n return sess.run(adv_x)\n\n def perturb(self, X, y, eps=0.1):\n if isinstance(eps, list):\n ret = []\n for ep in eps:\n ret.append(self._get_adv_X(X, y, ep) - X)\n elif eps is not None:\n ret = self._get_adv_X(X, y, eps) - X\n\n return ret\n\n def _loss_fn(self, x, y, c):\n #X = tf.matmul(x, tf.transpose(transformer))\n X = x\n mask = tf.constant(y[:, tf.newaxis] == y[tf.newaxis, :], dtype=tf.float32)\n\n r = tf.reduce_sum(X*X, 1)[:, tf.newaxis]\n p_ij = r - 2*tf.matmul(X, tf.transpose(X)) + tf.transpose(r) # l2 dist\n p_ij = p_ij / self.c\n\n p_ij = tf.linalg.set_diag(p_ij, np.ones(len(y), dtype=np.float32) * np.inf)\n p_ij = tf.exp(-p_ij - tf.reduce_logsumexp(-p_ij, axis=1)[:, tf.newaxis])\n # (n_samples, n_samples)\n\n # Compute loss\n masked_p_ij = p_ij * mask\n #p = masked_p_ij.sum(axis=1, keepdims=True) # (n_samples, 1)\n p = tf.reduce_sum(masked_p_ij, axis=1, keepdims=True) # (n_samples, 1)\n loss = -tf.reduce_sum(p)\n\n return loss\n\ndef fgm_perturb(x, y, loss_fn, clip_min=None, clip_max=None, ord=np.inf, eps=0.3):\n loss = loss_fn(x)\n grad, = tf.gradients(loss, x)\n optimal_perturbation = optimize_linear(grad, eps, ord)\n adv_x = x + optimal_perturbation\n\n if (clip_min is not None) or (clip_max is not None):\n # We don't currently support one-sided clipping\n assert clip_min is not None and clip_max is not None\n adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)\n\n return adv_x\n\n\ndef pgd_perturb(x, y, loss_fn, y_target=None, clip_min=None,\n clip_max=None, 
rand_init=False, ord=np.inf, eps=0.3, eps_iter=0.1,\n rand_minmax=0.3, nb_iter=20):\n # changed nb_iter to 20 and eps_iter to 0.1 for higher eps attack\n # Initialize loop variables\n if rand_init:\n eta = tf.random_uniform(tf.shape(x),\n tf.cast(-rand_minmax, x.dtype),\n tf.cast(rand_minmax, x.dtype),\n dtype=x.dtype)\n else:\n eta = tf.zeros(tf.shape(x))\n\n # Clip eta\n eta = clip_eta(eta, ord, eps)\n adv_x = x + eta\n if clip_min is not None or clip_max is not None:\n adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)\n\n if y_target is not None:\n y = y_target\n targeted = True\n elif y is not None:\n y = y\n targeted = False\n else:\n raise ValueError\n # model_preds = self.model.get_probs(x)\n # preds_max = reduce_max(model_preds, 1, keepdims=True)\n # y = tf.to_float(tf.equal(model_preds, preds_max))\n # y = tf.stop_gradient(y)\n # targeted = False\n # del model_preds\n\n y_kwarg = 'y_target' if targeted else 'y'\n fgm_params = {\n 'loss_fn': loss_fn,\n 'eps': eps_iter,\n y_kwarg: y,\n 'ord': ord,\n 'clip_min': clip_min,\n 'clip_max': clip_max\n }\n if ord == 1:\n raise NotImplementedError(\"It's not clear that FGM is a good inner loop\"\n \" step for PGD when ord=1, because ord=1 FGM \"\n \" changes only one pixel at a time. We need \"\n \" to rigorously test a strong ord=1 PGD \"\n \"before enabling this feature.\")\n\n # Use getattr() to avoid errors in eager execution attacks\n #FGM = self.FGM_CLASS(\n # self.model,\n # sess=getattr(self, 'sess', None),\n # dtypestr=self.dtypestr)\n\n def cond(i, _):\n return tf.less(i, nb_iter)\n\n def body(i, adv_x):\n adv_x = fgm_perturb(adv_x, **fgm_params)\n\n # Clipping perturbation eta to self.ord norm ball\n eta = adv_x - x\n eta = clip_eta(eta, ord, eps)\n adv_x = x + eta\n\n # Redo the clipping.\n # FGM already did it, but subtracting and re-adding eta can add some\n # small numerical error.\n if clip_min is not None or clip_max is not None:\n adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)\n\n return i + 1, adv_x\n\n _, adv_x = tf.while_loop(cond, body, (tf.zeros([]), adv_x), back_prop=True,\n maximum_iterations=nb_iter)\n\n #if self.sanity_checks:\n # with tf.control_dependencies(asserts):\n # adv_x = tf.identity(adv_x)\n\n return adv_x\n","repo_name":"yangarbiter/adversarial-nonparametrics","sub_path":"nnattack/attacks/kernel_sub_tf.py","file_name":"kernel_sub_tf.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"40466862998","text":"import numpy as np\nimport cv2\nfrom keras.preprocessing import image\nimport tensorflow as tf\nimport posenet\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom PIL import Image\nimport random\nfrom pymongo import MongoClient\n\nfrom colors import get_colors\n\n#from imutils import face_utils\nimport dlib\n\nimport base64\n\n\ndef facecrop_opencv(img):\n global cascade\n\n minisize = (img.shape[1], img.shape[0])\n try:\n \tminiframe = cv2.resize(img, minisize)\n except:\n \tminiframe = None\n \t\n all_faces = cascade.detectMultiScale(miniframe)\n if len(all_faces) > 0:\n\t # Only get the first face detected\n\t x, y, w, h = [ v for v in all_faces[0] ]\n\t #cv2.rectangle(img, (x,y), (x+w,y+h), (255,255,255))\n\n\t sub_face = img[y:y+h, x:x+w]\n\t retval, bufferval = cv2.imencode('.jpg', sub_face)\n\t jpg_as_text = base64.b64encode(bufferval).decode('utf-8')\n\n\t result = {\n\t\t \"b64_face\": str(jpg_as_text),\n\t\t \"x\": str(x),\n\t\t \"y\": 
str(y),\n\t\t \"w\": str(w),\n\t\t \"h\": str(h)\n\t }\n\t return result\n\n return None;\n\ndef facecrop_dlib(img):\n global detector, predictor\n \n try:\n\t gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t # detect faces in the grayscale image\n\t rects = detector(gray, 0)\n\t #print(str(rects))\n\t #print(str(len(rects)))\n except:\n\t rects = []\n\t pass\n # loop over the face detections\n if len(rects) > 0:\n\t rect = rects[0]\n\t # determine the facial landmarks for the face region, then\n\t # convert the facial landmark (x, y)-coordinates to a NumPy\n\t # array\n\t '''shape = predictor(gray, rects[0])\n\t shape = face_utils.shape_to_np(shape)\n\n\t # loop over the (x, y)-coordinates for the facial landmarks\n\t # and draw them on the image\n\t for (x, y) in shape:\n\t\t cv2.circle(img, (x, y), 2, (0, 255, 0), -1)\n\t '''\n\t x = rect.tl_corner().x\n\t y = rect.tl_corner().y\n\n\t w = rect.width()\n\t h = rect.height()\n\n\t sub_face = img[y:y+h, x:x+w]\n\t \n\t try:\n\t\t retval, bufferval = cv2.imencode('.jpg', sub_face)\n\t\t jpg_as_text = base64.b64encode(bufferval).decode('utf-8')\n\t except:\n\t\t jpg_as_text = \"\"\n\t\t \n\t result = {\n\t\t \"b64_person\": str(jpg_as_text),\n\t\t \"x\": str(x),\n\t\t \"y\": str(y),\n\t\t \"w\": str(w),\n\t\t \"h\": str(h)\n\t }\n\n\t return result\n\n return None\n\ndef person_detector(img):\n\timg_resized = cv2.resize(img,(300,300)) # resize img for prediction\n\theightFactor = img.shape[0]/300.0\n\twidthFactor = img.shape[1]/300.0 \n\n\t#tic = time.time()\n\n\n\t# MobileNet requires fixed dimensions for input image(s)\n\t# so we have to ensure that it is resized to 300x300 pixels.\n\t# set a scale factor to image because network the objects has differents size. \n\t# We perform a mean subtraction (127.5, 127.5, 127.5) to normalize the input;\n\t# after executing this command our \"blob\" now has the shape:\n\t# (1, 3, 300, 300)\n\tblob = cv2.dnn.blobFromImage(img_resized, 0.007843, (300, 300), (127.5, 127.5, 127.5), False)\n\t#Set to network the input blob \n\tnet.setInput(blob)\n\t\n\t#Prediction of network\n\tdetections = net.forward()\n\n\t#toc = time.time()\n\t#print (\"MobileSSD time : \"+ str(toc-tic) + \" seconds\")\n\n\t#Size of img resize (300x300)\n\tcols = img_resized.shape[1] \n\trows = img_resized.shape[0]\n\n\tresult = []\n\n\tfor i in range(detections.shape[2]):\n\t\tconfidence = detections[0, 0, i, 2] #Confidence of prediction \n\t\tif confidence > thr: # Filter prediction \n\t\t\tclass_id = int(detections[0, 0, i, 1]) # Class label\n\n\t\t\t# Draw label and confidence of prediction in frame resized\n\t\t\t#if class_id in [classNames]:\n\t\t\tif class_id == 15: # Only detect people\n\n\n\t\t\t\t#tic = time.time()\n\n\t\t\t\tdetected_person = {}\n\n\t\t\t\t# Object location \n\t\t\t\txLeftBottom = int(detections[0, 0, i, 3] * cols) \n\t\t\t\tyLeftBottom = int(detections[0, 0, i, 4] * rows)\n\t\t\t\txRightTop = int(detections[0, 0, i, 5] * cols)\n\t\t\t\tyRightTop = int(detections[0, 0, i, 6] * rows)\n\n\t\t\t\txLeftBottom_ = int(widthFactor * xLeftBottom) \n\t\t\t\tyLeftBottom_ = int(heightFactor* yLeftBottom)\n\t\t\t\txRightTop_ = int(widthFactor * xRightTop)\n\t\t\t\tyRightTop_ = int(heightFactor * yRightTop)\n\n\t\t\t\t# Crop the bounding box for the person\n\t\t\t\t# we need the .copy() to obtain new bb not overwriting the img\n\t\t\t\t# also, note x y are flipped\n\n\t\t\t\tperson_bb = img[yLeftBottom_:yLeftBottom_+(yRightTop_-yLeftBottom_), 
xLeftBottom_:xLeftBottom_+(xRightTop_-xLeftBottom_)].copy()\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tretval, bufferval = cv2.imencode('.jpg', person_bb)\n\t\t\t\t\tjpg_as_text = base64.b64encode(bufferval).decode('utf-8')\n\t\t\t\texcept:\n\t\t\t\t\tjpg_as_text = \"\"\n\t\t\t\t#cv2.imwrite('results/person_'+str(i+1)+'.jpg', person_bb)\n\n\t\t\t\tdetected_person[\"bbox_body\"] = {\n\t\t\t\t\t\"b64_person\": str(jpg_as_text),\n\t\t\t\t\t\"x\": str(xLeftBottom_),\n\t\t\t\t\t\"y\": str(yLeftBottom_),\n\t\t\t\t\t\"w\": str(xRightTop_ - xLeftBottom_),\n\t\t\t\t\t\"h\": str(yRightTop_ - yLeftBottom_)\n\t\t\t\t}\n\n\t\t\t\t#toc = time.time()\n\t\t\t\t#print (\"\\t Crop person \"+str(i)+\" : \"+ str(toc-tic) + \" seconds\")\n\n\t\t\t\t# detect faces in the grayscale image\n\t\t\t\t#tic = time.time()\n\t\t\t\tperson_face_bb_dlib = facecrop_dlib(person_bb.copy())\n\n\t\t\t\t#toc = time.time()\n\t\t\t\t#print (\"\\t\\t Crop face person \"+str(i)+\" : \"+ str(toc-tic) + \" seconds\")\n\n\t\t\t\tif person_face_bb_dlib is not None:\n\t\t\t\t detected_person[\"bboxface_dlib\"] = person_face_bb_dlib\n\t\t\t\t #with open('results/person_'+str(i+1)+'_face_dlib.jpg', \"wb\") as fh:\n\t\t\t\t # fh.write(base64.decodestring(person_face_bb_dlib[\"b64_person\"]))\n\t\t\t\t\n\t\t\t\t#tic = time.time()\n\n\t\t\t\tperson_face_bb_opcv = facecrop_opencv(person_bb.copy())\n\n\t\t\t\t#toc = time.time()\n\t\t\t\t#print (\"\\t\\t Crop face person \"+str(i)+\" : \"+ str(toc-tic) + \" seconds\")\n\n\t\t\t\tif person_face_bb_opcv is not None:\n\t\t\t\t detected_person[\"bboxface_opcv\"] = person_face_bb_opcv\n\t\t\t\t #with open('results/person_'+str(i+1)+'_face_opencv.jpg', \"wb\") as fh:\n\t\t\t\t # fh.write(base64.decodestring(person_face_bb_opcv[\"b64_person\"]))\n\t\t\t\t\n\t\t\t\tresult.append(detected_person)\n\n\tcv2.imwrite(\"results/frame.png\", img)\n\treturn result\n###################################################################################################\n###################################################################################################\n###################################################################################################\n###################################################################################################\n\n# Initialize OPENCV face detector\nfacedata = \"models/haarcascades/haarcascade_frontalface_alt.xml\"\ncascade = cv2.CascadeClassifier(facedata)\n\n# initialize dlib's face detector (HOG-based) and then create\n# the facial landmark predictor\np = \"models/dlib/shape_predictor_68_face_landmarks.dat\"\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(p)\n\n# Initialize MobileNetSSD object detector net\nprototxt = 'models/MobileNetSSD/MobileNetSSD_deploy.prototxt'\nweights = 'models/MobileNetSSD/MobileNetSSD_deploy.caffemodel'\nthr = 0.4\n\n# Labels of Network\nclassNames = { 0: 'background',\n\t1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat',\n\t5: 'bottle', 6: 'bus', 7: 'car', 8: 'cat', 9: 'chair',\n\t10: 'cow', 11: 'diningtable', 12: 'dog', 13: 'horse',\n\t14: 'motorbike', 15: 'person', 16: 'pottedplant',\n\t17: 'sheep', 18: 'sofa', 19: 'train', 20: 'tvmonitor' }\n\n#Load the Caffe model \nnet = cv2.dnn.readNetFromCaffe(prototxt, weights)\n\n\n\n#-----------------------------\n#opencv initialization\n\nsource = '/home/tonny/keras_Realtime_Multi-Person_Pose_Estimation/output.mp4'\ncap = cv2.VideoCapture(source)\ncap.set(3, 640)\ncap.set(4, 480)\n#-----------------------------\n#face expression recognizer initialization\nfrom 
keras.models import model_from_json\nmodel = model_from_json(open(\"models/facial_expression/facial_expression_model_structure.json\", \"r\").read())\nmodel.load_weights('models/facial_expression/facial_expression_model_weights.h5') #load weights\n\n#-----------------------------\n\nemotions = ('enfado', 'disgusto', 'miedo', 'felicidad', 'tristeza', 'sorpresa', 'neutral')\n\ncolors = get_colors()\n\nclient = MongoClient('localhost', 12334)\n\ndb = client['people-detection']\ncollection = db['raw-data']\n\nwith tf.Session() as sess:\n\tmodel_cfg, model_outputs = posenet.load_model(101, sess)\n\toutput_stride = model_cfg['output_stride']\n\n\tframe_counter = 0\n\twhile True:\n\t\tif (frame_counter % 3):\n\t\t\tframe_counter += 1\n\t\t\tcontinue\n\n\t\tret, img = cap.read()\n\t\tpeople_detected = person_detector(img)\n\t\tcounter = 0\n\t\tfor person in people_detected:\n\n\t\t\tto_return = {}\n\t\t\tto_return[\"frame\"] = frame_counter\n\t\t\tto_return[\"datetime\"] = datetime.datetime.utcnow()\n\t\t\tto_return[\"source\"] = source\n\t\t\tto_return[\"person\"] = person\n\t\t\t\n\t\t\tcolor = colors[counter]\n\t\t\tcounter += 1\n\t\t\t# 1 rect person bbox\n\t\t\tx = int(person[\"bbox_body\"][\"x\"])\n\t\t\ty = int(person[\"bbox_body\"][\"y\"])\n\t\t\tw = int(person[\"bbox_body\"][\"w\"])\n\t\t\th = int(person[\"bbox_body\"][\"h\"])\n\t\t\tcv2.rectangle(img, (x, y), (x+w, y+h), color, 3)\n\t\t\t\n\t\t\ttry:\n\t\t\t\tinput_image, display_image, output_scale = posenet.read_img(\n\t\t\t\t\timg[y:y+h, x:x+w], scale_factor=0.7125, output_stride=output_stride)\n\n\t\t\t\theatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(\n\t\t\t\t\tmodel_outputs,\n\t\t\t\t\tfeed_dict={'image:0': input_image}\n\t\t\t\t)\n\n\t\t\t\tpose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(\n\t\t\t\t\theatmaps_result.squeeze(axis=0),\n\t\t\t\t\toffsets_result.squeeze(axis=0),\n\t\t\t\t\tdisplacement_fwd_result.squeeze(axis=0),\n\t\t\t\t\tdisplacement_bwd_result.squeeze(axis=0),\n\t\t\t\t\toutput_stride=output_stride,\n\t\t\t\t\tmax_pose_detections=1,\n\t\t\t\t\tmin_pose_score=0.15)\n\n\t\t\t\tkeypoint_coords *= output_scale\n\n\t\t\t\tto_return[\"person\"][\"pose\"] = {}\n\t\t\t\tto_return[\"person\"][\"pose\"][\"pose_scores\"] = dict(pose_scores)\n\t\t\t\tto_return[\"person\"][\"pose\"][\"keypoint_scores\"] = dict(keypoint_scores)\n\t\t\t\tto_return[\"person\"][\"pose\"][\"keypoint_coords\"] = dict(keypoint_coords)\n\n\n\t\t\t\t# TODO this isn't particularly fast, use GL for drawing and display someday...\n\t\t\t\t'''overlay_image = posenet.draw_skel_and_kp(\n\t\t\t\t\tdisplay_image, pose_scores, keypoint_scores, keypoint_coords,\n\t\t\t\t\tmin_pose_score=0.15, min_part_score=0.1)\n\t\t\t\t'''\n\t\t\t\t\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\t\t# If face is detected, show bbox\n\t\t\ttry:\n\t\t\t\tx = int(person[\"bbox_body\"][\"x\"]) + int(person[\"bboxface_opcv\"][\"x\"])\n\t\t\t\ty = int(person[\"bbox_body\"][\"y\"]) + int(person[\"bboxface_opcv\"][\"y\"])\n\t\t\t\tw = int(person[\"bboxface_opcv\"][\"w\"])\n\t\t\t\th = int(person[\"bboxface_opcv\"][\"h\"])\n\t\t\t\tcv2.rectangle(img, (x, y), (x+w, y+h), color, 3)\n\t\t\t\tdetected_face = img[int(y):int(y+h), int(x):int(x+w)] #crop detected face\n\t\t\t\tdetected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY) #transform to gray scale\n\t\t\t\tdetected_face = cv2.resize(detected_face, (48, 48)) #resize to 48x48\n\t\t\t\t\n\t\t\t\timg_pixels = 
image.img_to_array(detected_face)\n\t\t\t\timg_pixels = np.expand_dims(img_pixels, axis = 0)\n\t\t\t\t\n\t\t\t\timg_pixels /= 255 #pixels are in scale of [0, 255]. normalize all pixels in scale of [0, 1]\n\t\t\t\t\n\t\t\t\tpredictions = model.predict(img_pixels) #store probabilities of 7 expressions\n\n\t\t\t\t#find max indexed array 0: angry, 1:disgust, 2:fear, 3:happy, 4:sad, 5:surprise, 6:neutral\n\t\t\t\tmax_index = np.argmax(predictions[0])\n\t\t\t\t\n\t\t\t\temotion = emotions[max_index]\n\t\t\t\t\n\t\t\t\tto_return[\"person\"][\"emotions\"] = {}\n\t\t\t\tto_return[\"person\"][\"emotions\"][\"predictions\"] = predictions\n\t\t\t\tto_return[\"person\"][\"emotions\"][\"max\"] = max_index\t\t\t\t\n\n\t\t\t\t#write emotion text above rectangle\n\t\t\t\tcv2.putText(img, emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\n\t\t\ttry:\n\t\t\t\t#print (to_return)\n\t\t\t\tcollection.insert(to_return)\n\t\t\t\tprint(\"guardado frame\"+str(frame_counter))\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"ERROR EN FRAME \"+ str(frame_counter) + \" \" + str(e) )\n\t\t\tcv2.imshow('img',img)\n\t\tframe_counter += 1\n\t\t#if cv2.waitKey(1) & 0xFF == ord('q'): #press q to quit\n\t\t#\tbreak\n\n#kill open cv things\t\t\ncap.release()\ncv2.destroyAllWindows()","repo_name":"tonnyESP/LayeredPeopleDetector","sub_path":"emotion_detector.py","file_name":"emotion_detector.py","file_ext":"py","file_size_in_byte":11727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29423815952","text":"\"\"\"Module for text tokenization\n\"\"\"\nfrom experiment.encoders.base import Encoder\nimport spacy\n\n\n\ndef lemmatize_pipe(doc):\n lemma_list = [str(tok.lemma_).lower() for tok in doc\n if (tok.is_digit or tok.is_alpha) and not tok.is_stop] \n return lemma_list\n\nclass Tokenizer(Encoder):\n def __init__(self, cfg, name=\"Tokenizer\"):\n self.cfg = cfg\n self.name = name\n self.spacy_model_name = self.cfg.spacy_model_name\n if not spacy.util.is_package(self.spacy_model_name):\n spacy.cli.download(self.spacy_model_name)\n self.nlp = spacy.load(self.spacy_model_name, disable=['tagger', 'parser', 'ner'])\n self.nlp.add_pipe(self.nlp.create_pipe('sentencizer'))\n\n def encode(self, texts, batch_size=500):\n preproc_pipe = []\n for doc in self.nlp.pipe(texts, batch_size=batch_size):\n preproc_pipe.append(lemmatize_pipe(doc))\n return preproc_pipe","repo_name":"Dumbris/semantic-search-domain-adaptation","sub_path":"src/experiment/experiment/encoders/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22062977890","text":"import numpy as np\nfrom src.agents.TabularQ import TabularQ\nfrom src.bayesian_inference.TabularQApproximation import TabularQApproximation\nfrom src.utils.math import argMax\n\nclass BayesianQLearningTabular(TabularQ):\n def __init__(self, state_shape, num_acts, params):\n super().__init__(state_shape, num_acts, params)\n self.gamma = params['gamma']\n self.bayesianQ = TabularQApproximation(state_shape, num_acts, self.gamma, params)\n\n def learn(self, s, sp, r, a, gamma):\n x = self.getIndex(s) + (a * self.num_states)\n ap = self.maxAction(sp)\n x_next = self.getIndex(sp) + (ap * self.num_states)\n self.bayesianQ.update_stats(x, x_next, r, gamma)\n\n def maxAction(self, s):\n self.act_vals = [self.bayesianQ.sample(self.getIndex(s) + (a * 
self.num_states), 1) for a in range(self.num_acts)]\n move = argMax(self.act_vals)\n return move\n","repo_name":"dchui1/659-project","sub_path":"src/agents/BayesianQLearningTabular.py","file_name":"BayesianQLearningTabular.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"738557095","text":"from mep3_simulation import WebotsUserDriver\nimport rclpy\nfrom rclpy.qos import QoSProfile, ReliabilityPolicy, DurabilityPolicy\nfrom std_msgs.msg import Int8\n\n\nclass MatchState:\n UNARMED = 0\n ARMED = 1\n STARTED = 2\n\n\nclass WebotsCinchDriver:\n\n def init(self, webots_node, properties):\n self.__robot = webots_node.robot\n self.__publisher = WebotsUserDriver.get().node.create_publisher(\n Int8, '/match_start_status', QoSProfile(depth=1, reliability=ReliabilityPolicy.RELIABLE, durability=DurabilityPolicy.TRANSIENT_LOCAL))\n self.__state = None\n\n def publish(self, state):\n if self.__state != state:\n self.__state = state\n self.__publisher.publish(Int8(data=self.__state))\n\n def step(self):\n if self.__state != MatchState.STARTED:\n elapsed_time = self.__robot.getTime()\n if elapsed_time <= 1.0:\n self.publish(MatchState.UNARMED)\n elif elapsed_time <= 2.0:\n self.publish(MatchState.ARMED)\n else:\n self.publish(MatchState.STARTED)\n\n rclpy.spin_once(\n WebotsUserDriver.get().node,\n timeout_sec=0,\n executor=WebotsUserDriver.get().executor\n )\n","repo_name":"memristor/mep3","sub_path":"mep3_simulation/mep3_simulation/webots_cinch_driver.py","file_name":"webots_cinch_driver.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"54"} +{"seq_id":"37470033661","text":"import webbrowser\r\nimport random\r\nimport speech_recognition as sr\r\nimport wikipedia\r\nimport datetime\r\nimport sys\r\nimport playsound\r\nfrom gtts import gTTS\r\nimport os\r\ndef main():\r\n def speak(audio):\r\n print('Computer: ' + audio)\r\n myobj = gTTS(text=audio, lang='en', slow=False)\r\n myobj.save(\"welcome.mp3\")\r\n playsound.playsound(\"welcome.mp3\")\r\n os.remove(\"welcome.mp3\")\r\n\r\n def greetMe():\r\n currentH = int(datetime.datetime.now().hour)\r\n if currentH >= 0 and currentH < 12:\r\n speak('Good Morning!')\r\n speak('jay heeind dosto')\r\n\r\n\r\n if currentH >= 12 and currentH < 18:\r\n speak('Good Afternoon!')\r\n speak('jay heeindd dosto')\r\n\r\n if currentH >= 18 and currentH !=0:\r\n speak('Good Evening!')\r\n speak('jay heeind dosto')\r\n\r\n greetMe()\r\n\r\n speak(\"I am your digital assistant!\")\r\n speak('what can i do for you! MASter!')\r\n\r\n\r\n def myCommand():\r\n\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening...\")\r\n r.pause_threshold = 1\r\n audio = r.listen(source)\r\n try:\r\n query = r.recognize_google(audio, language='en-in')\r\n print('User: ' + query + '\\n')\r\n\r\n except sr.UnknownValueError:\r\n speak('Sorry sir! I didn\\'t get that! try again')\r\n # query = str(input('Command: '))\r\n query=myCommand()\r\n\r\n return query\r\n\r\n while True:\r\n\r\n query = myCommand();\r\n query = query.lower()\r\n if \"open \"in query:\r\n cnt=0\r\n for i in range(len(query)):\r\n if query[i]==\" \":\r\n cnt+=1\r\n if cnt==1:\r\n speak(\"done sir! 
Enjoy !\")\r\n x=query.split(' ', 1)[1]\r\n site=\"www.\"+x+\".com\"\r\n webbrowser.open(site)\r\n if query==\"open youtube\":\r\n speak(\"would u like to open any channel??\")\r\n query = myCommand()\r\n if query in \"y yes yaa yeah yuup yup\":\r\n speak(\"alright\")\r\n speak(\"speak valid channel name\")\r\n x= myCommand()\r\n x=str(x)\r\n webbrowser.open(\"https://www.youtube.com/results?search_query=\"+x)\r\n elif(cnt==2):\r\n speak(\"done sir! Enjoy !\")\r\n if query==\"open google drive\" :\r\n webbrowser.open(\"drive.google.com\")\r\n elif query==\"open google maps\":\r\n webbrowser.open(\"maps.google.com\")\r\n elif query==\"open google translator\":\r\n webbrowser.open(\"translator.google.com\")\r\n else:\r\n x=query.split(' ', 1)[1]\r\n site=x+\".in\"\r\n webbrowser.open(site)\r\n\r\n\r\n\r\n elif \"what\\'s up\" in query or 'how are you' in query:\r\n stMsgs = ['Just doing my thing!', 'I am fine!', 'Nice!', 'I am nice and full of energy']\r\n speak(random.choice(stMsgs))\r\n #speak(\"mere ex be chungi mere next be chungi mere crush be chungi rishtadaar bahaut cute\")\r\n\r\n elif 'nothing' in query or 'abort' in query or 'stop' in query or 'goodbye' in query:\r\n speak('okay')\r\n speak('Bye Master!')\r\n sys.exit()\r\n\r\n elif 'hello' in query:\r\n speak('Hello Sir')\r\n\r\n elif 'play music' in query:\r\n speak(\"i can open youtube and you can select music!\")\r\n speak(\"DO you want me to do so?\")\r\n x=myCommand()\r\n if x in \"yes yeah yup yuup yah\":\r\n webbrowser.open(\"www.youtube.com\")\r\n\r\n elif\"what can you do\" in query:\r\n speak(\"what do u want\")\r\n speak(\"i can open youtube,google,gmail,send email ,play music,open some self made games and much more you will definately have fun with me!\")\r\n\r\n elif \"write something\" in query:\r\n speak(\"got it start speaking...\")\r\n content = myCommand()\r\n print(content)\r\n\r\n elif\"abba harmonium bajate the\" in query:\r\n speak(\"nahi!!\")\r\n speak(\"abba harmonium khaaate the the !! Arrey bhhaaii\")\r\n speak(\"Maaf karna gusse m idhar udhar nikal jate hu\")\r\n\r\n elif \"love\" in query:\r\n speak(\"What can i say!!\")\r\n #speak(\"bhaag yah c \")\r\n speak(\"but you are valueable to me!\")\r\n\r\n elif \"play games\" in query:\r\n speak(\"Your hardware can't tollerate my games please try after my updation\")\r\n\r\n else:\r\n query = query\r\n speak('Searching...')\r\n try:\r\n results = wikipedia.summary(query, sentences=2)\r\n speak('Got it.')\r\n speak('WIKIPEDIA says - ')\r\n speak(results)\r\n\r\n except:\r\n speak(\"Sorry master !! 
something went wrong please try searching google\")\r\n speak(\"should i open google for you??\")\r\n x=myCommand()\r\n if x in \"y\":\r\n webbrowser.open('www.google.com\\\\'+query)\r\n\r\n speak(\"what's my next task master!\")\r\n","repo_name":"shubhamjainjnsb/talking_assist","sub_path":"talking_assist/shubham_assist.py","file_name":"shubham_assist.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34404563460","text":"# Mutex (synchronization lock), used for synchronized access to a shared resource\n\nimport threading\nimport time\n\n\ndef add():\n # run the code inside the lock serially; while the lock is held, the resource cannot be shared by other threads\n _lock.acquire() # acquire the lock\n global num\n temp = num\n time.sleep(0.01)\n num = temp + 1\n _lock.release() # release the lock\n\n\n\nif __name__ == '__main__':\n\n # create the shared lock object\n _lock = threading.Lock()\n num = 0\n\n l = []\n\n for i in range(100):\n t = threading.Thread(target=add)\n t.start()\n l.append(t)\n\n for t in l:\n t.join()\n\n\n print(num)\n","repo_name":"QJLONG/python_basic_notebook","sub_path":"threading/18_mutexLock.py","file_name":"18_mutexLock.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27113576696","text":"import pytest\nfrom brownie import network, AdvancedCollectible\nfrom scripts.helpful_scripts import (\n get_account,\n get_contract,\n LOCAL_BLOCKCHAIN_ENVIRONMENTS,\n listen_for_event,\n)\nimport time\n\n\ndef test_can_create_advanced_collectible_integration(\n get_keyhash,\n chainlink_fee,\n):\n # Arrange\n if network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n pytest.skip(\"Only for integration testing\")\n advanced_collectible = AdvancedCollectible.deploy(\n get_contract(\"vrf_coordinator\").address,\n get_contract(\"link_token\").address,\n get_keyhash,\n {\"from\": get_account()},\n )\n get_contract(\"link_token\").transfer(\n advanced_collectible.address, chainlink_fee * 3, {\"from\": get_account()}\n )\n # Act\n advanced_collectible.createCollectible(\"None\", {\"from\": get_account()})\n # time.sleep(75)\n listen_for_event(\n advanced_collectible, \"ReturnedCollectible\", timeout=200, poll_interval=10\n )\n # Assert\n assert advanced_collectible.tokenCounter() > 0\n","repo_name":"PatrickAlphaC/nft-mix","sub_path":"tests/integration/test_advanced_int.py","file_name":"test_advanced_int.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":816,"dataset":"github-code","pt":"54"} +{"seq_id":"17063389558","text":"#!/usr/bin/env python\nimport os\nfrom kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import ObjectProperty\n\nprint(\"Welcome to DROIDS- Detecting Relative Outlier Impacts in Dynamic Simulations- analytical engine and visual toolbox for functional evolutionary comparison of molecular dynamic simulation\")\ncmd = 'gedit READMEv3.0.md'\nos.system(cmd)\nprint(\"finding paths for paths.ctl\") \ncmd = 'perl PATHS.pl'\nos.system(cmd)\n\n\nclass DROIDSApp(App):\n# kv_directory = 'kivy_templates'\n def build(self):\n return MyLayout()\n \nclass MyLayout(Widget):\n \n \n # define buttons and actions\n def btn1(self):\n print(\"running DROIDS - direct comparative analysis\") \n cmd = 'python DROIDS1.py'\n os.system(cmd)\n def btn2(self):\n print(\"running DROIDS - mutant model comparison\") \n cmd = 'python DROIDS2.py'\n os.system(cmd)\n def btn3(self):\n print(\"running DROIDS + maxDemon - functional variant analysis\") \n cmd = 'python DROIDS3.py'\n 
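# note: os.system blocks until the launched script exits; subprocess.run(cmd, shell=True) would be the modern equivalent\n 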
os.system(cmd)\n \n\n\nif __name__ == '__main__':\n DROIDSApp().run()\n","repo_name":"gbabbitt/DROIDS-3.0-comparative-protein-dynamics","sub_path":"DROIDS.py","file_name":"DROIDS.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"10585845418","text":"import cv2\nimport numpy as np\nimport csv\n\n\ndef resize(img,i):\n\t\n\tres = cv2.resize(img, dsize=(185,120), interpolation=cv2.INTER_CUBIC)\n\tres = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\n\tcv2.imwrite(\"image{}convertie.png\".format(i),res)\n\treturn res\n\ndef convert_csv(img):\t\n\t\n\twith open(\"imgdataset.csv\",\"a\") as file:\n\t\t\n\t\tdata = np.asarray( img, dtype=\"float32\" )\n\t\tdata = data.flatten()\n\t\tdata = np.round(data, 2)\n\t\tprint (data)\n\t\t\n\t\twriter = csv.writer(file)\n\t\twriter.writerow([float(r) for r in data])\n\ndef get_value():\n\treturn float(value)\n\nfor i in range (1,998):\n\timg_resized = cv2.imread(\"image{}.png\".format(i))\n\tprint(\"image{} convertie\".format(i))\n\timg_resized = resize(img_resized,i)\n\tconvert_csv(img_resized)\n\t\n\t","repo_name":"IALABGARAGE/PiloteAutomatique","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21157615350","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pprint\n\n\ndef sort_stories_by_votes(hnlist):\n return sorted(hnlist, key=lambda k: k['votes'], reverse=True)\n\n\ndef create_custom_hn(links, subtext):\n hn = []\n pts_bigger_than = 99\n for idx, item in enumerate(links):\n title = item.getText()\n href = item.get('href', None)\n vote = subtext[idx].select('.score')\n if len(vote):\n points = int(vote[0].getText().replace(' points', ''))\n if points > pts_bigger_than:\n hn.append({'title': title, 'link': href, 'votes': points})\n return sort_stories_by_votes(hn)\n\n\ndef get_hacker_news(pages):\n pages_to_get = pages\n mega_list = []\n for page_num in range(1, pages_to_get + 1):\n if page_num == 1:\n res = requests.get('https://news.ycombinator.com/news')\n soup = BeautifulSoup(res.text, 'html.parser')\n sel_links = soup.select('.storylink')\n sel_subtext = soup.select('.subtext')\n mega_list.extend(create_custom_hn(sel_links, sel_subtext))\n else:\n res = requests.get(\n f'https://news.ycombinator.com/news?p={page_num}')\n soup = BeautifulSoup(res.text, 'html.parser')\n sel_links = soup.select('.storylink')\n sel_subtext = soup.select('.subtext')\n mega_list.extend(create_custom_hn(sel_links, sel_subtext))\n return sort_stories_by_votes(mega_list)\n\n\ndef main():\n pprint.pprint(get_hacker_news(4))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tonyf8321/hacker_news_scraper","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3614664622","text":"import os\nfrom azure.servicebus import ServiceBusClient\n\nCONNECTION_STR = os.environ['SERVICEBUS_CONNECTION_STR']\nQUEUE_NAME = os.environ[\"SERVICEBUS_QUEUE_NAME\"]\n\nservicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)\n\nwith servicebus_client:\n receiver = servicebus_client.get_queue_receiver(queue_name=QUEUE_NAME)\n with receiver:\n received_msgs = receiver.peek_messages(max_message_count=2)\n for msg in received_msgs:\n print(str(msg))\n\nprint(\"Receive is 
done.\")\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/servicebus/azure-servicebus/samples/sync_samples/receive_peek.py","file_name":"receive_peek.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"70804609762","text":"import tensorflow as tf\nimport numpy as np\n\nINF = 1e30\n\n\n# 双向gru\nclass cudnn_gru:\n\n def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope=\"cudnn_gru\"):\n self.num_layers = num_layers\n self.grus = []\n self.inits = []\n self.dropout_mask = []\n self.scope = scope\n for layer in range(num_layers):\n input_size_ = input_size if layer == 0 else 2 * num_units\n # 前向gru\n # 参数:层数,隐含单元数\n gru_fw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units)\n # 后向gru\n gru_bw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units)\n # init_state,shape=(1,batch_size,num_units)\n init_fw = tf.tile(tf.Variable(\n tf.zeros([1, 1, num_units])), [1, batch_size, 1])\n init_bw = tf.tile(tf.Variable(\n tf.zeros([1, 1, num_units])), [1, batch_size, 1])\n # dropout层\n mask_fw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),\n keep_prob=keep_prob, is_train=is_train, mode=None)\n mask_bw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),\n keep_prob=keep_prob, is_train=is_train, mode=None)\n self.grus.append((gru_fw, gru_bw, ))\n self.inits.append((init_fw, init_bw, ))\n self.dropout_mask.append((mask_fw, mask_bw, ))\n\n def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):\n # 转换输入的维度,0维与1维交换;并且增加一个维度(最外层[]作用)\n # 如输入shape为[2,3,4],outputs.shape=[[3,2,4]]\n outputs = [tf.transpose(inputs, [1, 0, 2])]\n with tf.variable_scope(self.scope):\n for layer in range(self.num_layers):\n gru_fw, gru_bw = self.grus[layer]\n init_fw, init_bw = self.inits[layer]\n mask_fw, mask_bw = self.dropout_mask[layer]\n with tf.variable_scope(\"fw_{}\".format(layer)):\n out_fw, _ = gru_fw(\n outputs[-1] * mask_fw, initial_state=(init_fw, ))\n with tf.variable_scope(\"bw_{}\".format(layer)):\n inputs_bw = tf.reverse_sequence(\n outputs[-1] * mask_bw, seq_lengths=seq_len, seq_axis=0, batch_axis=1)\n out_bw, _ = gru_bw(inputs_bw, initial_state=(init_bw, ))\n out_bw = tf.reverse_sequence(\n out_bw, seq_lengths=seq_len, seq_axis=0, batch_axis=1)\n outputs.append(tf.concat([out_fw, out_bw], axis=2))\n if concat_layers:\n # 连接每一层输出作为最终输出\n res = tf.concat(outputs[1:], axis=2)\n else:\n res = outputs[-1]\n res = tf.transpose(res, [1, 0, 2])\n return res\n\n\nclass native_gru:\n\n def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope=\"native_gru\"):\n self.num_layers = num_layers\n self.grus = []\n self.inits = []\n self.dropout_mask = []\n self.scope = scope\n for layer in range(num_layers):\n input_size_ = input_size if layer == 0 else 2 * num_units\n gru_fw = tf.contrib.rnn.GRUCell(num_units)\n gru_bw = tf.contrib.rnn.GRUCell(num_units)\n init_fw = tf.tile(tf.Variable(\n tf.zeros([1, num_units])), [batch_size, 1])\n init_bw = tf.tile(tf.Variable(\n tf.zeros([1, num_units])), [batch_size, 1])\n mask_fw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),\n keep_prob=keep_prob, is_train=is_train, mode=None)\n mask_bw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),\n keep_prob=keep_prob, is_train=is_train, mode=None)\n self.grus.append((gru_fw, gru_bw, ))\n self.inits.append((init_fw, init_bw, ))\n 
self.dropout_mask.append((mask_fw, mask_bw, ))\n\n def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):\n outputs = [inputs]\n with tf.variable_scope(self.scope):\n for layer in range(self.num_layers):\n gru_fw, gru_bw = self.grus[layer]\n init_fw, init_bw = self.inits[layer]\n mask_fw, mask_bw = self.dropout_mask[layer]\n with tf.variable_scope(\"fw_{}\".format(layer)):\n out_fw, _ = tf.nn.dynamic_rnn(\n gru_fw, outputs[-1] * mask_fw, seq_len, initial_state=init_fw, dtype=tf.float32)\n with tf.variable_scope(\"bw_{}\".format(layer)):\n inputs_bw = tf.reverse_sequence(\n outputs[-1] * mask_bw, seq_lengths=seq_len, seq_axis=1, batch_axis=0)\n out_bw, _ = tf.nn.dynamic_rnn(\n gru_bw, inputs_bw, seq_len, initial_state=init_bw, dtype=tf.float32)\n out_bw = tf.reverse_sequence(\n out_bw, seq_lengths=seq_len, seq_axis=1, batch_axis=0)\n outputs.append(tf.concat([out_fw, out_bw], axis=2))\n if concat_layers:\n res = tf.concat(outputs[1:], axis=2)\n else:\n res = outputs[-1]\n return res\n\n\ndef mru(input, seq_len, mask, mru_range, hidden):\n \"\"\"\n :param input: [b,c,dim]\n :param seq_len: 1000\n :param mask: [b,c]\n :param mru_range: [1, 2, 4, 10, 25]\n :param hidden: 250\n \"\"\"\n w_t = []\n N = input.get_shape().as_list()[0]\n dim = input.get_shape().as_list()[-1]\n for i, scale in enumerate(mru_range):\n in_i = input\n s_len = seq_len // scale\n if seq_len % scale != 0:\n pad_size = scale - (seq_len % scale)\n padding = np.array([[0, 0], [0, pad_size], [0, 0]])\n in_i = tf.pad(input, padding)\n s_len = s_len + 1\n # shape=[N, s_len, scale*450]\n input_pad = tf.reshape(in_i, [N, -1, scale * dim])\n # contracted.shape=[N, s_len, hidden]\n contracted = tf.layers.dense(input_pad, units=hidden, activation=tf.nn.relu)\n # [N, s_len, 1, hidden] --> [N, s_len, scale, hidden] --> [N, w_size, hidden]\n expanded = tf.slice(tf.reshape(tf.tile(tf.expand_dims(contracted, 2),\n [1, 1, scale, 1]), [N, s_len * scale, hidden]), [0, 0, 0], [N, seq_len, hidden])\n expanded = tf.tile(tf.expand_dims(tf.cast(mask, tf.float32), 2),\n [1, 1, expanded.get_shape().as_list()[-1]]) * expanded\n w_t.append(expanded)\n\n w_t = tf.reshape(w_t, [N, seq_len, len(mru_range) * hidden])\n\n gate = tf.layers.dense(tf.layers.dense(w_t, units=250, activation=tf.nn.relu),\n units=dim, activation=tf.nn.relu)\n gate = tf.nn.sigmoid(gate)\n z_t = tf.layers.dense(w_t, units=dim, activation=tf.nn.tanh)\n y_t = gate * z_t + (1 - gate) * input\n return y_t\n\n\nclass ptr_net:\n def __init__(self, batch, hidden, keep_prob=1.0, is_train=None, scope=\"ptr_net\"):\n self.gru = tf.contrib.rnn.GRUCell(hidden)\n self.batch = batch\n self.scope = scope\n self.keep_prob = keep_prob\n self.is_train = is_train\n self.dropout_mask = dropout(tf.ones(\n [batch, hidden], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train)\n\n def __call__(self, init, match, d, mask):\n with tf.variable_scope(self.scope):\n d_match = dropout(match, keep_prob=self.keep_prob,\n is_train=self.is_train)\n inp, logits1 = pointer(d_match, init * self.dropout_mask, d, mask)\n d_inp = dropout(inp, keep_prob=self.keep_prob,\n is_train=self.is_train)\n _, state = self.gru(d_inp, init)\n tf.get_variable_scope().reuse_variables()\n _, logits2 = pointer(d_match, state * self.dropout_mask, d, mask)\n return logits1, logits2\n\n\ndef dropout(args, keep_prob, is_train, mode=\"recurrent\"):\n \"\"\"\n :param args: dropout matrix shape=[batch_size, 1, input_size_]\n :param keep_prob:\n :param is_train:\n :param mode:\n :return:\n \"\"\"\n if 
keep_prob < 1.0:\n noise_shape = None\n scale = 1.0\n shape = tf.shape(args)\n if mode == \"embedding\":\n noise_shape = [shape[0], 1]\n scale = keep_prob\n if mode == \"recurrent\" and len(args.get_shape().as_list()) == 3:\n noise_shape = [shape[0], 1, shape[-1]]\n # tf.cond(): control flow; if the first argument is True, run the first function, otherwise run the second\n # in train mode apply dropout; otherwise the mask matrix is all ones, i.e. no dropout is used\n # the second dim of noise_shape is 1, so dropout is applied consistently along the input's second dimension\n # e.g. for an input tensor of shape (batch, word, vector_dim), if one word is dropped, the whole sentence is dropped\n args = tf.cond(is_train, lambda: tf.nn.dropout(\n args, keep_prob, noise_shape=noise_shape) * scale, lambda: args)\n return args\n\n\ndef softmax_mask(val, mask):\n return -INF * (1 - tf.cast(mask, tf.float32)) + val\n\n\ndef pointer(inputs, state, hidden, mask, scope=\"pointer\"):\n with tf.variable_scope(scope):\n u = tf.concat([tf.tile(tf.expand_dims(state, axis=1), [\n 1, tf.shape(inputs)[1], 1]), inputs], axis=2)\n s0 = tf.nn.tanh(dense(u, hidden, use_bias=False, scope=\"s0\"))\n s = dense(s0, 1, use_bias=False, scope=\"s\")\n s1 = softmax_mask(tf.squeeze(s, [2]), mask)\n a = tf.expand_dims(tf.nn.softmax(s1), axis=2)\n res = tf.reduce_sum(a * inputs, axis=1)\n return res, s1\n\n\ndef summ(memory, hidden, mask, keep_prob=1.0, is_train=None, scope=\"summ\"):\n with tf.variable_scope(scope):\n d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)\n s0 = tf.nn.tanh(dense(d_memory, hidden, scope=\"s0\"))\n s = dense(s0, 1, use_bias=False, scope=\"s\")\n s1 = softmax_mask(tf.squeeze(s, [2]), mask)\n a = tf.expand_dims(tf.nn.softmax(s1), axis=2)\n res = tf.reduce_sum(a * memory, axis=1)\n return res\n\n\ndef dot_attention(inputs, memory, mask, keep_prob=1.0, is_train=None, scope=\"dot_attention\"):\n with tf.variable_scope(scope):\n\n d_inputs = dropout(inputs, keep_prob=keep_prob, is_train=is_train)\n d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)\n JX = tf.shape(inputs)[1]\n\n with tf.variable_scope(\"attention\"):\n outputs = tf.matmul(d_inputs, tf.transpose(\n d_memory, [0, 2, 1])) / (inputs.get_shape().as_list()[-1] ** 0.5)\n # mask.shape:[batch,q_size]-->[batch,c_size,q_size]\n mask = tf.tile(tf.expand_dims(mask, axis=1), [1, JX, 1])\n logits = tf.nn.softmax(softmax_mask(outputs, mask))\n # outputs.shape=[batch,c_size,word_dim]\n outputs = tf.matmul(logits, memory)\n # res.shape=[batch,c_size,2*word_dim]\n res = tf.concat([inputs, outputs], axis=2)\n\n with tf.variable_scope(\"gate\"):\n dim = res.get_shape().as_list()[-1]\n d_res = dropout(res, keep_prob=keep_prob, is_train=is_train)\n gate = tf.nn.sigmoid(dense(d_res, dim, use_bias=False))\n return res * gate\n\n\ndef dense(inputs, hidden, use_bias=True, scope=\"dense\"):\n with tf.variable_scope(scope):\n shape = tf.shape(inputs)\n dim = inputs.get_shape().as_list()[-1]\n out_shape = [shape[idx] for idx in range(\n len(inputs.get_shape().as_list()) - 1)] + [hidden]\n flat_inputs = tf.reshape(inputs, [-1, dim])\n W = tf.get_variable(\"W\", [dim, hidden])\n res = tf.matmul(flat_inputs, W)\n if use_bias:\n b = tf.get_variable(\n \"b\", [hidden], initializer=tf.constant_initializer(0.))\n res = tf.nn.bias_add(res, b)\n res = tf.reshape(res, out_shape)\n return res\n\n\ndef bilinear(x, y, scope=\"bilinear\"):\n \"\"\"\n :param X: [batch, m, k]\n :param Y: [batch, n,l]\n :param scope: []\n :return res: [b,m,n]\n W = [batch,k,l]\n \"\"\"\n with tf.variable_scope(scope):\n batch = x.get_shape().as_list()[0]\n dim_x = x.get_shape().as_list()[-1]\n dim_y = y.get_shape().as_list()[-1]\n W = tf.get_variable(\"W\", [batch, dim_x, dim_y],\n 
initializer=tf.random_uniform_initializer(\n minval=0, maxval=None, seed=None, dtype=tf.float32))\n res = tf.matmul(x, W)\n # the batch dim (0) is not transposed; dims 1 and 2 are swapped\n res = tf.matmul(res, tf.transpose(y, perm=[0, 2, 1]))\n return res\n\n","repo_name":"caldreaming/Hierarchical_Attention_Flow_Model","sub_path":"oqmrc/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":12816,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"72407224480","text":"#UD4. Introduction to programming in QGIS3\n#4.4. Raster geoprocessing\n#4.4.2. Raster geoprocessing. HILLSHADE\n##################################################\n\n#Input layer (DTM)\ninput = iface.activeLayer()\n\n#Output layer (folder, file name, format)\nfolder = 'C:\\\\ISM_PyQGIS\\\\'\nname = 'hillshade'\nformat = '.tif'\n\nuri = folder+name+format\n\nparameter_dictionary = {\n 'INPUT': input,\n 'Z_FACTOR': 1,\n 'AZIMUTH': 45,\n 'V_ANGLE':45,\n 'OUTPUT': uri}\n \nresult = processing.run(\"qgis:hillshade\", parameter_dictionary)\niface.addRasterLayer(uri, name,\"gdal\")\n","repo_name":"jmsmz24/Desarrollo","sub_path":"PyQGIS/4_4_3_GeoprocesosRaster_HILLSHADE.py","file_name":"4_4_3_GeoprocesosRaster_HILLSHADE.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12548587841","text":"#########\n# q3.py #\n#########\n# From the receipt detail data (df_receipt), select the columns sales date (sales_ymd),\n# customer ID (customer_id), product code (product_cd) and sales amount (amount), in that order, and show 10 rows.\n# When extracting, rename the sales_ymd column to sales_date.\n\nimport pandas as pd\n\ndf_receipt = pd.read_csv(\"../data/receipt.csv\", dtype=str)\n\nans3 = df_receipt[['sales_ymd', 'customer_id', 'product_cd', 'amount']].rename(columns={'sales_ymd':'sales_date'}).head(10)\n\nans3.to_csv(\"../answer/ans3.csv\")\n#print(ans3)\n","repo_name":"makotoyamaai/statistics","sub_path":"100knocks/program/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43193522379","text":"from mock import patch, call, MagicMock\n\nfrom collections import OrderedDict\n\nimport cinder_utils as cinder_utils\n\nfrom test_utils import (\n CharmTestCase,\n)\n\nTO_PATCH = [\n # helpers.core.hookenv\n 'config',\n 'log',\n # helpers.core.host\n 'mounts',\n 'umount',\n # ceph utils\n 'ceph_create_pool',\n 'ceph_pool_exists',\n # storage_utils\n 'create_lvm_physical_volume',\n 'create_lvm_volume_group',\n 'deactivate_lvm_volume_group',\n 'is_lvm_physical_volume',\n 'relation_ids',\n 'remove_lvm_physical_volume',\n 'ensure_loopback_device',\n 'is_block_device',\n 'zap_disk',\n 'get_os_codename_package',\n 'get_os_codename_install_source',\n 'configure_installation_source',\n 'eligible_leader',\n 'templating',\n # fetch\n 'apt_update',\n 'apt_install'\n]\n\n\nMOUNTS = [\n ['/mnt', '/dev/vdb']\n]\n\n\nclass TestCinderUtils(CharmTestCase):\n def setUp(self):\n super(TestCinderUtils, self).setUp(cinder_utils, TO_PATCH)\n self.config.side_effect = self.test_config.get_all\n\n def svc_enabled(self, svc):\n return svc in self.test_config.get('enabled-services')\n\n def test_all_services_enabled(self):\n '''It determines all services are enabled based on config'''\n self.test_config.set('enabled-services', 'all')\n enabled = []\n for s in ['volume', 'api', 'scheduler']:\n enabled.append(cinder_utils.service_enabled(s))\n self.assertEquals(enabled, [True, True, True])\n\n def test_service_enabled(self):\n 
'''It determines services are enabled based on config'''\n self.test_config.set('enabled-services', 'api,volume,scheduler')\n self.assertTrue(cinder_utils.service_enabled('volume'))\n\n def test_service_not_enabled(self):\n '''It determines services are not enabled based on config'''\n self.test_config.set('enabled-services', 'api,scheduler')\n self.assertFalse(cinder_utils.service_enabled('volume'))\n\n @patch('cinder_utils.service_enabled')\n def test_determine_packages_all(self, service_enabled):\n '''It determines all packages required when all services enabled'''\n service_enabled.return_value = True\n pkgs = cinder_utils.determine_packages()\n self.assertEquals(sorted(pkgs),\n sorted(cinder_utils.COMMON_PACKAGES +\n cinder_utils.VOLUME_PACKAGES +\n cinder_utils.API_PACKAGES +\n cinder_utils.SCHEDULER_PACKAGES))\n\n @patch('cinder_utils.service_enabled')\n def test_determine_packages_subset(self, service_enabled):\n '''It determines packages required for a subset of enabled services'''\n service_enabled.side_effect = self.svc_enabled\n\n self.test_config.set('enabled-services', 'api')\n pkgs = cinder_utils.determine_packages()\n common = cinder_utils.COMMON_PACKAGES\n self.assertEquals(sorted(pkgs),\n sorted(common + cinder_utils.API_PACKAGES))\n self.test_config.set('enabled-services', 'volume')\n pkgs = cinder_utils.determine_packages()\n common = cinder_utils.COMMON_PACKAGES\n self.assertEquals(sorted(pkgs),\n sorted(common + cinder_utils.VOLUME_PACKAGES))\n self.test_config.set('enabled-services', 'api,scheduler')\n pkgs = cinder_utils.determine_packages()\n common = cinder_utils.COMMON_PACKAGES\n self.assertEquals(sorted(pkgs),\n sorted(common + cinder_utils.API_PACKAGES +\n cinder_utils.SCHEDULER_PACKAGES))\n\n def test_creates_restart_map_all_enabled(self):\n '''It creates correct restart map when all services enabled'''\n ex_map = OrderedDict([\n ('/etc/cinder/cinder.conf', ['cinder-api', 'cinder-volume',\n 'cinder-scheduler', 'haproxy']),\n ('/etc/cinder/api-paste.ini', ['cinder-api']),\n ('/etc/ceph/ceph.conf', ['cinder-volume']),\n ('/etc/haproxy/haproxy.cfg', ['haproxy']),\n ('/etc/apache2/sites-available/openstack_https_frontend',\n ['apache2']),\n ('/etc/apache2/sites-available/openstack_https_frontend.conf',\n ['apache2']),\n ])\n self.assertEquals(cinder_utils.restart_map(), ex_map)\n\n @patch('cinder_utils.service_enabled')\n def test_creates_restart_map_no_api(self, service_enabled):\n '''It creates correct restart map with api disabled'''\n service_enabled.side_effect = self.svc_enabled\n self.test_config.set('enabled-services', 'scheduler,volume')\n ex_map = OrderedDict([\n ('/etc/cinder/cinder.conf', ['cinder-volume', 'cinder-scheduler',\n 'haproxy']),\n ('/etc/ceph/ceph.conf', ['cinder-volume']),\n ('/etc/haproxy/haproxy.cfg', ['haproxy']),\n ('/etc/apache2/sites-available/openstack_https_frontend',\n ['apache2']),\n ('/etc/apache2/sites-available/openstack_https_frontend.conf',\n ['apache2']),\n ])\n self.assertEquals(cinder_utils.restart_map(), ex_map)\n\n @patch('cinder_utils.service_enabled')\n def test_creates_restart_map_only_api(self, service_enabled):\n '''It creates correct restart map with only api enabled'''\n service_enabled.side_effect = self.svc_enabled\n self.test_config.set('enabled-services', 'api')\n ex_map = OrderedDict([\n ('/etc/cinder/cinder.conf', ['cinder-api', 'haproxy']),\n ('/etc/cinder/api-paste.ini', ['cinder-api']),\n ('/etc/haproxy/haproxy.cfg', ['haproxy']),\n ('/etc/apache2/sites-available/openstack_https_frontend',\n 
['apache2']),\n ])\n self.assertEquals(cinder_utils.restart_map(), ex_map)\n\n def test_ensure_block_device_bad_config(self):\n '''It doesn't prepare storage with bad config'''\n for none in ['None', 'none', None]:\n self.assertRaises(cinder_utils.CinderCharmError,\n cinder_utils.ensure_block_device,\n block_device=none)\n\n def test_ensure_block_device_loopback(self):\n '''It ensures loopback device when checking block device'''\n cinder_utils.ensure_block_device('/tmp/cinder.img')\n ex_size = cinder_utils.DEFAULT_LOOPBACK_SIZE\n self.ensure_loopback_device.assert_called_with('/tmp/cinder.img',\n ex_size)\n\n cinder_utils.ensure_block_device('/tmp/cinder-2.img|15G')\n self.ensure_loopback_device.assert_called_with('/tmp/cinder-2.img',\n '15G')\n\n def test_ensure_standard_block_device(self):\n '''It looks for storage at both relative and full device path'''\n for dev in ['vdb', '/dev/vdb']:\n cinder_utils.ensure_block_device(dev)\n self.is_block_device.assert_called_with('/dev/vdb')\n\n def test_ensure_nonexistent_block_device(self):\n '''It will not ensure a non-existent block device'''\n self.is_block_device.return_value = False\n self.assertRaises(cinder_utils.CinderCharmError,\n cinder_utils.ensure_block_device, 'foo')\n\n def test_clean_storage_unmount(self):\n '''It unmounts block device when cleaning storage'''\n self.is_lvm_physical_volume.return_value = False\n self.zap_disk.return_value = True\n self.mounts.return_value = MOUNTS\n cinder_utils.clean_storage('/dev/vdb')\n self.assertTrue(self.umount.called)\n\n def test_clean_storage_lvm_wipe(self):\n '''It removes traces of LVM when cleaning storage'''\n self.mounts.return_value = []\n self.is_lvm_physical_volume.return_value = True\n cinder_utils.clean_storage('/dev/vdb')\n self.remove_lvm_physical_volume.assert_called_with('/dev/vdb')\n self.deactivate_lvm_volume_group.assert_called_with('/dev/vdb')\n\n def test_clean_storage_zap_disk(self):\n '''It zaps the disk when cleaning storage'''\n self.mounts.return_value = []\n self.is_lvm_physical_volume.return_value = False\n cinder_utils.clean_storage('/dev/vdb')\n self.zap_disk.assert_called_with('/dev/vdb')\n\n def test_prepare_lvm_storage_not_clean(self):\n '''It errors when prepping non-clean LVM storage'''\n self.is_lvm_physical_volume.return_value = True\n self.assertRaises(cinder_utils.CinderCharmError,\n cinder_utils.prepare_lvm_storage,\n block_device='/dev/foobar',\n volume_group='bar-vg')\n\n def test_prepare_lvm_storage_clean(self):\n self.is_lvm_physical_volume.return_value = False\n cinder_utils.prepare_lvm_storage(block_device='/dev/foobar',\n volume_group='bar-vg')\n self.create_lvm_physical_volume.assert_called_with('/dev/foobar')\n self.create_lvm_volume_group.assert_called_with('bar-vg',\n '/dev/foobar')\n\n def test_prepare_lvm_storage_error(self):\n self.is_lvm_physical_volume.return_value = False\n self.create_lvm_physical_volume.side_effect = Exception()\n # NOTE(jamespage) ensure general Exceptions mapped\n # to CinderCharmError's\n self.assertRaises(cinder_utils.CinderCharmError,\n cinder_utils.prepare_lvm_storage,\n block_device='/dev/foobar',\n volume_group='bar-vg')\n\n def test_migrate_database(self):\n '''It migrates database with cinder-manage'''\n with patch('subprocess.check_call') as check_call:\n cinder_utils.migrate_database()\n check_call.assert_called_with(['cinder-manage', 'db', 'sync'])\n\n def test_ensure_ceph_pool(self):\n 
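# ensure_ceph_pool should create the pool only when ceph reports it missing\n 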
self.ceph_pool_exists.return_value = False\n cinder_utils.ensure_ceph_pool(service='cinder', replicas=3)\n self.ceph_create_pool.assert_called_with(service='cinder',\n name='cinder',\n replicas=3)\n\n def test_ensure_ceph_pool_already_exists(self):\n self.ceph_pool_exists.return_value = True\n cinder_utils.ensure_ceph_pool(service='cinder', replicas=3)\n self.assertFalse(self.ceph_create_pool.called)\n\n @patch('os.path.exists')\n def test_register_configs_apache(self, exists):\n exists.return_value = False\n self.get_os_codename_package.return_value = 'grizzly'\n self.relation_ids.return_value = False\n configs = cinder_utils.register_configs()\n calls = []\n for conf in [cinder_utils.CINDER_API_CONF,\n cinder_utils.CINDER_CONF,\n cinder_utils.APACHE_SITE_CONF,\n cinder_utils.HAPROXY_CONF]:\n calls.append(\n call(conf,\n cinder_utils.CONFIG_FILES[conf]['hook_contexts'])\n )\n configs.register.assert_has_calls(calls, any_order=True)\n\n @patch('os.path.exists')\n def test_register_configs_apache24(self, exists):\n exists.return_value = True\n self.get_os_codename_package.return_value = 'grizzly'\n self.relation_ids.return_value = False\n configs = cinder_utils.register_configs()\n calls = []\n for conf in [cinder_utils.CINDER_API_CONF,\n cinder_utils.CINDER_CONF,\n cinder_utils.APACHE_SITE_24_CONF,\n cinder_utils.HAPROXY_CONF]:\n calls.append(\n call(conf,\n cinder_utils.CONFIG_FILES[conf]['hook_contexts'])\n )\n configs.register.assert_has_calls(calls, any_order=True)\n\n @patch('os.mkdir')\n @patch('os.path.isdir')\n @patch('os.path.exists')\n def test_register_configs_ceph(self, exists, isdir, mkdir):\n exists.return_value = False\n isdir.return_value = False\n self.get_os_codename_package.return_value = 'grizzly'\n self.relation_ids.return_value = ['ceph:0']\n configs = cinder_utils.register_configs()\n calls = []\n for conf in [cinder_utils.CINDER_API_CONF,\n cinder_utils.CINDER_CONF,\n cinder_utils.APACHE_SITE_CONF,\n cinder_utils.HAPROXY_CONF,\n cinder_utils.CEPH_CONF]:\n calls.append(\n call(conf,\n cinder_utils.CONFIG_FILES[conf]['hook_contexts'])\n )\n configs.register.assert_has_calls(calls, any_order=True)\n self.assertTrue(mkdir.called)\n\n def test_set_ceph_kludge(self):\n pass\n \"\"\"\n def set_ceph_env_variables(service):\n # XXX: Horrid kludge to make cinder-volume use\n # a different ceph username than admin\n env = open('/etc/environment', 'r').read()\n if 'CEPH_ARGS' not in env:\n with open('/etc/environment', 'a') as out:\n out.write('CEPH_ARGS=\"--id %s\"\\n' % service)\n with open('/etc/init/cinder-volume.override', 'w') as out:\n out.write('env CEPH_ARGS=\"--id %s\"\\n' % service)\n \"\"\"\n\n @patch.object(cinder_utils, 'migrate_database')\n @patch.object(cinder_utils, 'determine_packages')\n def test_openstack_upgrade_leader(self, pkgs, migrate):\n pkgs.return_value = ['mypackage']\n self.config.side_effect = None\n self.config.return_value = 'cloud:precise-havana'\n self.eligible_leader.return_value = True\n self.get_os_codename_install_source.return_value = 'havana'\n configs = MagicMock()\n cinder_utils.do_openstack_upgrade(configs)\n self.assertTrue(configs.write_all.called)\n configs.set_release.assert_called_with(openstack_release='havana')\n self.assertTrue(migrate.called)\n\n @patch.object(cinder_utils, 'migrate_database')\n @patch.object(cinder_utils, 'determine_packages')\n def test_openstack_upgrade_not_leader(self, pkgs, migrate):\n pkgs.return_value = ['mypackage']\n self.config.side_effect = None\n self.config.return_value = 'cloud:precise-havana'\n 
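# a non-leader still upgrades packages and rewrites configs, but must not run the DB migration\n 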
self.eligible_leader.return_value = False\n self.get_os_codename_install_source.return_value = 'havana'\n configs = MagicMock()\n cinder_utils.do_openstack_upgrade(configs)\n self.assertTrue(configs.write_all.called)\n configs.set_release.assert_called_with(openstack_release='havana')\n self.assertFalse(migrate.called)\n","repo_name":"CiscoSystems/juju-cinder","sub_path":"unit_tests/test_cinder_utils.py","file_name":"test_cinder_utils.py","file_ext":"py","file_size_in_byte":14876,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"20605893728","text":"import numpy as np\nimport progressbar\nfrom .solver import Solver\nimport time\n\nclass RiemannianTrustRegions(Solver):\n\n def __init__(self, manifold, cost, initGuess='random', maxiter=1000,\n timeiter=None, verbose=False, DeltaBar=None, Delta0=None,\n rhoPrime=0.1, rhoReg=1e3, epsMac=1e-16, kappa=0.1, theta=1.,\n maxiter_tCG=None, mingradnorm=1e-6):\n self.manifold = manifold\n self.cost = cost\n self.rmnGrad = self.manifold._riemannianGradient(self.cost._euclideanGradient)\n self.rmnHess = self.manifold._riemannianHessian(self.cost._euclideanGradient, self.cost._euclideanHessian)\n\n self.maxiter = maxiter\n self.verbose = verbose\n if DeltaBar == None:\n self.DeltaBar = np.sqrt(manifold.dim)\n else:\n self.DeltaBar = DeltaBar\n\n if Delta0 == None:\n self.Delta0 = self.DeltaBar / 8\n else:\n self.Delta0 = Delta0\n\n self.rhoPrime = rhoPrime\n self.rhoReg = rhoReg\n self.epsMac = epsMac\n self.kappa = kappa\n self.theta = theta\n self.mingradnorm = mingradnorm\n\n if maxiter_tCG == None:\n self.maxiter_tCG = maxiter\n else:\n self.maxiter_tCG = maxiter_tCG\n\n if initGuess == 'random':\n self.initGuess = manifold._randomPoint()\n else:\n self.initGuess = initGuess\n\n if timeiter == None:\n self.timeiter = maxiter\n else:\n self.timeiter = timeiter\n\n def _step(self, xx, Delta):\n grad = self.rmnGrad(xx)\n ss, Hss = self._tCG(xx, grad, Delta)\n xx_tent = self.manifold._retract(xx, ss)\n fxx = self.cost._eval(xx)\n rhoAdd = np.max([1., np.abs(fxx)]) * self.epsMac * self.rhoReg\n\n rho = fxx - self.cost._eval(xx_tent) + rhoAdd\n rho /= -self.manifold._inner(grad, ss) \\\n -(0.5 * self.manifold._inner(ss, Hss)) \\\n + rhoAdd\n\n if rho > self.rhoPrime:\n xx_next = xx_tent\n else:\n xx_next = xx\n\n if rho < 0.25:\n Delta_next = 0.25 * Delta\n elif rho > 0.75 and np.isclose(self.manifold._norm(ss), Delta):\n Delta_next = np.min([2 * Delta, DeltaBar])\n else:\n Delta_next = Delta\n\n\n if self.verbose == False:\n return xx_next, Delta_next\n else:\n return xx_next, Delta_next, fxx, self.manifold._norm(grad)\n\n def _tCG(self, xx, grad, Delta):\n # compute truncated Conjugate Gradients\n add = self.manifold._addTangent\n multiply = self.manifold._multiplyTangent\n b = grad\n v = self.manifold._zeroTangent(); r = b; p = r;\n r_0norm = self.manifold._norm(r)\n for _ in range(self.maxiter_tCG):\n Hp = self.rmnHess(xx, p)\n inner = self.manifold._inner(p, Hp)\n alpha = (self.manifold._norm(r) ** 2) / inner\n v_tent = add(v, multiply(alpha, p))\n\n if (inner <= 0) or (self.manifold._norm(v_tent) >= Delta):\n p_norm = self.manifold._norm(p)\n pv_inner = self.manifold._inner(p, v)\n v_norm = self.manifold._norm(v)\n t = -(pv_inner + np.sqrt((pv_inner ** 2) - p_norm*(v_norm**2 - Delta**2))) / p_norm\n v = add(v, multiply(t, p))\n return v, add(b, add(multiply(-1.0, r), multiply(t, Hp)))\n else:\n v = v_tent\n\n r_prevnorm = self.manifold._norm(r)\n r = add(r, multiply(-alpha, Hp))\n 
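# truncated-CG stopping rule: accept once ||r_next|| <= ||r_0|| * min(||r_0||^theta, kappa)\n 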
r_nextnorm = self.manifold._norm(r)\n if r_nextnorm <= r_0norm*np.min([r_0norm**self.theta, self.kappa]):\n return v, add(b, multiply(-1.0, r))\n\n beta = (r_nextnorm / r_prevnorm) ** 2\n p = add(r, multiply(beta, p))\n\n return v, add(b, multiply(-1.0, r))\n\n\n def solve(self):\n xx = self.initGuess\n Delta = self.Delta0\n if self.verbose == False:\n for ii in progressbar.progressbar(range(self.maxiter)):\n xx, Delta = self._step(xx, Delta)\n\n return xx\n else:\n tic = time.time()\n grads = []\n costs = []\n for ii in progressbar.progressbar(range(self.maxiter)):\n xx, Delta, fxx, grad = self._step(xx, Delta)\n grads.append(grad)\n costs.append(fxx)\n if grad < self.mingradnorm:\n toc = time.time()\n break\n if ii == self.timeiter-1:\n toc = time.time()\n\n return xx, costs, grads, (toc - tic)\n","repo_name":"s-a-barnett/man-opt","sub_path":"src/solvers/rtr.py","file_name":"rtr.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18360537211","text":"import re\nimport string\nimport logging\nfrom typing import List, Set, Dict, Union\nfrom xml.etree.ElementTree import Element\nfrom concurrent.futures import ProcessPoolExecutor, as_completed\n\nfrom xpath_blindeye.config import MAX_NODE_NAME_LENGTH, ROOT_PATH, SOFT_CHARSET_FAIL, MAX_WORKERS\nfrom xpath_blindeye.requestor import request\nfrom xpath_blindeye.util import rreplace\n\nlogger = logging.getLogger(\"xpath-blindeye\")\n\n\nclass XNode(object):\n def __init__(self, node_name: str, path: str, parent: Element, xml_root: Element, saved_root: Element):\n self.node_name = node_name\n self.path = path\n self.parent = parent\n self.xml_root = xml_root\n self.saved_root = saved_root\n\n\n def _get_known_attribute_counts(self) -> Set[int]:\n nodes = self._get_similar_known_nodes()\n\n attr_counts = []\n for n in nodes:\n if n.text is not None:\n attr_counts.append(len(n.attrib))\n quick_list = set(attr_counts)\n return quick_list\n\n def _get_similar_known_nodes(self) -> List[Element]:\n search_path = self.path.replace(ROOT_PATH, '', 1)\n search_path = search_path.strip('/')\n index_re = re.compile(r'(\\[\\d+\\])')\n search_path = index_re.sub('', search_path)\n search_path = rreplace(search_path, '*', self.node_name, 1)\n if ROOT_PATH == self.path:\n search_path = \".\"\n nodes = self.xml_root.findall(search_path) # type: List[Element]\n if self.saved_root is not None:\n nodes.extend(self.saved_root.findall(search_path))\n return nodes\n\n def get_attribute_count(self):\n attr_path = self.path + '/@*'\n quick_list = self._get_known_attribute_counts()\n return self.get_quick_guess_or_count(attr_path, quick_list)\n\n def get_quick_guess_or_count(self, path: str, quick_list: Set[int]) -> int:\n q = 'count({path}) = {guess}'\n attr_count = mass_query(q, {'path': path}, quick_list)\n if attr_count is not None:\n return attr_count\n return self.get_count(path, start_count=0)\n\n def _get_known_attr_names(self) -> Set[str]:\n nodes = self._get_similar_known_nodes()\n attr_names = []\n for n in nodes:\n if n.attrib:\n attr_names.extend(n.attrib.keys())\n quick_list = set(attr_names)\n return quick_list\n\n def _get_known_attr_values(self, attr_name: str) -> Set[str]:\n nodes = self._get_similar_known_nodes()\n attr_values = []\n for n in nodes:\n if n.attrib and attr_name in n.attrib:\n attr_values.append(n.attrib[attr_name])\n quick_list = set(attr_values)\n return quick_list\n\n def get_attributes(self) -> Dict[str, str]:\n attr_count = 
self.get_attribute_count()\n attributes = {}\n if attr_count <= 0:\n return attributes\n for i in range(1, attr_count + 1):\n attr_path = self.path + '/@*[{index}]'.format(index=i)\n known_attr_names = self._get_known_attr_names()\n attr_name = mass_query(\"name({path}) = '{guess}'\", {'path': attr_path}, known_attr_names)\n if attr_name is None:\n attr_name = self.get_node_name(attr_path)\n\n known_attr_values = self._get_known_attr_values(attr_name)\n attr_value = mass_query(\"{path} = '{guess}'\", {'path': attr_path}, known_attr_values)\n if attr_value is None:\n attr_value = self.get_path_string_value(attr_path)\n attributes[attr_name] = attr_value\n return attributes\n\n def get_node_text_length(self) -> int:\n q = \"string-length(normalize-space({path}/text())) = {guess}\"\n return self._get_integer_guess(self.path, q, start_count=0, step=20)\n\n def _get_known_node_text(self) -> Set[str]:\n nodes = self._get_similar_known_nodes()\n\n node_texts = []\n for n in nodes:\n if n.text is not None:\n node_texts.append(n.text)\n quick_list = set(node_texts)\n return quick_list\n\n def get_node_text(self) -> Union[str, None]:\n substring_query = \"substring(translate(normalize-space({path}/text()), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'),{start},1) = '{guess}'\"\n capitalization_query = \"normalize-space({path}/text()) = '{guess}'\"\n text_length = self.get_node_text_length()\n if not text_length:\n return None\n\n known_node_texts = self._get_known_node_text()\n text = mass_query(capitalization_query, {'path': self.path}, known_node_texts)\n if text is None:\n text = self._extract_text(self.path, text_length, substring_query, capitalization_query)\n return text\n\n def get_known_child_node_names(self) -> Set[str]:\n nodes = self._get_similar_known_nodes()\n\n node_children = []\n for n in nodes:\n node_children.extend(list(n))\n quick_list = set([c.tag for c in node_children])\n return quick_list\n\n def get_child_node_names(self) -> List[str]:\n child_count = self.get_number_of_children()\n child_names = []\n if child_count == 0:\n return []\n known_names = self.get_known_child_node_names()\n for i in range(1, child_count + 1):\n child_name = None\n child_path = self.path + '/*[{}]'.format(i)\n q = \"name({path}) = '{guess}'\"\n if known_names:\n child_name = mass_query(q, {'path': child_path}, known_names)\n if child_name is None:\n child_name = self.get_node_name(child_path)\n known_names.add(child_name)\n child_names.append((child_name, child_path))\n return child_names\n\n def _get_known_children_counts(self) -> Set[int]:\n children = self._get_similar_known_nodes()\n quick_list = set([len(list(c)) for c in children])\n return quick_list\n\n def get_number_of_children(self) -> int:\n child_path = self.path + '/*'\n quick_list = self._get_known_children_counts()\n return self.get_quick_guess_or_count(child_path, quick_list)\n\n @classmethod\n def get_count(cls, path: str, start_count=0, step=50) -> int:\n q = 'count({path}) = {guess}'\n return cls._get_integer_guess(path, q, start_count, step)\n\n @classmethod\n def _get_integer_guess(cls, path: str, q: str, start_count: int, step: int) -> int:\n initial_range = range(start_count, start_count + step)\n count = mass_query(q, {'path': path}, initial_range)\n if count is None:\n return cls._get_integer_guess(path, q, start_count + step, step)\n else:\n return count\n\n @classmethod\n def get_node_name(cls, path: str) -> str:\n p = \"name({path})\".format(path=path)\n real_node_name = cls.get_path_string_value(p)\n 
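# the node name is recovered one character at a time via boolean substring probes against the injection point\n 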
return real_node_name\n\n @classmethod\n def get_path_string_value(cls, path: str) -> str:\n substring_query = \"substring(translate({path}, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'),{start},1) = '{guess}'\"\n capitalization_query = \"{path} = '{guess}'\"\n string_length = cls.get_length(path)\n real_string = cls._extract_text(path, string_length, substring_query, capitalization_query)\n return real_string\n\n @classmethod\n def _extract_text(cls, path: str, text_length: int, substring_query: str, capitalization_query: str) -> str:\n primary_charset = list('abcdefghijklmnopqrstuvwxyz0123456789 \\n\\t')\n fallback_charset = set('\\'!@#$%^&*()_=+\",./?-:;<>~`')\n charsets = [primary_charset, fallback_charset]\n node_text = \"\"\n for i in range(1, text_length + 1):\n current_letter = None\n for charset in charsets:\n current_letter = mass_query(substring_query, {'start': i, 'path': path}, charset)\n if current_letter is not None:\n break\n try:\n node_text += current_letter\n except TypeError:\n if SOFT_CHARSET_FAIL:\n logger.error('Could not identify character, using \"?\" instead')\n node_text += '?'\n else:\n raise\n # Included node_text.title() and string.capwords(node_text) for edge case on .title() --- he's turns to He'S\n guesses = [node_text, node_text.upper(), node_text.capitalize(), node_text.title(), string.capwords(node_text),\n '-'.join([w.capitalize() for w in node_text.split('-')])]\n real_text_value = mass_query(capitalization_query, {'path': path}, guesses)\n if real_text_value is None:\n if not SOFT_CHARSET_FAIL:\n # TODO: Iterate over upper/lower case for each alphabet value\n raise Exception('Edge case (Unknown case of node text {}), handle later (or now :) )'.format(node_text))\n else:\n logger.error(\"None of {} matched case for {}\".format(guesses, node_text))\n return node_text\n return real_text_value\n\n @classmethod\n def get_length(cls, path: str, start_length=0) -> int:\n if start_length > MAX_NODE_NAME_LENGTH:\n raise Exception(\"Name length of a node exeeded {}\".format(MAX_NODE_NAME_LENGTH))\n q = \"string-length({path}) = {guess}\"\n initial_range = range(start_length + 1, start_length + 11)\n length = mass_query(q, {'path': path}, initial_range)\n if length is None:\n return cls.get_length(path, start_length + 10)\n else:\n return length\n\n\ndef mass_query(query: str, query_parameters: Dict[str, str], guesses: Union[Set, List]) -> Union[int, str, None]:\n correct_guess = None\n params = query_parameters.copy()\n if len(guesses) > 0:\n with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:\n\n future_to_index = {}\n for g in guesses:\n # Change query to work with ' and \" character\n if not isinstance(g, int):\n if \"'\" in g:\n query = query.replace(\"'\", '\"')\n elif '\"' in g:\n query = query.replace('\"', \"'\")\n future_to_index[executor.submit(request, query.format(guess=g, **params))] = g\n\n for future in as_completed(future_to_index):\n guess = future_to_index[future]\n is_correct = future.result()\n if is_correct:\n logger.info(\"{} - {}\".format(guess, is_correct))\n else:\n logger.debug(\"{} - {}\".format(guess, is_correct))\n if is_correct:\n correct_guess = guess\n break\n executor.shutdown()\n return correct_guess\n","repo_name":"hackersql/------","sub_path":"Web/xpath_injection/xpath-blindeye/xpath_blindeye/xnode.py","file_name":"xnode.py","file_ext":"py","file_size_in_byte":10868,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"11067460323","text":"# 
searching for words by the presence of letters using regular expressions from the re library\n\nimport re\n\n# words = \"my name is Oleg, I am 35 years old\"\n# search_word = \"name\"\n# if re.search(r'\\bname\\b', words):\n# print(search_word) # prints the word if an exact match was found\n\n\n\nproducts_upsells = \"'\\\"2528\\\":{\\\"name\\\":{\\\"value\\\":\\\"NDL_2894\\\",\\\"class\\\":\\\".product-NDL-info-main NDLwefwefef .product-nam {\\\"value\\\":\\\"NDL_2893\\\"'\"\n\n# print(*filter(lambda x: 'N' in x, products_upsells))\n\n\n\n# text = \"Hregerwg The film Titanic NDL_2894 was released in 1998\"\n# result = re.match(r\"[a-zA-Z]+\", text)\n# print(result.group(0)) # prints the first word in the text\n\n\nresult = re.findall(r'[NDL]\w+', products_upsells)\nprint (result) # prints all substrings that start with N, D or L\n\n\n# words = ['Python', 'NDL_2894 wewefwef', 'easy', 'to', 'learn']\n# print(*filter(lambda x: 'N' in x, words)) # prints the items that contain the letter N","repo_name":"romansozinov/my-examples","sub_path":"search_word.py","file_name":"search_word.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39233664847","text":"\"\"\"\nwritten by ryanreadbooks\ndate: 2021/11/3\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass DoubleConv(nn.Module):\n \"\"\"Two conv. layers with batch norm.\"\"\"\n\n def __init__(self, in_channels, out_channels):\n \"\"\"Initialize_layers.\"\"\"\n super().__init__()\n self.double_conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, 3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, feature):\n \"\"\"Forward pass.\"\"\"\n return self.double_conv(feature)\n\n\nclass Down(nn.Module):\n \"\"\"Downscaling with maxpool then double conv.\"\"\"\n\n def __init__(self, in_channels, out_channels):\n \"\"\"Initialize_layers.\"\"\"\n super().__init__()\n self.maxpool_conv = nn.Sequential(\n nn.MaxPool2d(2),\n DoubleConv(in_channels, out_channels)\n )\n\n def forward(self, feature):\n \"\"\"Forward pass.\"\"\"\n return self.maxpool_conv(feature)\n\n\nclass Up(nn.Module):\n \"\"\"Upscaling then double conv.\"\"\"\n\n def __init__(self, in_channels, out_channels):\n \"\"\"Initialize_layers.\"\"\"\n super().__init__()\n self.upscale = nn.ConvTranspose2d(\n in_channels // 2, in_channels // 2, 2, stride=2)\n self.conv = DoubleConv(in_channels, out_channels)\n\n def forward(self, new, old):\n \"\"\"Forward pass.\"\"\"\n new = self.upscale(new)\n diff_y = old.shape[2] - new.shape[2]\n diff_x = old.shape[3] - new.shape[3]\n half_y = diff_y // 2\n half_x = diff_x // 2\n new = F.pad(new, (half_x, diff_x - half_x, half_y, diff_y - half_y))\n return self.conv(torch.cat([old, new], dim=1))\n\n\nclass UNet(nn.Module):\n \"\"\"Original U-net.\"\"\"\n\n def __init__(self, input_channels=1, output_channels=1):\n \"\"\"Initialize_layers.\"\"\"\n super().__init__()\n self.num_outs = output_channels\n\n # Encoders\n self.inc = DoubleConv(input_channels, 64)\n self.down1 = Down(64, 128)\n self.down2 = Down(128, 256)\n self.down3 = Down(256, 512)\n self.down4 = Down(512, 512)\n self.up1 = Up(1024, 256)\n self.up2 = Up(512, 128)\n self.up3 = Up(256, 64)\n self.up4 = Up(128, output_channels)\n\n def forward(self, img):\n \"\"\"Forward pass.\"\"\"\n out0 = self.inc(img)\n out1 = 
self.down1(out0)\n out2 = self.down2(out1)\n out3 = self.down3(out2)\n out4 = self.down4(out3)\n img = self.up1(out4, out3)\n img = self.up2(img, out2)\n img = self.up3(img, out1)\n img = self.up4(img, out0)\n\n return img\n","repo_name":"ryanreadbooks/Modified-GGCNN","sub_path":"models/unet/unet_backend.py","file_name":"unet_backend.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"40262336918","text":"import mpyc\nimport json\nimport sys\nimport os\nimport logging\nimport requests\nimport time\nimport datetime\n\nimport subprocess\nfrom flask import abort, Flask, render_template, request\nfrom pymongo import MongoClient\n\napp = Flask(__name__,static_folder='./static')\nlogging.basicConfig(level=logging.DEBUG)\nl = logging.getLogger(\"web_app\")\n\n# MongoDB client\n#client = MongoClient(\"mongodb://172.20.96.113:27017\")\nclient = MongoClient(\"mongodb://mongo:27017\")\ndb = client.test_db\nmain_wd = os.getcwd()\nis_running = False\n\ndef store_sample(line, datapart):\n \"\"\"\n Stores a single data sample within MongoDB.\n\n Input:\n line : line as a dictionary.\n Output:\n success : bool if DB insertion was successful.\n \"\"\"\n try:\n if datapart == 'train':\n sensor_data = db.train.sensor_data\n elif datapart == 'test':\n sensor_data = db.test.sensor_data\n elif datapart == 'model':\n sensor_data = db.model.sensor_data\n sensor_id = sensor_data.insert_one(line).inserted_id\n l.debug(f\"Inserted line with id {sensor_id}\")\n except Exception as e:\n l.error(e)\n return False\n return True\n\n\n# Data store API\n@app.route(\"/store\", methods=[\"PUT\"])\ndef put_training_data():\n datapart = request.args.get('datapart')\n if not request.is_json:\n l.error(\"Request was not JSON\")\n return abort(400)\n l.info(f\"/store received data: {request.json}\")\n line = json.loads(request.json)\n store_sample(line, datapart)\n return \"\"\n\n\n# Clears all database entries\n@app.route(\"/clear_all\")\ndef clear_all():\n datapart = request.args.get('datapart')\n if datapart == 'train':\n sensor_data = db.train.sensor_data\n elif datapart == 'test':\n sensor_data = db.test.sensor_data\n elif datapart == 'model':\n sensor_data = db.model.sensor_data\n try:\n sensor_data.drop()\n except Exception as e:\n l.error(e)\n return \"Failed\"\n return \"Success\"\n\n\n@app.route(\"/fetch\")\ndef fetch_all():\n datapart = request.args.get('datapart')\n if datapart == 'train':\n sensor_data = db.train.sensor_data\n elif datapart == 'test':\n sensor_data = db.test.sensor_data\n elif datapart == 'model':\n sensor_data = db.model.sensor_data\n cursor = sensor_data.find()\n samples = [s for s in cursor]\n return render_template(\"fetch_all.html\", items=samples)\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"demo.html\")\n\n\n@app.route(\"/mpyc_launch\", methods=[\"GET\"])\ndef mpyc_launch():\n\n def get_api_name(api_name):\n return api_name + '.py'\n\n http_arg = request.args.get('api')\n l.debug(f'Arg api : {http_arg}')\n script_name = get_api_name(http_arg)\n l.debug(f'Api : {script_name}')\n if script_name is None:\n return \"400\"\n\n l.debug(request)\n os.chdir(main_wd)\n test_path=\"./mpyc/demos\"\n os.chdir(test_path)\n # Raise other parties\n Party = os.getenv(f\"Party\")\n l.debug(f'Party: {Party}')\n if Party == '0':\n global is_running\n if is_running:\n return '200'\n else:\n is_running = True\n for i in range(int(os.getenv('N_PARTIES')) - 1, 0, -1):\n party_host = 
os.getenv(f'PARTY_{i}_HOST')\n party_port = os.getenv(f'PARTY_{i}_PORT')\n host_addr = f'http://{party_host}:{party_port}/mpyc_launch?api={http_arg}'\n l.debug(f'Target host: {host_addr}')\n r = requests.get(host_addr)\n l.debug(f'Party {i} response: {r.text}')\n time.sleep(2.50)\n # Fetch and parse result\n l.debug(f'{datetime.datetime.now()}: start mpyc script...')\n #raw_result = os.popen(f\"python -u run.py 3 average.py\").read()\n process = subprocess.Popen(['python', script_name, '-c', f'party{3}_0.ini'], stdout=subprocess.PIPE)\n stdout, stderr = process.communicate()\n is_running = False\n l.debug(f'{datetime.datetime.now()}: end mpyc compute script...')\n\n #print(f'output: {stdout.decode().split()}')\n output_formatted = stdout.decode().split('$$$')[1]\n output_formatted = output_formatted.split('$$$')[0]\n output_formatted = output_formatted.strip()\n output_formatted = output_formatted.replace('\\n', '')\n output_formatted = output_formatted.strip(',')\n l.debug(f'{output_formatted}')\n #for s in output_formatted.split(','):\n # l.debug(f'{s}')\n #output = [float(s) for s in output_formatted.split(',')]\n #return render_template(\"demo.html\", result=output)\n #return render_template(\"fetch_all.html\", items=output)\n return output_formatted\n\n else:\n l.debug(f'Party config: party{3}_{Party}.ini')\n #os.popen(f\"python average.py -c party{3}_{Party}.ini > /dev/null 2>&1\")\n os.system(f'python {script_name} -c party{3}_{Party}.ini &')\n return \"200\"\n\n\n@app.route(\"/mpyc_compute\", methods=[\"GET\"])\ndef mpyc_compute():\n\n def get_api_name(api_name):\n return api_name + '.py'\n\n http_arg = request.args.get('api')\n l.debug(f'Compute arg api : {http_arg}')\n script_name = get_api_name(http_arg)\n l.debug(f'Compute api : {script_name}')\n if script_name is None:\n return \"400\"\n\n #l.debug(request)\n os.chdir(main_wd)\n test_path=\"./mpyc/demos\"\n os.chdir(test_path)\n # Raise other parties\n Party = os.getenv(f\"Party\")\n l.debug(f'Party: {Party}')\n\n if Party == 0 or Party == '0':\n time.sleep(2*0.50)\n if Party == 1 or Party == '1':\n time.sleep(0.50)\n\n l.debug(f'{datetime.datetime.now()}: start mpyc compute script...')\n #raw_result = os.popen(f\"python -u run.py 3 average.py\").read()\n process = subprocess.Popen(['python', script_name, '-c', f'party{3}_{Party}.ini'], stdout=subprocess.PIPE)#shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n stdout, stderr = process.communicate()\n\n output_formatted = stdout.decode().split('$$$')[1]\n output_formatted = output_formatted.split('$$$')[0]\n output_formatted = output_formatted.strip()\n output_formatted = output_formatted.replace('\\n', '')\n output_formatted = output_formatted.strip(',')\n l.debug(f'{output_formatted}')\n return output_formatted\n\n\n# Inference API\n@app.route(\"/test\", methods=[\"GET\"])\ndef get_inference():\n # Not yet implemented\n return abort(400)\n\n\nif __name__ == '__main__':\n l.debug(\"Initialising server...\")\n Party = os.getenv(f\"Party\")\n host_name = f'server{Party}_web_1'\n print(f'host name: {host_name}')\n app.run(debug=False,\n #ssl_context=(os.getenv(\"CERT_PATH\"), os.getenv(\"SECRET_PATH\")),\n host=host_name)\n","repo_name":"Fluxmux/securefacematching","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6696,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"71567324002","text":"import discord\nimport asyncio\nimport configparser\nimport inspect\nimport os\nfrom 
datetime import datetime\nfrom time import strftime\n\ndef lister(array, and_=False):\n compiled = array[0]\n if len(array)==1:\n return compiled\n if len(array)>2:\n for n in array[1:-1]:\n compiled += \", \" + n\n compiled += \",\" #Oxford comma\n if len(array) == 2 or and_ == True:\n compiled += \" and\"\n compiled += \" \" + array[-1]\n return compiled\n\ndef capital(text):\n words = text.split(' ')\n text = ''\n for w in words:\n w = w[0].upper() + w[1:]\n text += w + ' '\n return text[:-1]\n\ndef file_read(name):\n items = []\n file = open(name)\n while True:\n line = file.readline()\n if line == '':\n break\n if line == '\\n':\n continue\n if line[-1] == '\\n':\n items.append(line[:-1])\n else:\n items.append(line)\n file.close()\n return items\n\ndef file_write(name, items):\n file = open(name, 'w')\n while len(items) > 0:\n file.write(items.pop(0)+'\\n')\n file.close()\n \ndef file_append(name, item):\n file = open(name, 'a')\n file.write(str(item)+'\\n')\n file.close()\n \ndef file_append_utf8(name, item):\n file = open(name, 'a', encoding='utf8')\n file.write(str(item)+'\\n')\n file.close()\n \ndef file_create(name):\n file = open(name, 'w+')\n file.close()\n \ndef file_clean(name):\n items = file_read(name)\n file_write(name, sorted(items))\n \n\n\nclass MemberError(Exception):\n pass\nclass ChannelError(Exception):\n pass\nclass GameError(Exception):\n def __init__(self, name=None):\n self.name = name\nclass PlayerError(Exception):\n pass\n\n\n\nclass Config:\n def __init__(self, cfg):\n self.file = cfg\n config = configparser.ConfigParser(interpolation=None)\n config.read(cfg, encoding='utf-8')\n self.token = config.get('Essential','Token')\n self.role_strings = config.get('Essential','Roles').split(' ')\n self.prefix = config.get('Optional','Prefix',fallback='!')\n self.channel = config.getint('Essential','MainChannel',fallback=None)\n self.invites = config.getint('Optional','GameChannel',fallback=self.channel)\n self.banned_strings = config.get('Optional','Strings',fallback='')\n self.delete_commands = config.get('Optional','Delete',fallback=False)\n self.games = file_read('gameslist.txt')\n \n self.banned_strings = self.banned_strings.split(' ')\n \n self.roles = []\n for rs in self.role_strings:\n self.roles.append(int(rs))\n\n\n\nclass Bot(discord.Client):\n def __init__(self):\n super().__init__(max_messages=500, intents=discord.Intents.all())\n self.config = Config('mb_settings.ini')\n self.active = True\n self.restricted_commands = {'channel': -1,'close': -1, 'verify': 1, 'clean': -1, 'add': -2, 'alias': -2, 'remove': -1, 'reload': -1, 'join': 0, 'leave': 0, 'players': 1, 'invite': 1, 'scrub': -1}\n self.external_commands = ['channel', 'scrub', 'vote']\n self.destruct_commands = ['vote']\n self.channel = None\n self.invite_channel = None\n self.server = None\n self.num_roles = len(self.config.roles)\n self.numerical_reactions = ['\\u0031\\u20E3', '\\u0032\\u20E3', '\\u0033\\u20E3', '\\u0034\\u20E3', '\\u0035\\u20E3', '\\u0036\\u20E3', '\\u0037\\u20E3', '\\u0038\\u20E3', '\\u0039\\u20E3']\n self.list_clean()\n self.gameslists = dict()\n self.aliases = dict()\n for g in self.config.games:\n self.gameslists[g] = file_read('games/%s.txt' % g)\n for a in file_read('aliases/%s.txt' % g):\n self.aliases[a] = g\n self.alias_list = list(self.aliases.keys())\n \n \n async def on_ready(self):\n print('------')\n print('Logged in as')\n print(self.user.name)\n print(self.user.id)\n self.channel = self.get_channel(self.config.channel)\n self.invite_channel = 
self.get_channel(self.config.invites)\n self.server = self.guilds[0]\n print('Listening on')\n print(self.server.name)\n print(self.channel.name)\n print(self.channel.id)\n print('Broadcasting on')\n print(self.server.name)\n print(self.invite_channel.name)\n print(self.invite_channel.id)\n print('------') \n \n def list_clean(self):\n file_clean('gameslist.txt')\n for g in self.config.games:\n try:\n file_clean('games/%s.txt' % g)\n except FileNotFoundError:\n file_create('games/%s.txt' % g)\n try:\n file_clean('aliases/%s.txt' % g)\n except FileNotFoundError:\n file_create('aliases/%s.txt' % g)\n\n def find(self, name):\n for m in self.server.members:\n if m.name.lower() == name.lower():\n return m\n raise MemberError()\n \n def find_channel(self, name):\n for c in self.server.channels:\n if c.name.lower() == name.lower():\n return c\n raise ChannelError()\n \n def game_check(self, game):\n if not game.lower() in self.config.games:\n if game.lower() in self.alias_list:\n return self.aliases[game]\n else:\n raise GameError\n else:\n return game\n \n async def game_join(self, user, game):\n uid = str(user.id)\n if uid in self.gameslists[game.lower()]:\n return False\n \n self.gameslists[game.lower()].append(uid)\n file_append('games/%s.txt' % game.lower(), uid)\n return True\n \n async def multi_join(self, user):\n games = await self.multi_query(user, self.config.games, 'game groups to join')\n \n for g in games:\n await self.game_join(user, g)\n \n def level(self, user):\n highest = 0\n for r in user.roles:\n try:\n temp = self.config.roles.index(r.id) + 1\n except:\n continue\n else:\n if(temp > highest):\n highest = temp\n return highest \n \n def list_roles(self, levels=range(0)):\n text = ''\n if levels == range(0):\n levels = range(self.num_roles)\n \n for l in levels:\n for r in self.server.roles:\n if r.id == self.config.roles[l]:\n name = r.name\n break\n text += '%d: %s\\n' % (l+1, name)\n return text\n \n def list_players(self, game):\n players = []\n \n for u in self.gameslists[game.lower()]:\n players.append(self.server.get_member(int(u)))\n while None in players:\n players.remove(None)\n \n return players\n \n def log(self, message, color=0):\n file_append_utf8('mb.log', '\\u001b[95m%s \\u001b[96m#%s \\u001b[93m@%s \\u001b[%im%s \\u001b[0m' % (strftime('%D %T'), message.channel.name, message.author.name, color, message.content))\n \n def allowed(self, user, command):\n return not (command in list(self.restricted_commands.keys())) or \\\n self.level(user) > ( self.num_roles + self.restricted_commands[command] ) % self.num_roles\n \n async def query(self, user, mess):\n reactions = ['\\u2705','\\u274c']\n for r in reactions:\n await mess.add_reaction(r)\n \n def reac_check(reaction, author):\n return reaction.message.id == mess.id and reaction.emoji in reactions and author == user\n try:\n answer, _ = await self.wait_for('reaction_add', check=reac_check, timeout=30)\n except:\n await mess.delete()\n raise TimeoutError\n \n return answer.emoji == reactions[0]\n \n async def multi_query(self, user, items, selecting='\\b'):\n chan = self.channel\n nreact = self.numerical_reactions\n \n selected = []\n for i in range(0, len(items), 9):\n remaining = len(items) - i\n if remaining < 9:\n amount = remaining\n else:\n amount = 9\n \n options = ''\n for j in range(1, amount+1):\n options += '%d: %s\\n' % (j, items[i+j-1])\n \n request = await chan.send('Select %s from the list below by clicking on the corresponding reactions. 
Then click the check button to continue.\\n```%s```' % (selecting, options))\n for j in range(0, amount):\n await request.add_reaction(nreact[j])\n await request.add_reaction('\\u2705')\n \n def reac_check(reaction, author):\n return reaction.message.id == request.id and reaction.emoji == '\\u2705' and author == user\n try:\n await self.wait_for('reaction_add', check=reac_check, timeout=40)\n except:\n await request.delete()\n raise TimeoutError\n \n request = await chan.fetch_message(request.id)\n reactions = request.reactions[:amount]\n await request.delete()\n \n for r in reactions:\n if r.count > 1:\n selected.append(items[i + nreact.index(r.emoji)])\n \n return selected\n \n async def get_mentions(self, user, game):\n chan = self.channel\n players = self.list_players(game)\n try:\n players.remove(user)\n except:\n pass\n \n selected = []\n if await self.query(user, await chan.send('Do you want to invite all online players from the %s group?' % game)):\n for p in players:\n if str(p.status) == 'online':\n selected.append(p)\n \n else:\n selected = await self.multi_query(user, players, 'players to invite')\n \n if not len(selected):\n if await self.query(user, await chan.send('You appear to have not selected any players to invite. (Or they might just be offline)\\nTry again?')):\n return await self.get_mentions(user, game)\n return []\n \n mentions = []\n for u in selected:\n mentions.append(u.mention)\n \n return mentions\n \n async def message_response(self, user, text, chan=None):\n if not chan:\n chan = self.channel\n mess = await chan.send(text)\n \n def mess_check(message):\n return message.author == user and message.channel == chan\n \n try:\n answer = await self.wait_for('message', check=mess_check, timeout=30)\n except:\n #await mess.delete()\n raise TimeoutError\n \n return answer.content\n \n async def screen(self, mess):\n text = mess.content.translate(' ')\n \n bad = False\n for s in self.config.banned_strings:\n if s in text:\n bad = True\n break\n \n if bad:\n self.log(mess, 91)\n await mess.channel.send('Message from %s was removed because it contained a banned character combination.' % mess.author.mention)\n await mess.delete()\n \n return bad\n \n \n async def cmd_add(self, user, game):\n '''Adds a new game invite group\n Usage: .add [game]\n '''\n if game.lower() in self.config.games:\n raise GameError(game.lower())\n \n self.config.games.append(game.lower())\n self.gameslists[game.lower()] = []\n file_append('gameslist.txt', game.lower())\n file_create('games/%s.txt' % game.lower())\n print('Game added by %s - %s' % (user.name, game))\n await self.channel.send('%s game invite group added by %s' % (game, user.name))\n \n async def cmd_alias(self, user, game):\n '''Adds an alias for a game invite group, allowing it to be referred by multiple names\n Usage: .alias [game]\n '''\n game = self.game_check(game) \n chan = self.channel\n \n answer = await self.message_response(user, 'Respond with a list of aliases to use for the %s game group.\\nMultiple aliases should be placed on separate lines.' 
% game)\n \n games = list(self.gameslists.keys())\n aliases = []\n existing = []\n for al in answer.split('\\n'):\n a = al.lower()\n if a in aliases:\n pass\n elif a in games or a in self.alias_list:\n existing.append(a)\n else:\n aliases.append(a)\n self.alias_list.append(a)\n self.aliases[a] = game\n file_append('aliases/%s.txt' % game, a)\n \n if aliases:\n await chan.send('The following aliases were added for the %s game group:\\n```%s```' % (game, lister(aliases, and_=True)))\n if existing:\n await chan.send('The following aliases were not added, as they are already in use:\\n```%s```' % (game, lister(existing, and_=True)))\n \n async def cmd_channel(self, chan):\n self.channel = chan\n print('Channel changed to %s.' % chan.name)\n await chan.send('Channel changed to %s.' % chan.name)\n \n async def cmd_clean(self):\n self.list_clean()\n \n async def cmd_close(self):\n await self.channel.send('Clocking out...')\n self.active = False\n await self.logout()\n exit()\n \n async def cmd_help(self, user):\n functions = inspect.getmembers(self, predicate=inspect.ismethod)\n commands = []\n for t in functions:\n if t[0][0:4] == 'cmd_' and self.allowed(user, t[0][4:]):\n commands.append(self.config.prefix + t[0][4:])\n await self.channel.send(\"Command list: \\n```%s```\" % lister(commands))\n \n async def cmd_join(self, user, game=None):\n if game == None:\n await self.multi_join(user)\n await self.channel.send('%s, you have have been added to all the groups you selected.' % user.name)\n return\n \n game = self.game_check(game)\n \n if await self.game_join(user, game):\n await self.channel.send('You are now part of the %s group, %s.' % (capital(game), user.name))\n else:\n await self.channel.send('You are already part of the %s group.' % capital(game))\n \n async def cmd_games(self):\n if len(self.config.games) == 0:\n await self.channel.send('There are no game invite groups. Hopefully someone will add one!')\n return\n \n games = []\n self.config.games = sorted(self.config.games)\n for g in self.config.games:\n games.append(capital(g))\n await self.channel.send('Here is the list of game invite groups:\\n*(Capitalization is not important)*\\n```%s```' % lister(games, and_=True))\n \n async def cmd_invite(self, user, game):\n game = self.game_check(game)\n chan = self.channel\n \n mentions = await self.get_mentions(user, game)\n if not len(mentions):\n return\n \n if chan != self.invite_channel:\n await chan.send('Broadcasting invites on text channel #%s...' % self.invite_channel.name)\n await self.invite_channel.send('The following players have been invited by %s to play %s:\\n%s.' % (user.name, game, lister(mentions, True)))\n \n async def cmd_leave(self, user, game):\n game = self.game_check(game)\n uid = str(user.id)\n \n if not uid in self.gameslists[game.lower()]:\n await self.channel.send('You are not part of the %s group.' % game)\n return\n \n self.gameslists[game.lower()].remove(uid)\n file_write('games/%s.txt' % game.lower(), self.gameslists[game.lower()])\n await self.channel.send('You are no longer part of the %s group, %s.' % (game, user.name))\n \n async def cmd_players(self, user, game):\n game = self.game_check(game)\n \n if len(self.gameslists[game.lower()]) == 0:\n await self.channel.send('There are no players in the %s group. Perhaps you should join.' 
% game)\n return\n \n players = self.list_players(game)\n namelist = []\n for p in players:\n namelist.append(p.name)\n \n await self.channel.send('The following users play %s: \\n```%s```' % (game, lister(namelist, and_=True)))\n \n async def cmd_roles(self):\n await self.channel.send('Managed Roles:\\n```%s```' % self.list_roles())\n\n async def cmd_reload(self):\n self.config = Config('mb_settings.ini')\n self.num_roles = len(self.config.roles)\n self.gameslists = dict()\n for g in self.config.games:\n self.gameslists[g] = file_read('games/%s.txt' % g)\n \n async def cmd_remove(self, user, game):\n game = self.game_check(game)\n \n self.config.games.remove(game.lower())\n self.gameslists[game.lower()] = []\n file_write('gameslist.txt', self.config.games)\n os.remove('games/%s.txt' % game.lower())\n print('Game removed by %s - %s' % (user.name, game))\n await self.channel.send('%s game invite group removed by %s' % (game, user.name))\n \n await self.cmd_reload() # don't want to do this but it works\n \n async def cmd_schedule(self, user, game):\n game = self.game_check(game)\n chan = self.channel\n \n time = await self.message_response(user, 'What time would you like to schedule the invite?\\nEnter the time as HH:MM')\n \n def is_valid(time):\n return len(time) == 5 and time[:2].isnumeric() and time[3:].isnumeric() and time[2] == ':'\n \n while not is_valid(time):\n time = await self.message_response(user, 'That time appears invalid. Here are two examples of proper times: 09:35 and 21:00')\n \n await chan.send('**Note that if you select the \"all online players\" option, it will be based on who is online currently, not at the specified time.**')\n mentions = await self.get_mentions(user, game)\n if not len(mentions):\n return\n \n timeout = ((int(time[:2]) - int(strftime('%H'))) % 24 * 60 + (int(time[3:]) - int(strftime('%M')))) * 60\n if int(time[:2]) < 12:\n if timeout > 43200:\n timeout = timeout - 43200\n disptime = '%s p.m.' % time\n else:\n disptime = '%s a.m.' % time\n else:\n disptime = '%d%s p.m.' % (int(time[:2]) - 12, time[2:])\n if disptime[0] == '0':\n disptime = disptime[1:]\n \n if chan != self.invite_channel:\n await chan.send('Broadcasting invites on text channel #%s...' % self.invite_channel.name)\n mess = await self.invite_channel.send('The following players have been invited by %s to play %s at %s\\n%s.\\nAnyone that wants to be included in the follow-up ping, react with \\u2705' % (user.name, game, disptime, lister(mentions, True)))\n await mess.add_reaction('\\u2705')\n\n mentions = []\n def reac_check(reaction, author):\n if reaction.message.id == mess.id and reaction.emoji == '\\u2705':\n if not author.mention in mentions:\n mentions.append(author.mention)\n return False\n \n try:\n await self.wait_for('reaction_add', check=reac_check, timeout=timeout)\n except asyncio.TimeoutError:\n pass\n \n await self.invite_channel.send('The following players have been invited by %s to play %s\\n%s.' 
% (user.mention, game, lister(mentions, True)))\n \n async def cmd_scrub(self, channel, amount):\n if(int(amount)>100):\n await channel.send('That is too many messages.')\n else:\n await channel.purge(limit=int(amount)+1)\n \n async def cmd_verify(self, sponsor, name):\n chan = self.channel\n \n endorsed = self.find(name)\n \n level = self.level(sponsor) - 1\n elevel = self.level(endorsed)\n levels = range(elevel, level)\n \n if elevel >= level:\n await chan.send(\"You don't have permission to grant that user a role higher than they currently have.\")\n return\n \n request = await chan.send('Please select a role to assign to %s:\\n```%s```' % (endorsed.name, self.list_roles(levels)))\n \n reactions = self.numerical_reactions\n for i in levels:\n await request.add_reaction(reactions[i])\n \n def reac_check(reaction, author):\n return reaction.message.id == request.id and reaction.emoji in reactions[elevel:level] and author == sponsor\n try:\n answer, _ = await self.wait_for('reaction_add', check=reac_check, timeout=30)\n except:\n await request.delete()\n raise TimeoutError\n \n for i in levels:\n if answer.emoji == reactions[i]:\n for r in self.server.roles:\n if r.id == self.config.roles[i]:\n await endorsed.add_roles(r)\n await chan.send('%s successfully given %s role by %s.' % (endorsed.name, r.name, sponsor.name))\n await request.delete()\n return\n \n async def cmd_vote(self, chan, number = None):\n if number:\n if int(number) > 9:\n await chan.send('The maximum number of choices is 9.')\n return\n reactions = self.numerical_reactions[:int(number)]\n else:\n reactions = ['\\u2705','\\u2754','\\u274c']\n \n messages = await chan.history(limit=2).flatten()\n mess = messages[1]\n for r in reactions:\n await mess.add_reaction(r)\n \n async def on_member_update(self, before, after):\n if before.nick != after.nick:\n if not before.nick:\n m = await self.channel.send('**Nickname change detected**\\n\\tOld: %s\\n\\tNew: %s' % (before.name, after.nick))\n elif not after.nick:\n m = await self.channel.send('**Nickname change detected**\\n\\tOld: %s\\n\\tNew: %s' % (before.nick, after.name))\n else:\n m = await self.channel.send('**Nickname change detected**\\n\\tOld: %s\\n\\tNew: %s' % (before.nick, after.nick))\n self.log(m, 94)\n \n async def on_user_update(self, before, after):\n if(before.name != after.name):\n m = await self.channel.send('**Username change detected**\\n\\tOld: %s\\n\\tNew: %s' % (before.name, after.name))\n self.log(m, 94)\n \n async def on_message(self, mess):\n await self.wait_until_ready()\n \n if await self.screen(mess):\n return\n if not mess.content.startswith(self.config.prefix):\n return \n if mess.author == self.user:\n return\n \n try:\n space = mess.content.index(' ')\n except ValueError:\n space = None\n command = mess.content[1:space]\n \n if not (mess.channel == self.channel or command in self.external_commands):\n return\n \n print('Command detected - %s' % command)\n self.log(mess)\n \n if command in self.external_commands:\n parameter1 = mess.channel\n else:\n parameter1 = mess.author\n \n if space == None:\n parameter2 = None \n else:\n parameter2 = mess.content[space+1:]\n \n try:\n function = getattr(self, 'cmd_%s' % command)\n except:\n print('Command invalid.')\n else:\n if self.allowed(mess.author, command):\n params = len(inspect.signature(function).parameters)\n \n try:\n if params == 0:\n await function()\n elif params == 1:\n await function(parameter1)\n elif params == 2:\n await function(parameter1, parameter2)\n \n except MemberError:\n await 
mess.channel.send('Could not find a user by the name of %s' % parameter2)\n\n except GameError as err:\n if err.name:\n await mess.channel.send('The %s game group already exists.' % err.name)\n else:\n await mess.channel.send('Could not find a game group by the name of %s' % parameter2)\n if self.allowed(mess.author, 'add') and await self.query(mess.author, await mess.channel.send('Would you like to add it?')):\n await self.cmd_add(parameter1, parameter2)\n \n except PlayerError:\n await self.channel.send('There are no players in the %s group. Perhaps you should join.' % parameter2)\n \n except TimeoutError:\n await mess.channel.send('Request timed out.')\n \n else:\n if self.config.delete_commands or command in self.destruct_commands:\n await mess.delete()\n \n else:\n await mess.channel.send('You do not have permission to use that command.')\n \n \n \nif __name__ == '__main__':\n from time import sleep\n t=Bot()\n while t.active:\n t.run(t.config.token)\n sleep(60) # a blocking delay; a bare asyncio.sleep here would never be awaited\n","repo_name":"XxPieIsTastyxX/Discord-Manager","sub_path":"managerbot.py","file_name":"managerbot.py","file_ext":"py","file_size_in_byte":25909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12803836493","text":"import tweepy\r\n#the fields left blank are filled in individually for each user\r\nTWITTER_CONSUMER_KEY = ''\r\nTWITTER_CONSUMER_SECRET = ''\r\nTWITTER_ACCESS_TOKEN = ''\r\nTWITTER_ACCESS_TOKEN_SECRET = ''\r\nauth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)\r\nauth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)\r\napi = tweepy.API(auth,wait_on_rate_limit=True)\r\n\r\n\"\"\"\r\n#method 1\r\ntakipciler = api.followers (id=\"mubbud\", count=100)\r\ni=0\r\nfor takipci in takipciler:\r\n \r\n i=i+1\r\n print(i,\"-\",takipci.name,\"(\",takipci.screen_name,\")\",takipci.description)\r\n\"\"\"\r\n\r\n#method 2\r\ni=0\r\ntakipciler = tweepy.Cursor(api.followers, id=\"\").items(100) #in id you must enter the username of the user whose followers you want to look up\r\nfor takipci in takipciler:\r\n i=i+1\r\n print(i,takipci.name,takipci.screen_name)\r\n \r\n","repo_name":"ballibora/tweepy-denemem","sub_path":"tweet_1_takipc.py","file_name":"tweet_1_takipc.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27492355470","text":"try:\n import Queue as Q\nexcept ImportError:\n import queue as Q\n\nq = Q.PriorityQueue()\nq.put(10)\nq.put(1)\nq.put(5)\nwhile not q.empty():\n\tprint(q.get())\n\n\nq = Q.PriorityQueue()\nq.put((10,'ten'))\nq.put((1,'one'))\nq.put((5,'five'))\nwhile not q.empty():\n print(q.get())\n\n# custom objects need an ordering method: the old __cmp__/cmp protocol was removed in Python 3, so __lt__ is defined below instead\n\ntry:\n import Queue as Q # ver. 
< 3.0\nexcept ImportError:\n import queue as Q\n\nclass Skill(object):\n def __init__(self, priority, description):\n self.priority = priority\n self.description = description\n print('New Level:', description)\n return\n def __lt__(self, other):\n return self.priority < other.priority\n\nq = Q.PriorityQueue()\n\nq.put(Skill(5, 'Proficient'))\nq.put(Skill(10, 'Expert'))\nq.put(Skill(1, 'Novice'))\n\nwhile not q.empty():\n next_level = q.get()\n print('Processing level:', next_level.description)","repo_name":"Pkpallaw16/Data-Structure-And-Algorithms","sub_path":"16 Priority Queue/Priorityqueue.py","file_name":"Priorityqueue.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11246226965","text":"from django.contrib import admin\nfrom django.urls import path\nfrom home import views\n\nurlpatterns = [\n path(\"\",views.index,name='home'),\n path(\"settle/\",views.settle,name='settle'),\n path(\"table\",views.table,name='table'),\n path(\"script/\",views.script,name='script'),\n path(\"script//csvfile\",views.csvfile,name='csvfile')\n \n]","repo_name":"aryan-basu-gemini/Merchant-Ledgerbook","sub_path":"logbooks/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"71862129760","text":"\r\n# Public -\r\n# Protected -\r\n# Private -\r\n\r\nclass Employee:\r\n __company=\"Duke\"\r\n salary=7500\r\n\r\n\r\n\r\nGurparteek=Employee()\r\nSimar=Employee()\r\nYuvi=Employee()\r\n\r\n\r\nprint(Gurparteek._Employee__company) # name mangling: plain Gurparteek.__company raises AttributeError outside the class\r\n#print(Simar.company)\r\n#print(Yuvi.company)\r\n\r\nYuvi.salary = 11900\r\nGurparteek.salary=4500\r\nSimar.salary=8977\r\n\r\nprint(Yuvi.salary)\r\nprint(Gurparteek.salary)\r\nprint(Simar.salary)\r\n","repo_name":"GurparteekGill/OPPS-in-Python","sub_path":"OPPS/Privatemodifiers.py","file_name":"Privatemodifiers.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12304440748","text":"def fibo_show(n):\n \"\"\"\n Shows the recursive Fibonacci method, printing each intermediate result\n \"\"\"\n print(\"Begin to calculate Fibonacci numbers \" + str(n))\n if n == 0:\n result = 0\n print(result)\n elif n == 1:\n result = 1\n print(result)\n else:\n result = fibo_show(n - 1) + fibo_show(n - 2)\n print(result)\n return result\n\n\nx = fibo_show(5)\n# print(x)\n","repo_name":"ibrikin/igorfirstrep","sub_path":"python/02_functions/2.3/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29771936790","text":"from lasif.components.component import Component\nimport os\nimport shutil\nimport toml\nimport emoji\nfrom colorama import init\nfrom colorama import Fore\nfrom typing import List, Union\n\ninit()\nfrom inversionson import InversionsonError\n\n\nclass StoryTellerComponent(Component):\n \"\"\"\n A class in charge of documentation of inversion.\n\n Monitors a file which tells the actual story of the inversion\n\n Keeps track of a few things:\n - For each iteration:\n -- Which events are used\n -- What was the control group\n -- What was the misfit\n -- Type of adjoint source.\n\n - During inversion\n -- How often each event has been used\n -- How influential the event was on the inversion\n -- It allows for the addition of new data by regularly\n querying Lasif project to look for 
new data and then\n updates list of how often events have been used.\n\n Preferably this should be done in a way that it should be\n easy to work with data afterwards. Currently using toml files\n but would be nice to have a better option.\n \"\"\"\n\n def __init__(self, communicator, component_name):\n super(StoryTellerComponent, self).__init__(communicator, component_name)\n self.root, self.backup = self._create_root_folder()\n self.iteration_tomls = self.comm.project.paths[\"iteration_tomls\"]\n self.story_file = os.path.join(self.root, \"inversion.md\")\n self.all_events = os.path.join(self.root, \"all_events.txt\")\n self.events_used_toml = os.path.join(self.root, \"events_used.toml\")\n self.validation_toml = os.path.join(self.root, \"validation.toml\")\n if os.path.exists(self.validation_toml):\n self.validation_dict = toml.load(self.validation_toml)\n else:\n self.validation_dict = {}\n if os.path.exists(self.events_used_toml):\n self.events_used = toml.load(self.events_used_toml)\n else:\n self._create_initial_events_used_toml()\n self.markdown = MarkDown(self.story_file)\n self.printer = PrettyPrinter()\n\n def _create_root_folder(self):\n \"\"\"\n Initiate the folder structure if needed\n \"\"\"\n root = self.comm.project.paths[\"documentation\"]\n backup = os.path.join(root, \"BACKUP\")\n if not os.path.exists(root):\n os.mkdir(root)\n if not os.path.exists(backup):\n os.mkdir(backup)\n return root, backup\n\n def _backup_files(self):\n \"\"\"\n Backup all information at the end of each iteration.\n \"\"\"\n tmpdir = os.path.join(self.backup, \"..\", \"..\", \"tmp\")\n if os.path.exists(self.backup):\n shutil.copytree(self.backup, tmpdir)\n shutil.rmtree(self.backup)\n shutil.copytree(self.root, self.backup)\n shutil.rmtree(tmpdir)\n else:\n shutil.copytree(self.root, self.backup)\n\n def _backup_story_file(self):\n \"\"\"\n Protects the valuable story file from being overwritten.\n \"\"\"\n backup_loc = os.path.join(self.backup, \"inversion.md\")\n shutil.copy(self.story_file, backup_loc)\n\n def _create_story_file(self):\n \"\"\"\n Create a markdown file which will be used to tell the story\n of the inversion automatically.\n \"\"\"\n if os.path.isfile(self.story_file):\n raise InversionsonError(\n f\"File {self.story_file} already exists.\"\n f\" Will stop here so that it does not get\"\n f\" overwritten.\"\n )\n header = self.comm.project.inversion_id\n self.markdown.add_header(header_style=1, text=header, new=True)\n\n text = \"Welcome to the automatic documentation of the inversion\"\n text += f\" project {self.comm.project.inversion_id}. Here the \"\n text += \"inversion is documented iteration by iteration. 
\\n\"\n text += \"This is currently just a test but hopefully it will \"\n text += \"work out beautifully.\"\n\n self.markdown.add_paragraph(text)\n\n def _write_list_of_all_events(self):\n \"\"\"\n Write out a list of all events included in lasif project\n \"\"\"\n all_events = self.comm.lasif.list_events()\n with open(self.all_events, \"w+\") as fh:\n fh.writelines(f\"{event}\\n\" for event in all_events)\n\n def _create_initial_events_used_toml(self):\n \"\"\"\n Initialize the toml files which keeps track of usage of events\n \"\"\"\n all_events = self.comm.lasif.list_events()\n self.events_used = {}\n for event in all_events:\n self.events_used[event] = 0\n with open(self.events_used_toml, \"w+\") as fh:\n toml.dump(self.events_used, fh)\n\n def _update_list_of_events(self):\n \"\"\"\n In order to be able to add events to inversion we\n need to update the list of used events.\n \"\"\"\n all_events = self.comm.lasif.list_events()\n already_in_list = list(self.events_used.keys())\n new = [x for x in all_events if x not in already_in_list]\n if len(new) == 0:\n return\n else:\n for event in new:\n self.events_used[event] = 0\n with open(self.events_used_toml, \"w\") as fh:\n toml.dump(self.events_used, fh)\n with open(self.all_events, \"a\") as fh:\n fh.writelines(f\"{event}\\n\" for event in new)\n\n def _update_usage_of_events(self):\n \"\"\"\n To keep track of how often events are used.\n \"\"\"\n for event in self.comm.project.non_val_events_in_iteration:\n if not self.comm.project.updated[event]:\n if event not in self.events_used.keys():\n self.events_used[event] = 0\n if isinstance(self.events_used[event], str):\n raise InversionsonError(\"Events used are strings\")\n self.events_used[event] += 1\n self.comm.project.change_attribute(\n attribute=f'updated[\"{event}\"]', new_value=True\n )\n self.comm.project.update_iteration_toml()\n with open(self.events_used_toml, \"w\") as fh:\n toml.dump(self.events_used, fh)\n\n def _start_entry_for_iteration(self):\n \"\"\"\n Start a new section in the story file\n \"\"\"\n iteration = self.comm.project.current_iteration\n if iteration.startswith(\"it0000_model\"):\n iteration_number = 0\n else:\n iteration_number = int(\n self.comm.project.current_iteration.split(\"_\")[-1].lstrip(\"0\")\n )\n self.markdown.add_header(header_style=2, text=f\"Iteration: {iteration_number}\")\n text = \"Here you can read all about what happened in iteration \"\n text += f\"{iteration_number}.\"\n\n self.markdown.add_paragraph(text=text)\n\n def _report_acceptance_of_model(self):\n \"\"\"\n When model gets accepted and we compute additional misfits,\n we report it to story file.\n \"\"\"\n iteration = self.comm.project.current_iteration\n if iteration.startswith(\"it0000_model\"):\n iteration_number = 0\n else:\n iteration_number = int(iteration.split(\"_\")[0][2:].lstrip(\"0\"))\n tr_region = float(iteration.split(\"_\")[-1][:-2])\n text = f\"Model for Iteration {iteration_number} accepted for\"\n text += f\" trust region: {tr_region}.\"\n\n self.markdown.add_paragraph(text=text, textstyle=\"bold\")\n\n def _report_shrinking_of_trust_region(self):\n \"\"\"\n When model gets accepted and we compute additional misfits,\n we report it to story file.\n \"\"\"\n iteration = self.comm.project.current_iteration\n if iteration.startswith(\"it0000_model\"):\n iteration_number = 0\n else:\n iteration_number = int(iteration.split(\"_\")[0][2:].lstrip(\"0\"))\n tr_region = float(iteration.split(\"_\")[-1][:-2])\n text = f\"Model for Iteration {iteration_number} was 
rejected \"\n text += f\"so now we shrink the trust region to: {tr_region} \"\n text += \"and try again.\"\n\n self.markdown.add_paragraph(text=text)\n\n def _add_table_of_events_and_misfits(self, verbose=None, task=None):\n \"\"\"\n Include a table of events and corresponding misfits to\n the story file.\n \"\"\"\n self.markdown.add_header(header_style=3, text=\"Misfits\")\n if not verbose:\n text = \"The events used in the iteration along with their misfits\"\n text += \" are displayed below:\"\n\n if verbose and \"additional\" not in verbose:\n text = \"We have computed misfits for the control group events. \"\n text += \"The misfits are displayed below. The additional events \"\n text += \"are displayed with 0.0 misfit values.\"\n\n if verbose and \"additional\" in verbose:\n text = \"We have now computed the misfits for all the events of \"\n text += \"the iteration. These are displayed below.\"\n\n self.markdown.add_paragraph(text=text)\n # iteration = self.comm.project.current_iteration\n self.comm.project.get_iteration_attributes()\n self.markdown.add_table(\n data=self.comm.project.misfits, headers=[\"Events\", \"Misfits\"]\n )\n if task == \"compute_misfit_and_gradient\":\n total_misfit = 0.0\n for key in self.comm.project.misfits.keys():\n total_misfit += float(self.comm.project.misfits[key])\n text = f\"Total misfit for iteration: {total_misfit:.3f} \\n\"\n self.markdown.add_paragraph(text=text)\n return\n\n if verbose and \"additional\" in verbose:\n total_misfit = 0.0\n old_control_group_misfit = 0.0\n for key, value in self.comm.project.misfits.items():\n total_misfit += float(value)\n if key in self.comm.project.old_control_group:\n old_control_group_misfit += float(value)\n\n _, cg_red = self._get_misfit_reduction()\n\n text = f\"Total misfit for iteration: {total_misfit:.3f} \\n\"\n text += \"Misfit for the old control group: \"\n text += f\"{old_control_group_misfit}\"\n cg_red *= 100.0 # Get percentages\n text += f\"\\n Misfit reduction between the iterations: {cg_red:.3f} %\"\n\n if verbose and \"additional\" not in verbose:\n old_control_group_misfit = 0.0\n for key, value in self.comm.project.misfits.items():\n if key in self.comm.project.old_control_group:\n old_control_group_misfit += float(value)\n\n _, cg_red = self._get_misfit_reduction()\n\n text = \"Misfit for the old control group: \"\n text += f\"{old_control_group_misfit:.3f}\"\n cg_red *= 100.0\n if cg_red <= 0.0:\n text += f\"\\n Misfit increase between the iterations: {cg_red:.3f} %\"\n else:\n text += f\"\\n Misfit reduction between the iterations: {cg_red:.3f} %\"\n\n self.markdown.add_paragraph(text=text)\n\n def _report_control_group(self):\n \"\"\"\n Report what the new control group is and what the current misfit is.\n \"\"\"\n self.markdown.add_header(header_style=4, text=\"Selection of New Control Group\")\n text = \"The events which will continue on to the next iteration are \"\n text += \"listed below.\"\n\n self.markdown.add_paragraph(text=text)\n self.markdown.add_list(items=self.comm.project.new_control_group)\n\n cg_misfit = 0.0\n for key, value in self.comm.project.misfits.items():\n if key in self.comm.project.new_control_group:\n cg_misfit += float(value)\n\n text = f\"The current misfit for the control group is {cg_misfit:.3f}\"\n self.markdown.add_paragraph(text=text)\n\n def _report_increase_in_control_group_size(self):\n \"\"\"\n The control group needs to be enlarged. 
This is reported here.\n \"\"\"\n text = \"Control group was not good enough, so we increase it with \"\n text += \"one extra event.\"\n\n self.markdown.add_paragraph(text=text)\n\n def _report_number_of_used_events(self):\n \"\"\"\n At the end of each iteration we report how many events have been\n uses in inversion.\n \"\"\"\n num_events = len([x for x in list(self.events_used.values()) if x != 0])\n\n text = f\"We have now used {num_events} events during the inversion.\"\n\n self.markdown.add_paragraph(text=text)\n\n def _initiate_gradient_computation_task(self):\n \"\"\"\n Write a quick paragraph reporting that we will now compute gradients\n for the accepted trial model.\n \"\"\"\n text = \"Since model has been accepted, we will now compute \"\n text += \"gradients for all batch events for the accepted model.\"\n\n self.markdown.add_paragraph(text=text)\n\n def report_validation_misfit(\n self,\n iteration: str,\n event: str,\n total_sum: bool = False,\n ):\n \"\"\"\n We write misfit of validation dataset for a specific window_set\n\n :param iteration: Name of validation iteration\n :type iteration: str\n :param window_set: Name of window set\n :type window_set: str\n :param event: Name of event reported\n :type event: str\n :param total_sum: When the total sum for the iteration needs to\n be reported, default False\n :type total_sum: bool, Optional\n \"\"\"\n if not os.path.exists(self.validation_toml):\n validation_dict = {}\n else:\n validation_dict = toml.load(self.validation_toml)\n\n if iteration not in validation_dict.keys():\n validation_dict[iteration] = {\"events\": {}, \"total\": 0.0}\n\n if total_sum:\n total = 0.0\n for event in validation_dict[iteration][\"events\"].keys():\n total += float(validation_dict[iteration][\"events\"][event])\n validation_dict[iteration][\"total\"] = total\n else:\n misfits_toml = os.path.join(\n self.comm.lasif.lasif_root,\n \"ITERATIONS\",\n f\"ITERATION_{iteration}\",\n \"misfits.toml\",\n )\n misfits_dict = toml.load(misfits_toml)\n event_misfit = misfits_dict[event][\"event_misfit\"]\n validation_dict[iteration][\"events\"][event] = float(event_misfit)\n self.validation_dict = validation_dict\n with open(self.validation_toml, mode=\"w\") as fh:\n toml.dump(validation_dict, fh)\n\n def document_task(self, task: str, verbose=None):\n \"\"\"\n Depending on what kind of task it is, the function makes\n sure that there exists proper documentation of what happened\n in that task\n\n :param task: Type of task\n :type task: str\n :param verbose: Additional information regarding task, optional.\n :type verbose: str\n \"\"\"\n if task == \"compute_misfit_and_gradient\":\n # The compute misfit and gradient task is always associated\n # with the first iteration\n # This is the absolute first iteration\n # We need to create all necessary files\n self._create_story_file()\n self._start_entry_for_iteration()\n if self.comm.project.inversion_mode == \"mini-batch\":\n self._write_list_of_all_events()\n\n self._add_table_of_events_and_misfits(task=task)\n if self.comm.project.inversion_mode == \"mini-batch\":\n self._report_control_group()\n self._update_event_quality()\n\n if task == \"compute_gradient\":\n self._initiate_gradient_computation_task()\n\n if task == \"finalize_iteration\":\n if self.comm.project.inversion_mode == \"mini-batch\":\n self._report_number_of_used_events()\n self._update_list_of_events()\n self._update_usage_of_events()\n self._backup_files()\n\n if task == \"adam_documentation\":\n self._update_usage_of_events()\n 
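# A sketch of the toml round-trip that the event bookkeeping above performs
# (using the same toml module imported at the top of this file):
#   counts = toml.load(self.events_used_toml)
#   counts[event] = counts.get(event, 0) + 1
#   with open(self.events_used_toml, "w") as fh:
#       toml.dump(counts, fh)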
self._update_list_of_events()\n\n\nclass MarkDown(StoryTellerComponent):\n \"\"\"\n A little class designed to contain a few helper functions\n to write text in Markdown style\n \"\"\"\n\n def __init__(self, file_name):\n self.file = file_name\n self.text_styles = [\"normal\", \"italic\", \"bold\"]\n self.stream = \"\"\n\n def _read_file(self):\n with open(self.file, \"r\") as fh:\n self.stream = fh.read()\n\n def _append_to_file(self):\n with open(self.file, \"a\") as fh:\n fh.write(self.stream)\n\n def _write_to_file(self):\n with open(self.file, \"w\") as fh:\n fh.write(self.stream)\n\n def _add_line_break(self):\n self.stream += \"\\n \"\n\n def add_header(self, header_style: int, text: str, new=False):\n \"\"\"\n Add a header to a markdown file. The header style\n has to be between 1 and 6\n\n :param header_style: Style of header, 1-6\n :type header_style: int\n :param text: Content of header\n :type text: str\n :param new: Add it to a new file?, defaults to False\n :type new: bool\n \"\"\"\n if header_style < 1 or header_style > 6:\n raise ValueError(\"Header style must be an integer between 1 and 6\")\n self.stream = text\n self._transform_special_characters()\n self.stream = \"#\" * int(header_style) + \" \" + self.stream\n self._add_line_break()\n self._add_line_break()\n\n if new:\n self._write_to_file()\n else:\n self._append_to_file()\n\n def _transform_special_characters(self, string=None):\n \"\"\"\n Take special markdown characters from string\n and make sure they are interpreted correctly\n \"\"\"\n output = True\n if not string:\n string = self.stream\n output = False\n string = string.replace(\"*\", \"\\*\")\n string = string.replace(\"`\", \"\\`\")\n string = string.replace(\"_\", \"\\_\")\n string = string.replace(\"{\", \"\\{\")\n string = string.replace(\"}\", \"\\}\")\n string = string.replace(\"[\", \"\\[\")\n string = string.replace(\"]\", \"\\]\")\n string = string.replace(\"(\", \"\\(\")\n string = string.replace(\")\", \"\\)\")\n string = string.replace(\"#\", \"\\#\")\n string = string.replace(\"+\", \"\\+\")\n string = string.replace(\"-\", \"\\-\")\n string = string.replace(\"!\", \"\\!\")\n string = string.replace(\"&\", \"&\")\n string = string.replace(\"<\", \"<\")\n if output:\n return string\n self.stream = string\n\n def add_paragraph(self, text: str, textstyle=\"normal\"):\n \"\"\"\n Add a brand new paragraph to the markdown file\n\n :param text: Content of paragraph\n :type text: str\n :param textstyle: Style of text, defaults to 'normal'\n :type textstyle: str, optional\n \"\"\"\n if textstyle not in self.text_styles:\n raise ValueError(f\"Text style {textstyle} is not available\")\n\n self.stream = text\n self._transform_special_characters()\n\n if textstyle != self.text_styles[0]:\n text = self.stream\n text = \"_\" + text + \"_\"\n if textstyle == self.text_styles[2]:\n text = \"_\" + text + \"_\"\n self.stream = text\n\n self._add_line_break()\n self._add_line_break()\n self._append_to_file()\n\n def add_image(self, image_url: str, image_title=\"\", alt_text=\"text\"):\n \"\"\"\n Add an image to a markdown file\n\n :param image_url: Location of an image, I think this can be a file\n when using a local markdown and not an online one.\n :type image_url: str\n :param image_title: Title when hovering on pic, defaults to \"\"\n :type image_title: str, optional\n :param alt_text: Text when pic doesn't appear, defaults to \"text\"\n :type alt_text: str, optional\n \"\"\"\n self.stream = f'![\"{alt_text}\"]'\n self.stream += f'({image_url} 
\"{image_title}\")'\n self._add_line_break()\n self._append_to_file()\n\n def add_table(self, data: dict, headers=[\"Events\", \"Misfits\"]):\n \"\"\"\n Add a table to a markdown file. Currently only for 2 column\n based data.\n\n :param data: Data to display in table\n :type data: dict\n :param headers: Table headers, defaults to [\"Events\", \"Misfits\"]\n :type headers: list, optional\n \"\"\"\n self.stream = \"\"\n string = f\"| {headers[0]} | {headers[1]} |\\n\"\n fixed_string = self._transform_special_characters(string)\n self.stream += fixed_string\n self.stream += \"| --- | ---: | \\n\"\n\n for key in data.keys():\n string = f\"| {key} | {data[key]} |\\n\"\n fixed_string = self._transform_special_characters(string)\n self.stream += fixed_string\n\n self._add_line_break()\n self._add_line_break()\n self._append_to_file()\n\n def add_list(self, items: list):\n \"\"\"\n Add an unordered list to a markdown file.\n\n :param items: Items to be listed\n :type items: list\n \"\"\"\n self.stream = \"\"\n\n for _i, item in enumerate(items):\n if _i != 0:\n self.stream += \" \"\n self.stream += f\"* {self._transform_special_characters(item)} \\n\"\n\n self._add_line_break()\n self._add_line_break()\n self._append_to_file()\n\n\nclass PrettyPrinter(object):\n \"\"\"\n A class which makes printing in Inversionson pretty and consistant.\n\n Not too dissimilar from the MarkDown class\n \"\"\"\n\n def __init__(self):\n self.stream = \"\"\n self.color = Fore.WHITE\n self.color_dict = self.create_color_dict()\n\n def create_color_dict(self):\n return {\n \"white\": Fore.WHITE,\n \"black\": Fore.BLACK,\n \"blue\": Fore.BLUE,\n \"green\": Fore.GREEN,\n \"red\": Fore.RED,\n \"cyan\": Fore.CYAN,\n \"magenta\": Fore.MAGENTA,\n \"yellow\": Fore.YELLOW,\n \"lightred\": Fore.LIGHTRED_EX,\n }\n\n def set_color(self, color: str):\n self.color = self.color_dict[color.lower()]\n\n def add_emoji(self, emoji_alias: str, vertical_line=True):\n if not emoji_alias.startswith(\":\"):\n emoji_alias = \":\" + emoji_alias\n if not emoji_alias.endswith(\":\"):\n emoji_alias += \":\"\n self.stream += f\"{emoji.emojize(emoji_alias, language='alias')}\"\n self.stream += \" | \" if vertical_line else \" \"\n\n def add_horizontal_line(self):\n self.stream += \"\\n ============================== \\n\"\n\n def add_message(self, message: str):\n self.stream += message\n\n def print(\n self,\n message: str,\n line_above: bool = False,\n line_below: bool = False,\n emoji_alias: Union[str, List[str]] = None,\n color: str = None,\n ):\n \"\"\"\n A printing function which works with the stream and finally prints it and\n resets the stream\n\n :param message: The string to be printed\n :type message: str\n :param line_above: Print a line above?, defaults to False\n :type line_above: bool, optional\n :param line_below: Print a line below?, defaults to False\n :type line_below: bool, optional\n :param emoji_alias: An emoji at the beginning for good measure? It needs to be a string that\n refers to an emoji, defaults to None\n :type emoji_alias: Union[str, List[str]], optional\n :param emoji_alias: Color to print with. 
Available colors are:\n [white, black, red, cyan, yellow, magenta, green, blue], defaults to None\n :type emoji_alias: str, optional\n \"\"\"\n if color is not None:\n self.set_color(color)\n self.stream += f\"{self.color} \"\n if line_above:\n self.add_horizontal_line()\n if emoji_alias is not None:\n if isinstance(emoji_alias, list):\n for _i, emo in enumerate(emoji_alias):\n vertical_line = True if _i == len(emoji_alias) - 1 else False\n self.add_emoji(emo, vertical_line=vertical_line)\n else:\n self.add_emoji(emoji_alias)\n self.add_message(message)\n if line_below:\n self.add_horizontal_line()\n print(self.stream)\n self.stream = \"\"\n","repo_name":"solvithrastar/Inversionson","sub_path":"inversionson/components/storyteller.py","file_name":"storyteller.py","file_ext":"py","file_size_in_byte":24631,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"54"} +{"seq_id":"71920245601","text":"p = int(raw_input())\nq = int(raw_input())\n\nresult = []\nfor i in range(p,q+1):\n k = str(i*i)\n left = k[0:(len(k)/2)]\n if (left == \"\"):\n left = \"0\"\n right = k[(len(k)/2):]\n if (right == \"\"):\n right = \"0\"\n total = int(left)+int(right)\n if (total == i):\n result.append(str(i))\nif (len(result) == 0):\n print(\"INVALID RANGE\")\nelse:\n print(\" \".join(result))\n","repo_name":"loutee/hackerrank","sub_path":"algorithms/implementation/modified-kaprekar-numbers.py","file_name":"modified-kaprekar-numbers.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8833025337","text":"import time\nimport unittest\n\nimport stopwatch\n\n\nclass MockSystemClock(object):\n \"\"\"Represents a system clock with time starting at `0` and incremented\n whenever `sleep()` is called.\n\n Meant to replace the `time()` and `sleep()` functions in the `time` module.\n\n >>> clock = MockSystemClock()\n >>> clock.time()\n 0\n >>> clock.sleep(1)\n >>> clock.time()\n 1\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the current system time to `0`.\"\"\"\n self._system_time = 0\n\n def time(self):\n \"\"\"Return the current system time.\"\"\"\n return self._system_time\n\n def sleep(self, seconds):\n \"\"\"Increment the system time by `seconds`.\"\"\"\n self._system_time += seconds\n\n\nclass StopwatchTestCase(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Monkey patch `time.time()` and `time.sleep()` to point to the\n corresponding methods on a new `MockSystemClock` instance.\n \"\"\"\n self._time_time = time.time\n self._time_sleep = time.sleep\n\n mock_system_clock = MockSystemClock()\n time.time = mock_system_clock.time\n time.sleep = mock_system_clock.sleep\n\n def tearDown(self):\n \"\"\"Restore the `time` module.\"\"\"\n time.time = self._time_time\n time.sleep = self._time_sleep\n\n def test_stopwatch_as_object(self):\n \"\"\"Test using a `Stopwatch` as a regular object.\"\"\"\n sw = stopwatch.Stopwatch()\n sw.start()\n self.assertEqual(0, sw.time_elapsed)\n time.sleep(1)\n self.assertEqual(1, sw.time_elapsed)\n sw.stop()\n self.assertEqual(1, sw.total_run_time)\n\n def test_stopwatch_as_context_manager(self):\n \"\"\"Test using a `Stopwatch` as a context manager.\"\"\"\n with stopwatch.Stopwatch() as sw:\n sw.start()\n self.assertEqual(0, sw.time_elapsed)\n time.sleep(1)\n self.assertEqual(1, sw.time_elapsed)\n\n self.assertEqual(1, sw.total_run_time)\n\n\nif __name__ == '__main__':\n 
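# Note on the test design above: setUp swaps time.time and time.sleep for a
# MockSystemClock, so the one-second "sleeps" advance a fake clock and the
# suite finishes instantly; tearDown restores the real time module.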
unittest.main()\n","repo_name":"sumeet/stopwatch","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"38786963443","text":"#!/usr/bin/env python\n\nimport sys\nimport logging\n\nimport rdflib\n\nlogging.basicConfig(filename=\"dev8d.log\",\n level=logging.INFO,\n format=\"%(asctime)s - %(levelname)s - %(message)s\")\n\nlogging.info(\"starting dump of foaf social graph\")\n\nFOAF = rdflib.Namespace('http://xmlns.com/foaf/0.1/')\nRDF = rdflib.namespace.RDF\n\ng = rdflib.ConjunctiveGraph(\"Sleepycat\")\ng.open('store')\n\n# create a sub-graph of just foafy bits\nfoaf = rdflib.Graph()\nfor subject in g.subjects(predicate=RDF.type, object=FOAF.Person):\n for triple in g.triples((subject, None, None)):\n foaf.add(triple)\n\n# save off the foafy bits as rdf/xml\nfoaf.serialize(sys.stdout)\ng.close()\n\nlogging.info(\"finished dump of foaf social graph\")\n\n","repo_name":"edsu/dev8d-linked-data","sub_path":"dump_foaf.py","file_name":"dump_foaf.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"16922490179","text":"import discord.ext\nimport os\nfrom discord.ext import commands\nfrom config import config\nimport cogs.utils as utils\n\n\nclass Owner(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n # shutdown command, only usable by the owner.\n\n @commands.command()\n @commands.is_owner()\n async def shutdown(self, ctx: commands.Context):\n await self.bot.change_presence(status=discord.Status.invisible)\n await utils.send_embed(\"Shutdown\",\n f\"{self.bot.user} has been shut down.\",\n ctx,)\n await self.bot.close()\n print(f'{self.bot.user.name} has been shut down.')\n\n # reboot command, only usable by the owner.\n\n @commands.command()\n @commands.is_owner()\n async def reboot(self, ctx: commands.Context):\n await self.bot.change_presence(status=discord.Status.invisible)\n await utils.send_embed(\"Rebooting\",\n \"Reboot initiated.\",\n ctx,)\n print(f'{self.bot.user} is rebooting...')\n os.system('sh update.sh')\n\n # dev-status command, only usable by the owner.\n\n @commands.command()\n @commands.is_owner()\n async def dev_status(self, ctx: commands.Context):\n autorolestatus, _ = await utils.autorole_status(ctx.guild.id)\n embed = await utils.create_embed(\"Dev Status\",\n f\"**Status**: Running version {config.byob_bot_version}.\\n**Ping**: {round(self.bot.latency * 1000)}ms\\n**Prefix**: {config.prefix}\\n**Autorole status**: {autorolestatus}\",\n )\n embed.add_field(name=\"**Server stats**\",\n value=f\"**Name**: {ctx.guild.name}\\n**Members**: {ctx.guild.member_count}\\n**Description**: {ctx.guild.description}\",\n inline=False)\n if not isinstance(ctx.channel, discord.channel.DMChannel):\n await ctx.message.delete()\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.is_owner()\n async def reload(self, ctx: commands.Context, extension: str):\n await self.bot.reload_extension(f\"cogs.{extension}\")\n await utils.send_embed(\"Extension reloaded\",\n f\"{extension} has been reloaded.\",\n ctx,)\n\n @commands.command()\n @commands.is_owner()\n async def disable(self, ctx: commands.Context, extension: str):\n await self.bot.unload_extension(f'cogs.{extension}')\n await utils.send_embed(\"Extension Disabled\",\n f\"{extension} has been disabled.\",\n ctx,)\n\n @commands.command()\n @commands.is_owner()\n async def 
enable(self, ctx: commands.Context, extension: str):\n        await self.bot.load_extension(f'cogs.{extension}')\n        await utils.send_embed(\"Extension Enabled\",\n                               f\"{extension} has been enabled.\",\n                               ctx,)\n\n\nasync def setup(bot):\n    await bot.add_cog(Owner(bot))\n","repo_name":"fireFerry/byob-bot","sub_path":"cogs/owner.py","file_name":"owner.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"71976065442","text":"# Import the required modules\nimport os\nimport sys\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\n# codecs is used to read the Big5-encoded file\nimport codecs\n\npath = '/Users/hazel_lin/Documents/Lin Yi-Sin/實習或工作/COVID-19 project/Data/0111data/高速公路計程收費通行量.html'\n\n# Empty list to hold the data later\ndata = []\n\n# Find the table header data in the HTML\nlist_header = []\nfr = codecs.open(path, 'r', encoding='big5', errors='ignore')\nsoup = BeautifulSoup(fr, 'html.parser')\nheader = soup.find_all(\"table\")[1].find(\"tr\")\n\nfor items in header:\n    try:\n        list_header.append(items.get_text())\n    except:\n        continue\n\n# Use BeautifulSoup to scrape the data inside the HTML table\nHTML_data = soup.find_all(\"table\")[1].find_all(\"tr\")[1:]\n\nfor element in HTML_data:\n    sub_data = []\n    for sub_element in element:\n        try:\n            sub_data.append(sub_element.get_text())\n        except:\n            continue\n    data.append(sub_data)\n\n# If an error is raised (e.g. the number of columns does not match),\n# print the list contents first to debug\n# print(list_header)\n# print(sub_data)\n\n# Store the list data in a pandas DataFrame\ndataframe = pd.DataFrame(data = data, columns = list_header)\n# print(dataframe)\n\n# Export the DataFrame to csv with utf-8 encoding\ndataframe.to_csv('高速公路計程收費通行量.csv', encoding='utf-8')","repo_name":"hazel-ys-lin/BeautifulSoup-html-to-csv","sub_path":"py/htmltocsv.py","file_name":"htmltocsv.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72214814560","text":"import numpy as np\nimport cv2\n\ncap=cv2.VideoCapture(0);\n\n\nwhile(True):\n\t_, frame=cap.read();# the underscore is just used to signify we are just ignoring the boolean returned by read().\n\n\thsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV);\n\n\tlower_red=np.array([0, 0, 0]);\n\tupper_red=np.array([255, 255, 255]);\n\n\tmask=cv2.inRange(hsv, lower_red, upper_red);\n\n\tres=cv2.bitwise_and(frame, frame, mask=mask);\n\n\tcv2.imshow('Frame', frame);\n\tcv2.imshow('Result', res);\n\tcv2.imshow('Mask', mask);\n\n\tif((cv2.waitKey(1) & 0xFF)==ord('q')):\n\t\tbreak;\n\n\ncap.release();\ncv2.destroyAllWindows();","repo_name":"C0DER11101/6thSem","sub_path":"DIP/Assignment/OPENCV/practicePrograms/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"39113505300","text":"from law.util import make_list\nimport os\n\n\"\"\"\nHelper class for writing datacards. 
Wherever you want to use this class,\nyou have to ensure a valid CH installation.\n\"\"\"\n\n\nclass DatacardWriter(object):\n    def __init__(self, ch, analysis, mass=\"125\", era=\"2017\"):\n        self._cb = ch.CombineHarvester()\n\n        self.analysis = analysis\n        self._mass = mass\n        self._era = era\n\n    @property\n    def cb(self):\n        return self._cb\n\n    @property\n    def mass(self):\n        return self._mass\n\n    @property\n    def era(self):\n        return self._era\n\n    def add_observation(self, channel, category):\n        mass = make_list(self.mass)\n        analysis = make_list(self.analysis)\n        era = make_list(self.era)\n        channel = make_list(channel)\n        category = make_list(category)\n        print(\"Add observations with mass {}, analysis {}, era {}, channel {} and category {}.\".format(mass, analysis, era, channel, category))\n        self.cb.AddObservations(mass, analysis, era, channel, category)\n\n    def add_processes(self, channel, process, category, is_signal):\n        mass = make_list(self.mass)\n        analysis = make_list(self.analysis)\n        era = make_list(self.era)\n        channel = make_list(channel)\n        category = make_list(category)\n        print(\n            \"Add {} with mass {}, analysis {}, era {}, channel {}, process {} and category {}.\".format(\n                \"signals\" if is_signal else \"backgrounds\",\n                mass,\n                analysis,\n                era,\n                channel,\n                process,\n                category,\n            )\n        )\n        self.cb.AddProcesses(mass, analysis, era, channel, process, category, is_signal)\n\n    def add_signals(self, channel, process, category):\n        self.add_processes(channel, process, category, True)\n\n    def add_backgrounds(self, channel, process, category):\n        self.add_processes(channel, process, category, False)\n\n    def add_shape_systematic(self, name, strength, channel, process, ch):\n        channel = make_list(channel)\n        process = make_list(process)\n        self.cb.cp().channel(channel).process(process).AddSyst(self.cb, name, \"shape\", ch.SystMap()(strength))\n\n    def remove_shape_uncertainties(self):\n        # Filter all systematics of type shape.\n        self.cb.FilterSysts(lambda systematic: systematic.type() == \"shape\")\n        # There are also systematics which can have a mixed lnN/shape type, for which CH returns only lnN as the type. 
Those whose up/down values are 1.0 and 0.0 are assumed to be shape uncertainties.\n        self.cb.FilterSysts(lambda systematic: (systematic.value_u() == 1.0) and (systematic.value_d() == 0.0))\n\n    def add_normalization_systematic(self, name, strength, channel, process, ch):\n        channel = make_list(channel)\n        process = make_list(process)\n        self.cb.cp().channel(channel).process(process).AddSyst(self.cb, name, \"lnN\", ch.SystMap()(strength))\n\n    def add_bin_by_bin_uncertainties(self, processes, ch, add_threshold=0.1, merge_threshold=0.5, fix_norm=True):\n        bin_by_bin_factory = ch.BinByBinFactory()\n        bin_by_bin_factory.SetAddThreshold(add_threshold)\n        bin_by_bin_factory.SetMergeThreshold(merge_threshold)\n        bin_by_bin_factory.SetFixNorm(fix_norm)\n        bin_by_bin_factory.MergeBinErrors(self.cb.cp().process(processes))\n        bin_by_bin_factory.AddBinByBin(self.cb.cp().process(processes), self.cb)\n        self.cb.SetGroup(\"bbb\", [\".*_bin_\\\\d+\"])\n        self.cb.SetGroup(\"syst_plus_bbb\", [\".*\"])\n\n    def scale_expectation(self, scale_factor, no_norm_rate_bkg=False, no_norm_rate_sig=False):\n        self.cb.cp().backgrounds().ForEachProc(lambda process: process.set_rate((process.no_norm_rate() if no_norm_rate_bkg else process.rate()) * scale_factor))\n        self.cb.cp().signals().ForEachProc(lambda process: process.set_rate((process.no_norm_rate() if no_norm_rate_sig else process.rate()) * scale_factor))\n\n    def scale_processes(self, scale_factor, processes, no_norm_rate=False):\n        self.cb.cp().process(processes).ForEachProc(lambda process: process.set_rate((process.no_norm_rate() if no_norm_rate else process.rate()) * scale_factor))\n\n    def replace_observation_by_asimov_dataset(self, signal_mass=None, signal_processes=None):\n        def _replace_observation_by_asimov_dataset(observation):\n            cb = self.cb.cp().analysis([observation.analysis()]).era([observation.era()]).channel([observation.channel()]).bin([observation.bin()])\n            background = cb.cp().backgrounds()\n\n            signal = cb.cp().signals()\n            if signal_mass:\n                if signal_processes:\n                    signal = cb.cp().signals().process(signal_processes).mass([signal_mass])\n                else:\n                    signal = cb.cp().signals().mass([signal_mass])\n            elif signal_processes:\n                signal = cb.cp().signals().process(signal_processes)\n\n            observation.set_shape(background.GetShape() + signal.GetShape(), True)\n            observation.set_rate(background.GetRate() + signal.GetRate())\n\n        self.cb.cp().ForEachObs(_replace_observation_by_asimov_dataset)\n\n    def auto_rebin(self, ch, threshold=0.0, unc_frac=0.9, mode=1):\n        rebin = ch.AutoRebin()\n        rebin.SetBinThreshold(threshold)\n        rebin.SetBinUncertFraction(unc_frac)\n        rebin.SetRebinMode(mode)\n        rebin.SetPerformRebin(True)\n        rebin.SetVerbosity(1)\n        rebin.Rebin(self.cb, self.cb)\n\n    def fix_negative_bins(self):\n        def _fix_negative_bins(process):\n            hist = process.ShapeAsTH1F()\n            for i in range(hist.GetNbinsX()):\n                if hist.GetBinContent(i) < 0.0:\n                    print(\"Fixing negative bins for process {} in bin {}\".format(process.process(), i))\n                    hist.SetBinContent(i, 0.0)\n\n        def _fix_negative_bins_sys(syst):\n            # first fix shift down\n            hist = syst.ShapeDAsTH1F()\n            for i in range(hist.GetNbinsX()):\n                if hist.GetBinContent(i) < 0.0:\n                    print(\"Fixing negative bins for syst {} (shift down) in bin {}\".format(syst.name(), i))\n                    hist.SetBinContent(i, 0.0)\n            # then fix shift up\n            hist = syst.ShapeUAsTH1F()\n            for i in range(hist.GetNbinsX()):\n                if hist.GetBinContent(i) < 0.0:\n                    print(\"Fixing negative bins for syst {} (shift up) in bin {}\".format(syst.name(), i))\n                    hist.SetBinContent(i, 0.0)\n\n        # for each process\n        
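        # Note: negative template bins can make a binned-likelihood fit
        # ill-defined, which is why the two passes below clamp them to zero.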
self.cb.ForEachProc(_fix_negative_bins)\n        # for each systematic\n        self.cb.ForEachSyst(_fix_negative_bins_sys)\n\n    def print_datacard(self):\n        self.cb.PrintAll()\n\n    def write_datacards(self, datacard_filename_template, root_filename_template, output_directory, ch):\n        # http://cms-analysis.github.io/CombineHarvester/classch_1_1_card_writer.html#details\n        writer = ch.CardWriter(\n            os.path.join(\"$TAG\", datacard_filename_template),\n            os.path.join(\"$TAG\", root_filename_template),\n        )\n        writer.SetVerbosity(1)\n\n        # enable writing datacards in cases where the mass does not have its original meaning\n        if (len(self.cb.mass_set()) == 1) and (self.cb.mass_set()[0] == \"*\"):\n            writer.SetWildcardMasses([])\n\n        return writer.WriteCards(output_directory, self.cb)\n","repo_name":"frengelk/Susy1LeptonAnalysis","sub_path":"analysis/utils/datacard.py","file_name":"datacard.py","file_ext":"py","file_size_in_byte":7417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69852570402","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: heyongkang\n@create: 2018/11/3-10:57 AM\n@file: factorization.py\n@function: Decompose a positive integer into its prime factors. For input 90, print 90=2*3*3*5\n\"\"\"\n\n\ndef divPrime(num):\n    lt = []\n    print(num, '=', end=' ')\n    while num != 1:\n        for i in range(2, int(num + 1)):\n            if num % i == 0:  # i is a prime factor of num\n                lt.append(i)\n                num = num / i  # divide num by i and keep factoring the remainder\n                break\n    for i in range(0, len(lt) - 1):\n        print(lt[i], '*', end=' ')\n\n    print(lt[-1])\n\ndivPrime(18)","repo_name":"xiaohe55/PhthonExercise100","sub_path":"014/factorization.py","file_name":"factorization.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"24889815443","text":"import pandas as pd \nfrom acquire import acquire_df\n\ndef prep_df():\n    df = acquire_df()\n    df.rename(columns={\"trestbps\":\"blood_pressure\", \"fbs\":\"blood_sugar\",\\\n        \"thalach\": \"max_heart_rate\"}, inplace=True)\n    df.sex = df.sex.astype(\"category\")\n    df.cp = df.cp.astype(\"category\")\n    df.blood_sugar = df.blood_sugar.astype(\"category\")\n    df.restecg = df.restecg.astype(\"category\")\n    df.exang = df.exang.astype(\"category\")\n    df.slope = df.slope.astype(\"category\")\n    df.ca = df.ca.astype(\"category\")\n    df.thal = df.thal.astype(\"category\")\n    df.target = df.target.astype(\"category\")\n    return df\n\n    \n\ndef train_test_split(df, train_size = .60):\n    from sklearn.model_selection import train_test_split\n    X = df.drop(columns=[\"target\"])\n    y = df.target\n    X_train, X_test, y_train, y_test = train_test_split(X,y, train_size=train_size, random_state = 123, stratify=y)\n    train = X_train.merge(y_train, left_index = True, right_index=True)\n    test = X_test.merge(y_test, left_index=True, right_index=True)\n    return train, test\n\ndef split_train_and_test(train, test, target):\n    X_train = train.drop(columns=[target])\n    y_train = train[target]\n    X_test = test.drop(columns=[target])\n    y_test = test[target]\n    return X_train, y_train, X_test, y_test","repo_name":"Symeonw/Heart-Disease","sub_path":"prep.py","file_name":"prep.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15851469240","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File    :   utils.py \n@Contact :   xxzhang16@fudan.edu.cn\n\n@Modify Time      @Author    @Version    @Description\n------------      -------    --------    -----------\n2021/8/18 19:33   zxx        1.0         None\n@reference: 
https://zhuanlan.zhihu.com/p/5658208\n'''\n\n# import lib\nclass Evaluator:\n def __init__(self, idx2label):\n self.idx2label = idx2label\n\n def _decode(self, idxs):\n if not isinstance(idxs, list):\n idxs = idxs.tolist()\n return [self.idx2label[str(i)] for i in idxs[0]]\n\n def _get_entity(self, idxs):\n entity_set = {}\n entity_pointer = None\n label_seq = self._decode(idxs)\n for i, label in enumerate(label_seq):\n if label.startswith('b'):\n category = label.split('-')[1]\n entity_pointer = (i, category)\n entity_set.setdefault(entity_pointer, [label])\n elif label.startswith('i'):\n if entity_pointer is None: continue\n if entity_pointer[1] != label.split('-')[1]: continue\n entity_set[entity_pointer].append(label)\n else:\n entity_pointer = None\n return entity_set\n\n def compute_F1(self, pred, target):\n real_entiy_set = self._get_entity(target)\n pred_entiy_set = self._get_entity(pred)\n\n pred_true_entity_set = {}\n total_keys = real_entiy_set.keys() & pred_entiy_set.keys()\n\n for key in total_keys:\n real_label = real_entiy_set.get(key)\n pred_label = pred_entiy_set.get(key)\n\n if tuple(real_label) == tuple(pred_label):\n pred_true_entity_set.setdefault(key, real_label)\n\n TP_add_FP = len(pred_entiy_set)\n TP = len(pred_true_entity_set)\n TP_add_FN = len(real_entiy_set)\n\n if TP_add_FP != 0:\n precision = TP / TP_add_FP\n else:\n precision = 0\n\n if TP_add_FN != 0:\n recall = TP / TP_add_FN\n else:\n recall = 0\n\n if recall + precision != 0:\n F1 = 2 * recall * precision / (recall + precision)\n else:\n F1 = 0\n\n return precision, recall, F1","repo_name":"thinksoso/ML_Foundation_Of_HI","sub_path":"code_assignment/assignment4/zhangxiangxu/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"29276430849","text":"import pyaudio\nimport numpy as np\nimport wave\nimport time\n \nCHUNK = 2**10\nRATE = 22050\n\nclass recorder:\n def __init__(self):\n self.p=pyaudio.PyAudio()\n self.stream=self.p.open(format=pyaudio.paInt16,channels=1,rate=RATE,input=True,frames_per_buffer=CHUNK,input_device_index=0)\n self.shutdown=False\n self.audioFrames=[]\n def run(self):\n time.sleep(3)\n while(True):\n if self.shutdown:\n break\n data = np.frombuffer(self.stream.read(CHUNK),dtype=np.int16)\n self.audioFrames.append(data)\n def stop(self):\n self.shutdown=True\n self.stream.stop_stream()\n self.stream.close()\n self.p.terminate()\n self.save()\n def save(self):\n wf = wave.open('./module/audiosource/temp.wav', 'wb')\n wf.setnchannels(1)\n wf.setsampwidth(self.p.get_sample_size(pyaudio.paInt16))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(self.audioFrames))\n wf.close()\n","repo_name":"YUYUJIN/PerfectScore","sub_path":"module/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11422545700","text":"#!/usr/bin/python3\n\nimport zmq\nimport sys\nimport os\nra_server_info=\"/home/root/.ra_server_info\"\nip=\"192.168.0.2\"\nport=17688\nif __name__ == '__main__':\n check_time = 10\n if os.path.isfile(ra_server_info):\n try:\n f=open(ra_server_info, 'r')\n info=f.read()\n ip=info.split(\":\")[0]\n port=info.split(\":\")[1]\n print(\"read from file, ip:\", ip)\n print(\"read from file, port:\", port)\n except:\n print(\"no ip&port in .ra_server_info\")\n else:\n sys.exit(check_time)\n n = len(sys.argv)\n\n '''for i in 
sys.argv[1:]:\n        print(\"i:\", i)'''\n    send_msg = sys.argv[1]\n    check_time = sys.argv[2]\n    print(\"check time :\", check_time)\n    \n    context = zmq.Context()\n    socket = context.socket(zmq.REQ)\n    ra_server_target = \"tcp://\" + ip + \":\" + port\n    socket.connect(ra_server_target)\n    socket.send(send_msg.encode(), zmq.NOBLOCK)\n\n    p = zmq.Poller()\n    p.register(socket, zmq.POLLIN)\n    socks = dict(p.poll(1000))\n    if socket in socks and socks[socket] == zmq.POLLIN:\n        pass\n    else:\n        #timeout\n        print(\"timeout!\")\n        print(\"check time :\", check_time)\n        sys.exit(10)\n    print(\"ready to recv\")\n    message = socket.recv()\n    #message = b'OK,55'\n    print(\"recv %s\" % message)\n    str_message = message.decode()\n    check_time = int(str_message.split(\",\")[1])\n\n    sys.exit(check_time)\n","repo_name":"airjason13/meta-venom-honister","sub_path":"recipes-apps/lcsmi/lcsmi/ext/ra_zmq_send.py","file_name":"ra_zmq_send.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18186096162","text":"\"\"\"\nModule charts\n\"\"\"\n\nfrom http import HTTPStatus\nfrom django.http import JsonResponse\nfrom django.template.loader import render_to_string\nfrom django.views.generic import View\n\nfrom charts.app_config import CHARTS_DEFAULT_JS_LIB\n\nfrom .charts.base import AbstractDashboard\n\n\nclass ChartJsonView(View):\n    \"\"\"\n    Provide chart data\n    \"\"\"\n\n    #pylint: disable=unused-argument\n    def get(self, request, *args, **kwargs):\n        \"\"\"\n        Return chart data according to its id (html)\n        \"\"\"\n        # pylint: disable=assignment-from-none\n        dashboard = self.get_dashboard(request, *args, **kwargs)\n\n        if dashboard:\n            #pylint: disable=not-callable\n            charts_data = dashboard(request).get_charts_data()\n            charts = []\n            for chart_data in charts_data:\n                charts.append(\n                    {\n                        'div_id': chart_data.div_id,\n                        'type': chart_data.chart.chart_type,\n                        'data': chart_data.chart.get_json_data(request.theme)\n                    }\n                )\n\n            return JsonResponse(\n                {\n                    'settings': {'chartlib': CHARTS_DEFAULT_JS_LIB},\n                    'template': render_to_string(dashboard.template_name, request=request),\n                    'charts': charts\n                },\n                status=HTTPStatus.OK)\n\n        return JsonResponse({\"error\": 'Chart not found'}, status=HTTPStatus.NOT_FOUND)\n\n    def get_dashboard(self, request, *args, **kwargs) -> AbstractDashboard:\n        \"\"\"\n        Return dashboard\n        \"\"\"\n        return None\n","repo_name":"suportemr7bank/speedseven","sub_path":"charts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14577697754","text":"from django.forms import widgets\nfrom django.utils.html import escape, conditional_escape\nfrom django.utils.safestring import mark_safe\nfrom django.conf import settings\n\nclass WysiwygWidget(widgets.Textarea):\n    class Media:\n        js = (settings.JSITES_MEDIA_PREFIX+'/js/jquery.wysiwyg.js',)\n        css = {'all': (settings.JSITES_MEDIA_PREFIX+'/css/jquery.wysiwyg.css',)}\n\n    def render(self, name, value, attrs={}):\n        self.attrs['id'] = 'id_%s' % name\n        html = super(WysiwygWidget, self).render(name, value, attrs)\n        js = u\"\"\"\n\n        \"\"\" % self.attrs\n        html+= js\n        return mark_safe(html)\n","repo_name":"jpic/jsites","sub_path":"resources/form/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36837517755","text":"import numpy as np\nfrom gym 
import Env\nfrom gym.spaces import Discrete, Box, Dict, MultiDiscrete\nfrom envs.model import Model\nfrom stable_baselines3 import PPO\n\n\n\n\nclass AgentsEnv(Env):\n\n    def __init__(self, state, flights, train_against_model=False):\n\n        self.flights = flights\n        self.initial_state = state.copy()\n\n        self.state = list(self.initial_state.values())\n\n        self.observation_space = MultiDiscrete([\n            len(flights),\n            len(flights),\n            len(flights),\n            len(flights)\n        ])\n\n        self.action_space = MultiDiscrete([len(flights), len(flights)]) \n\n        self.win = 0\n        self.lose = 0\n        self.ilegal_step = 0\n        self.tie = 0\n        self.episode_steps = 0\n        self.last_actions = [self.state[1], self.state[2]]\n\n        if train_against_model:\n            from envs.SpyEnv import SpyEnv\n            spy_env = SpyEnv(state, flights)\n            self.spyModel = Model.Model(spy_env, name='SpyEnv', isNew=False)\n        \n        self.train_against_model = train_against_model\n\n\n    def heuristicFunction(self, actions):\n        reward = 0\n\n        spy_to_agent1 = len(self.shortest_path(self.state[0], self.state[1]))-1\n        spy_to_agent2 = len(self.shortest_path(self.state[0], self.state[2]))-1\n\n        if spy_to_agent1 == 1 and actions[0] != self.state[0]:\n            reward -= 0.1\n\n        if spy_to_agent2 == 1 and actions[1] != self.state[0]:\n            reward -= 0.1\n\n        if actions[0] == actions[1]:\n            reward -= 0.1\n\n        return reward\n\n\n\n    #action represents the index of an airport in the array\n    def step(self, actions):\n        self.episode_steps+=1\n        reward = 0\n        reward = self.heuristicFunction(actions)\n        done = False\n        info = {}\n\n        self.last_actions = [self.state[1], self.state[2]]\n\n        if(self.episode_steps > 30):\n            self.tie +=1\n            return self.state, -2, True, info\n\n\n        for i in range(len(actions)):\n            #check if actions are legal\n            legal_flights = self.getPossibleFlightsFromCurrentPosition(self.state[i+1])\n            if(actions[i] not in legal_flights):\n                self.ilegal_step +=1\n                reward = -3\n                return self.state, reward, True, info\n        \n        #Check if the spy loses\n        if self.isSpyAndAgentInSamePosition(): \n            self.win +=1\n            reward = 1\n            done = True\n        # Check if Spy wins\n        elif self.state[0] == self.state[3] and not self.isSpyAndAgentInSamePosition(): \n            self.lose +=1\n            reward = -1\n            done = True  \n        else:\n            #move agents\n            for i in range(len(actions)):\n                self.state[i+1] = actions[i]\n\n            #Calculate reward\n            if self.isSpyAndAgentInSamePosition(): \n                self.win +=1\n                reward = 1\n                done = True\n            \n            if not done:\n                #Spy move\n                self.moveOpponentSpy()\n        \n\n        return self.state, reward, done, info\n    \n    def isSpyAndAgentInSamePosition(self):\n        return self.state[0] == self.state[1] or self.state[0] == self.state[2]\n\n    #currentPosition is a Tuple (row,col)\n    def getPossibleFlightsFromCurrentPosition(self, currentPosition):\n        return self.flights[currentPosition]['destinations']\n    \n    def mask_actions(self):\n        mask_actions = []\n        for i in range(2):\n            agent_mask = []\n            valid_actions = self.getPossibleFlightsFromCurrentPosition(self.state[i+1])\n            for j in range(len(self.flights)):\n                if j in valid_actions and j != self.last_actions[i]: # \n                    agent_mask.append(True)\n                    continue\n                agent_mask.append(False)\n            mask_actions.append(agent_mask)\n        mask_actions = np.array(mask_actions)\n        return mask_actions\n\n\n    #Move spy\n    def moveOpponentSpy(self):\n        if self.train_against_model:\n            #get action from the Spy model\n            action = self.spyModel.predict(self.state)\n            action = action.item()\n        else:\n            #spy will move to the target by shortest path \n            action = self.get_spy_next_airPort_by_shortest_path(self.state[0])\n        #update spy to new position\n        self.state[0] = action\n\n\n    #self.col represents the number of 
columns in the grid\n def getAirPortIndex(self, currentPosition):\n return currentPosition[0] * self.col + currentPosition[1]\n \n def get_spy_next_airPort_by_shortest_path(self, spy_airport_index):\n #calculate shortest path between spy to target\n path = self.shortest_path(spy_airport_index, self.state[3])\n return path[1]\n \n def shortest_path(self, node1, node2):\n rnd = False\n path_list = [[node1]]\n path_index = 0\n # To keep track of previously visited nodes\n previous_nodes = {node1}\n if node1 == node2:\n return path_list[0]\n \n while path_index < len(path_list):\n current_path = path_list[path_index]\n last_node = current_path[-1]\n next_nodes = self.flights[last_node]['destinations']\n # Search goal node\n if node2 in next_nodes:\n current_path.append(node2)\n return current_path\n \n # Add new paths\n for next_node in next_nodes:\n if not next_node in previous_nodes:\n new_path = current_path[:]\n new_path.append(next_node)\n path_list.append(new_path)\n # To avoid backtracking\n previous_nodes.add(next_node)\n # Continue to next path in list\n path_index += 1\n # No path is found\n return []\n\n def render(self):\n return\n \n def reset(self):\n self.state = list(self.initial_state.values())\n self.moveOpponentSpy()\n\n self.episode_steps = 0\n self.last_actions = [self.state[1], self.state[2]]\n return self.state\n\n def stats(self):\n return self.win, self.lose, self.ilegal_step, self.tie\n\n def reset_stats(self):\n self.win = 0\n self.lose = 0\n self.ilegal_step = 0\n self.tie = 0\n\n\n","repo_name":"rupcgroup4/ruppinCs_2023_7","sub_path":"Model/envs/AgentsEnv.py","file_name":"AgentsEnv.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3325702190","text":"\"\"\"Main module of AS-AP-TuneCorr IOC.\"\"\"\n\nimport numpy as _np\nfrom epics import PV as _PV\n\nfrom ..envars import VACA_PREFIX as _vaca_prefix\nfrom ..namesys import SiriusPVName as _SiriusPVName\n\nfrom .csdev import Const as _Const\nfrom .base import BaseApp as _BaseApp\n\n\nclass TuneCorrApp(_BaseApp):\n \"\"\"Main application for handling tune correction.\"\"\"\n\n _optics_param = 'tune'\n\n def __init__(self, acc):\n \"\"\"Class constructor.\"\"\"\n super().__init__(acc)\n\n # consts\n self._delta_tunex = 0.0\n self._delta_tuney = 0.0\n\n self._set_new_refkl_cmd_count = 0\n\n if self._acc == 'SI':\n self._meas_config_dkl_qf = 0.020\n self._meas_config_dkl_qd = 0.020\n\n # Connect to Quadrupoles Families\n self._psfam_refkl = {fam: 0 for fam in self._psfams}\n self._lastcalc_deltakl = {fam: 0 for fam in self._psfams}\n for fam in self._psfams:\n pvname = _SiriusPVName(self._acc+'-Fam:PS-'+fam+':KL-RB')\n pvname = pvname.substitute(prefix=_vaca_prefix)\n self._psfam_intstr_rb_pvs[fam] = _PV(\n pvname,\n callback=[self._callback_init_refkl,\n self._callback_estimate_deltatune],\n connection_timeout=0.05)\n\n self.map_pv2write.update({\n 'DeltaTuneX-SP': self.set_dtune_x,\n 'DeltaTuneY-SP': self.set_dtune_y,\n 'SetNewRefKL-Cmd': self.cmd_set_newref,\n 'MeasConfigDeltaKLFamQF-SP': self.set_meas_config_dkl_qf,\n 'MeasConfigDeltaKLFamQD-SP': self.set_meas_config_dkl_qd,\n })\n\n def update_corrparams_pvs(self):\n \"\"\"Set initial correction parameters PVs values.\"\"\"\n self.run_callbacks('RespMat-Mon', self._nominal_matrix)\n self.run_callbacks('NominalKL-Mon', self._psfam_nom_intstr)\n\n # ------ handle pv write methods -------\n\n def set_dtune_x(self, value):\n \"\"\"Set DeltaTuneX.\"\"\"\n 
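        # Store the requested horizontal tune shift, echo it on the readback PV,
        # and recompute the corresponding quadrupole strength deltas.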
self._delta_tunex = value\n        self.run_callbacks('DeltaTuneX-RB', value)\n        self._calc_intstrength()\n        return True\n\n    def set_dtune_y(self, value):\n        \"\"\"Set DeltaTuneY.\"\"\"\n        self._delta_tuney = value\n        self.run_callbacks('DeltaTuneY-RB', value)\n        self._calc_intstrength()\n        return True\n\n    def cmd_set_newref(self, value):\n        \"\"\"SetNewRefKL command.\"\"\"\n        if self._update_ref():\n            self._set_new_refkl_cmd_count += 1\n            self.run_callbacks(\n                'SetNewRefKL-Cmd', self._set_new_refkl_cmd_count)\n        return False\n\n    def set_meas_config_dkl_qf(self, value):\n        \"\"\"Set MeasConfigDeltaKLFamQF.\"\"\"\n        if value == self._meas_config_dkl_qf:\n            return False\n        self._meas_config_dkl_qf = value\n        self.run_callbacks('MeasConfigDeltaKLFamQF-RB', value)\n        return True\n\n    def set_meas_config_dkl_qd(self, value):\n        \"\"\"Set MeasConfigDeltaKLFamQD.\"\"\"\n        if value == self._meas_config_dkl_qd:\n            return False\n        self._meas_config_dkl_qd = value\n        self.run_callbacks('MeasConfigDeltaKLFamQD-RB', value)\n        return True\n\n    # ---------- auxiliary methods ----------\n\n    def _handle_corrparams_2_read(self, params):\n        \"\"\"Edit correction params.\"\"\"\n        nom_matrix = [item for sublist in params['matrix'] for item in sublist]\n        nom_kl = params['nominal KLs']\n        nom_deltakl = [0.0, 0.0]\n        return nom_matrix, nom_kl, nom_deltakl\n\n    def _handle_corrparams_2_save(self):\n        matrix = _np.array(self._nominal_matrix)\n        matrix = _np.reshape(matrix, [2, len(self._psfams)])\n\n        value = {'matrix': matrix,\n                 'nominal KLs': self._psfam_nom_intstr}\n        return value\n\n    def _calc_intstrength(self):\n        method = 0 \\\n            if self._corr_method == _Const.CorrMeth.Proportional \\\n            else 1\n        grouping = '2knobs' \\\n            if self._corr_group == _Const.CorrGroup.TwoKnobs \\\n            else 'svd'\n        lastcalc_deltakl = self._opticscorr.calculate_delta_intstrengths(\n            method=method, grouping=grouping,\n            delta_opticsparam=[self._delta_tunex, self._delta_tuney])\n\n        self.run_callbacks('Log-Mon', 'Calculated KL values.')\n\n        for fam_idx, fam in enumerate(self._psfams):\n            self._lastcalc_deltakl[fam] = lastcalc_deltakl[fam_idx]\n            self.run_callbacks(\n                'DeltaKL'+fam+'-Mon', self._lastcalc_deltakl[fam])\n\n    def _apply_corr(self):\n        if self._is_status_ok():\n            kls = {fam: self._psfam_refkl[fam]+self._lastcalc_deltakl[fam]\n                   for fam in self._psfams}\n            self._apply_intstrength(kls)\n            self.run_callbacks('Log-Mon', 'Applied correction.')\n\n            if self._sync_corr == _Const.SyncCorr.On:\n                self._event_exttrig_cmd.put(0)\n                self.run_callbacks('Log-Mon', 'Generated trigger.')\n            return True\n\n        self.run_callbacks('Log-Mon', 'ERR: ApplyDelta-Cmd failed.')\n        return False\n\n    def _get_optics_param(self):\n        \"\"\"Return optics parameter.\"\"\"\n        return self._get_tunes()\n\n    def _get_delta_intstrength(self, fam):\n        \"\"\"Get delta to apply in each family.\"\"\"\n        if 'QF' in fam:\n            deltakl = self._meas_config_dkl_qf\n        else:\n            deltakl = self._meas_config_dkl_qd\n        fam_idx = self._psfams.index(fam)\n        nelm = self._psfam_nelm[fam_idx]\n        return deltakl/nelm\n\n    def _update_ref(self):\n        if (self._status & 0x1) == 0:  # Check connection\n            # update references\n            for fam in self._psfams:\n                value = self._psfam_intstr_rb_pvs[fam].get()\n                if value is None:\n                    self.run_callbacks(\n                        'Log-Mon',\n                        'ERR: Received a None value from {}.'.format(fam))\n                    return False\n                self._psfam_refkl[fam] = value\n                self.run_callbacks(\n                    'RefKL' + fam + '-Mon', self._psfam_refkl[fam])\n\n                self._lastcalc_deltakl[fam] = 0\n                self.run_callbacks('DeltaKL' + fam + '-Mon', 0)\n\n            # the deltas from new kl references are zero\n            self._delta_tunex 
= 0\n self._delta_tuney = 0\n self.run_callbacks('DeltaTuneX-SP', self._delta_tunex)\n self.run_callbacks('DeltaTuneX-RB', self._delta_tunex)\n self.run_callbacks('DeltaTuneY-SP', self._delta_tuney)\n self.run_callbacks('DeltaTuneY-RB', self._delta_tuney)\n\n self._estimate_current_deltatune()\n\n self.run_callbacks('Log-Mon', 'Updated KL references.')\n return True\n\n self.run_callbacks(\n 'Log-Mon', 'ERR: Some magnet family is disconnected.')\n return False\n\n def _estimate_current_deltatune(self):\n psfam_deltakl = len(self._psfams)*[0]\n for fam_idx, fam in enumerate(self._psfams):\n psfam_deltakl[fam_idx] = \\\n self._psfam_intstr_rb[fam] - self._psfam_refkl[fam]\n self._optprm_est = self._opticscorr.calculate_opticsparam(\n psfam_deltakl)\n self.run_callbacks('DeltaTuneX-Mon', self._optprm_est[0])\n self.run_callbacks('DeltaTuneY-Mon', self._optprm_est[1])\n\n # ---------- callbacks ----------\n\n def _callback_init_refkl(self, pvname, value, cb_info, **kws):\n \"\"\"Initialize RefKL-Mon pvs and remove this callback.\"\"\"\n # Get reference\n if value is None:\n return\n fam = _SiriusPVName(pvname).dev\n self._psfam_refkl[fam] = value\n self.run_callbacks('RefKL'+fam+'-Mon', self._psfam_refkl[fam])\n\n # Remove callback\n cb_info[1].remove_callback(cb_info[0])\n\n def _callback_estimate_deltatune(self, pvname, value, **kws):\n if value is None:\n return\n fam = _SiriusPVName(pvname).dev\n self._psfam_intstr_rb[fam] = value\n self._estimate_current_deltatune()\n","repo_name":"lnls-sirius/dev-packages","sub_path":"siriuspy/siriuspy/opticscorr/tune.py","file_name":"tune.py","file_ext":"py","file_size_in_byte":8006,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"44760466147","text":"\"\"\" Converts passage level ranking into document level ranking and de-duplicates\nthe documents in the ranking. \"\"\"\n\nimport csv\n\n\ndef de_duplicate_ranking(runfile_path):\n doc_ids = set()\n with open(runfile_path, \"r\") as f_in, open(\n \"data/runs/2021/input.clarke-cc_deduplicated\", \"w\"\n ) as trec_dedup_out:\n reader = csv.reader(f_in, delimiter=\" \")\n q_ids = set()\n for row in reader:\n q_id, _, doc_id, rank, score, run_id = row\n doc_id = doc_id.split(\"-\")[0]\n if q_id not in q_ids:\n doc_ids = set()\n if doc_id not in doc_ids:\n q_ids.add(q_id)\n doc_ids.add(doc_id)\n trec_dedup_out.write(\n \" \".join(\n [\n q_id,\n \"Q0\",\n doc_id,\n rank,\n score,\n run_id,\n ]\n )\n + \"\\n\"\n )\n\n\nif __name__ == \"__main__\":\n de_duplicate_ranking(\"data/runs/2021/input.clarke-cc\")\n","repo_name":"iai-group/ecir2023-reproducibility","sub_path":"treccast/core/util/ranking_deduplication.py","file_name":"ranking_deduplication.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"4705072263","text":"'''\ncgat_clean.py - remove incomplete files\n=======================================\n\n\nPurpose\n-------\n\nThis script looks at files matching a certain pattern and will remove\nincomplete files. File completeness is determined by the file itself\nor an associated log-file.\n\nFor example, the file :file:`sample1.tsv` is deemed complete if:\n\n1. :file:`sample1.tsv.log` exists and ends in\n ``# job finished ...``,\n2. 
:file:`sample1.tsv` exists and ends in ``# job finished ...``\n\nUsage\n-----\n\nExample::\n\n   python cgat_clean.py *.tsv\n\nType::\n\n   python cgat_clean.py --help\n\nfor command line help.\n\nCommand line options\n--------------------\n\n'''\n\nimport os\nimport sys\n\nimport CGAT.Experiment as E\nimport CGAT.IOTools as IOTools\n\n\ndef main(argv=None):\n    \"\"\"script main.\n\n    parses command line options in sys.argv, unless *argv* is given.\n    \"\"\"\n\n    if argv is None:\n        argv = sys.argv\n\n    # setup command line parser\n    parser = E.OptionParser(version=\"%prog version: $Id$\",\n                            usage=globals()[\"__doc__\"])\n\n    parser.add_option(\"-n\", \"--dry-run\", dest=\"dry_run\", action=\"store_true\",\n                      help=\"dry run, do not delete any files [%default]\")\n\n    parser.set_defaults(dry_run=False)\n\n    # add common options (-h/--help, ...) and parse command line\n    (options, args) = E.Start(parser, argv=argv)\n\n    filenames = args\n\n    c = E.Counter()\n    for filename in filenames:\n        c.checked += 1\n        if os.path.exists(filename + \".log\"):\n            if IOTools.isComplete(filename + \".log\"):\n                c.complete += 1\n                continue\n\n        if IOTools.isComplete(filename):\n            c.complete += 1\n            continue\n\n        c.incomplete += 1\n        E.info('deleting %s' % filename)\n        if options.dry_run:\n            continue\n        os.unlink(filename)\n        c.deleted += 1\n\n    E.info(c)\n\n    # write footer and output benchmark information.\n    E.Stop()\n\nif __name__ == \"__main__\":\n    sys.exit(main(sys.argv))\n","repo_name":"CGATOxford/CGATPipelines","sub_path":"scripts/cgat_clean.py","file_name":"cgat_clean.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"54"} +{"seq_id":"39201301386","text":"import numpy as np\nletters=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n#-------Create Vigenere matrix----------------------------------\ndef createVignere(letters):\n    vignere = np.arange(676)\n    vignere = list(map(lambda x: '', vignere))  #Convert the array entries to strings so it can hold letters\n    vignere = np.array(vignere).reshape((26,26))  #Reshape the array into a 26x26 matrix\n\n    for i in range(26):\n        counter=i\n        for j in range(26):\n            vignere[i][j]=letters[counter%26]\n            counter=counter+1\n    return vignere\n#-------------------------------------------------------------\n\n\n\nprint(\"Welcome to the Vigenere Cipher app!\")\nchoice=input(\"Insert 1 for ciphering a message or 0 for deciphering a message\\n\")\n\nif choice == '1':#Encrypt\n    \n    message=input(\"Please insert the message to be enciphered: \")\n    key=input(\"Please insert the key to codify: \")\n    t=input(\"Please insert the t parameter: \")\n\n\n    #----Change and fix format-------\n\n    #Eliminate blank spaces\n    message=message.replace(' ','')\n\n    #Lowercase the strings\n    message=message.lower()\n\n    #--------------------\n\n    vignere=createVignere(letters)\n\n    #Create numpy arrays\n    message = np.array(list(message)) \n    key = np.array(list(key))\n    crypted_message=np.copy(message)\n\n    letters2=np.array(letters)\n\n    #Codify message\n    for i in range(len(message)):\n        indexM=np.where(letters2[:]==message[i])\n        indexM=list(map(int,indexM))\n        indexK=np.where(letters2[:]==key[i%len(key)])\n        indexK=list(map(int,indexK))\n        crypted_message[i]=vignere[indexK[0]][indexM[0]]\n\n\n    # Shape the crypted message\n\n    while len(crypted_message)%int(t) != 0:\n        crypted_message = np.append(crypted_message,[' '])\n\n    print(str(crypted_message.reshape((-1,int(t)))).replace('[',' ').replace(']',' 
').replace('\\'','').replace('\\n','').replace(' ',' '))\n\nelif choice == '0':#Decrypt\n crypted_message=input(\"Please insert the message to be deciphered: \")\n key=input(\"Please insert the key to codify: \")\n t=input(\"Please insert the t parameter: \")\n\n #----Change and fix format-------\n\n #Eliminate blank spaces\n crypted_message=crypted_message.replace(' ','')\n\n #Lowercase the strings\n crypted_message=crypted_message.lower()\n\n #--------------------\n\n vignere=createVignere(letters)\n\n #Create numpy arrays\n crypted_message = np.array(list(crypted_message)) \n key = np.array(list(key))\n\n letters2=np.array(letters)\n\n #Decode message\n for i in range(len(crypted_message)):\n #indexM=np.where(letters2[:]==message[i])\n #indexM=list(map(int,indexM))\n\n indexK=np.where(letters2[:]==key[i%len(key)])\n indexK=list(map(int,indexK))\n\n indexV=np.where(vignere[indexK,:]==crypted_message[i])\n indexV=list(map(int,indexV))\n\n crypted_message[i]=letters2[indexV[1]]\n\n # Shape the crypted message\n\n while len(crypted_message)%int(t) != 0:\n crypted_message = np.append(crypted_message,[' '])\n\n print(str(crypted_message.reshape((-1,int(t)))).replace('[',' ').replace(']',' ').replace('\\'','').replace('\\n','').replace(' ',' '))\n\nelse:\n print(\"Please insert a valid option\")","repo_name":"sespinosab/CryptografyCodes","sub_path":"Vigenere Cipher/Vigenere Cipher.py","file_name":"Vigenere Cipher.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39003960426","text":"# Author: Runar Fosse\n# Time complexity: O(1)\n# Space complexity: O(1)\n\nclass Solution:\n def isReachableAtTime(self, sx: int, sy: int, fx: int, fy: int, t: int) -> bool:\n dx, dy = abs(sx - fx), abs(sy - fy)\n if not dx and not dy:\n # If you start at (fx, fy) you can only return for t >= 2\n return t != 1\n\n # First find how far you can walk diagonally\n diagonal = min(dx, dy)\n\n # Then the rest is a straight walk\n distance = diagonal + max(dx - diagonal, dy - diagonal)\n return distance <= t\n \n# If you can reach the cell at any time s, you can reach it in any other time s+n\n# where n > 0 simply by taking a detour. Therefore, this problem is asking if \n# we can reach (fx, fy) from (sx, sy) in t time. 
This is a simple distance check.","repo_name":"RunarFosse/leetcode","sub_path":"Medium/determine-if-a-cell-is-reachable-at-a-given-time.py","file_name":"determine-if-a-cell-is-reachable-at-a-given-time.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12539484882","text":"import pandas as pd\n\nfrom src.static.static_values_enum import Edition, Leagues\n\ncredit_icon = \"![credit.png](https://images.hive.blog/20x0/https://files.peakd.com/file/peakd-hive/beaker007/\" \\\n \"AK3iY7Tb28oEV8oALeHvUbBpKjWxvADTHcaqtPSL4C2YzcJ4oZLp36MAiX3qGNw.png)\"\ndec_icon = \"![dec.png](https://images.hive.blog/20x0/https://files.peakd.com/file/peakd-hive/beaker007/\" \\\n \"AJoDPJLp3GXfJPTZijeTGTaHE5K7vzdhCXUedhPRnp6kKhanQnpfwzfnemFdz2x.png)\"\nsps_icon = \"![sps.png](https://images.hive.blog/20x0/https://files.peakd.com/file/peakd-hive/beaker007/\" \\\n \"AKNLw1pd6ryatb2Rg9VHbWEWWUMupgMEtxYsJyxckcGH1Hb7YoxC1cFdNv37tW3.png)\"\nvoucher_icon = \"![voucher.png](https://images.hive.blog/20x0/https://files.peakd.com/file/peakd-hive/beaker007/\" \\\n \"Eo8RPwT4kQnGyvkNp9Vx1kLpFYYVhKSy88Fsy7YrAStKwrHCRX6GNvhywGxPbQpW2bu.png)\"\nmerits_icon = \"![merits.png](https://images.hive.blog/20x0/\" \\\n \"https://d36mxiodymuqjm.cloudfront.net/website/icons/img_merit_256.png)\"\ngold_potion_icon = \"![alchemy.png](https://images.hive.blog/20x0/https://files.peakd.com/file/peakd-hive/beaker007/AK6ZKi4NWxuWbnhNc1V3k9DeqiqhTvmcenpsX5xhHUFdBGEYTMfMpsnC9aHL7R2.png)\"\nlegendary_potion_icon = \"![legendary.png](https://images.hive.blog/20x0/https://files.peakd.com/file/peakd-hive/beaker007/AK3gbhdHjfaQxKVM39VfeHCw25haYejvUT17E8WBgveTKY5rucpRY7AbjgsAhdu.png)\"\npacks_icon = \"![chaosPack.png](https://images.hive.blog/20x0/https://files.peakd.com/file/peakd-hive/beaker007/Eo8M4f1Zieju9ibwbs6Tnp3KvN9Kb93HkqwMi3FqanTmV2XoNw7pmV4MbjDSxbgiSdo.png)\"\n\nintro_img = \"https://images.hive.blog/0x0/https://files.peakd.com/file/peakd-hive/beaker007/23tvcWHTtW5SAv63Z2J86Zuyvn5Jk2BQQ5qBQrGBvv5hRm1DUaVKJN4Z8X9eFfSokovT1.png\"\n\ndef get_last_season_statistics_table(last_season_wild_battles, last_season_modern_battles):\n if not last_season_wild_battles.empty and not last_season_wild_battles.rating.isna().values[0]:\n last_season_wild_battles = last_season_wild_battles.iloc[0]\n wild_league = last_season_wild_battles.league.astype(int)\n wild_battles = int(last_season_wild_battles.battles)\n wild_rank = int(last_season_wild_battles['rank'])\n wild_rating = int(last_season_wild_battles.rating)\n wild_league_name = Leagues(wild_league).name\n wild_max_rating = int(last_season_wild_battles.max_rating)\n wild_win = int(last_season_wild_battles.wins)\n wild_win_pct = round((wild_win / wild_battles * 100), 2)\n wild_longest_streak = int(last_season_wild_battles.longest_streak)\n wild_ratio = round(wild_win / (wild_battles - wild_win), 2)\n wild_loss = wild_battles - wild_win\n else:\n wild_league = 0\n wild_league_name = \"NA\"\n wild_battles = \"NA\"\n wild_rank = \"NA\"\n wild_rating = \"NA\"\n wild_max_rating = \"NA\"\n wild_win = \"NA\"\n wild_win_pct = \"NA\"\n wild_longest_streak = \"NA\"\n wild_ratio = \"NA\"\n wild_loss = \"NA\"\n\n if not last_season_modern_battles.empty and not last_season_modern_battles.rating.isna().values[0]:\n last_season_modern_battles = last_season_modern_battles.iloc[0]\n modern_league = last_season_modern_battles.league.astype(int)\n modern_battles = int(last_season_modern_battles.battles)\n modern_rank = 
int(last_season_modern_battles['rank'])\n modern_rating = int(last_season_modern_battles.rating)\n modern_league_name = Leagues(modern_league).name\n modern_max_rating = int(last_season_modern_battles.max_rating)\n modern_win = int(last_season_modern_battles.wins)\n modern_win_pct = round((modern_win / modern_battles * 100), 2)\n modern_longest_streak = int(last_season_modern_battles.longest_streak)\n modern_ratio = round(modern_win / (modern_battles - modern_win), 2)\n modern_loss = modern_battles - modern_win\n else:\n modern_league = 0\n modern_league_name = \"NA\"\n modern_battles = \"NA\"\n modern_rank = \"NA\"\n modern_rating = \"NA\"\n modern_max_rating = \"NA\"\n modern_win = \"NA\"\n modern_win_pct = \"NA\"\n modern_longest_streak = \"NA\"\n modern_ratio = \"NA\"\n modern_loss = \"NA\"\n\n wild_league_logo = \"https://images.hive.blog/75x0/https://d36mxiodymuqjm.cloudfront.net/website/icons/leagues/wild_150/league_\" + str(\n wild_league) + \".png\"\n modern_league_logo = \"https://images.hive.blog/75x0/https://d36mxiodymuqjm.cloudfront.net/website/icons/leagues/modern_150/league_\" + str(\n modern_league) + \".png\"\n extra_space = \"     \"\n result = \"| Statistic | \" + wild_league_logo + \"
\" + extra_space + \"Wild| \" + modern_league_logo + \"
\" + extra_space + \"Modern | \\n\"\n result += \"| - | - | - |\\n\"\n result += \"| Battles | \" + str(wild_battles) + \" | \"\n result += str(modern_battles) + \" | \\n\"\n result += \"| Rank | \" + str(wild_rank) + \" | \"\n result += str(modern_rank) + \" | \\n\"\n result += \"| Rating | \" + str(wild_rating) + \" - \" + str(wild_league_name) + \" | \"\n result += str(modern_rating) + \" - \" + str(modern_league_name) + \" | \\n\"\n result += \"| Rating High | \" + str(wild_max_rating) + \" | \"\n result += str(modern_max_rating) + \" | \\n\"\n result += \"| Ratio (Win/Loss) | \" + str(wild_ratio) + \" (\" + str(wild_win) + \"/\" + str(wild_loss) + \") |\"\n result += str(modern_ratio) + \" (\" + str(modern_win) + \"/\" + str(modern_loss) + \") |\\n\"\n result += \"| Win PCT (Wins/battles * 100) | \" + str(wild_win_pct) + \" (\" + str(wild_win) + \"/\" + str(\n wild_battles) + \") |\"\n result += str(modern_win_pct) + \" (\" + str(modern_win) + \"/\" + str(modern_battles) + \") |\\n\"\n result += \"| Longest Streak | \" + str(wild_longest_streak) + \" |\"\n result += str(modern_longest_streak) + \" |\\n\"\n\n return result\n\n\ndef get_last_season_costs_table(account, season_info_store, skip_zeros):\n costs_rows = \"\"\n dec_df = season_info_store['dec']\n dec_df = dec_df.loc[(dec_df.player == account)].fillna(0)\n if not dec_df.empty:\n dec_df = dec_df.iloc[0]\n if 'cost_rental_payment' in dec_df:\n costs_rows += cost_earning_row(\"DEC rental payments\", dec_icon, dec_df.cost_rental_payment, skip_zeros)\n\n if 'rental_payment_fees' in dec_df:\n costs_rows += cost_earning_row(\"DEC rental fees\", dec_icon, dec_df.rental_payment_fees, skip_zeros)\n if 'enter_tournament' in dec_df:\n costs_rows += cost_earning_row(\"DEC tournament entry fees\", dec_icon, dec_df.enter_tournament,\n skip_zeros)\n if 'market_rental' in dec_df:\n costs_rows += cost_earning_row(\"DEC market rental\", dec_icon, dec_df.market_rental, skip_zeros)\n if 'purchased_energy' in dec_df:\n costs_rows += cost_earning_row(\"DEC purchased energy\", dec_icon,\n dec_df.purchased_energy, skip_zeros)\n if 'buy_market_purchase' in dec_df:\n costs_rows += cost_earning_row(\"DEC market buy\", dec_icon, dec_df.buy_market_purchase, skip_zeros)\n if 'market_fees' in dec_df:\n costs_rows += cost_earning_row(\"DEC market fees\", dec_icon, dec_df.market_fees, skip_zeros)\n if 'market_list_fee' in dec_df:\n costs_rows += cost_earning_row(\"DEC market list fee\", dec_icon, dec_df.market_list_fee, skip_zeros)\n \n sps_df = season_info_store['sps']\n sps_df = sps_df.loc[(sps_df.player == account)].fillna(0)\n if not sps_df.empty:\n sps_df = sps_df.iloc[0]\n if 'enter_tournament' in sps_df:\n costs_rows += cost_earning_row(\"SPS tournament entry fees\", sps_icon, sps_df.enter_tournament,\n skip_zeros)\n if 'delegation_modern' in sps_df:\n costs_rows += cost_earning_row(\"SPS ranked battle (modern) (fees)\", sps_icon, sps_df.delegation_modern, skip_zeros)\n if 'delegation_wild' in sps_df:\n costs_rows += cost_earning_row(\"SPS ranked battle (wild) (fees)\", sps_icon, sps_df.delegation_wild, skip_zeros)\n if 'delegation_focus' in sps_df:\n costs_rows += cost_earning_row(\"SPS daily focus (fees)\", sps_icon, sps_df.delegation_focus, skip_zeros)\n if 'delegation_season' in sps_df:\n costs_rows += cost_earning_row(\"SPS season (fees)\", sps_icon, sps_df.delegation_season, skip_zeros)\n if 'delegation_land' in sps_df:\n costs_rows += cost_earning_row(\"SPS land (fees)\", sps_icon, sps_df.delegation_land, skip_zeros)\n if 'delegation_nightmare' 
in sps_df:\n costs_rows += cost_earning_row(\"SPS nightmare (TD) (fees)\", sps_icon, sps_df.delegation_nightmare, skip_zeros)\n if 'delegation_brawl' in sps_df:\n costs_rows += cost_earning_row(\"SPS brawl delegation\", sps_icon, sps_df.delegation_brawl, skip_zeros)\n\n result = \"None\"\n if costs_rows != \"\":\n result = \"| Costs | # |\\n\"\n result += \"| - | - |\\n\"\n result += costs_rows\n\n return result\n\n\ndef cost_earning_row(title, icon, value, skip_zeros):\n if skip_zeros and value == 0:\n return \"\"\n else:\n return \"| \" + str(title) + \" | \" + icon + \" \" + str(round(value, 3)) + \" |\\n\"\n\n\ndef get_last_season_earnings_table(account, season_info_store, last_season_rewards, skip_zeros):\n earning_rows = \"\"\n dec_df = season_info_store['dec']\n dec_df = dec_df.loc[(dec_df.player == account)].fillna(0)\n if not dec_df.empty:\n dec_df = dec_df.iloc[0]\n if 'earn_rental_payment' in dec_df:\n earning_rows += cost_earning_row(\"DEC rental payments\", dec_icon, dec_df.earn_rental_payment, skip_zeros)\n if 'sell_market_purchase' in dec_df:\n earning_rows += cost_earning_row(\"DEC market sell\", dec_icon, dec_df.sell_market_purchase, skip_zeros)\n if 'tournament_prize' in dec_df:\n earning_rows += cost_earning_row(\"DEC tournament rewards\", dec_icon, dec_df.tournament_prize,\n skip_zeros)\n if 'modern_leaderboard_prizes' in dec_df:\n earning_rows += cost_earning_row(\"DEC modern leaderboard rewards\", dec_icon,\n dec_df.modern_leaderboard_prizes, skip_zeros)\n if 'leaderboard_prizes' in dec_df:\n earning_rows += cost_earning_row(\"DEC wild leaderboard rewards\", dec_icon,\n dec_df.leaderboard_prizes, skip_zeros)\n\n sps_df = season_info_store['sps']\n sps_df = sps_df.loc[(sps_df.player == account)].fillna(0)\n if not sps_df.empty:\n sps_df = sps_df.iloc[0]\n if 'tournament_prize' in sps_df:\n earning_rows += cost_earning_row(\"SPS tournament rewards\", sps_icon,\n sps_df.tournament_prize,\n skip_zeros)\n if 'token_transfer_multi' in sps_df:\n earning_rows += cost_earning_row(\"SPS tournament rewards (multi token)\", sps_icon,\n sps_df.token_transfer_multi,\n skip_zeros)\n if 'claim_staking_rewards' in sps_df:\n earning_rows += cost_earning_row(\"SPS staking reward\", sps_icon, sps_df.claim_staking_rewards,\n skip_zeros)\n if 'token_award' in sps_df:\n earning_rows += cost_earning_row(\"SPS token award (pools)\", sps_icon, sps_df.token_award, skip_zeros)\n \n unclaimed_sps_df = season_info_store['unclaimed_sps']\n unclaimed_sps_df = unclaimed_sps_df.loc[(unclaimed_sps_df.player == account)].fillna(0)\n if not unclaimed_sps_df.empty:\n unclaimed_sps_df = unclaimed_sps_df.iloc[0]\n if 'modern' in unclaimed_sps_df:\n earning_rows += cost_earning_row(\"SPS ranked battle (modern)\", sps_icon, unclaimed_sps_df.modern, skip_zeros)\n if 'wild' in unclaimed_sps_df:\n earning_rows += cost_earning_row(\"SPS ranked battle (wild)\", sps_icon, unclaimed_sps_df.wild, skip_zeros)\n if 'focus' in unclaimed_sps_df:\n earning_rows += cost_earning_row(\"SPS daily focus\", sps_icon, unclaimed_sps_df.focus, skip_zeros)\n if 'season' in unclaimed_sps_df:\n earning_rows += cost_earning_row(\"SPS season\", sps_icon, unclaimed_sps_df.season, skip_zeros)\n if 'land' in unclaimed_sps_df:\n earning_rows += cost_earning_row(\"SPS land\", sps_icon, unclaimed_sps_df.land, skip_zeros)\n if 'nightmare' in unclaimed_sps_df:\n earning_rows += cost_earning_row(\"SPS nightmare (TD) \", sps_icon, unclaimed_sps_df.nightmare, skip_zeros)\n if 'brawl' in unclaimed_sps_df:\n earning_rows += cost_earning_row(\"SPS 
brawl\", sps_icon, unclaimed_sps_df.brawl, skip_zeros)\n\n merits_df = season_info_store['merits']\n merits_df = merits_df.loc[(merits_df.player == account)].fillna(0)\n if not merits_df.empty:\n merits_df = merits_df.iloc[0]\n if 'quest_rewards' in merits_df:\n earning_rows += cost_earning_row(\"MERITS quest reward\", merits_icon, merits_df.quest_rewards,skip_zeros)\n if 'season_rewards' in merits_df:\n earning_rows += cost_earning_row(\"MERITS season rewards\", merits_icon, merits_df.season_rewards, skip_zeros)\n if 'brawl_prize' in merits_df:\n earning_rows += cost_earning_row(\"MERITS brawl prizes\", merits_icon, merits_df.brawl_prize, skip_zeros)\n \n voucher_df = season_info_store['vouchers']\n voucher_df = voucher_df.loc[(voucher_df.player == account)].fillna(0)\n if not voucher_df.empty:\n voucher_df = voucher_df.iloc[0]\n if 'claim_staking_rewards' in voucher_df:\n earning_rows += cost_earning_row(\"VOUCHER earned\", voucher_icon, voucher_df.claim_staking_rewards,\n skip_zeros)\n\n if not last_season_rewards.empty:\n potions = last_season_rewards[(last_season_rewards['type'] == 'potion')].groupby(['potion_type']).sum()\n packs = last_season_rewards[(last_season_rewards['type'] == 'pack')].groupby(['edition']).sum()\n if 'legendary' in potions.index:\n earning_rows += cost_earning_row(\"Legendary potions\", legendary_potion_icon,\n int(potions.loc['legendary'].quantity), skip_zeros)\n if 'gold' in potions.index:\n earning_rows += cost_earning_row(\"Gold potions\", gold_potion_icon, int(potions.loc['gold'].quantity),\n skip_zeros)\n if not packs.empty:\n earning_rows += cost_earning_row(\"CL Packs\", packs_icon, packs.loc[Edition.chaos.value].quantity,\n skip_zeros)\n\n result = \"None\"\n if earning_rows != \"\":\n result = \"| Earnings | # | \\n\"\n result += \"| - | - |\\n\"\n result += earning_rows\n\n return result\n\n\ndef get_tournament_info(tournaments_info):\n result = \"|Tournament name | League | finish / entrants | wins/losses/draws | entry fee | prize | \\n\"\n result += \"|-|-|-|-|-|-| \\n\"\n\n if not tournaments_info.empty:\n for index, tournament in tournaments_info.iterrows():\n if tournament.finish:\n result += \"| \" + tournament['name']\n result += \"| \" + tournament.league\n result += \"| \" + str(int(tournament.finish)) + \" / \" + str(int(tournament.num_players))\n result += \"| \" + str(int(tournament.wins)) + \" / \" + str(int(tournament.losses)) + \" / \" + str(\n int(tournament.draws))\n result += \"| \" + tournament.entry_fee\n result += \"| \" + tournament.prize_qty + \" \" + tournament.prize_type\n result += \"| \\n\"\n\n filters_sps_prizes = tournaments_info[tournaments_info.prize_type == \"SPS\"]\n total_sps_earned = pd.to_numeric(filters_sps_prizes[['prize_qty']].sum(1), errors='coerce').sum()\n\n filters_sps_entry_fee = tournaments_info[tournaments_info.entry_fee.str.contains(\"SPS\")].copy()\n split = filters_sps_entry_fee.loc[:, 'entry_fee'].str.split(\" \", expand=True)\n total_sps_fee = 0\n if not split.empty:\n filters_sps_entry_fee.loc[:, 'fee_qty'] = split[0]\n filters_sps_entry_fee.loc[:, 'fee_type'] = split[1]\n total_sps_fee = pd.to_numeric(filters_sps_entry_fee[['fee_qty']].sum(1), errors='coerce').sum()\n\n result += \"|**Total SPS** | | | | **\" + str(total_sps_fee) + \"**|**\" + str(total_sps_earned) + \"**| \\n\"\n\n return result\n\n\ndef get_card_table(cards_df, print_count=False):\n base_card_url = \"https://images.hive.blog/150x0/https://d36mxiodymuqjm.cloudfront.net/cards_by_level/\"\n\n if cards_df is not None and 
len(cards_df) > 0:\n unique_card_list = cards_df.card_name.unique()\n temp = pd.DataFrame()\n for card_name in unique_card_list:\n temp = pd.concat([temp, pd.DataFrame({\n 'card_name': card_name,\n 'quantity_regular': len(cards_df[(cards_df['card_name'] == card_name) & (cards_df['gold'] == False)]),\n 'quantity_gold': len(cards_df[(cards_df['card_name'] == card_name) & (cards_df['gold'] == True)]),\n 'edition_name': str(cards_df[(cards_df['card_name'] == card_name)].edition_name.values[0]),\n 'bcx': str(cards_df[(cards_df['card_name'] == card_name) & (cards_df['gold'] == False)].bcx.sum()),\n 'bcx_gold': str(cards_df[(cards_df['card_name'] == card_name) & (cards_df['gold'] == True)].bcx.sum())\n }, index=[0])], ignore_index=True)\n\n if len(temp.index) > 5:\n result = \"| | | | | |\\n\"\n result += \"|-|-|-|-|-|\\n\"\n else:\n # print all in one row\n table_row = \"|\"\n for i in range(0, len(temp.index)):\n table_row += \" |\"\n result = table_row + \"\\n\" + table_row.replace(\" \", \"-\") + \"\\n\"\n\n result += \"|\"\n for index, card in temp.iterrows():\n if index > 0 and index % 5 == 0:\n result += \"\\n\"\n\n prefix = str(base_card_url) + str(card.edition_name) + \"/\" + str(card.card_name).replace(\" \", \"%20\")\n count_str = \"\"\n gold_suffix = \"\"\n if card.quantity_regular > 0:\n bcx = str(card.bcx)\n if print_count:\n count_str = \"
\" + str(card.quantity_regular) + \"x\"\n if card.quantity_gold > 0:\n gold_suffix = \"_gold\"\n bcx = str(card.bcx_gold)\n if print_count:\n count_str = \"
\" + str(card.quantity_gold) + \"x\"\n\n card_image_url = prefix + \"_lv1\" + gold_suffix + \".png\"\n result += \"\" + str(card_image_url) + count_str + \"
bcx: \" + str(bcx)\n result += \" |\"\n else:\n result = \"None\"\n return result\n\n\ndef get_rewards_potion_packs_table(last_season_rewards):\n if not last_season_rewards.empty:\n gold_potion = \"![alchemy.png](https://images.hive.blog/120x0/https://files.peakd.com/file/peakd-hive/beaker007/AK6ZKi4NWxuWbnhNc1V3k9DeqiqhTvmcenpsX5xhHUFdBGEYTMfMpsnC9aHL7R2.png)\"\n legendary_potion = \"![legendary.png](https://images.hive.blog/120x0/https://files.peakd.com/file/peakd-hive/beaker007/AK3gbhdHjfaQxKVM39VfeHCw25haYejvUT17E8WBgveTKY5rucpRY7AbjgsAhdu.png)\"\n packs_img = \"![chaosPack.png](https://images.hive.blog/120x0/https://files.peakd.com/file/peakd-hive/beaker007/Eo8M4f1Zieju9ibwbs6Tnp3KvN9Kb93HkqwMi3FqanTmV2XoNw7pmV4MbjDSxbgiSdo.png)\"\n\n potions = last_season_rewards[(last_season_rewards['type'] == 'potion')].groupby(['potion_type']).sum()\n packs = last_season_rewards[(last_season_rewards['type'] == 'pack')].groupby(['edition']).sum()\n result = \"| Legendary | Gold | Packs |\\n\"\n result += \"|-|-|-|\\n\"\n result += \"| \" + str(legendary_potion) + \"
\" + str(potions.loc['legendary'].quantity) + \"x\"\n result += \"| \" + str(gold_potion) + \"
\" + str(potions.loc['gold'].quantity) + \"x\"\n if packs.empty:\n result += \"| \" + str(packs_img) + \"
0x\"\n else:\n result += \"| \" + str(packs_img) + \"
\" + str(packs.loc[7.0].quantity) + \"x\"\n\n result += \"|\\n\"\n return result\n else:\n return \"None\"\n\n\ndef get_introduction_chapter(account_names):\n account_suffix = \"\"\n if len(account_names) > 1:\n account_suffix = \" (\" + str(get_account_names_str(account_names)) + \")\"\n return intro_img + \"\"\"\n


\n![Season summary divider.png](https://files.peakd.com/file/peakd-hive/beaker007/23tSKXK2kCpyZXosK34FeU6MPbw4RGCrrs7TY1tgy4k5Lgndj2JNPEbpjr8JAgQ7kW8v1.png)\n\n#
Season Summary\"\"\" + str(account_suffix) + \"\"\"
\n \n\"\"\"\n\n\ndef get_closure_chapter():\n return \"\"\"\n

\n![Closing notes divider.png](https://files.peakd.com/file/peakd-hive/beaker007/23tSMhwJoyukZ42QAed1tFdaMc2XGwQZXAoTga9AByndMur5RT4oj5rMFeNJXwBeXr4tP.png)\n\n##
Closing notes
\nThis report is generated with the splinterlands statistics tool from @beaker007 [git-repo](https://github.com/gamerbeaker007/splinterlands-statistics). \nFor any comments, remarks or errors, pop me a message on peakd. \nIf you like the content, consider adding @beaker007 as a beneficiary of the posts you create with the help of this tool. \nhttps://images.hive.blog/0x0/https://files.peakd.com/file/peakd-hive/beaker007/23tkhySrnBbRV3iV2aD2jH7uuYJuCsFJF5j8P8EVG1aarjqSR7cRLRmuTDhji5MnTVKSM.png\n\n\nIf you are not playing splinterlands, consider using my referral link [beaker007](https://splinterlands.com?ref=beaker007).\n\nThanks all for reading\n\n
https://d36mxiodymuqjm.cloudfront.net/website/splinterlands_logo.png
\n\"\"\"\n\n\ndef get_plot_placeholder(account_name=None):\n account_suffix = \"\"\n if account_name:\n account_suffix = \" (\" + str(account_name) + \")\"\n\n return \"\"\"\n##
Season overall stats and history\"\"\" + str(account_suffix) + \"\"\"
\n\n### Battles\n\n### Earnings\n \n \n\"\"\"\n\n\ndef get_last_season_results(season_battles_wild, season_battles_modern, previous_season_id, account_name=None):\n\n account_suffix = \"\"\n if account_name:\n account_suffix = \" (\" + str(account_name) + \")\"\n return \"\"\"\n

\n![Season result divider.png](https://files.peakd.com/file/peakd-hive/beaker007/23tGwQHB4Z1zXu1MnXFvSF7REdndP7Gu67aQgWuwp9VoWurqjvGq81w2M6WkfCtovhXo4.png)\n#
Last Season results\"\"\" + str(account_suffix) + \"\"\"
\n\"\"\" + str(get_last_season_statistics_table(season_battles_wild, season_battles_modern)) + \"\"\"\n\n\"\"\"\n\n\ndef get_tournament_results(tournaments_info, account_name=None):\n account_suffix = \"\"\n if account_name:\n account_suffix = \" (\" + str(account_name) + \")\"\n\n if not tournaments_info.empty:\n return \"\"\"\n

\n![tournament divider1.png](https://files.peakd.com/file/peakd-hive/beaker007/23u5vZxRCDsEy53q1Rd2sXkXvnAg94fBPj2kCVNoPnjVDiyQfiPecgCJMvoSdqwe4vjQp.png)\n\n##
Tournaments\"\"\" + str(account_suffix) + \"\"\"
\n\"\"\" + str(get_tournament_info(tournaments_info)) + \"\"\" \n\n\"\"\"\n return \"\"\n\n\ndef get_last_season_earning_costs(account, season_info_store, last_season_rewards, skip_zeros, account_name=None):\n account_suffix = \"\"\n if account_name:\n account_suffix = \" (\" + str(account_name) + \")\"\n\n return \"\"\"\n

\n![Earnings divider.png](https://files.peakd.com/file/peakd-hive/beaker007/23u5tAfbYKhy3zti8o5cVxxgE2LfnjkAV4xZtm1CLAqpJL9zzEF67C7Ec8Tx6b7odFvvK.png)\n##
Earnings and costs\"\"\" + str(account_suffix) + \"\"\"
\n\"\"\" + str(get_last_season_earnings_table(account, season_info_store, last_season_rewards, skip_zeros)) + \"\"\"\n\n##
Costs
\n\"\"\" + str(get_last_season_costs_table(account, season_info_store, skip_zeros)) + \"\"\" \n \"\"\"\n\n\ndef get_last_season_rewards(last_season_rewards, account_name=None):\n account_suffix = \"\"\n if account_name:\n account_suffix = \" (\" + str(account_name) + \")\"\n\n if not last_season_rewards.empty:\n reward_cards = last_season_rewards[(last_season_rewards['type'] == 'reward_card')]\n else:\n reward_cards = last_season_rewards\n\n return \"\"\"\n##
Cards Earned\"\"\" + str(account_suffix) + \"\"\"
\n\"\"\" + str(get_card_table(reward_cards)) + \"\"\"\n\n##
Potions/Packs earned\"\"\" + str(account_suffix) + \"\"\"
\n\"\"\" + str(get_rewards_potion_packs_table(last_season_rewards)) + \"\"\" \n \"\"\"\n\n\ndef get_last_season_market_transactions(purchases_cards, sold_cards, account_name=None):\n account_suffix = \"\"\n if account_name:\n account_suffix = \" (\" + str(account_name) + \")\"\n\n return \"\"\"\n

\n![Card Market divider.png](https://files.peakd.com/file/peakd-hive/beaker007/23tGyBstuQdzC1Pjv1CiAvt9S3W6sfo5qzCTa6Uv2mQTpfHkwkQ89YxncGYmqsrpynjEv.png)\n\n##
Cards Purchased\"\"\" + str(account_suffix) + \"\"\"
\nNote: Completed splex.gg and peakmonsters bids are not in this overview; those cards are purchased by other accounts.\n\n\"\"\" + str(get_card_table(purchases_cards, True)) + \"\"\" \n\n\n##
Cards Sold\"\"\" + str(account_suffix) + \"\"\"
\nNote: Only cards that are listed and sold in this season are displayed here.\n\"\"\" + str(get_card_table(sold_cards, True)) + \"\"\" \n\n\"\"\"\n\n\ndef get_account_introduction(account_names, previous_season_id):\n result = \"Tracking my result for season \" + str(previous_season_id) + \" : \" \\\n + str(get_account_names_str(account_names)) + \"\\n\\n\"\n return result\n\n\ndef get_account_names_str(account_names):\n result = \"\"\n for account_name in account_names:\n result += str(account_name)\n if account_name != account_names[-1]:\n result += \", \"\n return result\n\n\ndef write_blog_post(account_names,\n season_info_store,\n last_season_rewards_dict,\n tournaments_info_dict,\n purchases_cards_dict,\n sold_cards_dict,\n previous_season_id,\n skip_zeros=True):\n single_account = (len(account_names) == 1)\n post = get_account_introduction(account_names, previous_season_id)\n post += get_introduction_chapter(account_names)\n\n wild_battle_df = season_info_store['wild_battle']\n modern_battle_df = season_info_store['modern_battle']\n for account_name in account_names:\n # If there is only one account (a single post), do not use the account name in the post.\n if single_account:\n print_account_name = None\n else:\n print_account_name = account_name\n\n post += get_plot_placeholder(account_name=print_account_name)\n post += get_last_season_results(wild_battle_df.loc[wild_battle_df.player == account_name],\n modern_battle_df.loc[modern_battle_df.player == account_name],\n previous_season_id,\n account_name=print_account_name)\n post += get_tournament_results(tournaments_info_dict[account_name],\n account_name=print_account_name)\n post += get_last_season_earning_costs(account_name,\n season_info_store,\n last_season_rewards_dict[account_name],\n skip_zeros,\n account_name=print_account_name)\n post += get_last_season_market_transactions(purchases_cards_dict[account_name],\n sold_cards_dict[account_name],\n account_name=print_account_name)\n post += get_last_season_rewards(last_season_rewards_dict[account_name],\n account_name=print_account_name)\n\n # The closure chapter is appended exactly once, whether the post covers one account or several.\n post += get_closure_chapter()\n return post","repo_name":"gamerbeaker007/splinterlands-statistics","sub_path":"src/utils/hive_blog.py","file_name":"hive_blog.py","file_ext":"py","file_size_in_byte":28395,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"44986267501","text":"from nvdomain.printer import Printer\n\nLOCATIONS = {\n \"Amsterdam\": \"Amsterdam\",\n \"Anvers \": \"Antwerp\",\n \"Anvers.\": \"Antwerp\",\n \"Anversa\": \"Antwerp\",\n \"Augusta\": \"Augsburg\",\n \"Bologna\": \"Bologna\",\n \"Bracciano\": \"Bracciano\",\n \"Cremona\": \"Cremona\",\n \"Cologne\": \"Cologne\",\n \"Dilinga\": \"Dillingen\",\n \"Dresda\": \"Dresden\",\n \"Duaco\": \"Douai\",\n \"Duaci\": \"Douai\",\n \"Ferrara\": \"Ferrara\",\n \"Firenze\": \"Florence\",\n \"Fiorenza\": \"Florence\",\n \"Francoforti\": \"Frankfurt\",\n \"Lovain\": \"Leuven\",\n \"Louvain\": \"Leuven\",\n \"Lucca\": \"Lucca\",\n \"Mantova\": \"Mantua\",\n \"Milan\": \"Milan\",\n \"Milano\": \"Milan\",\n \"Monaco\": \"Munich\",\n \"Monachij\": \"Munich\",\n \"Napoli\": \"Naples\",\n \"Noribergae\": \"Nuremberg\",\n \"Norimberga\": \"Nuremberg\",\n \"Norimbergae\": \"Nuremberg\",\n \"Nürnberg\": \"Nuremberg\",\n \"Orvieto\": \"Orvieto\",\n \"Palermo\": \"Palermo\",\n \"Paris\": \"Paris\",\n \"Parigi\": \"Paris\",\n \"Perugia\": \"Perugia\",\n \"Roma\": \"Rome\",\n 
\"Rotterdamo\": \"Rotterdam\",\n \"Rotterodamo\": \"Rotterdam\",\n \"Vinegia\": \"Venice\",\n \"Vineggia\": \"Venice\",\n \"Vinetia\": \"Venice\",\n \"Verona\": \"Verona\",\n \"Venet.\": \"Venice\",\n \"Veneta\": \"Venice\",\n \"Venegia\": \"Venice\",\n \"Venetia\": \"Venice\",\n \"Venetijs\": \"Venice\",\n \"Venetiis\": \"Venice\"\n}\n\nMONTHS = {\n \"I\": \"Jan.\",\n \"II\": \"Feb.\",\n \"III\": \"Mar.\",\n \"IV\": \"Apr.\",\n \"V\": \"May\",\n \"VI\": \"June\",\n \"VII\": \"July\",\n \"VIII\": \"Aug.\",\n \"IX\": \"Sept.\",\n \"X\": \"Oct.\",\n \"XI\": \"Nov.\",\n \"XII\": \"Dec.\"\n}\n\nPRINTERS = {\n \"Henrico Aertssens\": Printer(\"Henrico Aertssens\"),\n \"Pierre Attaingnant\": Printer(\"Pierre Attaingnant\"),\n \"Pierre Attaignant\": Printer(\"Pierre Attaingnant\"),\n \"Ricciardo Amadino\": Printer(\"Ricciardo Amadino\"),\n \"Giorgio Angelieri\": Printer(\"Giorgio Angelieri\"),\n \"Vittorio Baldini\": Printer(\"Vittorio Baldini\"),\n \"Pierre Ballard\": Printer(\"Pierre Ballard\"),\n \"Pietro Ballard\": Printer(\"Pierre Ballard\"),\n \"P. Ballard\": Printer(\"Pierre Ballard\"),\n \"Roberto Ballard\": Printer(\"Robert Ballard\"),\n \"Robert Ballard\": Printer(\"Robert Ballard\"),\n \"R. Ballard\": Printer(\"Robert Ballard\"),\n \"Antonio Barré\": Printer(\"Antonio Barré\"),\n \"Antonio Barre\": Printer(\"Antonio Barré\"),\n \"Barezzo Barezzi\": Printer(\"Barezzo Barezzi\"),\n \"Matthijs Bastiansz\": Printer(\"Matthijs Bastiansz\"),\n \"Jean Bellère\": Printer(\"Jean Bellère\"),\n \"Jean Bellere\": Printer(\"Jean Bellère\"),\n \"Giovanni Bellero\": Printer(\"Jean Bellère\"),\n \"Amadeo Belmonte\": Printer(\"Amadeo Belmonte\"),\n \"Ottavio Beltrano\": Printer(\"Ottavio Beltrano\"),\n \"Ottavio Beltramo\": Printer(\"Ottavio Beltrano\"),\n \"Adam Berg\": Printer(\"Adam Berg\"),\n \"Adamo Berg\": Printer(\"Adam Berg\"),\n \"Adamus Berg\": Printer(\"Adam Berg\"),\n \"Melchior Bergen\": Printer(\"Melchior Bergen\"),\n \"Melchio Bergen\": Printer(\"Melchior Bergen\"),\n \"Gio. Francesco Besozzo\": Printer(\"Giovanni Francesco Besozzo\"),\n \"Vincenzo Bianchi\": Printer(\"Vincenzo Bianchi\"),\n \"Antonio Blado\": Printer(\"Antonio Blado\"),\n \"Giovanni Bogardo\": Printer(\"Giovanni Bogardo\"),\n \"Scipione Bonino\": Printer(\"Scipione Bonino\"),\n \"Novello de Bonis\": Printer(\"Novello de Bonis\"),\n \"Pietro Brea\": Printer(\"Pietro Brea\"),\n \"Giovanni De Bulghat\": Printer(\"Giovanni De Bulghat\"),\n \"Henrigo De Campis\": Printer(\"Henrico De Campis\"),\n \"Henrico De Campis\": Printer(\"Henrico De Campis\"),\n \"Matteo Cancer\": Printer(\"Matteo Cancer\"),\n \"Giacomo Carlino\": Printer(\"Giovanni Giacomo Carlino\"),\n \"Iacomo Carlino\": Printer(\"Giovanni Giacomo Carlino\"),\n \"G. C. Carlino\": Printer(\"Giovanni Giacomo Carlino\"),\n \"Pietro Cecconcelli\": Printer(\"Pietro Cecconcelli\"),\n \"Everardo Cloppenburch\": Printer(\"Everardo Cloppenburch\"),\n \"Francesco Dalle Donne\": Printer(\"Francesco Dalle Donne\"),\n \"Francesco dalle Donne\": Printer(\"Francesco Dalle Donne\"),\n \"Valerio Dorico\": Printer(\"Valerio Dorico\"),\n \"Valerio et Luygi\": Printer(\"Valerio Dorico\"),\n \"Luygi Dorici\": Printer(\"Luigi Dorico\"),\n \"Andrea Fei\": Printer(\"Andrea Fei\"),\n \"Gratiadio Ferioli\": Printer(\"Gratiadio Ferioli\"),\n \"Gio. 
Antonio de Franceschi\": Printer(\"Giovanni Antonio de Franceschi\"),\n \"Alessandro Gardano\": Printer(\"Alessandro Gardano\"),\n \"Alessandro Gardane\": Printer(\"Alessandro Gardano\"),\n \"Angelo Gardano\": Printer(\"Angelo Gardano\"),\n \"Angelum Gardanum\": Printer(\"Angelo Gardano\"),\n \"Antonio Gardano\": Printer(\"Antonio Gardano\", \"Venice\"),\n \"Antonio Gardane\": Printer(\"Antonio Gardano\", \"Venice\"),\n \"d'Antonio Gardane\": Printer(\"Antonio Gardano\", \"Venice\"),\n \"Antoine Gardane\": Printer(\"Antonio Gardano\", \"Venice\"),\n \"Antonium Gardane\": Printer(\"Antonio Gardano\", \"Venice\"),\n \"Ant. Gard.\": Printer(\"Antonio Gardano\", \"Venice\"),\n \"Catherine Gerlach\": Printer(\"Catherine Gerlach\"),\n \"Catharinae Gerlachiae\": Printer(\"Catherine Gerlach\"),\n \"Gerardo Greuenbruch\": Printer(\"Gerardo Greuenbruch\"),\n \"Giuseppe Guglielmo\": Printer(\"Giuseppe Guglielmo\"),\n \"Antonio Hucher\": Printer(\"Antonio Hucher\"),\n \"Paulo Kaufmann\": Printer(\"Paul Kauffmann\"),\n \"Paulo Kauffman\": Printer(\"Paul Kauffmann\"),\n \"Paulus Kauffmannus\": Printer(\"Paul Kauffmann\"),\n \"Paul Kauffmanns\": Printer(\"Paul Kauffmann\"),\n \"Gio. Batista Landini\": Printer(\"Giovanni Batista Landini\"),\n \"Batista Landini\": Printer(\"Giovanni Batista Landini\"),\n \"Filippo Lomazzo\": Printer(\"Filippo Lomazzo\"),\n \"Philippum Lomatium\": Printer(\"Filippo Lomazzo\"),\n \"Ambrosio Magnetta\": Printer(\"Ambrosio Magnetta\"),\n \"Bartholomeo Magni\": Printer(\"Bartolomeo Magni\"),\n \"Bartolomeo Magni\": Printer(\"Bartolomeo Magni\"),\n \"Bartholomaei Magni\": Printer(\"Bartolomeo Magni\"),\n \"Bartholomei Magni\": Printer(\"Bartolomeo Magni\"),\n \"Paul Marceau\": Printer(\"Paul Marceau\"),\n \"Cristofano Marescotti\": Printer(\"Cristofano Marescotti\"),\n \"Cristoforo Marescotti\": Printer(\"Cristofano Marescotti\"),\n \"Giorgio Marescotti\": Printer(\"Giorgio Marescotti\"),\n \"Paulus Matthysz\": Printer(\"Paulus Matthysz\"),\n \"Paulus Matthihsz\": Printer(\"Paulus Matthysz\"),\n \"Pietro Maria Marchetti\": Printer(\"Pietro Maria Marchetti\"),\n \"Gio. 
Battista Maringo\": Printer(\"Giovanni Battista Maringo\"),\n \"Paolo Masotti\": Printer(\"Paolo Masotti\"),\n \"Mascardi\": Printer(\"Giacomo Mascardi\"),\n \"Adamo Meltzer\": Printer(\"Adam Meltzer\"),\n \"Claudio Merulo\": Printer(\"Claudio Merulo\"),\n \"Claudio da Correggio\": Printer(\"Claudio Merulo\"),\n \"Claudio da Coreggio\": Printer(\"Claudio Merulo\"),\n \"Gioseffo Micheletti\": Printer(\"Gioseffo Micheletti\"),\n \"Iacopo Moderno\": Printer(\"Jacques Moderne\"),\n # \"Iacobum Modernum\": Printer(\"Jacques Moderne\"),\n \"Pier-maria Monti\": Printer(\"Pietro Maria Monti\"),\n \"Pietro Maria Monti\": Printer(\"Pietro Maria Monti\"),\n \"Giacomo Monti\": Printer(\"Giacomo Monti\"),\n \"Lodovico Monza\": Printer(\"Lodovico Monza\"),\n \"Francesco Moschenio\": Printer(\"Francesco Moscheni\"),\n \"Francesco et Simone Moscheni\": Printer(\"Francesco and Simone Moscheni\"),\n \"Nicolò Mutij\": Printer(\"Nicolò Mutii\"),\n \"Francesco Osanna\": Printer(\"Francesco Osanna\"),\n \"e Paci,\": Printer(\"Giacinto Paci\"),\n \"Pietroiacomo Petrucci\": Printer(\"Pietro Giacomo Petrucci\"),\n \"Octavianum Petrutium\": Printer(\"Ottaviano Petrucci\"),\n \"Pierre Phalese\": Printer(\"Pierre Phalèse\"),\n \"Petro Phalesio\": Printer(\"Pierre Phalèse\"),\n \"Pietro Phalesio\": Printer(\"Pierre Phalèse\"),\n \"i Pieri,\": Printer(\"Bernardino Pieri\"),\n \"Christofle Plantin\": Printer(\"Christophe Plantin\"),\n \"Plinio Pietrasanta\": Printer(\"Plinio Pietrasanta\"),\n \"Zanobi Pignoni\": Printer(\"Zanobi Pignoni\"),\n \"Pacifico Pontio\": Printer(\"Pacifico da Ponte\"),\n \"Cesare Porro\": Printer(\"Cesare Porro\"),\n \"Cesare Pozzo\": Printer(\"Cesare Pozzo\"),\n \"Gio. Pretorio\": Printer(\"Giovanni Pretorio\"),\n \"Francesco Rampazetto\": Printer(\"Francesco Rampazetto\"),\n \"Francesco Rampazzetto\": Printer(\"Francesco Rampazetto\"),\n \"Francesco Ramazetto\": Printer(\"Francesco Rampazetto\"),\n \"Alessandro Raverij\": Printer(\"Alessandro Raverii\"),\n \"Alessandro Raverii\": Printer(\"Alessandro Raverii\"),\n \"Gioseffo Ricci\": Printer(\"Gioseffo Ricci\"),\n \"Battista Robletti\": Printer(\"Giovanni Battista Robletti\"),\n \"Gio. Batt. 
Robletti\": Printer(\"Giovanni Battista Robletti\"),\n \"heredi di Francesco Rossi\": Printer(\"heredi di Francesco Rossi\"),\n \"Giovanni de' Rossi\": Printer(\"Giovanni de' Rossi\"),\n \"Adrian le Roy\": Printer(\"Adrian le Roy\"),\n \"Adriano le Roy\": Printer(\"Adrian le Roy\"),\n \"Rinaldo Ruuli\": Printer(\"Rinaldo Ruuli\"),\n \"Vincenzo Sabbio\": Printer(\"Vincenzo Sabbio\"),\n \"Gioseppe Sala\": Printer(\"Giuseppe Sala\"),\n \"Valerio Salviano\": Printer(\"Valerio Salviano\"),\n \"Valentin Schönigk\": Printer(\"Valentin Schönigk\"),\n \"Gerolamo Scotto\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Giolamo Scotto\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Girolamo Scotto\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Girolamo Scoto\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Gierolamo Scotto\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Gieronimo Scotto\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Hieronymo Scotto\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Hieronymus Scotus\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Hieronymum Scotum\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Hieronymum Scottum\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Hieronimum Scotum\": Printer(\"Girolamo Scotto\", \"Venice\"),\n \"Marino Silvani\": Printer(\"Marino Silvani\"),\n \"Luca Antonio Soldi\": Printer(\"Luca Antonio Soldi\"),\n \"Gio. Battista Sottile\": Printer(\"Giovanni Battista Sottile\"),\n \"Nicolao Stenio\": Printer(\"Nicolao Stenio\", \"Frankfurt\"),\n \"Tylman Susato\": Printer(\"Tielman Susato\"),\n \"Tielman Susato\": Printer(\"Tielman Susato\"),\n \"Tilman Susato\": Printer(\"Tielman Susato\"),\n \"Sottile\": Printer(\"Giovanni Battista Sottile\"),\n \"Simon Tini\": Printer(\"Simon Tini\"),\n \"Simonus Tini\": Printer(\"Simon Tini\"),\n \"Agostino Tradate\": Printer(\"Agostino Tradate\"),\n \"Paulo Tortorino\": Printer(\"Paulo Tortorino\"),\n \"Simone Verovio\": Printer(\"Simone Verovio\"),\n \"Giacomo Vincenti\": Printer(\"Giacomo Vincenti\"),\n \"Giacomo Vincenci\": Printer(\"Giacomo Vincenti\"),\n \"Giacomo Vincenzi\": Printer(\"Giacomo Vincenti\"),\n \"Iacomo Vincenti\": Printer(\"Giacomo Vincenti\"),\n \"Giamo (sic) Vincenti\": Printer(\"Giacomo Vincenti\"),\n \"Alessandro Vincenti\": Printer(\"Alessandro Vincenti\"),\n \"Alessanro Vincenti\": Printer(\"Alessandro Vincenti\"),\n \"Constantino Vitale\": Printer(\"Constantino Vitale\"),\n \"Costantino Vitale\": Printer(\"Constantino Vitale\"),\n \"C. Vitale\": Printer(\"Constantino Vitale\"),\n \"Joh. Christoph Weigel\": Printer(\"Johann Christoph Weigel\", \"Nuremberg\"),\n \"Isaaco Wesbergio\": Printer(\"Isaaco Wesbergio\"),\n \"Vvillem Iansz. 
Vvijngaert\": Printer(\"Willem Jansz Wyngaert\"),\n \"Bartolomeo Zannetti\": Printer(\"Bartolomeo Zanetti\"),\n \"Bartholomeo Zannetti\": Printer(\"Bartolomeo Zanetti\"),\n \"Luigi Zannetti\": Printer(\"Luigi Zanetti\"),\n \"Alessandro Zatta\": Printer(\"Alessandro Zatta\")\n}\n\nCATALOG_VARIANTS = {\n \"RISM\": \"RISM I\",\n \"Brown\": \"Brown\",\n \"Sartori\": \"Sartori\",\n \"Haar\": \"Haar\",\n \"Pass\": \"Pass\",\n \"RMI\": \"RMI\",\n \"Boetticher\": \"Boetticher\",\n \"Lesure\": \"Lesure\",\n \"Lesure e\": \"Lesure & Thibault\"\n}\n# missing Sartori & Petrucci\n\nMUSIC_GENRES = {\n \"d'Aria\": \"Arias\",\n \"Aria\": \"Arias\",\n \"Balletti\": \"Balletti\",\n \"Balletten\": \"Balletti\",\n \"Cantate\": \"Songs\",\n \"Canzonette\": \"Canzonette\",\n # \"Canzoni\": \"Canzons\",\n # \"Canzon spirituale\": \"Spiritual songs\",\n # \"Canzoni francese\": \"Chansons\",\n \"Chanson\": \"Chansons\",\n \"Chanson spirituale\": \"Spiritual Chansons\",\n \"Concenti musicali\": \"Concerted music\",\n \"Lieder\": \"Lieder\",\n \"Madrigale\": \"Madrigals\",\n \"Madregali\": \"Madrigals\",\n \"Madrigal\": \"Madrigals\",\n \"Madrigali spirituale\": \"Spiritual Madrigals\",\n \"Mascherate\": \"Mascherata\",\n \"Motetti\": \"Motets\",\n \"Musiche\": \"Musiche\",\n \"Ricercari\": \"Ricercars\",\n \"Vilanelle\": \"Villanelle\",\n \"Villanelle\": \"Villanelle\",\n \"Villanesche\": \"Villanesche\",\n \"Villotte alla padoana\": \"Paduan Villotte\",\n \"Villotte alla Napolitane\": \"Neapolitan Villotte\",\n \"alcune Napolitane\": \"Neapolitan Villotte\",\n}\n\nITALIAN_NUMBERS = {\n \" Uno \": 1,\n \" Una \": 1,\n \" voce sola\": 1,\n \" Due \": 2,\n \" Doi \": 2,\n \" Tre \": 3,\n \" Quattro \": 4,\n \" Quatro \": 4,\n \" Cinque \": 5,\n \" Sei \": 6,\n \" Sette \": 7,\n \" Otto \": 8,\n \" Nove \": 9,\n \" Dieci \": 10,\n \" Undici \": 11,\n \" Dodici \": 12,\n \" Tredici \": 13,\n \" Quattordici \": 14,\n \" Quindici \": 15,\n \" Sedici \": 16,\n \" Diciassette \": 17,\n \" Diciotto \": 18,\n \" Diciannove \": 19,\n \" Venti \": 20,\n \" Uno, \": 1,\n \" voce sola, \": 1,\n \" Due, \": 2,\n \" Tre, \": 3,\n \" Quattro, \": 4,\n \" Quatro, \": 4,\n \" Cinque, \": 5,\n \" Sei, \": 6,\n \" Sette, \": 7,\n \" Otto, \": 8,\n \" Nove, \": 9,\n \" Dieci, \": 10,\n \" Undici, \": 11,\n \" Dodici, \": 12,\n \" Tredici, \": 13,\n \" Quattordici, \": 14,\n \" Quindici, \": 15,\n \" Sedici, \": 16,\n \" Diciassette, \": 17,\n \" Diciotto, \": 18,\n \" Diciannove, \": 19,\n \" Venti, \": 20,\n \" 1 \": 1,\n \" 2 \": 2,\n \" 3 \": 3,\n \" 4 \": 4,\n \" 5 \": 5,\n \" 6 \": 6,\n \" 7 \": 7,\n \" 8 \": 8,\n \" 9 \": 9,\n \" 10 \": 10,\n \" 11 \": 11,\n \" 12 \": 12,\n \" 13 \": 13,\n \" 14 \": 14,\n \" 1, \": 1,\n \" 2, \": 2,\n \" 3, \": 3,\n \" 4, \": 4,\n \" 5, \": 5,\n \" 6, \": 6,\n \" 7, \": 7,\n \" 8, \": 8,\n \" 9, \": 9,\n \" 10, \": 10,\n \" 11, \": 11,\n \" 12, \": 12,\n \" 13, \": 13,\n \" 14, \": 14,\n \" 1. \": 1,\n \" 2. \": 2,\n \" 3. \": 3,\n \" 4. \": 4,\n \" 5. \": 5,\n \" 6. \": 6,\n \" 7. \": 7,\n \" 8. \": 8,\n \" 9. \": 9,\n \" 10. \": 10,\n \" 11. \": 11,\n \" 12. \": 12,\n \" 13. \": 13,\n \" 14. \": 14,\n \" I. \": 1,\n \" II. \": 2,\n \" III. \": 3,\n \" IV. \": 4,\n \" V. \": 5,\n \" VI. \": 6,\n \" VII. \": 7,\n \" VIII. \": 8,\n \" IX. \": 9,\n \" X. \": 10,\n \" XI. \": 11,\n \" XII. \": 12,\n \" XIII. \": 13,\n \" XIV. \": 14,\n \" XV. \": 15,\n \" XVI. 
\": 16,\n \" I, \": 1,\n \" II, \": 2,\n \" III, \": 3,\n \" IV, \": 4,\n \" V, \": 5,\n \" VI, \": 6,\n \" VII, \": 7,\n \" VIII, \": 8,\n \" IX, \": 9,\n \" X, \": 10,\n \" XI, \": 11,\n \" XII, \": 12,\n \" XIII, \": 13,\n \" XIV, \": 14,\n \" XV, \": 15,\n \" XVI, \": 16,\n\n}\n\nITALIAN_NUMBERS_ORDINAL = {\n \" Primo\": 1,\n \" Secondo\": 2,\n \" Terzo\": 3,\n \" Quarto\": 4,\n \" Quinto\": 5,\n \" Sesto\": 6,\n \" Settimo\": 7,\n \" l'Ottavo\": 8,\n \" Ottavo\": 8,\n \" Nono\": 9,\n \" Decimo\": 10,\n \" l'Undecimo\": 11,\n \" Undecimo\": 11,\n \" Undicesimo\": 11,\n \" Dodicesimo\": 12,\n \" Duodecimo\": 12,\n \" Tredicescimo\": 13,\n \" Quattordicesimo\": 14,\n \" Quartadecima\": 14,\n \" Decimaquinta\": 15,\n \" Decimasettima\": 17\n}\n","repo_name":"walkerdb/renpubs","sub_path":"nvdomain/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":14932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36879277317","text":"from datetime import datetime, timedelta, time, timezone\nfrom dotenv import load_dotenv\nfrom discord.ext import commands, tasks\nimport discord\nimport investpy\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\nimport pandas as pd\nimport re\nimport csv as csvLib\n\nload_dotenv()\n\nintents = discord.Intents.default()\nintents.message_content = True\nclient = discord.Client(intents=intents)\ntree = discord.app_commands.CommandTree(client)\n\nhigh_importance_events = set()\n\n@tree.command(name = \"today\", description = \"get todays events\", guild=discord.Object(929186393253634058))\nasync def get_today_calendar(interaction):\n await interaction.response.defer()\n res = await get_calendar_data()\n await interaction.followup.send(file=discord.File('res.png'))\n return\n \n\n@tree.command(name = \"tomorrow\", description = \"get tomorrows events\", guild=discord.Object(929186393253634058))\nasync def get_tomorrows_calendar(interaction):\n await interaction.response.defer()\n res = await get_calendar_data(False)\n await interaction.response.send_message(file=discord.File('res.png'))\n return\n\n@tasks.loop(minutes=15)\nasync def get_high_vol_and_send():\n if (len(high_importance_events) == 0):\n print('no high vol events left today')\n else:\n print(high_importance_events)\n for event in high_importance_events:\n if (event[0] <= datetime.now().time()):\n print('entered push for news event')\n print(high_importance_events)\n high_importance_events.remove(event)\n print('popped time')\n print(high_importance_events)\n await get_calendar_data()\n await message_channel.send(file=discord.File('res.png'))\n break\n return\n\nlooping_time = time(hour=9)\n#@tasks.loop(seconds=5.0)\n@tasks.loop(time=looping_time)\nasync def send_cal():\n message_channel = client.get_channel(int(os.getenv('TARGET_CHANNEL')))\n print(f\"Got channel {message_channel}\")\n res = await get_calendar_data()\n if (len(res)) >= 2000:\n await message_channel.send('result over 2000 chars')\n await message_channel.send(file=discord.File('res.png'))\n print('sent news')\n\n@send_cal.before_loop\nasync def before():\n await client.wait_until_ready()\n\n@client.event\nasync def on_ready():\n print(f'We have logged in as {client.user}')\n await tree.sync( guild=discord.Object(929186393253634058))\n send_cal.start()\n get_high_vol_and_send.start()\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n if message.content.startswith('$today'):\n 
res = await get_calendar_data()\n await message.channel.send(file=discord.File('res.png'))\n\n elif message.content.startswith('$tomorrow'):\n res = await get_calendar_data(False)\n await message.channel.send(file=discord.File('res.png'))\n\n\nasync def get_calendar_data(today=True):\n if today:\n df = investpy.news.economic_calendar(time_zone=\"GMT -4:00\", time_filter='time_only', countries=['United States'], importances=None, categories=None)\n else:\n tom = (datetime.now(timezone(timedelta(hours=-5), 'EST')) + timedelta(1)).strftime('%d/%m/%Y')\n day_after_tom = (datetime.now(timezone(timedelta(hours=-5), 'EST')) + timedelta(2)).strftime('%d/%m/%Y')\n df = investpy.news.economic_calendar(time_zone=None, time_filter='time_only', countries=['United States'], importances=None, categories=None, from_date=tom, to_date=day_after_tom)\n df['date'] = pd.to_datetime(df['date'], dayfirst=True)\n df = df[df['date'] != pd.to_datetime((datetime.now(timezone(timedelta(hours=-5), 'EST')) + timedelta(2)).strftime('%Y-%m-%d'))]\n\n df = df.drop(['id', 'zone', 'date', 'currency'], axis=1)\n new_cols = [\"time\", \"importance\", \"forecast\", \"previous\", \"actual\", \"event\"]\n df = df.reindex(columns=new_cols)\n csv = df.to_csv(index=False, na_rep='')\n\n # Split the CSV data by newlines\n csv_lines = csv.strip().split('\\n')\n\n # Create a CSV reader\n reader = csvLib.reader(csv_lines)\n\n # Get the headers from the first row\n headers = next(reader)\n\n # Iterate over each row\n rows = []\n for row in reader:\n rows.append(row)\n\n # Convert data into separate lists for each column\n time = [row[0] for row in rows]\n importance = [row[1] for row in rows]\n forecast = [row[2] for row in rows]\n previous = [row[3] for row in rows]\n actual = [row[4] for row in rows]\n event = [row[5] for row in rows]\n\n # Create the plot\n cellText = np.array([time, importance, event, previous, forecast, actual]).T.tolist()\n \n if today==True:\n for row in cellText:\n if row[1].lower() == 'high' and row[5]=='' and row[3]!= '':\n event_time = datetime.strptime(row[0], '%H:%M').time()\n high_importance_events.add((event_time,row[1]))\n \n # Get the index of the \"importance\" column\n importance_index = headers.index(\"importance\")\n\n # Remove the \"importance\" column from cellText\n cellText = [row[:importance_index] + row[importance_index + 1:] for row in cellText]\n\n # Remove the \"importance\" header from headers\n headers.pop(importance_index)\n\n fig, ax = plt.subplots(figsize=(18, 12))\n ax.axis('off') # Turn off the axes\n\n table = ax.table(\n cellText=cellText,\n colLabels=['Time',f\"Event ({datetime.now().strftime('%d-%m-%Y')})\", 'Previous', 'Forecast', 'Actual'],\n colWidths=[1.5, 1.5, 3, 3, 3, 8],\n cellLoc='center',\n loc='center',\n bbox=[0, 0, 1, 1]\n )\n\n # Adjust the width of the \"events\" column\n table.set_fontsize(20)\n table.auto_set_column_width(col=list(range(len(headers))))\n\n # Adjust the width of the \"time\" column\n color_map = {\n 'high': cm.Reds(0.5), # Light blue\n 'medium': cm.Reds(0.25), # Medium blue\n 'low': cm.Reds(0.1), # Dark blue\n }\n\n for row in range(len(importance)):\n importance_value = importance[row].lower()\n color = color_map.get(importance_value)\n if color:\n for col in range(len(headers)):\n cell = table[row + 1, col]\n cell.set_facecolor(color)\n\n table.scale(1.5, 1.5) # Adjust the scale factor as needed\n for col in range(len(headers)):\n header_cell = table[0, col]\n header_cell.set_text_props(weight='bold')\n\n for i in range(len(importance)):\n 
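# (added note) Row 0 holds the header cells, hence the i+1 offset below; column 1 is the event-name column being left-aligned.\n 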
table[i+1, 1]._loc = 'left'\n table[i+1, 1]._text.set_horizontalalignment('left') \n # Save the image\n plt.savefig('res.png', bbox_inches='tight', dpi=300)\n plt.close()\n\n return 'res.png'\n\nclient.run(os.getenv(\"CLIENT_TOKEN\"))\n","repo_name":"sf8193/econ-calendar","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74748901600","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nimport utils\nimport config\n\n\ntrain_set, test_set = utils.get_dataset()\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(252, 312)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(30, 20, 191, 31))\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 60, 191, 32))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 100, 191, 32))\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_4.setGeometry(QtCore.QRect(30, 140, 191, 32))\n self.pushButton_4.setObjectName(\"pushButton_4\")\n\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit.setGeometry(QtCore.QRect(40, 180, 171, 21))\n self.lineEdit.setObjectName(\"lineEdit\")\n\n self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_5.setGeometry(QtCore.QRect(30, 210, 191, 32))\n self.pushButton_5.setObjectName(\"pushButton_5\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 252, 24))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Question 5\"))\n self.pushButton.setText(_translate(\n \"MainWindow\", \"1. Show Train Images\"))\n self.pushButton_2.setText(_translate(\n \"MainWindow\", \"2. Show HyperParameter\"))\n self.pushButton_3.setText(_translate(\n \"MainWindow\", \"3. Show Model Structure\"))\n self.pushButton_4.setText(_translate(\"MainWindow\", \"4. Show Accuracy\"))\n self.pushButton_5.setText(_translate(\"MainWindow\", \"5. 
Test\"))\n\n self.pushButton.clicked.connect(lambda: q5_show_data())\n self.pushButton_2.clicked.connect(lambda: q5_show_hyperparams())\n self.pushButton_3.clicked.connect(lambda: q5_show_model())\n self.pushButton_4.clicked.connect(lambda: q5_show_accuracy())\n self.pushButton_5.clicked.connect(lambda: q5_inference(int(self.lineEdit.text())))\n\n\ndef q5_show_data():\n fig, axs = plt.subplots()\n fig.subplots_adjust(hspace=.3, wspace=.3)\n\n for i, (image, label) in enumerate(train_set, start=1):\n if i > 9:\n break\n plt.subplot(3, 3, i)\n plt.axis('off')\n plt.title(config.class_dict[label])\n plt.imshow(np.moveaxis(image.numpy(), 0, -1))\n plt.show()\n\ndef q5_show_hyperparams():\n print(f\"hyperparameters:\\n\"\n f\"batch size: {config.BATCH_SIZE}\\n\"\n f\"learning rate: {config.LR}\\n\"\n f\"optimizer: SGD\\n\")\n\ndef q5_show_model():\n from torchsummary import summary\n from model import VGG16\n summary(VGG16(), (3, 32, 32))\n\ndef q5_show_accuracy():\n import os\n os.system('tensorboard --logdir=\"./log\"')\n\ndef q5_inference(index=0):\n import torch\n from model import VGG16\n\n print('loading model ...')\n model = VGG16()\n model.load_state_dict(torch.load('model/net_9.pth', map_location=torch.device('cpu')))\n\n print('loading data ...')\n image, label = test_set[index]\n\n print('inferencing ...')\n with torch.no_grad():\n model.eval()\n output = torch.nn.functional.softmax(model(image.unsqueeze_(0)), dim=-1).data.numpy()[0]\n plt.figure(figsize=(20, 10))\n plt.subplot(1, 2, 1)\n plt.imshow(np.moveaxis(image[0].numpy(), 0, -1))\n\n plt.subplot(1, 2, 2)\n plt.bar(config.CLASSES, output)\n plt.show()\n\n\n\n\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n","repo_name":"yiting-tom/HW1-CV_DL","sub_path":"q5_window.py","file_name":"q5_window.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16244889940","text":"import subprocess\n\n# Create a list of input video paths\ninput_videos = [\"./masr/masr_0.mp4\", \"./masr/masr_1.mp4\"]\n\n# Create the command to be run by FFmpeg\ncommand = [\"ffmpeg\"]\n\n# Add the input video arguments to the command\nfor input_video in input_videos:\n command.extend([\"-i\", input_video])\n\n# Add the output video argument to the command\ncommand.extend([\"-c\", \"copy\", \"output.mp4\"])\n\n# Run the FFmpeg command\nsubprocess.run(command)","repo_name":"codertjay/clip_yt_video","sub_path":"test_code.py","file_name":"test_code.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"5996401381","text":"def lex(src):\n arr = []\n sb = []\n def _submit():\n if len(sb) > 0:\n arr.append(\"\".join(sb))\n sb.clear()\n esc = False\n commentMode = False\n for c in src:\n if commentMode:\n if c == \"\\n\": commentMode=False; continue\n continue\n if esc:\n esc = False\n print(\"Escaped\", c)\n sb.append(c)\n continue\n if c in \" \\t\\n\\r\":\n _submit()\n continue\n if c == \"\\\\\":\n esc = True\n continue\n if c in \"()[]{}<>\":\n _submit()\n arr.append(c)\n continue\n if c == \"#\":\n commentMode = True\n continue\n sb.append(c)\n _submit()\n return arr\n\ndef _try_parse_cmd(tok, it):\n if tok != \"(\": return\n arr = []\n for subtok in it:\n if subtok == \")\": break\n # Try parse 
inner cmd's\n cmd = _try_parse_cmd(subtok, it)\n if cmd: arr.append(cmd); continue\n # Add everything in\n arr.append(subtok)\n return arr\n\ndef parse(toks):\n def _parse(it):\n arr = []\n for tok in it:\n # Try parse found cmds\n cmd = _try_parse_cmd(tok, it)\n if cmd: arr.append(cmd); continue\n return arr\n return _parse(iter(toks))\n","repo_name":"AldieNightStar/lispy","sub_path":"lispy/lexparse.py","file_name":"lexparse.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35778373356","text":"'''\nauthor : Shubbham Chau \nDate : 30-oct-2021 / 1-sept\n'''\n\n\nimport enum\nfrom src.utils.all_utils import create_dir , genFilesnamepkl , load_config\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom keras_vggface.utils import preprocess_input\nfrom keras_vggface.vggface import VGGFace\nfrom mtcnn import MTCNN\nfrom PIL import Image\nimport numpy as np\nimport argparse\nimport pickle\nimport cv2\nimport os \n\n\n\n\n\nmodel = VGGFace(model='resnet50',include_top=False,input_shape=(224,224,3),pooling='avg')\n\n\ndef detectANDpred(confg1 , confg2 , threshould=0.3):\n '''\n Fun: detectAndpred\n i/p:\n confg1 : structure-config File ( .yaml)\n confg2 : pred-config File ( .yaml)\n confg1 : threshould = 0.7 -> int (changable parameter) \n '''\n conf1 = load_config(confg1)\n pred_confg = load_config(confg2)\n maindir = conf1['internal_ops']['artifact_dir']\n messedUpdir = conf1['internal_ops']['messed_dirPath']\n predImgpath = pred_confg['Prediction']['pred_imgPath']\n nameofPredImg = pred_confg['Prediction']['provideNameofImg'] # will be name of result dir inside the messed up dir \n\n \n # create the dir of pred name\n resultDirFullPath = os.path.join(messedUpdir , nameofPredImg)\n create_dir([resultDirFullPath])\n \n\n # detect the faces from imgs\n\n detector = MTCNN()\n predImg = cv2.imread(predImgpath)\n predImg = cv2.cvtColor(predImg ,cv2.COLOR_BGR2RGB)\n detectionOp = detector.detect_faces(predImg)\n\n x,y,width,height = detectionOp[0]['box'] # ( x,y , width , height) => x2,y2(diagonal_point) (x+width , y+height)\n x1,y1,x2,y2 = x,y,x+width,y+height\n\n\n # create the boxes and crop the img\n detected_face = predImg[y1:y2,x1:x2]\n # extract its features\n image = Image.fromarray(detected_face)\n image = image.resize((224,224))\n face_array = np.asarray(image)\n face_array = face_array.astype('float32') \n expanded_img = np.expand_dims(face_array,axis=0)\n preprocessed_img = preprocess_input(expanded_img)\n result = model.predict(preprocessed_img).flatten()\n\n \n\n # img name section \n dumpeddir = conf1['internal_ops']['dumpeddir'] \n Filesnames_pkl = conf1['internal_ops']['src_pkl_filename'] \n FileNamesPkl_Path = os.path.join(maindir ,dumpeddir,Filesnames_pkl)\n\n # embedding section \n imgfeaturesdir = conf1['internal_ops']['ExtractedFeatures']['imgfeaturesdir'] # embeddingImgdata\n imgFeaturesFilename = conf1['internal_ops']['ExtractedFeatures']['imgFeaturesFilename'] # features_embedding.pkl \n embeddings_path = os.path.join(maindir , imgfeaturesdir , imgFeaturesFilename)\n\n # embeddings \n ImgFilenames = pickle.load(open(file=FileNamesPkl_Path , mode='rb'))\n embeddings = pickle.load(open(file=embeddings_path , mode='rb'))\n similar_imgs_score = []\n for i in range(len(embeddings)):\n similar_imgs_score.append(cosine_similarity(result.reshape(1,-1),embeddings[i].reshape(1,-1))[0][0])\n\n\n for img in sorted( list(enumerate(zip(similar_imgs_score,ImgFilenames))) , 
reverse=True, key=lambda x : x[1][0]) : \n print(img)\n \n \n\n\n \n\n # maximum_score = max(similar_imgs_score)\n # indx = similar_imgs_score.index(maximum_score) # indx of max-score \n\n # # load the filenames.pkl\n #\n # temp_img = ImgFilenames[indx]\n # messedDir = './images' + '/' + temp_img\n \n # temp_img = cv2.imread(messedDir)\n # cv2.imshow('output',temp_img)\n # cv2.waitKey(0)\n\n \n \n\n\n\n\n\n# index_pos = sorted(list(enumerate(similarity)),reverse=True,key=lambda x:x[1])[0][0]\n\n# temp_img = cv2.imread(filenames[index_pos])\n# cv2.imshow('output',temp_img)\n# cv2.waitKey(0)\n# # recommend that image\n\n\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser()\n args.add_argument(\"--config1\" ,\"-c1\" , default='configs/structure.yaml' ) \n args.add_argument(\"--config2\" ,\"-c2\" , default='configs/pred_config.yaml' ) \n parsed_args = args.parse_args()\n try:\n detectANDpred(parsed_args.config1 , parsed_args.config2)\n except Exception as e:\n raise e \n ","repo_name":"shubhamchau222/ImagedirManager","sub_path":"src/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72980261601","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tipoInmueble', '0001_initial'),\n ('inmuebles', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='inmueblesTipo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('valorVenta', models.IntegerField()),\n ('ValorArriendo', models.IntegerField()),\n ('fechaArriendo', models.DateTimeField()),\n ('fechaVenta', models.DateTimeField()),\n ('inmueble', models.ForeignKey(related_name='inmueble', to='inmuebles.Inmuebles', null=True)),\n ('tipo', models.ForeignKey(related_name='tipo', to='tipoInmueble.tipoInmueble', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"javierpedrozaing/inmobiliaria","sub_path":"inmuebles_tipo/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73665492963","text":"#%%\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Linear\nimport matplotlib.pyplot as plt\n\nINPUT_FRAMES = 40\nOUTPUT_FRAMES = 80-INPUT_FRAMES\n\nclass LinearLayer(nn.Module):\n\tdef __init__(self, input_frames, output_frames, n_layer=1, bias=True):\n\t\tsuper().__init__()\n\t\tself.output_frame = output_frames\n\t\tself.n_layer = n_layer\n\n\t\t# layer\n\t\tself.linear_relu_stack = nn.Sequential(\n\t\t\t\t\tLinear(input_frames, 512),\n\t\t\t\t\tnn.ReLU(),\n\t\t\t\t\tnn.Linear(512, 1028),\n\t\t\t\t\tnn.ReLU(),\n\t\t\t\t\tnn.Linear(1028, 512),\n\t\t\t\t\tnn.ReLU(),\n\t\t\t\t\tnn.Linear(512,output_frames),\n\t\t\t\t)\n\n\tdef forward(self, input):\n\t\tz = self.linear_relu_stack(input.float())\n\t\treturn z\n\t\n\tdef normalize(self,batch_INPUT_FRAMES,batch_OUTPUT_FRAMES,number_of_features,number_of_nodes,number_of_trajectories,stats,cur_device):\n\t\tstd,mean = stats\n\t\t\"\"\"\n\t\treturns normalized\n\t\tfeatures_INPUT_FRAMES: input feature tensor for that current batched trajectory\n\t\t\tDim: [trajectory #,features,frames]\n\t\t\ttrajectory #: details which 
trajectory in the current batched trajectories\n\t\t\tfeatures: x,y features \n\t\tfeatures_OUTPUT_FRAMES: output feature tensor for that current batched trajectory\n\t\t\tDim: [trajectory #,features,frames]\n\t\t\ttrajectory #: details which trajectory in the current batched trajectories\n\t\t\tfeatures: x,y features \n\t\t\tframes: details which time frame\n\t\t\"\"\"\n\n\t\tfeatures_INPUT_FRAMES = []\n\t\tfor j, frame_batched_trajectories in enumerate(batch_INPUT_FRAMES):\n\t\t\tcurrent_frame_feature = frame_batched_trajectories.x\n\t\t\tnormalized_frame_feature = (current_frame_feature-mean)/std\n\t\t\tif j == 0:\n\t\t\t\tfeatures_INPUT_FRAMES = torch.reshape(normalized_frame_feature,(number_of_trajectories,number_of_features*number_of_nodes,1))\n\t\t\telse:\n\t\t\t\ttest = torch.reshape(normalized_frame_feature,(number_of_trajectories,number_of_features*number_of_nodes,1))\n\t\t\t\tfeatures_INPUT_FRAMES = torch.cat((features_INPUT_FRAMES,test),dim=2)\n\t\t\n\t\tfeatures_OUTPUT_FRAMES = []\n\t\tfor j, frame_batched_trajectories in enumerate(batch_OUTPUT_FRAMES):\n\t\t\tcurrent_frame_feature = frame_batched_trajectories.x\n\t\t\tnormalized_frame_feature = (current_frame_feature-mean)/std\n\n\t\t\tif j == 0:\n\t\t\t\tfeatures_OUTPUT_FRAMES = torch.reshape(normalized_frame_feature,(number_of_trajectories,number_of_features*number_of_nodes,1))\n\t\t\telse:\n\t\t\t\ttest = torch.reshape(normalized_frame_feature,(number_of_trajectories,number_of_features*number_of_nodes,1))\n\t\t\t\tfeatures_OUTPUT_FRAMES = torch.cat((features_OUTPUT_FRAMES,test),dim=2)\n\t\t\t\t\n\t\treturn features_INPUT_FRAMES.to(cur_device),features_OUTPUT_FRAMES.to(cur_device)\n\t\n# %%\n\n# Training Pipe \n# move to main.py after validation\nfrom data_loader import *\ncur_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# initialize preliminary information and load data\ntorch.manual_seed(42)\n\nBATCH_SIZE = 2\nnum_of_epochs = 100\ntrain_loader,test_loader,train_dataset,test_dataset = load_data(batch_size=BATCH_SIZE,shuffle=False)\nprint(f\"Length of train loader: {len(train_loader)}\")\nprint(f\"Length of test loader: {len(test_loader)}\\n\")\n\nnumber_of_nodes = train_dataset.num_nodes\nnumber_of_features = train_dataset.num_node_features\nstats_train = find_mean_and_std(train_or_val=True,batch_size = BATCH_SIZE)\nstats_test = find_mean_and_std(train_or_val=True,batch_size = BATCH_SIZE)\nstats_train = [torch.Tensor([1,1]).float(),torch.Tensor([0,0]).float()] # for checking matrix transformations for unnormalized inputs\nstats_val = [torch.Tensor([1,1]).float(),torch.Tensor([0,0]).float()]\n\n# initialize model and training criterion and optimizer\nmodel_0 = LinearLayer(number_of_nodes*number_of_features*INPUT_FRAMES,number_of_nodes*number_of_features*OUTPUT_FRAMES).to(cur_device)\ncriterion = torch.nn.MSELoss() # Define loss criterion.\noptimizer = torch.optim.Adam(model_0.parameters(), lr=0.01) # Define optimizer.\n\nfor epoch in range(num_of_epochs):\n\n\tmodel_0.train()\n\tfor i, batch_trajectories in enumerate(train_loader):\n\t\t# split batched trajectories into INPUT_FRAMES/OUTPUT_FRAMES \n\t\tbatch_INPUT_FRAMES = batch_trajectories[:INPUT_FRAMES]\n\t\tbatch_OUTPUT_FRAMES = batch_trajectories[INPUT_FRAMES:]\n\n\t\t# get the number of trajectories in the current batch_trajectories\n\t\tnumber_of_trajectories = int(batch_trajectories[0].num_nodes/number_of_nodes)\n\t\tprint(f\"Number of trajectories in Train Batch Trajectories {i}: {number_of_trajectories}\")\n\n\t\t# normalize for linear 
layer\n\t\tfeatures_INPUT_FRAMES,features_OUTPUT_FRAMES = model_0.normalize(batch_INPUT_FRAMES,batch_OUTPUT_FRAMES,number_of_features,\n\t\t\t\t\t\t\t\t\tnumber_of_nodes,number_of_trajectories,stats_train,cur_device)\n\t\t\n\t\t# reshape into 1D so that x and y and temporal can interlink\n\t\tfeatures_INPUT_FRAMES = torch.reshape(features_INPUT_FRAMES,(number_of_trajectories,number_of_nodes*number_of_features*INPUT_FRAMES))\n\t\tfeatures_OUTPUT_FRAMES = torch.reshape(features_OUTPUT_FRAMES,(number_of_trajectories,number_of_nodes*number_of_features*OUTPUT_FRAMES)) \n\n\t\t# perform the forward step\n\t\toptimizer.zero_grad() # Clear gradients.\n\t\ty_pred = model_0(features_INPUT_FRAMES).to(torch.float64)\n\n\t\tloss = criterion(y_pred, features_OUTPUT_FRAMES) # Compute the loss\n\t\town_loss = torch.sum(torch.pow(torch.abs(y_pred-features_OUTPUT_FRAMES),2))/(number_of_features*number_of_trajectories*number_of_nodes*OUTPUT_FRAMES)\n\t\t\n\t\tloss.backward() # Derive gradients.\n\t\toptimizer.step() # Update parameters based on gradients.\n\t\tprint(f\"Epoch: {epoch}, Train Batch Trajectories: {i} Training Loss: {loss.item()}, Self-defined Training Loss: {own_loss}\\n\")\n\n\tmodel_0.eval()\n\twith torch.no_grad():\n\t\tstd_test, mean_test = stats_test[0].to(cur_device),stats_test[1].to(cur_device)\n\t\tfor i, batch_trajectories_test in enumerate(test_loader):\n\t\t\t# split into INPUT_FRAMES/OUTPUT_FRAMES \n\t\t\tbatch_INPUT_FRAMES_test = batch_trajectories_test[:INPUT_FRAMES]\n\t\t\tbatch_OUTPUT_FRAMES_test = batch_trajectories_test[INPUT_FRAMES:]\n\n\t\t\t# get the number of graphs in the current batched trajectory\n\t\t\tnumber_of_trajectories_test = int(batch_trajectories_test[0].num_nodes/number_of_nodes)\n\t\t\tprint(f\"Number of trajectories in Test Batch Trajectories {i}: {number_of_trajectories_test}\")\n\n\t\t\t# normalize and reshape for linear layer\n\t\t\tfeatures_INPUT_FRAMES_test,features_OUTPUT_FRAMES_test = model_0.normalize(batch_INPUT_FRAMES_test,batch_OUTPUT_FRAMES_test,number_of_features,\n\t\t\t\t\t\t\t\t\t\tnumber_of_nodes,number_of_trajectories_test,stats_test,cur_device)\n\t\t\tfeatures_INPUT_FRAMES_reshape_test = torch.reshape(features_INPUT_FRAMES_test,(number_of_trajectories_test,number_of_nodes*number_of_features*INPUT_FRAMES))\n\t\t\tfeatures_OUTPUT_FRAMES_reshape_test = torch.reshape(features_OUTPUT_FRAMES_test,(number_of_trajectories_test,number_of_nodes*number_of_features*OUTPUT_FRAMES)) \n\n\t\t\t# perform the forward step\n\t\t\ty_pred_test = model_0(features_INPUT_FRAMES_reshape_test).to(torch.float64)\n\t\t\t\n\t\t\t# loss function for the current batched trajectories\n\t\t\tloss_test = criterion(y_pred_test, features_OUTPUT_FRAMES_reshape_test) # Compute the loss\n\t\t\town_loss_test = torch.sum(torch.pow(torch.abs(y_pred_test-features_OUTPUT_FRAMES_reshape_test),2))/(number_of_features*number_of_trajectories_test*number_of_nodes*OUTPUT_FRAMES)\n\t\t\tprint(f\"Epoch: {epoch}, Test Batch Trajectories: {i} Testing Loss: {loss_test.item()}, Self-defined Testing Loss: {own_loss_test}\")\n\n\t\t\tfeatures_OUTPUT_FRAMES_test = torch.reshape(features_OUTPUT_FRAMES_reshape_test,(number_of_trajectories_test,number_of_nodes*number_of_features,OUTPUT_FRAMES))\n\t\t\ty_pred_test = torch.reshape(y_pred_test,(number_of_trajectories_test,number_of_nodes*number_of_features,OUTPUT_FRAMES))\n\n\t\t\t# visualize the data for each separate trajectories in the batched trajectories\n\t\t\tfor j,current_trajectory in 
enumerate(features_OUTPUT_FRAMES_test):\n\t\t\t\tground_truth = torch.reshape(features_OUTPUT_FRAMES_test[j].T,(OUTPUT_FRAMES,number_of_nodes,number_of_features))\n\t\t\t\tground_truth_unnormalized = ground_truth*std_test+mean_test\n\n\t\t\t\ty_pred_compare = torch.reshape(y_pred_test[j].T,(OUTPUT_FRAMES,number_of_nodes,number_of_features))\n\t\t\t\ty_pred_compare_unnormalized = y_pred_compare*std_test+mean_test\n\n\t\t\t\ttotal_loss_in_current_trajectory = torch.sum(torch.pow(torch.abs(ground_truth-y_pred_compare),2))/(number_of_nodes*number_of_features*OUTPUT_FRAMES)\n\t\t\t\tloss = criterion(ground_truth,y_pred_compare)\n\t\t\t\tprint(f\" Batch Trajectories: {i}, Trajectory: {j}, Testing Loss: {loss.item()}, Self-defined Loss: {total_loss_in_current_trajectory}\")\n\t\t\t\tprint(f\"Epoch: {epoch}, Test Batch Trajectories: {i},Trajectory: {j}, Testing Loss: {loss.item()}, Self-defined Loss: {total_loss_in_current_trajectory}\")\n\n\t\t\t\tplt.figure()\n\t\t\t\tplt.xlim(0,90)\n\t\t\t\t# analyze error in each frame\n\t\t\t\tfor k in range(len(y_pred_compare_unnormalized)):\n\t\t\t\t\tgt_current_frame = ground_truth_unnormalized[k]\n\t\t\t\t\ty_pred_compare_current_frame = y_pred_compare_unnormalized[k]\n\t\t\t\t\t# loss_per_frame_in_current_trajectory = torch.sum(torch.pow(torch.abs(gt_current_frame-y_pred_compare_current_frame),2))/(number_of_nodes*number_of_features)\n\t\t\t\t\t# loss = criterion(gt_current_frame,y_pred_compare_current_frame)\n\t\t\t\t\t# print(f\" batch_trajectories: {i}, Trajectory: {j}, Accuracy: {loss.item()}, Self-defined Accuracy: {loss_per_frame_in_current_trajectory}\")\n\n\t\t\t\t\t# check x and y coordinate error\n\t\t\t\t\t# for every node\n\t\t\t\t\terror = torch.mean(torch.pow(torch.abs(gt_current_frame-y_pred_compare_current_frame),2),dim=0)\n\t\t\t\t\t# for one node\n\t\t\t\t\terror = torch.pow(torch.abs(gt_current_frame[5,:]-y_pred_compare_current_frame[5,:]),2)\n\t\t\t\t\t# print(f\" batch_trajectories: {i}, Trajectory: {j}, Frame:{k}, Error_x: {error[0]},Error_y: {error[1]}\")\n\t\t\t\t\t# print(f\" gt_x: {gt_current_frame[5,0]}, gt_y: {gt_current_frame[5,1]}, pred_x:{y_pred_compare_current_frame[5,0]}, pred_y: {y_pred_compare_current_frame[5,1]}\")\n\n\t\t\t\t\t# plot for one node in that frame\n\t\t\t\t\tplt.scatter(gt_current_frame[5,0].cpu(),gt_current_frame[5,1].cpu(),label= \"stars\",color= \"green\")\n\t\t\t\t\tplt.scatter(y_pred_compare_current_frame[5,0].cpu(),y_pred_compare_current_frame[5,1].cpu(),label= \"stars\",color= \"red\")\n\t\t\t\tbreak # test for one trajectory\n\t\t\tplt.show()\n\t\t\tprint(\"\\n\")\n\t\t\tbreak # test for one batch trajectories\n\tprint(\"\\n\")\n\nimport scipy.io\npred_np = y_pred_compare_unnormalized.cpu().numpy()\ngt_np = ground_truth_unnormalized.cpu().numpy()\nfile_path = 'prediction_visualization\\pred.mat'\nfile_path2 = 'prediction_visualization\\gt.mat'\nscipy.io.savemat(file_path, {'pred_np': pred_np})\nscipy.io.savemat(file_path2, {'gt_np': gt_np})\n\n# Create models directory \nfrom pathlib import Path\nMODEL_PATH = Path(\"models\")\nMODEL_PATH.mkdir(parents=True, exist_ok=True)\n\n# Create model save path \nMODEL_NAME = \"prediction_model.pth\"\nMODEL_SAVE_PATH = MODEL_PATH / MODEL_NAME\n# Save the model state dict\nprint(f\"Saving model to: {MODEL_SAVE_PATH}\")\ntorch.save(obj=model_0.state_dict(), # only saving the state_dict() only saves the models learned parameters\n f=MODEL_SAVE_PATH) \n\n# %%\n# 
#############################################################################################################\nfrom pathlib import Path\nMODEL_PATH = Path(\"models\")\nMODEL_PATH.mkdir(parents=True, exist_ok=True)\nfrom data_loader import *\ncur_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# 2. Create model save path \nMODEL_NAME = \"prediction_model.pth\"\nMODEL_SAVE_PATH = MODEL_PATH / MODEL_NAME\n\n# Testing pipe\ncur_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# initialize preliminary information and load databatch\ntorch.manual_seed(42)\n","repo_name":"jloh0017/FYP-A-Code","sub_path":"model_LinearLayer.py","file_name":"model_LinearLayer.py","file_ext":"py","file_size_in_byte":11536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11338173793","text":"from django.shortcuts import render, redirect\nfrom .models import Newsletter, LandingPagePictures\nfrom django.http import HttpResponse\nfrom random import SystemRandom\nfrom courses.models import Course\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\n\n# Ajax Requests Start\n\ndef newsletter(request):\n get_email = request.POST.get('email')\n email_in_newsletter = Newsletter.objects.filter(email=get_email)\n if not email_in_newsletter:\n new_email = Newsletter.objects.create(email=get_email)\n new_email.save()\n\n return HttpResponse('Success')\n\n# Ajax Requests Ends\n\n\ndef index(request):\n all_landingpages_pictures = LandingPagePictures.objects.all()\n sys_random = SystemRandom()\n random_picture = sys_random.choice(all_landingpages_pictures)\n context = {\n 'random_picture': random_picture,\n }\n return render(request, 'main/index.html', context)\n\n\ndef contact(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n subject = request.POST.get('subject')\n email = request.POST.get('email')\n message = request.POST.get('message')\n\n intro_and_message = f\"Hi, {name} here.\\n\" + message\n\n try:\n send_mail(subject, intro_and_message, email,\n [settings.EMAIL_HOST_USER], fail_silently=False)\n messages.success(request, 'Message sent successfully')\n except Exception as e:\n messages.error(request, 'Message not sent. 
Try again.')\n\n return redirect('contact')\n\n return render(request, 'main/contact.html')\n\n\ndef search_course(request):\n output = []\n search_input = request.POST.get('search')\n all_words_in_search_input = search_input.split(\" \")\n search_results = []\n\n def removeNestings(array):\n for item in array:\n if type(item) == list:\n removeNestings(item)\n else:\n output.append(item)\n\n for word in all_words_in_search_input:\n query = list(Course.objects.filter(\n Q(title__contains=word) |\n Q(keywords__name__icontains=word)\n ).distinct())\n search_results.append(query)\n\n removeNestings(search_results)\n\n context = {\n 'search_input': search_input,\n 'search_results': output,\n }\n return render(request, 'main/search.html', context)\n\n\ndef tag_search(request, tag):\n query = list(Course.objects.filter(\n status='published',\n keywords__name__icontains=tag\n ).distinct())\n\n context = {\n 'search_input': tag,\n 'search_results': query,\n }\n\n return render(request, 'main/tag_search.html', context)\n","repo_name":"TomiwaJoseph/WatchNLearn","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"36819276862","text":"from __future__ import print_function\nimport os,sys,argparse,json\n\nparser = argparse.ArgumentParser(\"Plot Reco Clusters for Inspection\")\nparser.add_argument(\"-ll\",\"--input-larlite\",required=True,type=str,help=\"kpsrecomanager larlite output file\")\nargs = parser.parse_args()\n\nimport numpy as np\nimport ROOT as rt\nfrom larlite import larlite\nfrom larcv import larcv\nfrom larflow import larflow\nlarcv.SetPyUtil()\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nfrom dash.exceptions import PreventUpdate\n\nimport lardly\n\ncolor_by_options = [\"larmatch\",\"keypoint\"]\ncolorscale = \"Viridis\"\noption_dict = []\nfor opt in color_by_options:\n option_dict.append( {\"label\":opt,\"value\":opt} )\n\n# OPEN LARLITE FILE\nio = larlite.storage_manager( larlite.storage_manager.kREAD )\nio.add_in_filename( args.input_larlite )\nio.open()\n\nnentries = io.get_entries()\nCURRENT_EVENT = None\n\nprint(\"NENTRIES: \",nentries)\n\ndef make_figures(entry,clustername):\n \"\"\" \n if clustername is None return all clusters. 
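Clusters and their PCA axes are read from the input larlite file for the requested entry.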
\n else if string, return specific cluster\n \"\"\"\n \n from larcv import larcv\n larcv.load_pyutil()\n detdata = lardly.DetectorOutline()\n \n from larflow import larflow\n larcv.SetPyUtil() \n print(\"making figures for entry={} cluster={}\".format(entry,clustername))\n global io\n global kpsanatree\n io.go_to(entry)\n\n traces_v = []\n cluster_list = []\n\n plot_producer = None\n plot_index = None\n if clustername != \"all\":\n plot_producer = clustername.split(\":\")[0]\n plot_index = int(clustername.split(\":\")[1])\n \n # PLOT TRACK PCA-CLUSTERS: FULL/COSMIC\n clusters = [(\"cosmic\",\"trackprojsplit_full\",\"rgb(150,150,150)\",0.15,False),\n (\"wctrack\",\"trackprojsplit_wcfilter\",\"rgb(125,200,125)\",1.0,True),\n (\"wcshower\",\"showergoodhit\",\"rgb(200,125,125)\",0.5,False)]\n for (name,producer,rgbcolor,opa,drawme) in clusters:\n\n if not drawme:\n continue\n \n ev_trackcluster = io.get_data(larlite.data.kLArFlowCluster, producer )\n ev_pcacluster = io.get_data(larlite.data.kPCAxis, producer )\n \n for icluster in range(ev_trackcluster.size()):\n\n \n lfcluster = ev_trackcluster.at( icluster )\n cluster_trace = lardly.data.visualize_larlite_larflowhits( lfcluster, name=\"%s[%d]\"%(name,icluster) )\n\n clabel = \"%s:%d (%d hits)\"%(producer,icluster,lfcluster.size())\n cvalue = \"%s:%d\"%(producer,icluster) \n cluster_list.append( {\"label\":clabel,\"value\":cvalue} )\n \n\n if clustername!=\"all\":\n cluster_trace[\"marker\"][\"color\"] = \"rgb(50,50,50)\"\n else:\n r3 = np.random.randint(255,size=3)\n rand_color = \"rgb(%d,%d,%d)\"%( r3[0], r3[1], r3[2] )\n cluster_trace[\"marker\"][\"color\"] = rand_color\n \n cluster_trace[\"marker\"][\"opacity\"] = opa\n cluster_trace[\"marker\"][\"width\"] = 5.0\n\n\n pcaxis = ev_pcacluster.at( icluster )\n pcatrace = lardly.data.visualize_pcaxis( pcaxis )\n pcatrace[\"name\"] = \"%s-pca[%d]\"%(name,icluster)\n pcatrace[\"line\"][\"color\"] = \"rgb(0,0,0)\"\n pcatrace[\"line\"][\"width\"] = 1\n pcatrace[\"line\"][\"opacity\"] = 1.0\n\n if plot_producer is not None and plot_producer==producer and plot_index==icluster:\n cluster_trace[\"marker\"][\"color\"] = rgbcolor \n\n traces_v.append(cluster_trace)\n traces_v.append( pcatrace ) \n \n # add detector outline\n traces_v += detdata.getlines(color=(10,10,10))\n print(\"Number of clusters in event: \",len(cluster_list))\n \n return traces_v,cluster_list\n\ndef test():\n pass\n \napp = dash.Dash(\n __name__,\n meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}],\n)\n\nserver = app.server\n\n# 3D PLOT WINDOW\naxis_template = {\n \"showbackground\": True,\n #\"backgroundcolor\": \"#141414\", # black\n #\"gridcolor\": \"rgba(255, 255, 255)\",\n #\"zerolinecolor\": \"rgba(255, 255, 255)\", \n \"backgroundcolor\": \"rgba(100, 100, 100,0.5)\", \n \"gridcolor\": \"rgb(50, 50, 50)\",\n \"zerolinecolor\": \"rgb(0, 0, 0)\",\n}\n\nplot_layout = {\n \"title\": \"\",\n \"height\":800,\n \"margin\": {\"t\": 0, \"b\": 0, \"l\": 0, \"r\": 0},\n \"font\": {\"size\": 12, \"color\": \"black\"},\n \"showlegend\": False,\n #\"plot_bgcolor\": \"#141414\",\n #\"paper_bgcolor\": \"#141414\",\n \"plot_bgcolor\": \"#ffffff\",\n \"paper_bgcolor\": \"#ffffff\",\n \"scene\": {\n \"xaxis\": axis_template,\n \"yaxis\": axis_template,\n \"zaxis\": axis_template,\n \"aspectratio\": {\"x\": 1, \"y\": 1, \"z\": 3},\n \"camera\": {\"eye\": {\"x\": 1, \"y\": 1, \"z\": 1},\n \"up\":dict(x=0, y=1, z=0)},\n \"annotations\": [],\n },\n}\n\n# INPUT FORM: EVENT NUM\neventinput = dcc.Input(\n 
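# added note: entry index into the larlite file; cb_render raises PreventUpdate for values outside [0, nentries)\n    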
id=\"input_event\",\n type=\"number\",\n placeholder=\"Input Event\")\n\n# INPUT FORM: CLUSTER LIST\nplotcluster = dcc.Dropdown(\n options=[\n {'label':'all','value':'all'},\n ],\n value='all',\n id='plotcluster',\n)\n \n# PAGE LAYOUT\napp.layout = html.Div( [\n html.Div( [ eventinput,\n plotcluster,\n html.Button(\"Plot\",id=\"plot\")\n ] ),\n html.Hr(),\n html.Div( [\n dcc.Graph(\n id=\"det3d\",\n figure={\n \"data\": [],\n \"layout\": plot_layout,\n },\n config={\"editable\": True, \"scrollZoom\": False},\n )],\n className=\"graph__container\"),\n html.Div(id=\"out\")\n] )\n\n \n@app.callback(\n [Output(\"det3d\",\"figure\"),\n Output(\"plotcluster\",\"options\"),\n Output(\"plotcluster\",\"value\"),\n Output(\"out\",\"children\")],\n [Input(\"plot\",\"n_clicks\")],\n [State(\"input_event\",\"value\"),\n State(\"plotcluster\",\"value\"),\n State(\"det3d\",\"figure\")],\n )\ndef cb_render(*vals):\n \"\"\"\n runs when plot button is clicked\n \"\"\"\n global EVENT_DATA\n global UNMATCHED_CLUSTERS\n global io\n global CURRENT_EVENT \n if vals[1] is None:\n print(\"Input event is none\")\n raise PreventUpdate\n if vals[1]>=nentries or vals[1]<0:\n print(\"Input event is out of range\")\n raise PreventUpdate\n\n clustername = vals[2]\n entry = int(vals[1])\n if entry!=CURRENT_EVENT:\n # first time we access an entry, we default to the \"all\" view of the vertices\n CURRENT_EVENT = entry\n clustername = \"all\"\n cluster_traces_v,cluster_options = make_figures(int(vals[1]),clustername)\n cluster_options.append( {'label':\"all\",'value':\"all\"} )\n \n # update the figure's traces\n vals[-1][\"data\"] = cluster_traces_v\n return vals[-1],cluster_options,clustername,\"event requested: {}; cluster: {}\".format(vals[1],vals[2])\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","repo_name":"NuTufts/larflow","sub_path":"larflow/Reco/test/vis_clusters.py","file_name":"vis_clusters.py","file_ext":"py","file_size_in_byte":6996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"10643357554","text":"'''\nFile name: align.py\nAuthor: Etha Hua\nDate: Feb 8th, 2022\nPurpose: Implementation of the global alignment algorithm\nwith a naive scoring function and a linear gap genalty.\n'''\nimport sys\n\n# score\n# Purpose: compare two letters and return their similarity score\n# Parameters: p and q are two letters to be comapred\n# Returns: the similarity score in integer of the two letters\ndef score(p,q):\n if p == q:\n return match\n else:\n return mismatch\n\n# fillMatrix\n# Purpose: fill in the row th row, col th column element of the matrix, and record\n# the corresponding traceback for that element in a traceBack matrix\n# Parameters: the indices indicating the element to be filled in row and col\n# Returns: N/A\n# Note: requires the alignMatrix and traceBackMatrix to be defined\ndef fillMatrix(row, col):\n diag = alignMatrix[row - 1][col - 1] + score(s1[row - 1], s2[col - 1])\n vertGap = alignMatrix[row - 1][col] + gap\n horiGap = alignMatrix[row][col - 1] + gap \n rtnval = max(diag, vertGap, horiGap)\n\n # preferrence: diagonal > vertical > horizontal\n if rtnval == diag:\n traceBackMatrix[row].append(0)\n elif rtnval == vertGap:\n traceBackMatrix[row].append(1)\n else: # horiGap\n traceBackMatrix[row].append(2)\n return rtnval\n\n# fillAllMatrices\n# Purpose: fill in the alignment matrix with the initialization of the top row and\n# the leftmost column to be zeros and fill in the traceback matrix according\n# to the alignment 
matrix\n# Parameters: N/A\n# Returns: N/A\n# Note: requires the alignMatrix and traceBackMatrix to be defined\ndef fillAllMatrices():\n for i in range(m+1):\n alignMatrix.append([])\n traceBackMatrix.append([])\n if i == 0:\n for j in range(n+1):\n alignMatrix[i].append(gap * j)\n traceBackMatrix[i].append(2)\n else:\n for j in range(n+1):\n if j == 0:\n alignMatrix[i].append(gap * i)\n traceBackMatrix[i].append(1)\n else:\n alignMatrix[i].append(fillMatrix(i,j))\n\n# traceBack\n# Purpose: print out the aligned sequences by tracing back from the \n# rightmost bottommost element in an alignment matrix\n# Parameters: the length of s1 and the length of s2.\n# Returns: N/A\ndef traceBack(s1len, s2len):\n alignStk = []\n i = s1len\n j = s2len\n while i > 0 or j > 0: \n if traceBackMatrix[i][j] == 0:\n alignStk.append((s1[i - 1], s2[j - 1]))\n i -= 1\n j -= 1\n elif traceBackMatrix[i][j] == 2:\n alignStk.append((\"-\", s2[j - 1]))\n j -= 1\n else: # traceBackMatrix[i][j] == 1\n alignStk.append((s1[i - 1], \"-\"))\n i -= 1 \n\n s1alned = \"\"\n s2alned = \"\"\n for _ in range(len(alignStk)):\n currPair = alignStk.pop()\n s1alned += currPair[0]\n s2alned += currPair[1]\n print(s1alned)\n print(s2alned)\n\n\n# start of the program\n\n# Default values for M, m and g are set as follows\nmatch = 4\nmismatch = -2\ngap = -2\n\n# Only if the user inputted the correct number of arguments \n# for M, m and g does the program accepts them as new \n# scores for M, m and g\nif len(sys.argv) == 4:\n match = int(sys.argv[1])\n mismatch = int(sys.argv[2])\n gap = int(sys.argv[3])\n\n# Sequence strings are initialized to empty strings\nseq = [\"\",\"\"]\n\n# Reading in the two sequences from stdin, ignoring the \n# commenting legends above the sequence content\nindex = -1\nfor line in sys.stdin:\n if (line[0] == '>'):\n index += 1\n else:\n while line[-1] == \"\\n\" or line[-1] == \"\\r\":\n line = line[0:-1]\n seq[index] += line\n\ns1 = seq[0]\ns2 = seq[1]\n\nm = len(s1)\nn = len(s2)\n\n# Initializing the matrices\nalignMatrix = []\ntraceBackMatrix = []\n\nfillAllMatrices()\ntraceBack(m,n)\n","repo_name":"ethahtz/ComputationalBiology","sub_path":"globalAlignment/align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7693974297","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom pyquery import PyQuery\nfrom scrapy.http import Request\nfrom scrapy.utils.response import get_base_url\nfrom urllib.parse import urljoin\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nimport os\npath = os.path.abspath(os.path.join(os.getcwd(), \"../..\"))\nimport sys\nsys.path.append(path)\nfrom get_boss.items import GetBossItem\n\n\nheaders = {\n 'x-devtools-emulate-network-conditions-client-id': \"5f2fc4da-c727-43c0-aad4-37fce8e3ff39\",\n 'upgrade-insecure-requests': \"1\",\n 'accept': \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n 'dnt': \"1\",\n 'accept-encoding': \"gzip, deflate, br\",\n 'accept-language': \"zh-CN,zh;q=0.8,en;q=0.6\",\n 'cookie': \"__c=1527989289; __g=-; lastCity=100010000; toUrl=https%3A%2F%2Fwww.zhipin.com%2Fjob_detail%2Fc77944563dd5cc1a1XV70tW0ElM%7E.html%3Fka%3Dsearch_list_1_blank%26lid%3DTvnYVWp16I.search; JSESSIONID=\"\"; __l=l=%2Fwww.zhipin.com%2F&r=; __a=33024288.1527773672.1527940079.1527989289.90.5.22.74; 
Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1527774077,1527835258,1527940079,1527989289; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1527991981\",\n 'cache-control': \"no-cache\",\n 'postman-token': \"76554687-c4df-0c17-7cc0-5bf3845c9831\",\n 'x-requested-with':'XMLHttpRequest',\n 'referer':\"https://www.zhipin.com/job_detail/?query=&scity=100010000&industry=&position=\",\n # 'user-agent':ua #需要替换的\n #'user-agent': 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52'\n\n }\n\nclass BossSpider(CrawlSpider):\n name = 'boss'\n start_urls = ['https://www.zhipin.com/gongsir/5d627415a46b4a750nJ9.html?page=1']\n url1 = 'https://www.zhipin.com' #用来做拼接\n\n\n # 匹配职位列表页的规则(定义抽取连接规则)\n rules = (\n Rule(LinkExtractor(allow=r'.+\\?page=\\d+'), callback=\"parse_url\",follow=True),\n )\n # 匹配详情页的规则\n # rules = (\n # Rule(LinkExtractor(allow=r'.+job_detail/\\w+~.html'), callback=\"detail_parse\", follow=False),\n # )\n\n def parse_url(self, response):\n item = GetBossItem()\n\n for i in range(1,15):\n url = response.xpath('//*[@id=\"main\"]/div[2]/div[2]/div[2]/ul/li[{}]/a/@href'.format(str(i))).extract()\n url = self.url1+str(url[0])\n print(url)\n # if item['url']:\n yield Request(url,\n callback=self.detail_parse,#回调详情页函数\n meta={'item':item}, #将参数传递给meta#\n priority=10,\n dont_filter=True, #强制不过滤\n #headers=headers\n # headers=self.headers\n )\n\n\n def detail_parse(self,response):\n item = response.meta['item'] #接收item\n # 企业名称\n dp_name = response.xpath('//div[@class=\"job-sec\"]/div[@class=\"name\"]/text()').get().strip()\n # 企业类型\n dp_type = response.xpath('//div[@class=\"level-list\"]/li[@class=\"company-type\"]/text()').getall()[0]\n # 企业成立时间\n dp_founded = response.xpath('//div[@class=\"level-list\"]/li[@class=\"res-time\"]/text()').getall()[0]\n # 职位名称\n job_name = response.xpath('//div[@class=\"company-info\"]/div[@class=\"name\"]/h1/text()').get().strip()\n # 学历要求\n education = response.xpath('//*[@id=\"main\"]/div[1]/div/div/div[2]/p/text()').getall()[2]\n # 工作经验要求\n experience = response.xpath('//*[@id=\"main\"]/div[1]/div/div/div[2]/p/text()').getall()[1]\n # 薪资\n salary = response.xpath('//*[@id=\"main\"]/div[1]/div/div/div[2]/div[2]/span/text()').get().strip()\n # 招聘状态\n state = response.xpath('//*[@id=\"main\"]/div[3]/div/div[1]/div[2]/p[6]/text()').get().strip()\n # 职位描述\n description = response.xpath('//*[@id=\"main\"]/div[3]/div/div[2]/div[2]/div[1]/div/text()').getall()\n description = str(description).strip('[\\']\\\\n ')\n # 员工福利\n welfare = response.xpath('//*[@id=\"main\"]/div[1]/div/div/div[2]/div[3]/div[2]/span/text()').getall()\n welfare = str(welfare)\n # 工作地址\n address = response.xpath('//div[@class=\"job-location\"]/div[@class=\"location-address\"]/text()').get().strip()\n\n\n item['dp_name']=dp_name\n item['dp_type']=dp_type\n item['dp_founded']=dp_founded\n item['job_name']=job_name\n item['education']=education\n item['experience']=experience\n item['salary']=salary\n item['state']=state\n item['description']=description\n item['welfare']=welfare\n item['address']=address\n\n yield item\n","repo_name":"yxys01/get_boss","sub_path":"get_boss/spiders/boss.py","file_name":"boss.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29644344841","text":"\nfrom kivy.app import App\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\nfrom kivy.uix.textinput import TextInput\nfrom 
kivy.core.window import Window\nfrom kivy.uix.popup import Popup\nimport socket\nimport select\nimport sys\nfrom threading import *\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\nclass MainWindow(App):\n def build(self):\n\n # main window color setting\n Window.clearcolor=(0, 0.6, 0.8, 0.1)\n\n # grid parameters\n self.window=GridLayout()\n self.window.cols=1\n self.window.padding=(100,100)\n self.window.spacing=(20,20)\n\n # server IP input textbox (first read IP from server UI)\n self.serverIpAddres=TextInput(multiline=False, text='192.168.1.14',halign=\"center\",font_size='80')\n self.serverIpAddres.height=(10)\n self.window.add_widget(self.serverIpAddres)\n\n # bed number (to change the number you need to restart the client)\n self.bedNumber = TextInput(multiline=False, text= 'Wprowadź numer łóżka',halign=\"center\",font_size='80')\n self.window.add_widget(self.bedNumber)\n\n # login button settings\n self.loginButton = Button(text='POŁĄCZ')\n self.loginButton.background_color=[1,1,1,1]\n self.loginButton.background_color=[1,1,1,.6]\n self.loginButton.bind(on_press=self.ConnectToServer)\n self.window.add_widget(self.loginButton)\n\n # N priority button settings\n self.nPriorityButton = Button(text='NORMALNY')\n self.nPriorityButton.background_color=[0,1,0,1]\n self.nPriorityButton.bind(on_press=self.nPriorityButtonOnClick)\n self.window.add_widget(self.nPriorityButton)\n self.nPriorityButton.disabled = True\n\n # P priority button settings\n self.pPriorityButton = Button(text='PILNY')\n self.pPriorityButton.background_color=[1,0.5,0,1]\n self.pPriorityButton.bind(on_press=self.pPriorityButtonOnClick)\n self.pPriorityButton.disabled = True\n self.window.add_widget(self.pPriorityButton)\n\n # PP priority button settings\n self.ppPriorityButton = Button(text='BARDZO PILNY')\n self.ppPriorityButton.background_color=[1,0,0,1]\n self.ppPriorityButton.bind(on_press=self.ppPriorityButtonOnClick)\n self.ppPriorityButton.disabled = True\n self.window.add_widget(self.ppPriorityButton)\n\n\n\n return self.window\n\n # loginButton OnClick method (here app connects to server by using IP addres from serverIpAddres input)\n def ConnectToServer(self,instance):\n ipAddres = self.serverIpAddres.text\n server.connect((str(ipAddres), 8081))\n self.serverIpAddres.disabled = True\n self.bedNumber.disabled = True\n self.loginButton.disabled = True\n self.nPriorityButton.disabled = False\n self.pPriorityButton.disabled = False\n self.ppPriorityButton.disabled = False\n\n\n\n def nPriorityButtonOnClick(self,instance):\n message=self.bedNumber.text+\";\"+\"N\"\n server.send(bytes(message,'utf-8'))\n\n def pPriorityButtonOnClick(self,instance):\n message=self.bedNumber.text+\";\"+\"P\"\n server.send(bytes(message,'utf-8'))\n\n def ppPriorityButtonOnClick(self,instance):\n message=self.bedNumber.text+\";\"+\"PP\"\n server.send(bytes(message,'utf-8'))\n\n\n\n# Run message listener in separate thread (IMPORTANT)\n#def ListenerToThread():\n# t1 = Thread(target=MessageListener)\n # t1.start()\n\n#def MessageListener():\n\n # while True:\n # sockets_list = [sys.stdin, server]\n # read_sockets, write_socket, error_socket = select.select(sockets_list, [], [])\n #for socks in read_sockets:\n # if socks == server:\n # message = socks.recv(8)\n # if str.__contains__(message.decode('utf-8'),'OK'):\n # popup = Popup(title='Sukces',\n # content=Label(text='POMOC JUŻ W DRODZE!'),\n # separator_color=[0, 1, 1, 0.9],\n # background_color=[1, 1, 1, 0.3],\n # size_hint=(None, None), size=(500, 500))\n # 
popup.open()\n#\n # if str.__contains__(message.decode('utf-8'),'Conne'):\n # # popup\n # popup2 = Popup(title='Sukces',\n # content=Label(text='Nawiązano łączność z serwerem!'),\n # separator_color=[0, 1, 1, 0.9],\n # background_color=[1, 1, 1, 0.3],\n # size_hint=(None, None), size=(500, 500))\n # popup2.open()\n#\n# server.close()\n\n\n#ListenerToThread()\nMainWindow().run()\n","repo_name":"ArachioHD/TSWM","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26309444237","text":"# -*- coding: utf-8 -*-\nimport os\n\n\nLIB_DIR = os.path.dirname(os.path.abspath(__file__))\nCHANNELS_DIR = os.path.join(LIB_DIR, \"channels\")\nMEDIA_DIR = os.path.join(os.path.join(LIB_DIR,os.pardir), \"media\")\n\nTMP_DIR=\"\"\n\nchannels = dict()\nordered_channels = []\nhidden_channels = []\nhidden_channelsName = []","repo_name":"spmjc/freplay2","sub_path":"resources/lib/globalvar.py","file_name":"globalvar.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37946846915","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages, Extension\nfrom pkgutil import get_importer\nfrom collections import defaultdict\nfrom functools import wraps\nfrom distutils import sysconfig\nimport re\nfrom fnmatch import fnmatch\nfrom os.path import join\nimport os\n\n\ndef find(directory, patterns):\n result = []\n for node, _, filenames in os.walk(directory):\n for filename in filenames:\n for pattern in patterns:\n if fnmatch(filename, pattern):\n result.append(join(node, filename))\n\n return result\n\n\ndef lazy(function):\n\n @wraps(function)\n def wrapped(*args, **kwargs):\n\n class LazyProxy(object):\n\n def __init__(self, function, args, kwargs):\n self._function = function\n self._args = args\n self._kwargs = kwargs\n self._result = None\n\n def __len__(self):\n return self.__len__()\n\n def __iter__(self):\n return self.__iter__()\n\n def __getattribute__(self, name):\n if name in ['_function', '_args', '_kwargs', '_result']:\n return super(LazyProxy, self).__getattribute__(name)\n\n if self._result is None:\n self._result = self._function(*self._args, **self._kwargs)\n\n return object.__getattribute__(self._result, name)\n\n def __setattr__(self, name, value):\n if name in ['_function', '_args', '_kwargs', '_result']:\n super(LazyProxy, self).__setattr__(name, value)\n return\n\n if self._result is None:\n self._result = self._function(*self._args, **self._kwargs)\n\n setattr(self._result, name, value)\n\n return LazyProxy(function, args, kwargs)\n\n return wrapped\n\n\n# Navigate, import, and retrieve the metadata of the project.\nmeta = get_importer('src/hummus').find_module('meta').load_module('meta')\n\n\ndef make_config():\n from pkgconfig import parse\n\n # Process the `pkg-config` utility and discover include and library\n # directories.\n config = defaultdict(set)\n for lib in ['zlib', 'libtiff-4', 'freetype2']:\n for key, value in parse(lib).items():\n config[key].update(value)\n\n # Add libjpeg (no .pc file).\n config['libraries'].add('jpeg')\n\n # List-ify config for setuptools.\n for key in config:\n config[key] = list(config[key])\n\n # Add hummus.\n config['include_dirs'].insert(0, 'lib/hummus/PDFWriter')\n config['include_dirs'].insert(0, 'lib/python')\n\n # Add local library.\n config['include_dirs'].insert(0, 
'src')\n\n # Return built config.\n return config\n\n\n@lazy\ndef make_extension(name, sources=None, cython=True):\n # Resolve extension location from name.\n location = join('src', *name.split('.'))\n location += '.pyx' if cython else '.cpp'\n\n # NOTE: Performing black magic hacks to remove --as-needed from the linker\n # flags if present.\n sysconfig.get_config_vars()\n lds = sysconfig._config_vars['LDSHARED']\n sysconfig._config_vars['LDSHARED'] = re.sub(r',?--as-needed,??', '', lds)\n config = make_config()\n config['libraries'].insert(0, 'hummus')\n\n # Create and return the extension.\n return Extension(\n name=name,\n sources=sources + [location] if sources else [location],\n language='c++',\n **config)\n\n\n@lazy\ndef make_library(name, directory):\n patterns = ['*.cxx', '*.cpp']\n return [name, dict(sources=find(directory, patterns), **make_config())]\n\n\nsetup(\n name='hummus',\n version=meta.version,\n description=meta.description,\n author='Concordus Applications',\n author_email='support@concordusapps.com',\n url='https://github.com/concordusapps/python-hummus',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.3',\n ],\n package_dir={'hummus': 'src/hummus'},\n packages=find_packages('src'),\n setup_requires=[\n 'setuptools_cython',\n 'pkgconfig'\n ],\n install_requires=[\n 'six',\n 'wand',\n ],\n extras_require={\n 'test': ['pytest'],\n },\n libraries=[\n make_library('hummus', 'lib/hummus/PDFWriter'),\n ],\n ext_modules=[\n make_extension('hummus.reader'),\n make_extension('hummus.writer'),\n make_extension('hummus.rectangle'),\n make_extension('hummus.page'),\n make_extension('hummus.context'),\n make_extension('hummus.text'),\n make_extension('hummus.image'),\n make_extension(\n name='hummus.interface',\n sources=find('lib/python/interface', ['*.cxx'])),\n ]\n)\n","repo_name":"concordusapps/python-hummus","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"22738130234","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport unittest\nimport sys\nsys.path.append('../')\nfrom myutils.FileList import FileList\n\nclass TestBranchListMethods(unittest.TestCase):\n\n def setUp(self):\n self.fileList = [\n '/some/long/path/to/file/blablablablablablablablablablablablabla/file_1.root',\n '/some/long/path/to/file/blablablablablablablablablablablablabla/file_2.root',\n '/some/long/path/to/file/blablablablablablablablablablablablabla/file_3.root',\n '/some/long/path/to/file/blablablablablablablablablablablablabla/file_4.root',\n '/some/long/path/to/file/blablablablablablablablablablablablabla/file_5.root',\n '/some/long/path/to/file/blablablablablablablablablablablablabla/file_6.root',\n ]\n\n def test_FileList(self):\n compressedFileList = FileList.compress(self.fileList)\n decompressedFileList = FileList.decompress(compressedFileList)\n print('uncompressed length:',len(';'.join(self.fileList)))\n print('compressed length:',len(compressedFileList))\n self.assertEqual(self.fileList, decompressedFileList)\n\n def test_FileListDamaged(self):\n compressedFileList = FileList.compress(self.fileList)\n # corrupt the file list by removing the last character\n compressedFileList = compressedFileList[:-1]\n with 
self.assertRaises(Exception) as e:\n decompressedFileList = FileList.decompress(compressedFileList)\n\n def test_FileListDamaged2(self):\n compressedFileList = FileList.compress(self.fileList)\n # corrupt the file list\n compressedFileList = 'H'+compressedFileList[1:]\n with self.assertRaises(Exception) as e:\n decompressedFileList = FileList.decompress(compressedFileList)\n\n def test_FileListEmpty(self):\n print('empty:',FileList.compress([]))\n # corrupt the file list\n compressedFileList = 'base64:'\n with self.assertRaises(Exception) as e:\n decompressedFileList = FileList.decompress(compressedFileList)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"perrozzi/Xbb","sub_path":"python/test/test_FileList.py","file_name":"test_FileList.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16590339615","text":"# @Time : 2022/1/16 下午10:16\n# @Author : lixiang\n# @FileName: download_artifacts.py\n\n\n\nimport os\nfrom concurrent.futures import ThreadPoolExecutor, wait,ALL_COMPLETED\n\nimport requests\nimport zipfile\nOWNER=\"lx200916\"\nREPO=\"ncnn\"\ntoken=\"\"\nheaders = {\n 'Authorization': f'Bearer {token}' # Replace with Ur Token.\n}\nr=requests.session()\nr.headers=headers\n# run_id=[{'name':'v8_1','id':1087399118},{'name':'v8_2','id':1087399122},{'name':'v8_2v','id':1087399124},{'name':'v8_3','id':1087399123}]\nrun_id=[{'name':'n_1','id':1371191156},{'name':'n_1v','id':1371191899},]\nexecutor = ThreadPoolExecutor(max_workers=5)\n\ndef getartifact(artifact:dict,pwd:str):\n file=r.get(artifact['archive_download_url']).content\n # print(file)\n path=pwd+\"/\"+artifact['name']\n print(path)\n with open(path+'.zip',\"wb+\") as f:\n f.write(file)\n zipfile.ZipFile(path+'.zip').extractall(path=path)\n\n\nfor item in run_id:\n all_task=[]\n\n if not os.path.exists(f\"{REPO}/{item['name']}\"):\n os.makedirs(f\"{REPO}/{item['name']}\")\n aList=r.get(f\"https://api.github.com/repos/{OWNER}/{REPO}/actions/runs/{item['id']}/artifacts?per_page=100&page=1\").json()\n if aList['total_count']>100:\n print(\"More Than One Page\")\n print(aList['total_count'])\n for artifact in aList['artifacts']:\n all_task.append(executor.submit(getartifact,artifact=artifact,pwd=f\"{REPO}/{item['name']}\"))\n wait(all_task,return_when=ALL_COMPLETED)\n\n\n\n\n\n\n","repo_name":"UbiquitousLearning/MobileDLFrameworksBenchmark","sub_path":"GetFrameworkHistory/download_artifacts.py","file_name":"download_artifacts.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"25937984827","text":"#Dado un número, determinar la cantidad de dígitos pares que contiene.\n\nnum = int(input(\"Digite un numero: \"))\n\nsuma_digitos = 0\nproducto = 0\nrecorrido = 0\nwhile num > 0:\n producto = num % 10\n if producto % 2 == 0:\n suma_digitos +=1\n num //= 10\nprint(suma_digitos)","repo_name":"Ang3lRamos/Python324","sub_path":"while loop/Tarea5.py","file_name":"Tarea5.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1318986337","text":"from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.core.cache import cache\nfrom django.views.decorators.http import 
require_GET\n\nfrom .forms import PostForm, CommentForm\nfrom .models import Follow, Group, Post, User\n\n\n@require_GET\ndef index(request):\n post_list = cache.get(\"index_page\")\n if not post_list:\n post_list = Post.objects.all()\n cache.set(\"index_page\", post_list)\n paginator = Paginator(post_list, settings.PAGES_OBG_AMT)\n page_number = request.GET.get(\"page\")\n page = paginator.get_page(page_number)\n return render(\n request,\n \"index.html\",\n {\"page\": page, }\n )\n\n\ndef group_posts(request, slug):\n group = get_object_or_404(Group, slug=slug)\n posts_list = group.group_posts.all()\n paginator = Paginator(posts_list, settings.PAGES_OBG_AMT)\n page_number = request.GET.get(\"page\")\n page = paginator.get_page(page_number)\n return render(request, \"group.html\", {\"group\": group, \"page\": page})\n\n\ndef profile(request, username):\n author = get_object_or_404(User, username=username)\n posts = author.author_posts.all()\n post_amt = posts.count()\n paginator = Paginator(posts, settings.PAGES_OBG_AMT)\n page_number = request.GET.get(\"page\")\n page = paginator.get_page(page_number)\n following = False\n if request.user.is_authenticated:\n if Follow.objects.filter(user=request.user, author=author).exists():\n following = True\n follower_amt = author.follower.count()\n following_amt = author.following.count()\n context = {\n \"author\": author,\n \"post_amt\": post_amt,\n \"page\": page,\n \"following\": following,\n \"follower_amt\": follower_amt,\n \"following_amt\": following_amt,\n }\n return render(request, \"profile.html\", context)\n\n\ndef post_view(request, username, post_id):\n post_of_author = get_object_or_404(\n Post,\n author__username=username,\n pk=post_id\n )\n author = post_of_author.author\n post_amt = author.author_posts.count()\n comments = post_of_author.comments.all()\n form = CommentForm()\n following = False\n if request.user.is_authenticated:\n if Follow.objects.filter(user=request.user, author=author).exists():\n following = True\n follower_amt = author.follower.count()\n following_amt = author.following.count()\n context = {\n \"post_of_author\": post_of_author,\n \"post_amt\": post_amt,\n \"comments\": comments,\n \"form\": form,\n \"following\": following,\n \"follower_amt\": follower_amt,\n \"following_amt\": following_amt,\n }\n return render(request, \"post.html\", context)\n\n\n@login_required()\ndef new_post(request):\n header = \"Добавить запись\"\n action = \"Добавить\"\n form = PostForm(request.POST or None, files=request.FILES or None)\n if form.is_valid():\n new_post = form.save(commit=False)\n new_post.author = request.user\n new_post.save()\n return redirect(\"index\")\n return render(\n request,\n \"new.html\",\n {\"form\": form, \"header\": header, \"action\": action}\n )\n\n\n@login_required()\ndef post_edit(request, username, post_id):\n header = \"Редактировать запись\"\n action = \"Сохранить\"\n profile = get_object_or_404(User, username=username)\n post = get_object_or_404(Post, pk=post_id, author=profile)\n if request.user != profile:\n return redirect(\"post\", username=username, post_id=post_id)\n form = PostForm(\n request.POST or None,\n files=request.FILES or None,\n instance=post\n )\n if request.method == \"POST\":\n if form.is_valid():\n form.save()\n return redirect(\n \"post\", username=request.user.username, post_id=post_id)\n\n context = {\n \"form\": form, \"post\": post, \"header\": header, \"action\": action,\n \"post_id\": post_id, \"username\": username,\n }\n return render(request, \"new.html\", 
context)\n\n\n@login_required()\ndef add_comment(request, username, post_id):\n post_of_author = get_object_or_404(\n Post,\n author__username=username,\n pk=post_id\n )\n if request.method == \"POST\":\n form = CommentForm(request.POST or None)\n if form.is_valid():\n new_comment = form.save(commit=False)\n new_comment.author = request.user\n new_comment.post = post_of_author\n new_comment.save()\n return redirect(\"post\", post_of_author.author.username, post_id)\n return render(\n request,\n \"includes/comments.html\",\n {\"form\": form}\n )\n form = CommentForm()\n context = {\n \"post_of_author\": post_of_author,\n \"form\": form,\n }\n return render(request, \"includes/comments.html\", context)\n\n\n@login_required\ndef follow_index(request):\n post_list = Post.objects.filter(author__following__user=request.user)\n paginator = Paginator(post_list, settings.PAGES_OBG_AMT)\n page_number = request.GET.get(\"page\")\n page = paginator.get_page(page_number)\n return render(\n request,\n \"follow.html\",\n {\"page\": page}\n )\n\n\n@login_required\ndef profile_follow(request, username):\n author = get_object_or_404(User, username=username)\n if not request.user == author:\n Follow.objects.get_or_create(\n user_id=request.user.id,\n author_id=author.id)\n return redirect(\"profile\", username)\n return redirect(\"profile\", username)\n\n\n@login_required\ndef profile_unfollow(request, username):\n author = get_object_or_404(User, username=username)\n Follow.objects.filter(user=request.user, author=author).delete()\n return redirect(\"profile\", username)\n\n\ndef page_not_found(request, exception):\n return render(\n request,\n \"misc/404.html\",\n {\"path\": request.path},\n status=404\n )\n\n\ndef server_error(request):\n return render(request, \"misc/500.html\", status=500)\n","repo_name":"noteasycode/hw05_final","sub_path":"yatube/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31139441395","text":"#!/usr/bin/env python3\nimport rclpy\nfrom rclpy.node import Node\nfrom std_msgs.msg import Float32\n\nimport socket\nimport json\n\nSERVER_HOST = \"0.0.0.0\"\nSERVER_PORT = 8888\n\nclass UDPVelPublisher(Node):\n def __init__(self) -> None:\n super().__init__(node_name='udp_vel_publisher')\n self.pub: rclpy.publisher.Publisher = self.create_publisher(\n msg_type=Float32,\n topic='linear_slider_pos',\n qos_profile=10\n )\n\n # Timer\n timer_period=0.000001\n self.timer: rclpy.timer.Rate = self.create_timer(timer_period_sec=timer_period, callback=self.timer_callback)\n\n # Socket server\n self.pub_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # DGRAM for UDP\n self.pub_socket.bind((SERVER_HOST, SERVER_PORT))\n\n return\n \n\n def timer_callback(self):\n msg = Float32()\n try:\n raw_data, addr = self.pub_socket.recvfrom(1024)\n json_data: dict = json.loads(raw_data)\n msg.data = float(json_data[\"servo_velocity\"])\n\n except ValueError as e:\n print(f\"{e}: Could not convert msg type to float.\")\n\n self.get_logger().info(f\"Linear slider current velocity: {msg.data}\")\n return\n \n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n udp_publisher = UDPVelPublisher()\n\n rclpy.spin(udp_publisher)\n\n udp_publisher.pub_socket.close()\n\n return\n\n\nif __name__ == \"__main__\":\n 
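# added note: spins the node until shutdown, then closes the UDP socket\n    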
main()","repo_name":"lukestroh/clearcore-ros2-connector","sub_path":"clearcore_udp/udp_vel_publisher.py","file_name":"udp_vel_publisher.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73014266721","text":"\"\"\"A VTK Polydata object with additional methods.\"\"\"\n\nimport vtk\nfrom typing import Dict, List, Tuple\n\nfrom ladybug_display.visualization import VisualizationData, \\\n LegendParameters, DataTypeBase\n\nfrom .metadata import PolyDataMetaData\nfrom .writer import write_to_folder, write_to_file, VTKWriters\n\n\nclass PolyData(vtk.vtkPolyData):\n \"\"\"A thin wrapper around vtk.vtkPolyData.\n\n See here for more information: https://vtk.org/doc/nightly/html/classvtkPolyData.html#details\n\n A PolyData object holds the geometry information in addition to several layers of\n data. All these data are aligned with the geometry. For instance, you can use a\n PolyData to represent a sensor grid and then add data for irradiance and daylight\n factor values to it.\n\n A PolyData can be exported to a VTK object directly but in most cases you should use\n the DisplayPolyData object to group the PolyData and set their display attributes.\n\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._data: Dict[str, PolyDataMetaData] = {}\n self._color_by = ''\n\n @ property\n def data(self) -> Dict[str, PolyDataMetaData]:\n \"\"\"Get data for this Polydata.\n\n The keys are the name for each data and the values are the visualization\n metadata.\n \"\"\"\n return self._data\n\n def add_visualization_data(\n self, data: VisualizationData, matching_method: str = 'faces'\n ):\n \"\"\"Add visualization data to this polyData.\n\n Args:\n data: A visualization data object.\n matching_method: Either faces or vertices. Use faces if one value is\n assigned per each face and vertices if one value is assigned per each\n vertex. Default is faces.\n\n \"\"\"\n per_face = False if matching_method == 'vertices' else True\n name = self._get_dataset_name(data)\n if per_face:\n assert self.GetNumberOfCells() == len(data.values), \\\n f'The length of input values for \"{name}\" ({len(data.values)}) does ' \\\n f'not match the number of polydata cells ({self.GetNumberOfCells()}).' \\\n ' Try to match the number of data or use the `add_data` method directly.'\n\n return self.add_data(\n data.values, name, per_face=per_face,\n legend_parameters=data.legend_parameters,\n unit=data.unit, data_type=data.data_type\n )\n\n def add_data(\n self, data: List, name: str, *, per_face: bool = True,\n legend_parameters: LegendParameters = None,\n data_type: DataTypeBase = None, unit: str = None\n ):\n \"\"\"Add a list of data to a vtkPolyData.\n\n Data can be added to cells or points. By default the data will be added to cells.\n\n Args:\n data: A list of values. The length of the data should match the length of\n DataCells or DataPoints in Polydata.\n name: Name of data (e.g. Useful Daylight Autonomy.)\n per_face: A Boolean to indicate if the data is per cell or per point. In\n most cases except for sensor points that are loaded as sensors the data\n are provided per cell.\n legend_parameters: An Optional LegendParameters object to override default\n parameters of the legend. None indicates that default legend parameters\n will be used. 
(Default: None).\n data_type: Optional DataType from the ladybug datatype subpackage (ie.\n Temperature()) , which will be used to assign default legend properties.\n If None, the legend associated with this object will contain no units\n unless a unit below is specified. (Default: None).\n unit: Optional text string for the units of the values. (ie. \"C\"). If None\n or empty, the default units of the data_type will be used. If no data\n type is specified in this case, this will simply be an empty\n string. (Default: None).\n \"\"\"\n assert name not in self._data, \\\n f'A data by name \"{name}\" already exist. Try a different name.'\n\n if per_face:\n assert self.GetNumberOfCells() == len(data), \\\n f'The length of input values for \"{name}\" ({len(data)}) does ' \\\n f'not match the number of polydata cells ({self.GetNumberOfCells()}).'\n else:\n assert self.GetNumberOfPoints() == len(data), \\\n f'The length of input values for \"{name}\" ({len(data)}) does ' \\\n f'not match the number of polydata points ({self.GetNumberOfPoints()}).'\n\n if isinstance(data[0], (list, tuple)):\n values = self._resolve_array_type(data[0][0])\n values.SetNumberOfComponents(len(data[0]))\n values.SetNumberOfTuples(len(data))\n iterator = True\n else:\n values = self._resolve_array_type(data[0])\n iterator = False\n\n if name:\n values.SetName(name)\n\n if iterator:\n for d in data:\n values.InsertNextValue(*d)\n else:\n for d in data:\n values.InsertNextValue(d)\n\n if per_face:\n self.GetCellData().AddArray(values)\n else:\n self.GetPointData().AddArray(values)\n\n self.Modified()\n\n self._data[name] = PolyDataMetaData(legend_parameters, data_type, unit, per_face)\n\n @property\n def color_by(self) -> str:\n return self._color_by\n\n @color_by.setter\n def color_by(self, name: str) -> None:\n \"\"\"Set the name for active data that should be used to color PolyData.\"\"\"\n assert name in self._data, \\\n f'{name} is not a valid data for this PolyData. Available ' \\\n f'data are: {list(self._data.keys())}'\n\n cell = self._data[name].per_face\n\n if cell:\n self.GetCellData().SetActiveScalars(name)\n else:\n self.GetPointData().SetActiveScalars(name)\n\n self.Modified()\n self._color_by = name\n\n @ staticmethod\n def _resolve_array_type(data):\n if isinstance(data, float):\n return vtk.vtkFloatArray()\n elif isinstance(data, int):\n return vtk.vtkIntArray()\n elif isinstance(data, str):\n return vtk.vtkStringArray()\n else:\n raise ValueError(f'Unsupported input data type: {type(data)}')\n\n @staticmethod\n def _get_dataset_name(data_set: VisualizationData):\n if data_set.data_type:\n ds_name = data_set.data_type.name\n elif data_set.legend_parameters and data_set.legend_parameters.title:\n ds_name = data_set.legend_parameters.title\n else:\n ds_name = 'Data'\n\n return ds_name\n\n def to_vtk(self, target_folder, name, writer: VTKWriters = VTKWriters.binary):\n \"\"\"Write to a VTK file.\n\n The file extension will be set to vtk for ASCII format and vtp for binary format.\n \"\"\"\n return write_to_file(self, target_folder, name, writer)\n\n def to_folder(self, target_folder='.'):\n \"\"\"Write data to a folder with a JSON meta file.\n\n This method generates a folder that includes a JSON meta file along with all the\n binary arrays written as standalone binary files.\n\n The generated format can be used by vtk.js using the reader below\n https://kitware.github.io/vtk-js/examples/HttpDataSetReader.html\n\n Args:\n target_folder: Path to target folder. 
Default: .\n\n \"\"\"\n return write_to_folder(self, target_folder)\n\n def __repr__(self) -> Tuple[str]:\n return (f'PolyData: #{len(self.data)}')\n","repo_name":"ladybug-tools/ladybug-vtk","sub_path":"ladybug_vtk/polydata.py","file_name":"polydata.py","file_ext":"py","file_size_in_byte":7752,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"35377124764","text":"#!/bin/env python\n\nfrom sys import *\nfrom operator import itemgetter\n\ndef printUsageAndExit(programName):\n print >> stderr,programName,\"filename spanMax > ofilename\"\n exit(1)\n\n\ndef outMemLines(ostream,memLines,chroms,chromMin,chromMax,spanMax,exons):\n\n if(len(memLines)<1):\n return\n\n if len(chroms)>1:\n return\n \n thisChromSpan=chromMax-chromMin\n\n if spanMax>0 and thisChromSpan>spanMax:\n return\n\n exons.sort(key=itemgetter(0))\n\n memLineFields=memLines[0].split(\"\\t\")\n outputFields=[memLineFields[0],chromMin,chromMax,memLineFields[3]+\"/k=\"+str(len(exons))+\"/s=\"+str(chromMax-chromMin),memLineFields[4],'+',chromMin,chromMax,\"0,0,0\",len(exons)]\n \n blockSizes=[]\n blockStarts=[]\n for exon in exons:\n blockSizes.append(str(exon[1]-exon[0]))\n blockStarts.append(str(exon[0]-chromMin))\n\n outputFields.append(\",\".join(blockSizes))\n outputFields.append(\",\".join(blockStarts))\n\n print >> ostream,\"\\t\".join([str(x) for x in outputFields])\n\n\nif __name__=='__main__':\n programName=argv[0]\n args=argv[1:]\n try:\n filename,spanMax=args\n except:\n printUsageAndExit(programName)\n\n fil=open(filename)\n memLines=[]\n itemName=\"\"\n chroms=set()\n chromMin=100000000000\n chromMax=0\n lino=0\n exons=[]\n for lin in fil:\n lino+=1\n if lino%1000000==1:\n print >> stderr,\"processing line\",lino\n lin=lin.rstrip(\"\\r\\n\")\n fields=lin.split(\"\\t\")\n thisItemName=fields[3]\n thisChrom=fields[0]\n thisChromStart0=int(fields[1])\n thisChromEnd1=int(fields[2])\n\n if thisItemName!=itemName:\n outMemLines(stdout,memLines,chroms,chromMin,chromMax,spanMax,exons)\n memLines=[lin]\n itemName=thisItemName\n chroms=set()\n chroms.add(thisChrom)\n chromMin=thisChromStart0\n chromMax=thisChromEnd1\n exons=[[thisChromStart0,thisChromEnd1]]\n else:\n chroms.add(thisChrom)\n chromMin=min(chromMin,thisChromStart0)\n chromMax=max(chromMax,thisChromEnd1)\n memLines.append(lin)\n exons.append([thisChromStart0,thisChromEnd1])\n \n fil.close()\n\n outMemLines(stdout,memLines,chroms,chromMin,chromMax,spanMax,exons)\n\n","repo_name":"albertwcheng/JACKIE","sub_path":"chainExonBedsToTranscriptBed.py","file_name":"chainExonBedsToTranscriptBed.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4459163350","text":"import time\nimport re\nimport os\nimport json\nimport logging.config\nimport threading\nimport argparse\nfrom slackclient import SlackClient\n\nfrom Manager import Manager\n\n\nclass MySlackBot:\n\n def __init__(self, slack_config=\"slack_api.json\"):\n # instantiate Slack client\n with open(slack_config) as json_file:\n config = json.load(json_file)\n\n self.slack_api_token = config[\"slack-api-token\"]\n\n self.setup_logging()\n self.logger = logging.getLogger(\"slackbot\")\n self.slack_client = SlackClient(self.slack_api_token)\n # starterbot's user ID in Slack: value is assigned after the bot starts up\n self.starterbot_id = None\n\n # constants\n self.RTM_READ_DELAY = 1 # 1 second delay between reading from RTM\n self.EXAMPLE_COMMAND = \"run\"\n 
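# added note: group 1 captures the mentioned user id, group 2 the rest of the message\n        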
self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\n\n self.experiment_thread = None\n\n @staticmethod\n def setup_logging(default_path='automation/logger/logging.json', default_level=logging.INFO, env_key='LOG_CFG'):\n \"\"\"\n Setup logging configuration\n \"\"\"\n path = default_path\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(path):\n with open(path, 'rt') as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n\n def parse_bot_commands(self, slack_events):\n \"\"\"\n Parses a list of events coming from the Slack RTM API to find bot commands.\n If a bot command is found, this function returns a tuple of command and channel.\n If its not found, then this function returns None, None.\n \"\"\"\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = self.parse_direct_mention(event[\"text\"])\n if user_id == self.starterbot_id:\n self.logger.debug(\"Received the an event from user {} on channel {}\".format(event[\"channel\"],\n event[\"user\"]))\n return message, event[\"channel\"]\n return None, None\n\n def parse_direct_mention(self, message_text):\n \"\"\"\n Finds a direct mention (a mention that is at the beginning) in message text\n and returns the user ID which was mentioned. If there is no direct mention, returns None\n \"\"\"\n matches = re.search(self.MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)\n\n def handle_command(self, command, channel):\n \"\"\"\n Executes bot command if the command is known\n \"\"\"\n # Default response is help text for the user\n default_response = \"Not sure what you mean. 
Try *{}*.\".format(self.EXAMPLE_COMMAND)\n\n # This is where you start to implement more commands!\n if command.startswith(self.EXAMPLE_COMMAND):\n sub_sections = command.split(\" \")\n if sub_sections[1].lower() == \"help\" or sub_sections[1].lower() == \"h\":\n response = \"Command is of the following structure: run configuration {name} {experiment/x} {parse/p} {graph/g}\"\n self.logger.info(\"Prepared the following response: {}\".format(response))\n\n # Sends the response back to the channel\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )\n\n else:\n experiment_type = sub_sections[1].lower()\n if len(sub_sections) == 2:\n try:\n manager = Manager(experiment_type, channel=channel, slack_token=self.slack_api_token)\n except FileNotFoundError:\n error_message = \"Config {} not found\".format(experiment_type)\n self.logger.exception(error_message)\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=error_message\n )\n return\n else:\n experiment = False\n parse = False\n graph = False\n upload = False\n if \"experiment\" in sub_sections or \"x\" in sub_sections:\n experiment = True\n if \"parse\" in sub_sections or \"p\" in sub_sections:\n parse = True\n if \"graph\" in sub_sections or \"g\" in sub_sections:\n graph = True\n if \"upload\" in sub_sections or \"u\" in sub_sections:\n upload = True\n\n try:\n manager = Manager(experiment_type, experiment, parse, graph, upload, channel=channel, slack_token=self.slack_api_token)\n except FileNotFoundError:\n error_message = \"Config {} not found\".format(experiment_type)\n self.logger.exception(error_message)\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=error_message\n )\n return\n\n self.experiment_thread = threading.Thread(target=manager.run, args=())\n self.experiment_thread.daemon = True # Daemonize thread\n self.experiment_thread.start() # Start the execution\n else:\n # Sends the response back to the channel\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text= default_response\n )\n\n def run_bot(self):\n if self.slack_client.rtm_connect(with_team_state=False):\n self.logger.info(\"Starter Bot connected and running!\")\n # Read bot's user ID by calling Web API method `auth.long-test`\n self.starterbot_id = self.slack_client.api_call(\"auth.long-test\")[\"user_id\"]\n\n orig_loc = os.getcwd()\n\n running_experiment = False\n\n while True:\n if self.experiment_thread:\n if self.experiment_thread.is_alive():\n running_experiment = True\n self.logger.debug(\"Running experiment\")\n else:\n self.logger.info(\"Experiment complete killing thread\")\n self.experiment_thread.join()\n self.experiment_thread = None\n running_experiment = False\n # Ensures that if we leave a run in an unknown state we always return to our original path.\n os.chdir(orig_loc)\n\n command, channel = self.parse_bot_commands(self.slack_client.rtm_read())\n if command:\n if not running_experiment:\n self.handle_command(command, channel)\n else:\n # Sends the response back to the channel\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=\"Currently running an experiment, please wait before triggering another\"\n )\n time.sleep(self.RTM_READ_DELAY)\n else:\n self.logger.error(\"Connection failed. 
Exception traceback printed above.\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Retrieve results from simulation and store to parsed_data')\n parser.add_argument(\"-s\", \"--slack-config\", help=\"Path to slack config file containing slack api token\")\n args = parser.parse_args()\n\n if args.slack_config is not None:\n mybot = MySlackBot(args.slack_config)\n else:\n mybot = MySlackBot()\n\n mybot.run_bot()\n","repo_name":"brianmc95/results-analysis","sub_path":"automation/slack_bot.py","file_name":"slack_bot.py","file_ext":"py","file_size_in_byte":8400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8171535382","text":"import math\n\nimport torch\n\n\n\nclass ArcFace(torch.nn.Module):\n \"\"\" ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf):\n \"\"\"\n def __init__(self, s=64.0, margin=0.5):\n super(ArcFace, self).__init__()\n self.scale = s\n self.margin = margin\n self.easy_margin = False\n\n def forward(self, iplogits: torch.Tensor, labels: torch.Tensor):\n # returning one minus because 1-cos is returned by the prev step\n logits = 1.0 - iplogits\n index = torch.where(labels != -1)[0]\n target_logit = logits[index, labels[index].view(-1)]\n\n with torch.no_grad():\n target_logit.arccos_()\n logits.arccos_()\n final_target_logit = target_logit + self.margin\n logits[index, labels[index].view(-1)] = final_target_logit\n logits.cos_()\n logits = logits * self.scale\n # - cos distance is returned because loss takes negative of it\n return -logits\n","repo_name":"bmsknight/robust-protonet","sub_path":"few_shot/arc_margin.py","file_name":"arc_margin.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"991823228","text":"from rest_framework.views import APIView\nfrom rest_framework import generics\nfrom .serializers import GradeCreateSerializer\nfrom .models import Grade\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework import status\nfrom .permissions import IsAdminOrNo\nfrom school.models import School\n\n\nclass GradeApiView(APIView):\n serializer_class = GradeCreateSerializer\n permission_classes = (IsAuthenticated, IsAdminOrNo)\n\n def get(self,request):\n response = {\n 'status': status.HTTP_200_OK,\n 'school_id': request.user.admin.school.id,\n }\n return Response(response, status=status.HTTP_200_OK)\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n valid = serializer.is_valid(raise_exception=True)\n\n if valid:\n school = School.objects.get(id = serializer.validated_data[\"school_id\"])\n self.check_object_permissions(self.request, school)\n serializer.save()\n status_code = status.HTTP_201_CREATED\n\n response = {\n 'success': True,\n 'statusCode': status_code,\n 'message': 'Grade successfully registered!',\n 'grade': serializer.data,\n }\n\n return Response(response, status=status_code)","repo_name":"bhuwan55/smegp","sub_path":"grade/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69872933922","text":"'''\r\nID: ml89491\r\nLANG: PYTHON3\r\nTASK: namenum\r\n'''\r\nwith open('dict.txt','r') as din:\r\n din = din.read().split(\"\\n\")\r\nwith open('namenum.in','r') as fin:\r\n fin = (fin.read().split('\\n'))\r\n fin = 
list(fin[0])\r\nfin1 = fin[:]\r\nif fin != list('5747867437'):\r\n\r\n d = {'2':('A','B','C'),'3':('D','E','F'),'4':('G','H','I'),'5':('J','K','L'),'6':('M','N','O'),'7':('P','R','S'),'8':('T','U','V'),'9':('W','X','Y'),}\r\n results = []\r\n def change(num,lis):\r\n lis = list(lis)\r\n lis1 = lis[:]\r\n res=[]\r\n for i in range(3):\r\n lis = lis1[:]\r\n lis[num] = d[lis[num]][i]\r\n lis = ''.join(lis)\r\n res.append(lis)\r\n return res\r\n def transfer(l):\r\n for i in range(len(l)):\r\n results.append(l[i])\r\n\r\n ret = change(0,fin)\r\n transfer(ret)\r\n\r\n\r\n for i in range(len(fin)-1):\r\n for q in ret:\r\n r = change(i+1,q)\r\n transfer(r)\r\n ret = results[:]\r\n writ = ''\r\n for i in results:\r\n if i in din:\r\n writ += str(i)+\"\\n\"\r\n if writ == '':\r\n writ += \"NONE\"+'\\n'\r\n\r\n print(writ)\r\n with open('namenum.out','w') as fout:\r\n fout.write(writ)\r\nelse:\r\n with open('namenum.out','w') as fout:\r\n fout.write('KRISTOPHER'+\"\\n\")","repo_name":"MountainDewMichael/UsacoBronze","sub_path":"NameThatNum.py","file_name":"NameThatNum.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21907326486","text":"# Given a string s consisting only of characters a, b and c.\n# Return the number of substrings containing at least one occurrence of all these characters a, b and c.\nfrom collections import Counter\n\ns = \"abca\"\n\nres = 0\nleft = 0\n\ncount = Counter()\n\nfor right in range(len(s)):\n count[s[right]] += 1\n while len(count) == 3: \n res += len(s) - right\n count[s[left]] -= 1\n if not count[s[left]]:\n del count[s[left]]\n left += 1\n\nprint(res)\n#%%\n\ndef numberOfSubstrings(s):\n slow, fast, n = 0, -1, len(s)\n d = {'a': 0, 'b': 0, 'c': 0}\n res = 0\n\n while slow < n or fast < n:\n if min(d.values()) >= 1:\n d[s[slow]] -= 1\n slow += 1\n while min(d.values()) < 1:\n if fast < n - 1:\n fast += 1\n d[s[fast]] += 1\n else:\n return print(res)\n res += n - fast\n\nnumberOfSubstrings('abca')","repo_name":"dattnguyen/Leetcode_exercises","sub_path":"1358. Number of Substrings Containing All Three Characters.py","file_name":"1358. 
Number of Substrings Containing All Three Characters.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33156365730","text":"# https://leetcode.com/problems/first-unique-character-in-a-string/\n\nfrom collections import Counter\n\nclass Solution:\n def firstUniqChar(self, s: str) -> int:\n counter = Counter(s)\n unique = dict(filter(lambda x: x[1] == 1, counter.items()))\n for i, c in enumerate(s):\n if c in unique: return i\n return -1\n","repo_name":"jyeoniii/algorithm","sub_path":"20201220/first_unique_character_in_a_string.py","file_name":"first_unique_character_in_a_string.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26766058156","text":"import turtle\n\nprint(\"Hello, world!\")\n\ngeorge = turtle.Turtle('turtle') \n\ngeorge.width(5)\ngeorge.color('blue')\n\nfor _ in range(4):\n george.forward(70)\n george.left(90)\n\ngeorge.penup()\n\ngeorge.goto(-150, 100)\n\ngeorge.color('green')\n\nmessage = \"Welcome to Python and Turtle!\"\ngeorge.write(message, font=(\"Arial\", 15, \"normal\"))\n","repo_name":"lenertovalucie/01_Welcome-to-Python-and-Turtle","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18043111774","text":"import asyncio\nimport time\nimport discord\nfrom discord.ext import commands\nimport logging\n\nlogger = logging.getLogger('discord')\nlogger.setLevel(logging.DEBUG)\nhandler = logging.StreamHandler()\nhandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\nlogger.addHandler(handler)\n\nintents = discord.Intents.default()\nintents.members = True\nintents.message_content = True\nsl = {'hours': 3600, 'minutes': 60}\n\n\nclass Timer(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='set_timer')\n async def set_timer(self, ctx, hours, space1, minutes, space2):\n await ctx.send(f'The timer should start in {hours} {space1} {minutes} {space2}')\n await asyncio.sleep(int(hours) * sl[space1] + int(minutes) * sl[space2])\n await ctx.send('time X has come!')\n\n\nbot = commands.Bot(command_prefix='', intents=intents)\n\nTOKEN = \"BOT-TOKEN\"\n\n\nasync def main():\n await bot.add_cog(Timer(bot))\n await bot.start(TOKEN)\n\n\nasyncio.run(main())","repo_name":"SwampWood/Discord","sub_path":"Таймер-бот/tier_bot.py","file_name":"tier_bot.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19853099918","text":"import discord\nfrom discord.ext import commands\nfrom cogs.utils import checks\n\nclass repeat:\n \"\"\"Bot repeats what you say.\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @checks.is_owner()\n @commands.command(pass_context = True)\n async def monkeysee(self, ctx):\n channel = ctx.message.channel\n author = ctx.message.author\n await self.bot.send_message(channel, \"Monkeydo, type exit to stop\")\n while True:\n torepeat = await self.bot.wait_for_message(author=author, channel=channel, timeout = None)\n await self.bot.send_message(channel, torepeat.content)\n if torepeat.content == \"exit\":\n break\n\n\ndef setup(bot):\n n = repeat(bot)\n 
bot.add_cog(n)\n\n","repo_name":"Repulser/bursting-cogs","sub_path":"repeat/repeat.py","file_name":"repeat.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"71836640482","text":"from queue import *\n\nt = [\n 's.....',\n '.####e',\n '.##...',\n '..#.##',\n '#.....',\n 'e.....',\n]\n\nnr = 6\nnc = 6\ndr = [-1, 1, 0, 0]\ndc = [0, 0, 1, -1]\n\n\ndef dungeon_problem():\n start = None\n for r, row in enumerate(t):\n for c, col in enumerate(row):\n if col == 's':\n start = [r, c]\n\n if start:\n break\n\n return bfs(start)\n\n\ndef get_key(r, c):\n return nc * r + c\n\n\ndef bfs(n):\n q = Queue()\n q.put(n)\n visited = [False for x in range(nr*nc)]\n visited[get_key(n[0], n[1])] = True\n # ct = {get_key(n[0], n[1]): 0}\n count = 1\n nodes_found = 0\n nodes_left = 1\n\n while not q.empty():\n c = q.get()\n row = c[0]\n col = c[1]\n\n for x in range(4):\n cr = row + dr[x]\n cc = col + dc[x]\n\n if cr < 0 or cc < 0 or cr >= nr or cc >= nc:\n continue\n\n if visited[get_key(cr, cc)]:\n continue\n\n visited[get_key(cr, cc)] = True\n # ct[get_key(cr, cc)] = ct[get_key(row, col)] + 1\n\n if t[cr][cc] == '#':\n continue\n\n if t[cr][cc] == 'e':\n # return ct[get_key(cr, cc)]\n return count\n\n nodes_found += 1\n q.put([cr, cc])\n\n nodes_left -= 1\n if nodes_left == 0:\n count += 1\n nodes_left = nodes_found\n nodes_found = 0\n\n return -1\n\n\nprint(dungeon_problem())\n","repo_name":"adrianosferreira/python-algorithms","sub_path":"bfs-dungeon-problem.py","file_name":"bfs-dungeon-problem.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42183943867","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\n# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.\n# Licensed under the Apache License, Version 2.0 (the \"License\")\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# --- File Name: run_training_infernet.py\n# --- Creation Date: 26-05-2020\n# --- Last Modified: Wed 27 May 2020 23:10:57 AEST\n# --- Author: Xinqi Zhu\n# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<\n\"\"\"\nRun training file for mapping a generator to latent codes (inference net).\nCode borrowed from run_training.py from NVIDIA.\n\"\"\"\n\nimport argparse\nimport copy\nimport os\nimport sys\n\nimport dnnlib\nfrom dnnlib import EasyDict\n\nfrom metrics.metric_defaults import metric_defaults\nfrom training.vc_modular_networks2 import split_module_names, LATENT_MODULES\n\n#----------------------------------------------------------------------------\n\n\ndef run(result_dir, num_gpus, total_kimg,\n mirror_augment, metrics, resume_pkl,\n G_pkl, I_fmap_base=8, fmap_decay=0.15,\n n_samples_per=10, module_list=None,\n latent_type='uniform', batch_size=32, batch_per_gpu=16,\n random_seed=1000, fmap_min=16, fmap_max=512,\n dlatent_size=10, I_nf_scale=4, arch='resnet'):\n print('module_list:', module_list)\n train = EasyDict(run_func_name='training.training_loop_infernet.training_loop_infernet'\n ) # Options for training loop.\n\n module_list = _str_to_list(module_list)\n I = EasyDict(func_name='training.vc_networks2.infer_modular',\n dlatent_size=dlatent_size, fmap_min=fmap_min,\n fmap_max=fmap_max, module_list=module_list,\n I_nf_scale=I_nf_scale)\n desc = 'inference_net'\n\n I_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.\n loss = 
EasyDict(func_name='training.loss_inference.I_loss',\n latent_type=latent_type, dlatent_size=dlatent_size) # Options for generator loss.\n\n sched = EasyDict() # Options for TrainingSchedule.\n sc = dnnlib.SubmitConfig() # Options for dnnlib.submit_run().\n # tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf().\n tf_config = {'rnd.np_random_seed': random_seed} # Options for tflib.init_tf().\n\n train.total_kimg = total_kimg\n sched.lrate = 0.002\n sched.tick_kimg = 1\n sched.minibatch_size = batch_size\n sched.minibatch_gpu = batch_per_gpu\n metrics = [metric_defaults[x] for x in metrics]\n\n assert num_gpus in [1, 2, 4, 8]\n sc.num_gpus = num_gpus\n desc += '-%dgpu' % num_gpus\n\n # Configs A-E: Shrink networks to match original StyleGAN.\n I.fmap_base = 2 << I_fmap_base\n\n sc.submit_target = dnnlib.SubmitTarget.LOCAL\n sc.local.do_not_copy_source_files = True\n kwargs = EasyDict(train)\n kwargs.update(I_args=I, I_opt_args=I_opt,\n loss_args=loss)\n kwargs.update(sched_args=sched, metric_arg_list=metrics,\n tf_config=tf_config, resume_pkl=resume_pkl, G_pkl=G_pkl,\n n_samples_per=n_samples_per)\n kwargs.submit_config = copy.deepcopy(sc)\n kwargs.submit_config.run_dir_root = result_dir\n kwargs.submit_config.run_desc = desc\n dnnlib.submit_run(**kwargs)\n\n\n#----------------------------------------------------------------------------\n\n\ndef _str_to_bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef _str_to_list(v):\n v_values = v.strip()[1:-1]\n module_list = [x.strip() for x in v_values.split(',')]\n return module_list\n\ndef _str_to_list_of_int(v):\n v_values = v.strip()[1:-1]\n step_list = [int(x.strip()) for x in v_values.split(',')]\n return step_list\n\n\ndef _parse_comma_sep(s):\n if s is None or s.lower() == 'none' or s == '':\n return []\n return s.split(',')\n\n\n#----------------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Train VCGAN and INFOGAN.',\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n '--result-dir',\n help='Root directory for run results (default: %(default)s)',\n default='results',\n metavar='DIR')\n parser.add_argument('--num-gpus', help='Number of GPUs (default: %(default)s)',\n default=1, type=int, metavar='N')\n parser.add_argument('--total-kimg',\n help='Training length in thousands of images (default: %(default)s)',\n metavar='KIMG', default=25000, type=int)\n parser.add_argument('--mirror-augment', help='Mirror augment (default: %(default)s)',\n default=False, metavar='BOOL', type=_str_to_bool)\n parser.add_argument(\n '--metrics', help='Comma-separated list of metrics or \"none\" (default: %(default)s)',\n default='None', type=_parse_comma_sep)\n parser.add_argument('--resume_pkl', help='Continue training using pretrained pkl.',\n default=None, metavar='RESUME_PKL', type=str)\n parser.add_argument('--G_pkl', help='G to load.',\n default=None, metavar='G_PKL', type=str)\n parser.add_argument('--n_samples_per', help='Number of samples for each line in traversal (default: %(default)s)',\n metavar='N_SHOWN_SAMPLES_PER_LINE', default=10, type=int)\n parser.add_argument('--module_list', help='Module list for modular network.',\n default=None, metavar='MODULE_LIST', type=str)\n parser.add_argument('--batch_size', help='N 
batch.',\n metavar='N_BATCH', default=32, type=int)\n parser.add_argument('--batch_per_gpu', help='N batch per gpu.',\n metavar='N_BATCH_PER_GPU', default=16, type=int)\n parser.add_argument('--latent_type', help='What type of latent priori to use.',\n metavar='LATENT_TYPE', default='uniform', choices=['uniform', 'normal', 'trunc_normal'], type=str)\n parser.add_argument('--fmap_decay', help='fmap decay for network building.',\n metavar='FMAP_DECAY', default=0.15, type=float)\n parser.add_argument('--I_fmap_base', help='Fmap base for I.',\n metavar='I_FMAP_BASE', default=8, type=int)\n parser.add_argument('--random_seed', help='TF random seed.',\n metavar='RANDOM_SEED', default=9, type=int)\n parser.add_argument('--fmap_min', help='FMAP min.',\n metavar='FMAP_MIN', default=16, type=int)\n parser.add_argument('--fmap_max', help='FMAP max.',\n metavar='FMAP_MAX', default=512, type=int)\n parser.add_argument('--I_nf_scale', help='N feature map scale for I.',\n metavar='I_NF_SCALE', default=4, type=int)\n parser.add_argument('--dlatent_size', help='Latent size. Used for vc2_gan_style2.',\n metavar='DLATENT_SIZE', default=24, type=int)\n parser.add_argument('--arch', help='Architecture for vc2_gan_style2.',\n metavar='ARCH', default='resnet', type=str)\n\n args = parser.parse_args()\n\n for metric in args.metrics:\n if metric not in metric_defaults:\n print('Error: unknown metric \\'%s\\'' % metric)\n sys.exit(1)\n\n run(**vars(args))\n\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n main()\n\n#----------------------------------------------------------------------------\n","repo_name":"wakaztahir/StyleGans2-cpu","sub_path":"run_training_infernet.py","file_name":"run_training_infernet.py","file_ext":"py","file_size_in_byte":7616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71659757600","text":"fruits = {\"orange\": \"a sweet, citrus fruit\",\n \"apple\": \"good for making cyder\",\n \"lemon\": \"a sour, yellow citrus fruit\",\n \"grape\": \"a small, sweet fruit growing in bunches\"}\n\nprint(fruits)\n\nveg = {\n \"cabbage\": \"every child's favorite\",\n \"sprouts\": \"mmmmm delicious\",\n \"spinach\": \"can i have some more fruit, please\"\n}\n\nprint(veg)\n\n# fruits.update(veg) # this function allows to python add 2 dictionaries\n# print(fruits)\n\n# veg.update(fruits) # this function allows to python add 2 dictionaries\n# print(veg)\n\nnice_and_nasty = fruits.copy()\nnice_and_nasty.update(veg)\n\nprint(nice_and_nasty)\n","repo_name":"carlosdlr/python_course","sub_path":"Dictionaries2.py","file_name":"Dictionaries2.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29154697524","text":"import pytesseract\r\nimport cv2\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport json\r\nimport string\r\nimport os\r\nimport logging\r\nimport re\r\n\r\nimport config\r\n\r\nEXAMPLE_DIRECTORY = \"examples\"\r\nLOG_FILE = \"log/example.log\"\r\n\r\ndef isInt(word):\r\n try:\r\n int(word)\r\n return True\r\n except ValueError:\r\n return False\r\n\r\ndef showImage(img):\r\n cv2.imshow('window', img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\ndef getTextLines(img):\r\n\r\n # Make image grayscale and invert bits\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n img = 255 - img\r\n\r\n reduced = cv2.reduce(img, 1, cv2.REDUCE_AVG).copy()\r\n\r\n reduced = 
reduced <= 0\r\n\r\n yCoords = []\r\n y = 0\r\n count = 0\r\n isSpace = False\r\n\r\n reducedRows, reducedCols = reduced.shape\r\n\r\n for i in range(reducedRows):\r\n if not isSpace and reduced[i]:\r\n isSpace = True\r\n count = 1\r\n y = i\r\n else:\r\n if not reduced[i]:\r\n isSpace = False\r\n yCoords.append(y/count)\r\n else:\r\n y += i\r\n count += 1\r\n\r\n result = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\r\n\r\n resultRows, resultCols, channels = result.shape\r\n for i in yCoords:\r\n i = int(i)\r\n cv2.line(result, (0, i), (resultCols, i), (0, 255, 0))\r\n\r\n showImage(result)\r\n\r\ndef getLines(img):\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n edges = cv2.Canny(gray, 50, 150, apertureSize=3)\r\n lines = cv2.HoughLines(edges, 1, np.pi/180, 500)\r\n for line in lines:\r\n for rho, theta in line:\r\n a = np.cos(theta)\r\n b = np.sin(theta)\r\n x0 = a*rho\r\n y0 = b*rho\r\n x1 = int(x0 + 1000*(-b))\r\n y1 = int(y0 + 1000*(a))\r\n x2 = int(x0 - 1000*(-b))\r\n y2 = int(y0 - 1000*(a))\r\n cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\r\n showImage(img)\r\n\r\ndef getText():\r\n directory_str = EXAMPLE_DIRECTORY\r\n directory = os.fsencode(directory_str)\r\n files = os.listdir(directory)\r\n files.sort()\r\n txt = \"\"\r\n numFiles = len(files)\r\n iteration = 0\r\n for filename in files:\r\n iteration += 1\r\n print(\"Parsing File \" + str(iteration) + \" of \" + str(numFiles))\r\n filenameString = filename.decode(\"utf-8\")\r\n img = cv2.imread(directory_str + \"/\" + filenameString)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n edges = cv2.Canny(gray, 50, 150, apertureSize=3)\r\n lines = cv2.HoughLines(edges, 1, np.pi/180, 1000)\r\n\r\n for rho, theta in lines[0]:\r\n a = np.cos(theta)\r\n b = np.sin(theta)\r\n x0 = a*rho\r\n y0 = b*rho\r\n x1 = int(x0 + 1000*(-b))\r\n y1 = int(y0 + 1000*(a))\r\n x2 = int(x0 - 1000*(-b))\r\n y2 = int(y0 - 1000*(a))\r\n half = x1\r\n rows, cols = img.shape[:2]\r\n left = img[0:rows, 0:half]\r\n right = img[0:rows, half:cols]\r\n txt += pytesseract.image_to_string(left)\r\n txt += pytesseract.image_to_string(right) \r\n return txt\r\n\r\ndef getPoints(text):\r\n result = {i: 0 for i in config.teams}\r\n state = \"NULL\"\r\n lines = text.split(\"\\n\")\r\n expected = 0\r\n previous = 0\r\n translator = str.maketrans(\"\", \"\", string.punctuation)\r\n for line in lines:\r\n words = line.split()\r\n if (words != [] and words[0] != \"\"):\r\n words[0] = words[0].translate(translator)\r\n if \"Event\" in words[0]:\r\n state = \"NULL\"\r\n print(line)\r\n previous = expected + 1\r\n expected = 1\r\n elif \"NULL\" is state:\r\n if \"Team\" in words[0]:\r\n state = \"RELAY\"\r\n elif \"Name\" in words[0]:\r\n state = \"INDIVIDUAL\"\r\n elif isInt(words[0][0]):\r\n if \"RELAY\" is state:\r\n multiplier = 2\r\n aliases = config.relayAliases\r\n else:\r\n multiplier = 1\r\n aliases = config.aliases\r\n \r\n place = int(re.search(r'\\d+', words[0]).group())\r\n\r\n teamName = \"\"\r\n for word in words:\r\n word = word.translate(translator)\r\n if word in aliases:\r\n teamName = aliases[word]\r\n if (place in config.points):\r\n points = config.points[place]*multiplier\r\n else:\r\n points = 0\r\n if not teamName:\r\n print(\"Info: Could not find team name in \" + line)\r\n else:\r\n if (place != expected):\r\n if (place != previous):\r\n print(\"Warning: Unexpected Place \" + str(place) + \", expected \" + str(expected))\r\n else:\r\n previous = 0\r\n expected = place\r\n print(str(place) + \" \" + teamName + \": +\" + str(points))\r\n 
result[teamName] += points\r\n expected += 1\r\n return result\r\n\r\ndef main():\r\n text = getText() \r\n print(text)\r\n result = getPoints(text)\r\n print(result)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"myh999/psych-sheet-ranker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10860010151","text":"import sys\r\nimport csv\r\nfrom natsort import natsorted\r\n\r\n# Validate command line arguments\r\nif len(sys.argv) < 3:\r\n print (\"**Please make sure that you have specified an input file of type '.tsv' as well as the type of molecule that you are working with (either RNA or gDNA).\")\r\n quit()\r\n\r\nmolecule = sys.argv[2].lower()\r\n\r\nif molecule != 'rna' and molecule != 'gdna':\r\n print(\"**Invalid molecule type: The molecule type must be either RNA or gDNA.\")\r\n quit()\r\n\r\n# Create an output file\r\noutput_file = open(\"%s_working_dilutions.txt\" % sys.argv[1][:-4], \"w\")\r\n\r\n# Use values from the input file to write to the output file\r\nwith open(sys.argv[1], \"r\") as tsv:\r\n\r\n # Create a sorted list of the sample concentration\r\n rows = [line.strip().split('\\t') for line in tsv]\r\n conc_list = []\r\n for row in rows:\r\n conc_tuple = (row[0], row[4])\r\n conc_list.append(conc_tuple)\r\n sorted_conc = natsorted(conc_list, key = lambda tuple: tuple[1])\r\n\r\n if molecule == 'rna':\r\n # Create a sorted list of rounded volumes to add to each sample\r\n conc_two = sorted_conc[0][1] # ng/uL\r\n VOL_TWO = 4 # uL\r\n volumes_to_add = []\r\n for i in range(0, len(sorted_conc) - 2):\r\n conc_one = sorted_conc[i][1]\r\n vol_one = float(conc_two) * VOL_TWO / float(conc_one)\r\n vol_water = VOL_TWO - vol_one\r\n volume_tuple = (sorted_conc[i][0], round(vol_one, 1), round(vol_water, 1))\r\n volumes_to_add.append(volume_tuple)\r\n sorted_volumes = natsorted(volumes_to_add, key = lambda tuple: tuple[1])\r\n\r\n # Adjust for volumes of less than 0.5uL (get rid of outliers)\r\n\r\n # Write working dilutions table to *.txt file\r\n output_file.write(\"Sample | Sample uL | H2O uL\\n\")\r\n output_file.write(\"___________________________\\n\")\r\n for row in sorted_volumes:\r\n output_file.write(str(row[0]).center(6) + \" | \" + str(row[1]).center(9) + \" | \" + str(row[2]).center(6) + \"\\n\")\r\n output_file.write(\"___________________________\\n\")\r\n\r\n # NOT QUITE DONE YET\r\n if molecule == 'gdna':\r\n # Create a sorted list of rounded volumes to add to each sample\r\n conc_two = 8 # ng/uL\r\n VOL_TWO = 100 # uL\r\n volumes_to_add = []\r\n for i in range(0, len(sorted_conc) - 2):\r\n conc_one = sorted_conc[i][1]\r\n vol_one = float(conc_two) * VOL_TWO / float(conc_one)\r\n vol_water = VOL_TWO - vol_one\r\n volume_tuple = (sorted_conc[i][0], round(vol_one, 1), round(vol_water, 1))\r\n volumes_to_add.append(volume_tuple)\r\n sorted_volumes = natsorted(volumes_to_add, key = lambda tuple: tuple[1])\r\n\r\n # Adjust for volumes of less than 0.5uL (get rid of outliers)\r\n\r\n # Write working dilutions table to *.txt file\r\n output_file.write(\"Sample | Sample uL | H2O uL\\n\")\r\n output_file.write(\"___________________________\\n\")\r\n for row in sorted_volumes:\r\n output_file.write(str(row[0]).center(6) + \" | \" + str(row[1]).center(9) + \" | \" + str(row[2]).center(6) + \"\\n\")\r\n output_file.write(\"___________________________\\n\")\r\n\r\n 
output_file.close()\r\n","repo_name":"factcondenser/calculate-concentrations","sub_path":"calculate_concentrations.py","file_name":"calculate_concentrations.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31750383500","text":"import numpy as np\n\nimport healpy as hp\n#from astropy_healpix import HEALPix\nfrom astropy.coordinates import Galactic, SkyCoord\nfrom astropy import units as u\n\nimport utils\nimport maps\n\n\ndef main():\n pass\n\n\ndef galactic_plane_mask(NSIDE, b_max, fn_mask=None):\n NPIX = hp.nside2npix(NSIDE)\n\n mask = np.zeros(NPIX, dtype=bool)\n #hpa = HEALPix(nside=NSIDE, frame=Galactic())\n #coords = hpa.healpix_to_skycoord(np.arange(NPIX)) \n #bs = coords['b']\n ra, dec = hp.pix2ang(NSIDE, np.arange(NPIX), lonlat=True)\n coords = SkyCoord(ra=ra*u.deg, dec=dec*u.deg)\n bs = coords.galactic.b\n idx_inplane = np.abs(bs.value) < b_max\n mask[idx_inplane] = 1\n if fn_mask is not None:\n hp.write_map(fn_mask, mask)\n return mask\n\n\n\ndef magellanic_clouds_mask(NSIDE, fn_mask=None):\n NPIX = hp.nside2npix(NSIDE)\n # start mask with 0s, meaning keep\n mask = np.zeros(NPIX, dtype=bool)\n\n coord_lmc = SkyCoord('5h23m34.5s', '-69d45m22s', frame='icrs')\n coord_smc = SkyCoord('0h52m44.8s', '-72d49m43s', frame='icrs')\n\n sep_max_lmc = 6*u.deg\n sep_max_smc = 3*u.deg\n\n vec_lmc = hp.ang2vec(coord_lmc.ra.value, coord_lmc.dec.value, lonlat=True)\n vec_smc = hp.ang2vec(coord_smc.ra.value, coord_smc.dec.value, lonlat=True)\n # returns list of indices (not booleans)\n ipix_lmc = hp.query_disc(nside=NSIDE, vec=vec_lmc, radius=sep_max_lmc.to('radian').value)\n ipix_smc = hp.query_disc(nside=NSIDE, vec=vec_smc, radius=sep_max_smc.to('radian').value)\n # 1s mean masked (excluded) vals\n mask[ipix_lmc] = 1 \n mask[ipix_smc] = 1\n if fn_mask is not None:\n hp.write_map(fn_mask, mask)\n return mask\n\n\ndef galactic_dust_mask(NSIDE, Av_max, R, fn_dustmap=None, fn_mask=None):\n print(NSIDE, R, fn_dustmap)\n map_avmean = maps.get_dust_map(NSIDE, R, fn_map=fn_dustmap)\n mask = map_avmean > Av_max \n if fn_mask is not None:\n hp.write_map(fn_mask, mask)\n return mask\n\n\ndef subsample_by_mask(NSIDE, ra, dec, mask_func, mask_func_args):\n mask = mask_func(NSIDE, *mask_func_args)\n _, pixel_indices = maps.get_map(NSIDE, ra, dec)\n \n # TODO: better way to do this??\n pixel_arr = np.arange(len(mask))\n pixel_indices_keep = pixel_arr[mask]\n idx_keep = np.in1d(pixel_indices, pixel_indices_keep, invert=True)\n print(f\"Applied mask, kept {np.sum(idx_keep)/len(idx_keep):.3f} of sources\")\n return idx_keep\n\n\ndef subsample_mask_indices(ra, dec, mask):\n npix = len(mask)\n nside = hp.npix2nside(npix)\n _, pixel_indices = maps.get_map(nside, ra, dec)\n # TODO: better way to do this??\n pixel_indices_keep = np.where(mask==0)[0]\n idx_keep = np.in1d(pixel_indices, pixel_indices_keep)\n print(f\"Applied mask, kept {np.sum(idx_keep)/len(idx_keep):.3f} of sources\")\n return idx_keep\n\n\ndef get_qso_mask(NSIDE, mask_names_gaia, b_max=None, Av_max=None, R=3.1):\n print(\"Getting QSO mask\")\n\n fn_dustmap = f'../data/maps/map_dust_NSIDE{NSIDE}.npy'\n # dict points to tuple with masks and extra args\n mask_gaia_dict = {'plane': (galactic_plane_mask, [b_max]),\n 'mcs': (magellanic_clouds_mask, []),\n 'dust': (galactic_dust_mask, [Av_max, R, fn_dustmap])}\n NPIX = hp.nside2npix(NSIDE)\n # masks have 1s where to mask. 
if current mask OR new\n # mask has a 1, want a 1, so we need OR\n mask_qso = np.zeros(NPIX, dtype=bool) # zeros mean no mask\n for mask_name in mask_names_gaia:\n mask_func, mask_func_args = mask_gaia_dict[mask_name]\n mask = mask_func(NSIDE, *mask_func_args)\n mask_qso = (mask_qso | mask)\n return mask_qso\n\n\nif __name__=='__main__':\n main()","repo_name":"kstoreyf/gaia-quasars-lss","sub_path":"code/masks.py","file_name":"masks.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"37777341950","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nimport bpy\nfrom sverchok.node_tree import SverchCustomTreeNode\n\n\nclass SvArmaturePropsNode(SverchCustomTreeNode, bpy.types.Node):\n '''Armature object props'''\n bl_idname = 'SvArmaturePropsNode'\n bl_label = 'Armature Props'\n bl_icon = 'MOD_ARMATURE'\n is_scene_dependent = True\n is_animation_dependent = True\n\n def sv_init(self, context):\n self.inputs.new('SvObjectSocket', 'Armature Object')\n self.inputs.new('SvStringsSocket', 'bone select mask')\n self.outputs.new('SvVerticesSocket', 'Head')\n self.outputs.new('SvVerticesSocket', 'Middle relative')\n self.outputs.new('SvVerticesSocket', 'Tail')\n self.outputs.new('SvVerticesSocket', 'Direction relative')\n self.outputs.new('SvStringsSocket', 'Length of bone')\n self.outputs.new('SvMatrixSocket', \"local bone matrix\")\n self.outputs.new('SvObjectSocket', \"Armature Object\")\n\n def process(self):\n armobj, selm = self.inputs\n head, Cent, tail, Norm, lng, matr, obj = self.outputs\n armat = [ob.data.bones for ob in armobj.sv_get()]\n if selm.is_linked:\n for ar, sm in zip(armat, selm.sv_get()):\n for b,m in zip(ar, sm):\n b.select = b.select_head = b.select_tail = m\n if head.is_linked:\n head.sv_set([[bone.head_local[:] for bone in ar] for ar in armat])\n if Cent.is_linked:\n Cent.sv_set([[bone.center[:] for bone in ar] for ar in armat])\n if tail.is_linked:\n tail.sv_set([[bone.tail_local[:] for bone in ar] for ar in armat])\n if Norm.is_linked:\n Norm.sv_set([[bone.vector[:] for bone in ar] for ar in armat])\n if lng.is_linked:\n lng.sv_set([[bone.length for bone in ar] for ar in armat])\n if matr.is_linked:\n if len(armat) > 1:\n matr.sv_set([[bone.matrix_local for bone in ar] for ar in armat])\n else:\n matr.sv_set([bone.matrix_local for bone in armat[0]])\n if obj.is_linked:\n obj.sv_set(armobj.sv_get())\n\n\ndef register():\n bpy.utils.register_class(SvArmaturePropsNode)\n\n\ndef unregister():\n 
bpy.utils.unregister_class(SvArmaturePropsNode)\n","repo_name":"nortikin/sverchok","sub_path":"nodes/object_nodes/armature_analyzer.py","file_name":"armature_analyzer.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"38041784960","text":"\"\"\"Provide the setuptools command bdist_rpm2.\"\"\"\n\nimport os\nimport string\n\nfrom setuptools.command.bdist_rpm import bdist_rpm\n\n\nfrom distutils.debug import DEBUG\nfrom distutils.file_util import write_file\nfrom distutils.sysconfig import get_python_version\nfrom distutils.errors import DistutilsFileError, DistutilsExecError\nfrom distutils import log\n\n\nclass bdist_rpm2(bdist_rpm):\n \"\"\"Add two extra user options to the setuptools bdist_rpm command:\n --dist-name: specify a different distribution name\n --add-test: add 'python setup.py test' to the %check section\n \"\"\"\n\n # Description\n description = \"modified version of bdist_rpm\"\n\n # Copy user options\n user_options = list(bdist_rpm.user_options)\n\n # Add name-prefix option\n user_options.append((\n 'dist-name=', None,\n \"Specify a different distribution name\"))\n\n # Add run-test option\n user_options.append((\n 'add-test', None,\n \"Add 'python setup.py test' to the %check section\"))\n\n def initialize_options(self):\n self.dist_name = ''\n self.add_test = 0\n bdist_rpm.initialize_options(self)\n\n def finalize_package_data(self):\n self.ensure_string('dist_name')\n self.dist_name = self.dist_name.strip()\n # Patch EncodingError in bdist_rpm\n metadata = self.distribution.metadata\n metadata.author_email = metadata._encode_field(metadata.author_email)\n # Call parent\n bdist_rpm.finalize_package_data(self)\n\n def get_distribution_name(self):\n if self.dist_name:\n return self.dist_name\n return self.distribution.get_name()\n\n def _make_spec_file(self):\n spec_file = bdist_rpm._make_spec_file(self)\n # Add tests to the spec file\n if self.add_test:\n test_call = \"%s setup.py test\" % self.python\n spec_file.extend(['', '%check', test_call])\n # Change dist name\n if self.dist_name:\n spec_file[0] = \"%define name \" + self.dist_name\n return spec_file\n\n # The rest of the file is shamelessly copied from\n # distutils/command/bdist_rpm.py. The only modifications are:\n # - the declaration of the `spec_path` variable, where the distribution\n # name is `self.get_distribution_name()`\n # - the use of `sdist2` instead of `sdist`\n\n def run(self):\n\n # ensure distro name is up-to-date\n self.run_command('egg_info')\n\n if DEBUG:\n print(\"before _get_package_data():\")\n print(\"vendor =\", self.vendor)\n print(\"packager =\", self.packager)\n print(\"doc_files =\", self.doc_files)\n print(\"changelog =\", self.changelog)\n\n # make directories\n if self.spec_only:\n spec_dir = self.dist_dir\n self.mkpath(spec_dir)\n else:\n rpm_dir = {}\n for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):\n rpm_dir[d] = os.path.join(self.rpm_base, d)\n self.mkpath(rpm_dir[d])\n spec_dir = rpm_dir['SPECS']\n\n # Spec file goes into 'dist_dir' if '--spec-only specified',\n # build/rpm. 
otherwise.\n\n spec_path = os.path.join(\n spec_dir,\n \"%s.spec\" % self.get_distribution_name())\n self.execute(write_file,\n (spec_path,\n self._make_spec_file()),\n \"writing '%s'\" % spec_path)\n\n if self.spec_only: # stop if requested\n return\n\n # Make a source distribution and copy to SOURCES directory with\n # optional icon.\n saved_dist_files = self.distribution.dist_files[:]\n sdist = self.reinitialize_command('sdist2')\n sdist.dist_name = self.dist_name\n if self.use_bzip2:\n sdist.formats = ['bztar']\n else:\n sdist.formats = ['gztar']\n self.run_command('sdist2')\n self.distribution.dist_files = saved_dist_files\n\n source = sdist.get_archive_files()[0]\n source_dir = rpm_dir['SOURCES']\n self.copy_file(source, source_dir)\n\n if self.icon:\n if os.path.exists(self.icon):\n self.copy_file(self.icon, source_dir)\n else:\n error = \"icon file '%s' does not exist\" % self.icon\n raise DistutilsFileError(error)\n\n # build package\n log.info(\"building RPMs\")\n rpm_cmd = ['rpm']\n if os.path.exists('/usr/bin/rpmbuild') or \\\n os.path.exists('/bin/rpmbuild'):\n rpm_cmd = ['rpmbuild']\n\n if self.source_only: # what kind of RPMs?\n rpm_cmd.append('-bs')\n elif self.binary_only:\n rpm_cmd.append('-bb')\n else:\n rpm_cmd.append('-ba')\n if self.rpm3_mode:\n rpm_cmd.extend(['--define',\n '_topdir %s' % os.path.abspath(self.rpm_base)])\n if not self.keep_temp:\n rpm_cmd.append('--clean')\n\n if hasattr(self, 'quiet') and self.quiet:\n rpm_cmd.append('--quiet')\n\n rpm_cmd.append(spec_path)\n # Determine the binary rpm names that should be built out of this spec\n # file\n # Note that some of these may not be really built (if the file\n # list is empty)\n nvr_string = \"%{name}-%{version}-%{release}\"\n src_rpm = nvr_string + \".src.rpm\"\n non_src_rpm = \"%{arch}/\" + nvr_string + \".%{arch}.rpm\"\n q_cmd = r\"rpm -q --qf '%s %s\\n' --specfile '%s'\" % (\n src_rpm, non_src_rpm, spec_path)\n\n out = os.popen(q_cmd)\n try:\n binary_rpms = []\n source_rpm = None\n while 1:\n line = out.readline()\n if not line:\n break\n l = string.split(string.strip(line))\n assert(len(l) == 2)\n binary_rpms.append(l[1])\n # The source rpm is named after the first entry in the specfile\n if source_rpm is None:\n source_rpm = l[0]\n\n status = out.close()\n if status:\n raise DistutilsExecError(\"Failed to execute: %s\" % repr(q_cmd))\n\n finally:\n out.close()\n\n self.spawn(rpm_cmd)\n\n if not self.dry_run:\n if self.distribution.has_ext_modules():\n pyversion = get_python_version()\n else:\n pyversion = 'any'\n\n if not self.binary_only:\n srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)\n assert(os.path.exists(srpm))\n self.move_file(srpm, self.dist_dir)\n filename = os.path.join(self.dist_dir, source_rpm)\n self.distribution.dist_files.append(\n ('bdist_rpm', pyversion, filename))\n\n if not self.source_only:\n for rpm in binary_rpms:\n rpm = os.path.join(rpm_dir['RPMS'], rpm)\n if os.path.exists(rpm):\n self.move_file(rpm, self.dist_dir)\n filename = os.path.join(self.dist_dir,\n os.path.basename(rpm))\n self.distribution.dist_files.append(\n ('bdist_rpm', pyversion, filename))\n","repo_name":"vxgmichel/setuptools-rpm2","sub_path":"rpm2/bdist_rpm2.py","file_name":"bdist_rpm2.py","file_ext":"py","file_size_in_byte":7321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37869778286","text":"# this file should be run every 30 min\nimport os\nfrom datetime import datetime,timedelta\nimport ntplib\nimport time\n\n\nclass 
SyncTime(object):\n\n    # Take time from server\n    @staticmethod\n    def fetch_time_from_server():\n        try:\n            client = ntplib.NTPClient()\n            response = client.request('pool.ntp.org')\n            os.system('date ' + time.strftime('%m%d%H%M%Y.%S', time.localtime(response.tx_time)))\n            print(\"Online Time synchronized\")\n        except:\n            print('Could not sync with server.')\n            time.sleep(1)\n            print('Retrying..')\n            SyncTime.fetch_time_from_server()\n\n\n    @staticmethod\n    def get_time_now():\n        time_start = datetime.now() + timedelta(0, 3)\n        return time_start","repo_name":"AndiDomi/Adaptive-Bitrate-Streaming","sub_path":"ds_production/FetchTime.py","file_name":"FetchTime.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"474770315","text":"import turtle\nimport random\n\n\nclass Shape:\n    def __init__(self):\n        self.myTurtle = turtle.Turtle('turtle') \n\n    def setPen(self):\n        r = random.random()\n        g = random.random()\n        b = random.random()\n        self.myTurtle.pencolor((r, g, b))\n        pSize = random.randrange(1, 10)\n        self.myTurtle.pensize(pSize)\n\n\nclass Rectangle(Shape):\n    def __init__(self, x, y):\n        Shape.__init__(self)\n        self.cx = x\n        self.cy = y\n        self.width = random.randrange(20, 100)\n        self.height = random.randrange(20, 100)\n\n    def drawShape(self):\n        sx1 = self.cx - self.width / 2\n        sy1 = self.cy - self.height / 2\n        sx2 = self.cx + self.width / 2\n        sy2 = self.cy + self.height / 2\n\n        self.setPen()\n        self.myTurtle.penup()\n        self.myTurtle.goto(sx1, sy1)\n        self.myTurtle.pendown()\n        self.myTurtle.goto(sx1, sy2)\n        self.myTurtle.goto(sx2, sy2)\n        self.myTurtle.goto(sx2, sy1)\n        self.myTurtle.goto(sx1, sy1)\n\n\nclass Circle(Shape):\n\n    def __init__(self, x, y):\n        Shape.__init__(self)\n        self.cx = x\n        self.cy = y\n        self.radius = random.randrange(20, 100)\n\n    def drawShape(self):\n        self.setPen()\n        self.myTurtle.penup()\n        self.myTurtle.goto(self.cx, self.cy)\n        self.myTurtle.pendown()\n        self.myTurtle.circle(self.radius)\n\n\ndef screenLeftClick(x, y):\n    shape = random.randrange(0, 2)\n    if (shape == 0):\n        s = Rectangle(x, y)\n    else:\n        s = Circle(x, y)\n    s.drawShape()\n\n\nturtle.title('도형그리기')\nturtle.onscreenclick(screenLeftClick)\nturtle.done()","repo_name":"okdoittttt/PYTHONclass","sub_path":"circleTurtle.py","file_name":"circleTurtle.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23697037157","text":"# doc\n# https://docs.opencv.org/3.0-beta/modules/imgproc/doc/histograms.html\n# compare hist: https://www.programcreek.com/python/example/81596/cv2.compareHist\n# this demo: https://www.pyimagesearch.com/2014/07/14/3-ways-compare-histograms-using-opencv-python/\n\n\n# USAGE\n# python compare.py --dataset images\n\nfrom scipy.spatial import distance as dist\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport glob\nimport cv2\n\ndef sample () :\n\t# store image label (filename) to dictionary\n\tlabels = {}\n\t# store image data to array\n\timages = []\n\n\tfor path in glob.glob(\"images/*.png\"):\n\t\tfilename = path[path.rfind(\"/\") + 1:]\n\t\timage = cv2.imread(path)\n\t\timages.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\n\t\t# extract a 3D RGB color histogram, use 8 bins/channel, normalize,\n\t\thist = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])\n\t\thist = cv2.normalize(hist, image).flatten()\n\t\tlabels[filename] = hist\n\n\n\t\t# NORMALIZE\n\tOPENCV_METHODS = 
(\n\t\t(\"Correlation\", cv2.HISTCMP_CORREL),\n\t\t(\"Chi-Squared\", cv2.HISTCMP_CHISQR),\n\t\t(\"Intersection\", cv2.HISTCMP_INTERSECT), \n\t\t(\"Hellinger\", cv2.HISTCMP_BHATTACHARYYA))\n\n\t# loop over the comparison methods\n\tfor (methodName, method) in OPENCV_METHODS:\n\t\tresults = {}\n\t\treverse = False\n\n\t\t# reverse results if using method correlation / intersection\n\t\tif methodName in (\"Correlation\", \"Intersection\"): reverse = True\n\n\t\t# compare histogram\n\t\tfor (k, hist) in labels.items():\n\t\t\td = cv2.compareHist(labels[\"doge.png\"], hist, method)\n\t\t\tresults[k] = d\n\n\t\t# sort results\n\t\tresults = sorted([(v, k) for (k, v) in results.items()], reverse = reverse)\n\n\n\t\t## DISPLAY RESULTS\n\t\tfig = plt.figure(\"Query\")\n\t\tax = fig.add_subplot(1, 1, 1)\n\t\tax.imshow(images[0])\n\t\tplt.axis(\"off\")\n\n\t\t# initialize the results figure\n\t\tfig = plt.figure(\"Results: %s\" % (methodName))\n\t\tfig.suptitle(methodName, fontsize = 10)\n\n\t\tfor (i, (v, k)) in enumerate(results):\n\t\t\t# show the result\n\t\t\tax = fig.add_subplot(3, len(images) / 2, i + 1)\n\t\t\tax.set_title(\"%s: %.2f\" % (k, v))\n\t\t\tplt.imshow(images[i])\n\t\t\tplt.axis(\"off\")\n\n\t# show the OpenCV methods\n\tplt.show()","repo_name":"taquy/ImageProcessing","sub_path":"Q_HistogramComparison/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10184155850","text":"if __name__ == '__main__':\r\n x = int(input())\r\n q = int(input())\r\n\r\n while 2 <= q <= 10:\r\n x_copy = 0\r\n i = 0\r\n while x > 0:\r\n x_copy += (x % q) * 10 ** i\r\n x //= q\r\n i += 1\r\n print(x_copy)\r\n q = 0\r\n\r\n while 10 <= q <= 16:\r\n tablica = [] # * 1000 j = 0\r\n chars = [str(i) for i in range(10)] + [chr(i) for i in range(ord(\"A\"), ord(\"G\"))]\r\n while x > 0:\r\n tablica.append(chars[x % q]) # tablica[j] = chars[x%q]\r\n x //= q\r\n# j += 1\r\n print(tablica)\r\n q = 0\r\n","repo_name":"IwoSzczepaniak/WDI","sub_path":"WDI_zadania/3_WDI_zad1.py","file_name":"3_WDI_zad1.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4894496681","text":"# shell: uvicorn motion_server:app --reload\n\"\"\" Data packaging server\n\nThe data packaging server collates finished actions into processes.\nFinished actions which do not contribute process information are pushed to \n\"\"\"\n\n__all__ = [\"makeApp\"]\n\nfrom helao.servers.base import makeActionServ\nfrom helao.drivers.data.dbpack_driver import DBPack\nfrom helao.helpers.config_loader import config_loader\n\n\ndef makeApp(confPrefix, servKey, helao_root):\n\n config = config_loader(confPrefix, helao_root)\n\n app = makeActionServ(\n config=config,\n server_key=servKey,\n server_title=servKey,\n description=\"Data packaging server\",\n version=0.1,\n driver_class=DBPack,\n )\n\n @app.post(f\"/finish_yml\")\n async def finish_yml(yml_path: str, priority: int = 0):\n await app.driver.add_yml_task(yml_path, priority)\n return yml_path\n\n @app.post(f\"/finish_pending\")\n async def finish_pending():\n pending_dict = await app.driver.finish_pending()\n return pending_dict\n\n @app.post(f\"/list_pending\")\n def list_pending():\n pending_dict = app.driver.list_pending()\n return pending_dict\n \n return 
app\n","repo_name":"KevinTran-TRI/helao-async","sub_path":"helao/servers/action/dbpack_server.py","file_name":"dbpack_server.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"30008277568","text":"import json\n\nwith open('people.json', mode='r', encoding='utf-8') as file:\n data = json.load(file)\n keys = list(max(data, key=lambda x: len(x)).keys())\n temp = []\n for row in data:\n for i in keys:\n row.setdefault(i, None)\n temp.append(row)\n\nwith open('updated_people.json', mode='w', encoding='utf-8') as save_file:\n json.dump(temp, save_file, indent=' ', sort_keys=True)\n\n","repo_name":"lockiz/-stepik_tests_course","sub_path":"4_Working_with_files/json_step_9_Restoring_missing_Keys/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13892656970","text":"lessons = input().split(\", \")\n\n\ndef add(lesson_tittle):\n if lesson_tittle not in lessons:\n lessons.append(lesson_tittle)\n\n\ndef insert(lesson_tittle, index_to_position):\n if lesson_tittle not in lessons:\n lessons.insert(index_to_position, lesson_tittle)\n\n\ndef remove(lesson_tittle):\n if lesson_tittle in lessons:\n lessons.remove(lesson_tittle)\n if f\"{lesson_tittle}-Exercise\" in lessons:\n lessons.remove(f\"{lesson_tittle}-Exercise\")\n\n\ndef swap(lesson_1, lesson_2):\n if lesson_1 in lessons and lesson_2 in lessons:\n first_lesson = lessons.index(lesson_1)\n second_lesson = lessons.index(lesson_2)\n lessons[first_lesson], lessons[second_lesson] = lessons[second_lesson], lessons[first_lesson]\n if lesson_2 and f\"{lesson_2}-Exercise\" in lessons:\n index_of_lesson_2 = lessons.index(lesson_2) + 1\n lessons.insert(index_of_lesson_2, f\"{lesson_2}-Exercise\")\n lessons.pop(lessons.index(f\"{lesson_2}-Exercise\", lessons.index(f\"{lesson_2}-Exercise\") + 1))\n if lesson_1 and f\"{lesson_1}-Exercise\" in lessons:\n index_of_lesson_1 = lessons.index(lesson_1) + 1\n lessons.insert(index_of_lesson_1, f\"{lesson_1}-Exercise\")\n lessons.pop(lessons.index(f\"{lesson_1}-Exercise\", lessons.index(f\"{lesson_1}-Exercise\") + 1))\n\n\ndef exercise(lesson_title):\n if lesson_title in lessons:\n if f\"{lesson_title}-Exercise\" not in lessons:\n current_lesson_index = lessons.index(lesson_title) + 1\n lessons.insert(current_lesson_index, f\"{lesson_title}-Exercise\")\n elif lesson_title not in lessons:\n lessons.append(lesson_title)\n lessons.append(f\"{lesson_title}-Exercise\")\n\n\ncommand = input()\nwhile command != \"course start\":\n command = command.split(\":\")\n operation = command[0]\n lesson_title = command[1]\n if operation == \"Add\":\n add(lesson_title)\n elif operation == \"Insert\":\n index = int(command[2])\n insert(lesson_title, index)\n elif operation == \"Remove\":\n remove(lesson_title)\n elif operation == \"Swap\":\n lesson_title_1 = command[1]\n lesson_title_2 = command[2]\n swap(lesson_title_1, lesson_title_2)\n elif operation == \"Exercise\":\n exercise(lesson_title)\n command = input()\n\n\nfor count, lesson in enumerate(lessons, 1):\n print(f\"{count}.{lesson}\")","repo_name":"DianVK/softuni_python_fundamentals","sub_path":"Lists Advanced - Exercise/softuni_course_planning.py","file_name":"softuni_course_planning.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} 
+{"seq_id":"1846877319","text":"from flask import Flask, render_template, request, redirect, url_for, flash\r\n\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom sqlalchemy.sql import text\r\nimport datetime\r\nfrom decimal import Decimal\r\n\r\n\r\n# from logging.config import dictConfig\r\n\r\n# dictConfig({\r\n# 'version': 1,\r\n# 'formatters': {'default': {\r\n# 'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',\r\n# }},\r\n# 'handlers': {'wsgi': {\r\n# 'class': 'logging.StreamHandler',\r\n# 'stream': 'ext://flask.logging.wsgi_errors_stream',\r\n# 'formatter': 'default'\r\n# }},\r\n# 'root': {\r\n# 'level': 'INFO',\r\n# 'handlers': ['wsgi']\r\n# }\r\n# })\r\n\r\n\r\n\r\napp = Flask(__name__)\r\n\r\ndb_name = 'data.db'\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + db_name\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\r\ndb = SQLAlchemy(app)\r\napp.app_context().push()\r\n\r\nclass shifts(db.Model):\r\n shift_number = db.Column(db.Integer, primary_key = True, nullable=False)\r\n shift_date = db.Column(db.DateTime, nullable = False)\r\n shift_hours = db.Column(db.Float, nullable = False)\r\n shift_earned = db.Column(db.Float, nullable = False)\r\n\r\n def __repr__(self) -> str:\r\n return '' % self.id\r\n\r\n@app.before_first_request\r\ndef create_tables():\r\n db.create_all()\r\n\r\n@app.route('/')\r\ndef index():\r\n shiftList = shifts.query.order_by(shifts.shift_date)\r\n\r\n totalHours = 0\r\n totalEarned = 0\r\n totalShifts = 0\r\n\r\n print(shiftList)\r\n\r\n for shift in shiftList:\r\n totalHours += shift.shift_hours\r\n totalEarned += shift.shift_earned\r\n totalShifts += 1\r\n\r\n averageEarned = totalEarned / totalShifts\r\n averageEarned = '{:.2f}'.format(averageEarned)\r\n\r\n return render_template('index.html', shiftList=shiftList, totalHours=totalHours, totalEarned=totalEarned, totalShifts=totalShifts, averageEarned=averageEarned)\r\n\r\n@app.route('/log-hours/', methods=['GET', 'POST'])\r\ndef log():\r\n\r\n if request.method == 'POST':\r\n\r\n shift_date = datetime.datetime.strptime(request.form.get('shiftDate'), r'%Y-%m-%d')\r\n shift_hours = float(request.form.get('shiftHours'))\r\n shift_earned = shift_hours * float(request.form.get('toplevelWages'))\r\n\r\n new_shift = shifts( shift_date=shift_date, shift_hours=shift_hours, shift_earned=shift_earned)\r\n app.logger.info(new_shift.shift_number)\r\n\r\n try:\r\n db.session.add(new_shift)\r\n db.session.commit()\r\n # flash('success')\r\n return redirect('/log-hours')\r\n\r\n except:\r\n return 'error adding data'\r\n\r\n if request.method == 'GET':\r\n return render_template('log.html')\r\n\r\n@app.route('/delete/')\r\ndef delete(code):\r\n item = shifts.query.filter_by(shift_number=code).first()\r\n\r\n try:\r\n db.session.delete(item)\r\n db.session.commit()\r\n return redirect('/')\r\n except:\r\n return 'error deleting'\r\n\r\n@app.route('/update/', methods=['GET', 'POST'])\r\ndef update(code):\r\n\r\n shift = shifts.query.filter_by(shift_number = code).first()\r\n\r\n if request.method == 'POST':\r\n shift.shift_date = datetime.datetime.strptime(request.form.get('shiftDate'), r'%Y-%m-%d')\r\n shift.shift_hours = float(request.form.get('shiftHours'))\r\n shift.shift_earned = shift.shift_hours * float(request.form.get('toplevelWages'))\r\n\r\n try:\r\n db.session.commit()\r\n return redirect('/')\r\n except:\r\n return 'error updating item'\r\n\r\n\r\n if request.method == 'GET':\r\n return render_template('update.html', shift=shift)\r\n\r\nif __name__ == \"__main__\":\r\n 
app.run(debug=True)\r\n","repo_name":"even-man/log-hours","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3817251211","text":"\ndef crawl():\n    # crawl a website and click on a link\n\n    # import libraries\n    import requests as req # for requesting websites\n    from bs4 import BeautifulSoup as bs # for parsing html\n\n    # get the website\n    agent = {\n        \"User-Agent\": 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}\n    session = req.Session()\n\n    payload = {\n        'channelid': 'https://www.youtube.com/channel/UCKQvGU-qtjEthINeViNbn6A',\n    }\n\n    s = session.post(\n        \"https://mytoolstown.com/youtube/check_account.php\", data=payload, headers=agent)\n\n\n    # Navigate to the next page and scrape the data\n    s = session.get('https://mytoolstown.com/youtube/earn/');\n\n    t = session.get(\n        'https://mytoolstown.com/youtube/earn/getData.php?kNeT=JKLCM%18HI%19%1A%1FINB%1E%18IMI%1A&type=A')\n\n    soup = bs(s.text, 'html.parser')\n\n    text = soup.find('h5', {'class': 'card-title'}).text\n    name = t.json()['fromuser']\n    link = t.json()['link']\n    type = t.json()['type']\n    id = t.json()['promotionid']\n\n    # add href to the button\n    soup.find('a', {'id': 'actionbtn'})['onclick'] = \"startwindow('4361759','https://www.youtube.com/channel/UCrG7vaUWXTGrPn0Shdd6n6g?__a=1','Subscribe',2)\"\n\n    print(soup.find('a', {'id': 'actionbtn'})['onclick'])\n\n    print(text) # print the text\n    print(name) # print the name\n    print(link) # print the link\n    print(type) # print the type\n    print(id) # print the id\n\n\nif 1 == 1:\n    crawl() # run the function","repo_name":"devpanther/Qubot","sub_path":"index copy.py","file_name":"index copy.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"237192084","text":"import base64\r\n\r\n\r\n# Open the original file\r\nwith open('original.txt', 'r') as f1:\r\n    # Read the first line of data\r\n    first_line = f1.readline().strip()\r\n\r\n# Close the original file\r\nf1.close()\r\n\r\n# If the new file already exists, clear its contents\r\nwith open('new.txt', 'w') as f2:\r\n    f2.truncate(0)\r\n\r\n# Open the new file and write the first line of data to it\r\nwith open('new.txt', 'w') as f2:\r\n    f2.write(first_line)\r\n\r\n# Close the new file\r\nf2.close()\r\n\r\n# Open the original file, skip the first line, and read the remaining data\r\nwith open('original.txt', 'r') as f1:\r\n    f1.readline()\r\n    remaining_data = f1.read()\r\n\r\n# Close the original file\r\nf1.close()\r\n\r\n# Write the remaining data back to the original file\r\nwith open('original.txt', 'w') as f1:\r\n    f1.write(remaining_data)\r\n\r\n# Close the original file\r\nf1.close()\r\n\r\n# Read the contents of the new file\r\nwith open('new.txt', 'r') as f2:\r\n    content = f2.read()\r\n\r\n# Convert the contents to base64 format\r\nbase64_content = base64.b64encode(content.encode('utf-8')).decode('utf-8')\r\n\r\n# Write the base64-encoded contents to a file\r\nwith open('new_file_base64.txt', 'w') as f3:\r\n    f3.write(base64_content)","repo_name":"luxl-1379/merge","sub_path":"vps.py","file_name":"vps.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"9404152621","text":"import read_logfile\n\ndef select_files(logfilename, **kwargs):\n    #the selection is made by giving kwargs to that function as a dictionary\n    log_file = read_logfile.download_logfile(logfilename)\n    selection = log_file.copy()\n    for i, j in kwargs.items():\n        selection = selection.loc[(log_file[i]==j)]\n    return selection.reset_index(drop=True)\n\ndef create_header(row):\n    #it iterates over the logfile and 
looks for the cells that are filled with some value\n #to create the header. It appends a list with the position of the sensors of the filled\n #cells. These headers are used to names of the columns of all the text files.\n try:\n header = [\"Date\", \"Time\"]\n for i in range(1,15):\n if row[\"S\"+str(i)] == \"\":\n continue\n if row[\"S\"+str(i)] != \"\":\n header.append(\"s\"+str(i))\n return header\n except:\n raise TypeError(\"Sorry, the header of the files could not be created. Please, review select_files.py\")","repo_name":"jcapo96/RTD_TMS","sub_path":"tools/file_manager/select_files.py","file_name":"select_files.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25171547276","text":"#!/usr/bin/env python\n\n\nimport praw,re,youtube_dl\n\n\noptions = {\n 'format':'bestaudio/best',\n 'extractaudio':True,\n 'audioformat':'mp3',\n 'outtmpl':u'%(id)s.%(ext)s',\n 'noplaylist':True,\n 'nocheckcertificate':True,\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }]\n}\n\nreddit = praw.Reddit(client_id='id',\n client_secret='secret',\n user_agent='my user agent')\n\nfor submission in reddit.subreddit('Music').hot(limit=10):\n if(re.match(\"^(https?\\:\\/\\/)?(www\\.youtube\\.com|youtu\\.?be)\\/.+$\",submission.url)):\n with youtube_dl.YoutubeDL(options) as ydl:\n ydl.download([submission.url])\n","repo_name":"batebates/bulk","sub_path":"musicReddit.py","file_name":"musicReddit.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33539817104","text":"#!/usr/bin/env python3\nimport os\nimport aws_cdk as cdk\nfrom abalone_data_pipeline.abalone_data_pipeline_stack import DataPipelineStack\n\nMODEL = \"abalone\"\nCODECOMMIT_REPOSITORY = \"abalone-data-pipeline\"\n\napp = cdk.App()\n\nDataPipelineStack(\n app,\n CODECOMMIT_REPOSITORY,\n env=cdk.Environment(account=os.getenv(\"CDK_DEFAULT_ACCOUNT\"), region=os.getenv(\"CDK_DEFAULT_REGION\")),\n model_name=MODEL,\n repo_name=CODECOMMIT_REPOSITORY,\n airflow_environment_name=f\"{MODEL}-airflow-environment\"\n)\n\napp.synth()","repo_name":"PacktPublishing/Automated-Machine-Learning-on-AWS","sub_path":"Chapter08/cdk/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"54"} +{"seq_id":"38026497691","text":"import sys\ninput=sys.stdin.readline\n\nli = [0]\n\nA, B = map(int, input().split())\nfor i in range(1, B+1):\n for j in range(i):\n li.append(i)\n\nres = sum(li[A:B+1])\nprint(res)","repo_name":"kraftenty/Problem-Solving","sub_path":"implementation/1292.py","file_name":"1292.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37620549789","text":"s = input()\r\ncCount = ccCount = 0\r\nfor i in range(len(s)):\r\n if s[i] == 'c' or s[i] == 'C':\r\n cCount +=1\r\n if i+1 < len(s):\r\n if s[i]+s[i+1] == 'cc' or s[i]+s[i+1] == 'cC' or s[i]+s[i+1] == 'Cc' or s[i]+s[i+1] == 'CC':\r\n ccCount +=1\r\nprint(cCount)\r\nprint(ccCount)\r\n","repo_name":"dyrnfmxm/python_practice","sub_path":"codeUp_1414.py","file_name":"codeUp_1414.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"25979717363","text":"import argparse\nimport random\nfrom pathlib import Path\n\nimport albumentations as A\nimport numpy as np\nimport pandas as pd\nimport pydicom\nimport torch\nimport yaml\nfrom omegaconf import OmegaConf\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport models\nfrom augmentations import RSNAAugmentation\nfrom datasets import RSNADataset\nfrom utils.general_utils import omegaconf_to_yaml\n\n\ndef inference(config):\n\n seed = config['seed']\n torch.backends.cudnn.deterministic = True\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n n_tta = config['n_tta']\n\n output_dir = Path(config['save_root']) / \\\n config['version'] / config['model']\n\n if output_dir.exists():\n print('This version already exists.\\n'\n f'version:{output_dir}')\n ans = None\n while ans not in ['y', 'Y']:\n ans = input('Do you want to continue inference? (y/n): ')\n if ans in ['n', 'N']:\n quit()\n output_dir.mkdir(exist_ok=True, parents=True)\n\n transform = RSNAAugmentation\n\n dataset_args = {\n 'transform': transform(mode='test', **config[\"transform\"]),\n **config[\"dataset\"]\n }\n dataset = RSNADataset(**dataset_args)\n loader = DataLoader(dataset=dataset,\n batch_size=config[\"batch_size\"],\n shuffle=False,\n num_workers=config[\"n_workers\"],\n pin_memory=False)\n\n device = config['gpu'][0]\n\n checkpoint_list = config['checkpoint']\n\n net_list = []\n for ckpt_path_dict in checkpoint_list:\n model_args = {}\n ckpt_path_cnn = Path(ckpt_path_dict['cnn'])\n cfg_path = ckpt_path_cnn.parents[1] / 'train_config.yaml'\n with open(cfg_path, 'r', encoding='utf-8') as f:\n cfg = yaml.load(f, Loader=yaml.SafeLoader)\n model_args['cnn_model'] = cfg[\"model\"][\"name\"]\n model_args['cnn_pretrained_path'] = ckpt_path_cnn\n model_args['cnn_param'] = cfg[\"model\"][\"args\"]\n model_args['cnn_param']['pretrained'] = False\n\n ckpt_path_cnn = Path(ckpt_path_dict['rnn'])\n cfg_path = ckpt_path_cnn.parents[1] / 'train_config.yaml'\n with open(cfg_path, 'r', encoding='utf-8') as f:\n cfg = yaml.load(f, Loader=yaml.SafeLoader)\n model_args['rnn_model'] = cfg[\"model\"][\"name\"]\n model_args['rnn_pretrained_path'] = ckpt_path_cnn\n model_args['rnn_param'] = cfg[\"model\"][\"args\"]\n\n net = getattr(models, 'CNN_RNN')(**model_args)\n net.to(device)\n net.eval()\n\n net_list.append(net)\n \n chunk_size_list = list(config[\"chunk_size_list\"])\n if len(chunk_size_list) == 1:\n chunk_size_list = chunk_size_list * len(net_list)\n \n assert len(chunk_size_list) == len(net_list)\n \n tta = None\n\n exam_names = []\n image_names = []\n with torch.no_grad():\n results_image_level = []\n results_exam_level = []\n for x, exam_name, image_name in tqdm(loader):\n # Note: shape of exam_name & image_name.\n # exam_name -> ['exam_name']\n # image_name -> [('image_name_1', ), ..., ('image_name_n', )]\n exam_names.append(exam_name[0])\n image_name = list(map(lambda x: x[0], image_name))\n image_names.extend(image_name)\n n_sequence = x.size()[1]\n result_tta_image, result_tta_exam = [], []\n for tta_cnt in range(n_tta):\n image = x.clone()\n result_net_exam, result_net_image = [], []\n for net_cnt, net in enumerate(net_list):\n embeddings = []\n for i in range(0, n_sequence, chunk_size_list[net_cnt]):\n embedding = net.cnn(image[:, i:i + chunk_size_list[net_cnt], :, :, :].to(device))\n embeddings.append(embedding)\n embeddings = torch.cat(embeddings, dim=1)\n image_level, exam_level = net.rnn(embeddings)\n\n image_level = 
torch.sigmoid(\n image_level).cpu().detach().numpy().reshape(-1) # (sequence, )\n exam_level = torch.sigmoid(\n exam_level).cpu().detach().numpy().reshape(-1) #(9, )\n\n exam_level = label_consistency(image_level, exam_level)\n \n result_net_image.append(image_level)\n result_net_exam.append(exam_level)\n result_net_image = np.array(result_net_image) #(len(net_list), sequence)\n result_tta_image.append(result_net_image)\n result_net_exam = np.array(result_net_exam) #(len(net_list), 9)\n result_tta_exam.append(result_net_exam)\n result_tta_image = np.array(result_tta_image) #(n_tta, len(net_list), sequence)\n results_image_level.append(result_tta_image)\n result_tta_exam = np.array(result_tta_exam) #(n_tta, len(net_list), 9)\n results_exam_level.append(result_tta_exam)\n \n results_exam_level = np.array(results_exam_level) #(n_exam, n_tta, len(net_list), 9)\n\n # Note: shape of results_image_level. \n # len(results_image_level) = n_exam\n # results_image_level[i].shape = (n_tta, len(net_list), #image in exam i)\n \n results_exam_level = results_exam_level.mean(axis=1).mean(axis=1)\n results_image_level = list(map(\n lambda x: x.mean(axis=0).mean(axis=0), results_image_level))\n results_exam_level = np.stack([label_consistency(image_level, exam_level)\n for image_level, exam_level in zip(results_image_level, results_exam_level)])\n results_image_level = np.concatenate(results_image_level)\n\n results_exam_level = results_exam_level.reshape(-1)\n\n exam_names = get_exam_names(exam_names)\n assert len(image_names) == len(results_image_level)\n assert len(exam_names) == len(results_exam_level)\n names = image_names + exam_names\n\n results = np.concatenate([results_image_level, results_exam_level])\n\n submission = pd.DataFrame([names, results], index=['id', 'label']).T\n \n if check_consistency(submission, dataset.df):\n print(\"Great! Fantastic! You are a genius!!!\")\n submission.to_csv(output_dir / 'submission.csv', index=False)\n else:\n print(\"ERROR! 
submission file doesn't satisfy consistency!!\")\n\n with open(output_dir / 'config.yaml', 'w', encoding='utf-8') as f:\n yaml.dump(omegaconf_to_yaml(cfg), f)\n\n\ndef label_consistency(image_level, exam_level):\n p_negative_exam_for_pe = exam_level[0]\n p_indeterminate = exam_level[1]\n p_chronic_pe = exam_level[2]\n p_acute_and_chronic_pe = exam_level[3]\n p_central_pe = exam_level[4]\n p_leftsided_pe = exam_level[5]\n p_rightsided_pe = exam_level[6]\n p_rv_lv_ratio_gte_1 = exam_level[7]\n p_rv_lv_ratio_lt_1 = exam_level[8]\n\n pe_exist = np.any(image_level > 0.5)\n if pe_exist:\n p_negative_exam_for_pe = np.clip(p_negative_exam_for_pe, None, 0.499)\n p_indeterminate = np.clip(p_indeterminate, None, 0.499)\n\n if p_chronic_pe > 0.5 and p_acute_and_chronic_pe > 0.5:\n tmp_list = [p_chronic_pe, p_acute_and_chronic_pe]\n tmp_list[np.argmin(tmp_list)] = 0.499\n p_chronic_pe, p_acute_and_chronic_pe = tmp_list\n \n if p_central_pe <= 0.5 and p_leftsided_pe <= 0.5 and p_rightsided_pe <= 0.5:\n tmp_list = [p_central_pe, p_leftsided_pe, p_rightsided_pe]\n tmp_list[np.argmax(tmp_list)] = 0.501\n p_central_pe, p_leftsided_pe, p_rightsided_pe = tmp_list\n \n if p_rv_lv_ratio_gte_1 <= 0.5 and p_rv_lv_ratio_lt_1 <= 0.5:\n tmp_list = [p_rv_lv_ratio_gte_1, p_rv_lv_ratio_lt_1]\n tmp_list[np.argmax(tmp_list)] = 0.501\n p_rv_lv_ratio_gte_1, p_rv_lv_ratio_lt_1 = tmp_list\n if p_rv_lv_ratio_gte_1 > 0.5 and p_rv_lv_ratio_lt_1 > 0.5:\n tmp_list = [p_rv_lv_ratio_gte_1, p_rv_lv_ratio_lt_1]\n tmp_list[np.argmin(tmp_list)] = 0.499\n p_rv_lv_ratio_gte_1, p_rv_lv_ratio_lt_1 = tmp_list\n \n else:\n if p_negative_exam_for_pe <= 0.5 and p_indeterminate <= 0.5:\n tmp_list = [p_negative_exam_for_pe, p_indeterminate]\n tmp_list[np.argmax(tmp_list)] = 0.501\n p_negative_exam_for_pe, p_indeterminate = tmp_list\n if p_negative_exam_for_pe > 0.5 and p_indeterminate > 0.5:\n tmp_list = [p_negative_exam_for_pe, p_indeterminate]\n tmp_list[np.argmin(tmp_list)] = 0.499\n p_negative_exam_for_pe, p_indeterminate = tmp_list\n \n p_chronic_pe = np.clip(p_chronic_pe, None, 0.499)\n p_acute_and_chronic_pe = np.clip(p_acute_and_chronic_pe, None, 0.499)\n\n p_central_pe = np.clip(p_central_pe, None, 0.499)\n p_leftsided_pe = np.clip(p_leftsided_pe, None, 0.499)\n p_rightsided_pe = np.clip(p_rightsided_pe, None, 0.499)\n\n p_rv_lv_ratio_gte_1 = np.clip(p_rv_lv_ratio_gte_1, None, 0.499)\n p_rv_lv_ratio_lt_1 = np.clip(p_rv_lv_ratio_lt_1, None, 0.499)\n\n exam_level = np.array([\n p_negative_exam_for_pe,\n p_indeterminate,\n p_chronic_pe,\n p_acute_and_chronic_pe,\n p_central_pe,\n p_leftsided_pe,\n p_rightsided_pe,\n p_rv_lv_ratio_gte_1,\n p_rv_lv_ratio_lt_1,\n ])\n\n return exam_level\n\ndef get_exam_names(exam_names):\n target_cols = [\n 'negative_exam_for_pe', \n 'indeterminate',\n 'chronic_pe', 'acute_and_chronic_pe', # not indeterminate. Only One is true.\n 'central_pe', 'leftsided_pe', 'rightsided_pe', # not indeterminate. At least One is true.\n 'rv_lv_ratio_gte_1', 'rv_lv_ratio_lt_1', # not indeterminate. 
Only One is true.\n ]\n\n new_exam_names = []\n for e in exam_names:\n for col in target_cols:\n new_exam_names.append(e + '_' + col)\n \n return new_exam_names\n\ndef check_consistency(sub, test_csv):\n str_split = sub.id.str.split('_', 1, expand=True)\n str_split.columns = ['StudyInstanceUID', 'label_type']\n \n condition = ~str_split.label_type.isnull()\n new_df = pd.concat([sub[condition], str_split[condition]], axis=1)\n del new_df['id']\n df_exam = new_df.pivot(index='StudyInstanceUID', columns='label_type', values='label')\n \n condition = str_split.label_type.isnull()\n df_image = sub[condition]\n df_image = df_image.merge(test_csv, how='left', left_on='id', right_on='SOPInstanceUID')\n df_image.rename(columns = {\"label\": \"pe_present_on_image\"}, inplace=True)\n del df_image['id']\n \n df = df_exam.merge(df_image, how='left', on='StudyInstanceUID')\n ids = [\"StudyInstanceUID\", \"SeriesInstanceUID\", \"SOPInstanceUID\"]\n labels = [c for c in df.columns if c not in ids]\n df = df[ids + labels]\n\n # SPLIT NEGATIVE AND POSITIVE EXAMS\n df['positive_images_in_exam'] = df['StudyInstanceUID'].map(df.groupby(['StudyInstanceUID']).pe_present_on_image.max())\n\n df_pos = df.loc[df.positive_images_in_exam > 0.5]\n df_neg = df.loc[df.positive_images_in_exam <= 0.5]\n\n \n # CHECKING CONSISTENCY OF POSITIVE EXAM LABELS\n rule1a = df_pos.loc[((df_pos.rv_lv_ratio_lt_1 > 0.5) & \n (df_pos.rv_lv_ratio_gte_1 > 0.5)) | \n ((df_pos.rv_lv_ratio_lt_1 <= 0.5) & \n (df_pos.rv_lv_ratio_gte_1 <= 0.5))].reset_index(drop = True)\n rule1a['broken_rule'] = '1a'\n\n rule1b = df_pos.loc[(df_pos.central_pe <= 0.5) & \n (df_pos.rightsided_pe <= 0.5) & \n (df_pos.leftsided_pe <= 0.5)].reset_index(drop = True)\n rule1b['broken_rule'] = '1b'\n\n rule1c = df_pos.loc[(df_pos.acute_and_chronic_pe > 0.5) & \n (df_pos.chronic_pe > 0.5)].reset_index(drop = True)\n rule1c['broken_rule'] = '1c'\n\n rule1d = df_pos.loc[(df_pos.indeterminate > 0.5) | \n (df_pos.negative_exam_for_pe > 0.5)].reset_index(drop = True)\n rule1d['broken_rule'] = '1d'\n \n \n # CHECKING CONSISTENCY OF NEGATIVE EXAM LABELS\n rule2a = df_neg.loc[((df_neg.indeterminate > 0.5) & \n (df_neg.negative_exam_for_pe > 0.5)) | \n ((df_neg.indeterminate <= 0.5) & \n (df_neg.negative_exam_for_pe <= 0.5))].reset_index(drop = True)\n rule2a['broken_rule'] = '2a'\n\n rule2b = df_neg.loc[(df_neg.rv_lv_ratio_lt_1 > 0.5) | \n (df_neg.rv_lv_ratio_gte_1 > 0.5) |\n (df_neg.central_pe > 0.5) | \n (df_neg.rightsided_pe > 0.5) | \n (df_neg.leftsided_pe > 0.5) |\n (df_neg.acute_and_chronic_pe > 0.5) | \n (df_neg.chronic_pe > 0.5)].reset_index(drop = True)\n rule2b['broken_rule'] = '2b'\n \n # MERGING INCONSISTENT PREDICTIONS\n errors = pd.concat([rule1a, rule1b, rule1c, rule1d, rule2a, rule2b], axis = 0)\n \n \n if len(errors) == 0:\n return True\n else:\n return False\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config_file',\n '-c',\n type=str,\n required=True,\n help='path of the config file')\n args = parser.parse_args()\n\n cfg = OmegaConf.load(args.config_file)\n\n inference(cfg)\n\n\nif __name__ == '__main__':\n main()","repo_name":"piwafp0720/RSNA-STR-Pulmonary-Embolism-Detection","sub_path":"src/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":13595,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"19493828895","text":"import enum\n\nfrom pprint import pformat\nfrom numbers import Number\nfrom typing import List, Union, Mapping, Sequence, 
Iterator\n\nimport numpy as np\n\nPeakDtype = np.dtype([('mz', np.float64), ('intensity', np.float32),\n ('annotation', object), ('aggregation', object)])\n\nPeakType = Mapping[str, Union[float, list]]\n\nclass ErrorUnit(enum.Enum):\n Da = 'da'\n PPM = 'ppm'\n\n\nclass PeakList(Sequence[PeakType]):\n peaks: np.ndarray\n\n def __init__(self, peaks):\n if isinstance(peaks, np.ndarray) and peaks.dtype == PeakDtype:\n self.peaks = peaks\n else:\n self.peaks = np.array([tuple(p) for p in peaks], dtype=PeakDtype)\n\n def __len__(self):\n return len(self.peaks)\n\n def __getitem__(self, i):\n return self.peaks[i]\n\n def __iter__(self) -> Iterator[PeakType]:\n return iter(self.peaks)\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({pformat(self.peaks, indent=2)})\"\n\n def __eq__(self, other):\n if other is None:\n return False\n if not isinstance(other, PeakList):\n other = PeakList(other)\n valid = np.allclose(self.peaks['mz'], other.peaks['mz'])\n if not valid:\n return False\n valid = np.allclose(self.peaks['intensity'], other.peaks['intensity'])\n if not valid:\n return False\n return True\n\n def __ne__(self, other):\n return not self == other\n\n def find(self, mz, error_tolerance=10, error_unit='ppm') -> List[Sequence[PeakType]]:\n if isinstance(mz, Number):\n mz = [mz]\n mzs = self.peaks['mz']\n ii = np.searchsorted(mzs, mz)\n n = len(self)\n error_unit = ErrorUnit(error_unit)\n\n outs = []\n if error_unit == ErrorUnit.Da:\n for i, mz_i in zip(ii, mz):\n low = i - 1\n while low >= 0:\n if abs(mzs[low] - mz_i) < error_tolerance:\n low -= 1\n else:\n low += 1\n break\n high = i\n while high < n:\n if abs(mzs[high] - mz_i) < error_tolerance:\n high += 1\n else:\n high -= 1\n break\n outs.extend(self.peaks[slice(low, high + 1)])\n elif error_unit == ErrorUnit.PPM:\n error_tolerance /= 1e6\n for i, mz_i in zip(ii, mz):\n low = i - 1\n while low >= 0:\n if abs(mzs[low] - mz_i) / mz_i < error_tolerance:\n low -= 1\n else:\n low += 1\n break\n high = i\n while high < n:\n if abs(mzs[high] - mz_i) / mz_i < error_tolerance:\n high += 1\n else:\n high -= 1\n break\n outs.extend(self.peaks[slice(low, high + 1)])\n return outs\n","repo_name":"HUPO-PSI/mzSpecLib","sub_path":"implementations/python/mzlib/peak_list.py","file_name":"peak_list.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"54"} +{"seq_id":"34422928022","text":"\"\"\"\nAnalyzes the immune cell population data of melanoma patients and \nidentifies the cell populations significantly responding to treatment tr1.\n\"\"\"\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nimport statsmodels.stats.multitest as smm\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Load data\ndata = pd.read_csv('./cell-count.csv')\ndata.head()\ndata.shape\n\npopulations = ['b_cell', 'cd8_t_cell', 'cd4_t_cell', 'nk_cell', 'monocyte']\n\n\ndef convert_cellcount_to_relative_freq_perc(populations, df):\n \"\"\"Convert cell count to relative frequency (%) \"\"\"\n\n for cell_pop in populations:\n df[cell_pop + '_rel_freq'] = df[cell_pop]/data['total_count'] * 100\n return df\n\n\ndef boxplot_fig(df):\n \"\"\" Creates Box-plot of the population relative frequencies comparing responders vs non-responders for tr1 \"\"\"\n plt.figure()\n\n sns.boxplot(x=df['population'],\n y=df['percentage'],\n hue=df['response'])\n\n plt.xlabel(\"Cell Population\")\n plt.ylabel(\"Relative Frequency %\")\n 
plt.title(\"Population relative frequency: Responders vs Non-responders to treatment tr1\")\n\n return plt\n\n\ndef t_test(populations, df):\n \"\"\" Conducts a t-test to identify which cell population shows a significant difference\n between responders and non-responders \"\"\"\n pvalues_pop = []\n for cell_pop in populations:\n group1 = df[(df['population'] == cell_pop) & (df['response'] == 'y')]\n group2 = df[(df['population'] == cell_pop) & (df['response'] == 'n')]\n t_stat, p_value = stats.ttest_ind(group1['percentage'], group2['percentage'])\n pvalues_pop.append(p_value)\n\n # FDR correction to adjust the p-values (multiple comparisons correction)\n corrected_p_values = smm.fdrcorrection(pvalues_pop)[1]\n return corrected_p_values\n\n\nif __name__ == '__main__':\n ''' \n 1.\tPlease write a python program to convert cell count in cell-count.csv to relative frequency (in percentage) of \n total cell count for each sample. \n Total cell count of each sample is the sum of cells in the five populations of that sample. \n Please return an output file in csv format with cell count and relative frequency of each population\n of each sample per line. \n The output file should have the following columns:\n\n sample: the sample id as in column sample in cell-count.csv\n total_count: total cell count of sample\n population: name of the immune cell population (e.g. b_cell, cd8_t_cell, etc.)\n count: cell count\n percentage: relative frequency in percentage\n '''\n\n # Compute total count of each sample: sum of cell counts in five populations\n data['total_count'] = data[populations].sum(axis=1)\n\n # Convert cell count to relative frequency (%)\n data = convert_cellcount_to_relative_freq_perc(populations, data)\n\n # Create new dataframe to be stored as output with cell count and relative frequency of each population\n # of each sample per line\n new_df = pd.melt(data, id_vars=['sample', 'total_count'], value_vars=populations,\n var_name='population', value_name='count')\n temp_perc = []\n for cell_pop in populations:\n temp_perc.extend(data[cell_pop + '_rel_freq'])\n new_df['percentage'] = temp_perc\n\n # Save the output file\n new_df.to_csv('./cell-count-relative-freq.csv', index=False)\n\n '''\n 2.\tAmong patients who have treatment tr1, we are interested in comparing the differences in cell population relative \n frequencies of melanoma patients who respond (responders) to tr1 versus those who do not (non-responders), with the \n overarching aim of predicting response to treatment tr1. Response information can be found in column response, with \n value y for responding and value n for non-responding. Please only include PBMC (blood) samples. \n\n a.\tFor each immune cell population, please generate a boxplot of the population relative frequencies \n comparing responders versus non-responders.\n\n b.\tWhich cell populations show a difference between responders and non-responders? 
\n Please include statistics to support your conclusion.\n\n '''\n # Filter patients with PBMC blood samples that were given treatment tr1\n data_tr1 = data[(data['treatment'] == 'tr1') & (data['sample_type'] == 'PBMC')]\n\n # Drop samples with response na\n data_tr1 = data_tr1.dropna(subset=['response'])\n\n # Filter the output dataframe with relative frequency of each cell population per line\n new_df_tr1 = new_df[new_df['sample'].isin(data_tr1['sample'])]\n\n # Add response variable to the filtered output dataframe\n temp_resp = list(data_tr1['response']) * len(populations)\n new_df_tr1['response'] = temp_resp\n new_df_tr1.reset_index(inplace=True, drop=True)\n\n # Box-plot of the population relative frequencies comparing responders vs non-responders for tr1\n plot = boxplot_fig(new_df_tr1)\n plot.show()\n\n # T-test to identify which cell population shows a significant difference between responders and non-responders\n p_values = t_test(populations, new_df_tr1)\n\n # Result: Print Cell populations showing significant difference between responders and non-responders\n index = [ind for ind, x in enumerate(list(p_values)) if x < 0.05]\n signif_diff_pop = [populations[i] for i in index]\n print('Cell populations showing significant difference between responders and non-responders: ', signif_diff_pop)\n","repo_name":"mbaxip/Python_Code_Challenges","sub_path":"Cell_count_coding-problem/Cell_count_python.py","file_name":"Cell_count_python.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"247204259","text":"from helper.get_price import (\n get_raw_price_async,\n get_clean_price,\n compute_arb_opportunities,\n get_output,\n)\nfrom pprint import pprint\n\n\ndef get_list_arb():\n \"\"\"Run the arb finder\n\n Returns:\n List: List sorted by % of all arb opportunities found.\n \"\"\"\n dict_price_raw = get_raw_price_async()\n dict_clean_price = get_clean_price(dict_price_raw)\n list_arb_price = compute_arb_opportunities(dict_clean_price)\n res = get_output(list_arb_price)\n sorted_list_arb = sorted(res.items(), key=lambda i: i[1][\"%\"], reverse=True)\n pprint(sorted_list_arb)\n return sorted_list_arb\n\n\nif __name__ == \"__main__\":\n get_list_arb()\n","repo_name":"edd34/oracle_orfeed","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"28414264329","text":"from Crypto.Cipher import AES\nimport base64, hashlib, json\nfrom app.services import payment\nfrom app.models import Vault\nfrom app.utils import further_processing, standardize_response\n\nclass CardRepo:\n gateway = 'briantree'\n available_gateways = ['stripe', 'briantree']\n\n def __init__(self, gateway = None):\n if gateway is not None and gateway in self.available_gateways:\n self.gateway = gateway\n\n def create_token(self, user, card_number):\n md5Key = hashlib.md5(user.encryption_key.encode(\"utf-8\")).digest()\n md5Key = md5Key+md5Key[0:16]\n\n blockSize = 16\n padDiff = blockSize - len(card_number) % blockSize\n padding = chr(padDiff)*padDiff\n card_number += padding\n cipher = AES.new(md5Key, AES.MODE_CBC, user.iv_string)\n ciphertext = base64.b64encode(cipher.encrypt(card_number)).decode('utf-8')\n return ciphertext\n\n def decode_token(self, user, token):\n md5Key = hashlib.md5(user.encryption_key.encode(\"utf-8\")).digest()\n md5Key = md5Key+md5Key[0:16]\n\n cipher = AES.new(md5Key, 
AES.MODE_CBC, user.iv_string)\n\n decrypted = cipher.decrypt(base64.b64decode(token)).decode(\"utf-8\")\n return decrypted[:decrypted.rfind('}')+1]\n\n def pay(self, data, user):\n methods = {\n 'briantree': payment.Briantree(),\n 'stripe': payment.Stripe()\n }\n vault = Vault.query.filter_by(user_id=user.id).filter_by(uuid=data['token']).first()\n data['card'] = json.loads(self.decode_token(user, vault.card_token))\n status = methods[self.gateway].pay(data)\n\n response = standardize_response(self.gateway, status)\n if response == True:\n return {\"status\": \"success\", \"message\": \"charge successful\"}\n elif response == False:\n return {\"status\": \"error\", \"message\": \"charge failure\"}, 500\n else:\n # do further processing on the transaction\n return further_processing(self.gateway, response)\n","repo_name":"faradayyg/card-token-generator","sub_path":"app/repositories/cardRepository.py","file_name":"cardRepository.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"74229951522","text":"\"\"\"\nManipulate a cv2 video with shadertoy.\n\nDependencies:\n pip install opencv-python\n\n\"\"\"\nfrom __future__ import annotations\n\nimport arcade\nfrom arcade.experimental.shadertoy import Shadertoy\nimport cv2 # type: ignore\n\nSCREEN_WIDTH = 400\nSCREEN_HEIGHT = 300\nSCREEN_TITLE = \"ShaderToy Video\"\n\n\nclass ShadertoyVideo(arcade.View):\n \"\"\"\n Can be used to add effects like rain to the background of the game.\n Make sure to inherit this view and call super for `__init__`, `on_draw`, `on_update` and `on_resize`.\n \"\"\"\n\n def __init__(self, path: str):\n super().__init__()\n self.shadertoy = Shadertoy(\n self.window.get_framebuffer_size(),\n \"\"\"\n void mainImage( out vec4 fragColor, in vec2 fragCoord )\n {\n // Calculate the texture coordinate of the current fragment.\n // This interpolates from 0,0 to 1,1 from lower left to upper right\n vec2 uv = fragCoord.xy / iResolution.xy;\n\n // Alter texture coordinates to make some waves\n vec2 pos = uv - vec2(0.5);\n float dist = length(pos) - iTime / 5.0;\n vec2 direction = normalize(pos);\n vec2 uv2 = uv + (direction * (sin(dist * 50.0 - iTime) - 0.5)) * 0.02;\n\n fragColor = texture(iChannel0, uv2);\n }\n \"\"\",\n )\n self.video = cv2.VideoCapture(str(arcade.resources.resolve_resource_path(path)))\n width, height = (\n int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)),\n )\n self.video_texture = self.window.ctx.texture((width, height), components=3)\n self.video_texture.wrap_x = self.window.ctx.CLAMP_TO_EDGE\n self.video_texture.wrap_y = self.window.ctx.CLAMP_TO_EDGE\n self.video_texture.swizzle = \"BGR1\"\n self.shadertoy.channel_0 = self.video_texture\n self.window.set_size(width, height)\n\n def on_draw(self):\n self.clear()\n self.shadertoy.render()\n\n def on_update(self, delta_time: float):\n self.shadertoy.time += delta_time\n self.next_frame()\n\n def on_resize(self, width: int, height: int):\n super().on_resize(width, height)\n self.shadertoy.resize(self.window.get_framebuffer_size())\n\n def next_frame(self):\n exists, frame = self.video.read()\n frame = cv2.flip(frame, 0)\n if exists:\n 
self.video_texture.write(frame)\n","repo_name":"pythonarcade/arcade","sub_path":"arcade/experimental/shadertoy_video_cv2.py","file_name":"shadertoy_video_cv2.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":1537,"dataset":"github-code","pt":"54"} +{"seq_id":"12255423565","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 18 17:43:49 2018\r\n\r\n@author: Michael\r\n\"\"\"\r\n\r\nimport random\r\nimport sys\r\n\r\n\"\"\"\r\nINPUT name of files to process and desired length of substrings\r\n\"\"\"\r\n\r\nletters = 'abcdefghijklmnopqrstuvwxyz'\r\n\r\nf_name = sys.argv[1]\r\nlength = int(sys.argv[2])\r\n\r\n#retrieve the data from the desired file\r\nraw = open(f'{f_name}.txt','r')\r\ndata = raw.read()\r\nraw.close()\r\n\r\n#create samples\r\nnew_file = open(f'human_data_len{length}.txt','w+')\r\nfor starts in range(0, len(data), length//2): #duplicate middle sections\r\n \r\n #make sequences of given length\r\n if starts+length < len(data): # len to make must be less than whats left\r\n new_file.write(data[starts:starts+length] + '\\n')\r\n \r\nnew_file.close()","repo_name":"MACarolan/Human-or-Random","sub_path":"Data Creator/Splitter/Splitter.py","file_name":"Splitter.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12430469877","text":"\"\"\"\nWrite a program to print the hours of the day from 0:0:0 to 23:59:59,\neach on a separate line.\n\nTimes should be written in the format \"{hour} : {minutes} : {seconds} \".\n\"\"\"\n\n\nhour = 23\nminutes = 59\nseconds = 59\n\nfor i in range(hour + 1):\n for m in range(minutes + 1):\n for s in range(seconds + 1):\n print(f\"{i} : {m} : {s}\")\n","repo_name":"KirilMadzharov/Programming-Basics-with-Python-February-2023","sub_path":"4. For Loop/3. More Exercise/10. Clock - part 2.py","file_name":"10. 
Clock - part 2.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7816253628","text":"import datetime\nfrom botbuilder.core import ActivityHandler, TurnContext, MessageFactory\nfrom botbuilder.schema import SuggestedActions, CardAction, ChannelAccount\nfrom options_reply import pergunta\nfrom check_id import check_id\n\nclass MyBot(ActivityHandler):\n \n async def on_message_activity(self, turn_context: TurnContext):\n # isso vai ser executado cada vez que o bot receber uma mensagem\n\n # SENDER ID\n sender_id = turn_context.activity.from_property.id\n is_first_timer = check_id(sender_id)\n if not is_first_timer:\n await turn_context.send_activity('Bem vindo!')\n # SENDER ID\n\n # # DATETIME\n # data = datetime.datetime.today().date()\n # hora = datetime.datetime.today().time()\n # await turn_context.send_activity(data, hora)\n # # DATETIME\n\n if turn_context.activity.text == 'Sim, por favor': \n # se a mensagem for 'Gostaria do Evangelho de hoje, por favor', ele vai executar esse if\n exec(open(\"get_api.py\").read()) # executa o arquivo api.py que cria um arquivo evangelho.txt\n meuArquivo = open(\"evangelho.txt\", 'r') # le o arquivo evangelho.txt\n evangelho = meuArquivo.read() # cria uma string com o que tem escrito no arquivo\n await turn_context.send_activity(evangelho) # manda uma mensagem (turn_context.send_activity) com o evangelho\n\n elif turn_context.activity.text == 'Não, obrigado':\n # se a mensagem for 'Não, obrigado', ele vai responder com 'Paz de Cristo'\n await turn_context.send_activity('Paz de Cristo')\n \n else:\n await turn_context.send_activity('Desculpe, não entendi. Você gostaria do evangelho de hoje?') # vai dizer que não entendeu\n # e mandar as opções de novo\n await turn_context.send_activity(pergunta)\n\n async def on_members_added_activity(\n self,\n members_added: [ChannelAccount],\n turn_context: TurnContext\n ):\n for member_added in members_added:\n if member_added.id != turn_context.activity.recipient.id:\n await turn_context.send_activity(\"Olá, bem-vindo(a) ao bot do evangelho! 
Você gostaria de receber o evangelho de hoje?\")\n await turn_context.send_activity(pergunta)","repo_name":"caioagralemos/Bot-do-Evangelho","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73706225122","text":"import json, os\r\nfrom biqe.util import *\r\n\r\n\r\ndef process_bio():\r\n data_dir = './data'\r\n graph, feature_modules, node_maps = load_graph(data_dir, 128)\r\n print(\"Loading edge data..\")\r\n test_queries = load_test_queries_by_formula(data_dir + \"/test_edges.pkl\")\r\n train_queries = load_queries_by_formula(data_dir + \"/train_edges.pkl\")\r\n val_queries = load_test_queries_by_formula(data_dir + \"/val_edges.pkl\")\r\n\r\n print(\"Loading query data..\")\r\n for i in range(2, 4):\r\n train_queries.update(load_queries_by_formula(data_dir + \"/train_queries_{:d}.pkl\".format(i)))\r\n i_val_queries = load_test_queries_by_formula(data_dir + \"/val_queries_{:d}.pkl\".format(i))\r\n val_queries[\"one_neg\"].update(i_val_queries[\"one_neg\"])\r\n val_queries[\"full_neg\"].update(i_val_queries[\"full_neg\"])\r\n i_test_queries = load_test_queries_by_formula(data_dir + \"/test_queries_{:d}.pkl\".format(i))\r\n test_queries[\"one_neg\"].update(i_test_queries[\"one_neg\"])\r\n test_queries[\"full_neg\"].update(i_test_queries[\"full_neg\"])\r\n # bhushan\r\n write_queries(test_queries, 'test_one.json', is_train=False)\r\n write_queries(val_queries, 'dev_one.json', is_train=False)\r\n write_queries(train_queries, 'train.json', is_train=True)\r\n exit()\r\n\r\n\r\ndef gen_path(q):\r\n # note that target is at beg of array and anchors at end.\r\n path = []\r\n if q.formula.query_type in {'1-chain','2-chain','3-chain'}:\r\n assert len(q.anchor_nodes)==1\r\n path = ['[MASK]'] + ['-'.join(p) for p in q.formula.rels] + [str(q.anchor_nodes[0])]\r\n path = '-#-'.join(path)\r\n elif q.formula.query_type in {'2-inter','3-inter'}:\r\n path = []\r\n for count, s in enumerate(q.anchor_nodes):\r\n segment = ['[MASK]']\r\n rel = '-'.join(q.formula.rels[count])\r\n segment.append(rel)\r\n segment.append(str(s))\r\n path.append('-#-'.join(segment))\r\n path = '[SEP]'.join(path)\r\n elif q.formula.query_type =='3-chain_inter':\r\n path = []\r\n for count, s in enumerate(q.anchor_nodes):\r\n segment = ['[MASK]']\r\n segment.append('-'.join(q.formula.rels[0]))\r\n rel = '-'.join(q.formula.rels[-1][count])\r\n segment.append(rel)\r\n segment.append(str(s))\r\n path.append('-#-'.join(segment))\r\n path = '[SEP]'.join(path)\r\n elif q.formula.query_type == '3-inter_chain':\r\n path = []\r\n for count,s in enumerate(q.anchor_nodes):\r\n segment = ['[MASK]']\r\n if count<=0:\r\n rel = '-'.join(q.formula.rels[count])\r\n else:\r\n rel = '-'.join(q.formula.rels[count][0]) + '-#-' + '-'.join(q.formula.rels[count][1])\r\n segment.append(rel)\r\n segment.append(str(s))\r\n path.append('-#-'.join(segment))\r\n path = '[SEP]'.join(path)\r\n return path\r\n\r\ndef write_queries(data_queries, f_name, is_train=False):\r\n data = []\r\n data_dir = os.path.join(\"./data\",f_name)\r\n data_queries = data_queries if is_train else data_queries['one_neg']\r\n for formula_type in data_queries:\r\n formulas = data_queries[formula_type]\r\n for formula in formulas:\r\n for q in formulas[formula]:\r\n ex = dict()\r\n ex['target'] = q.target_node\r\n ex['anchors'] = q.anchor_nodes\r\n ex['path'] = gen_path(q)\r\n ex['neg_samples'] = [] if is_train else q.neg_samples\r\n ex['type'] = 
formula_type\r\n if q.hard_neg_samples is not None or is_train:\r\n ex['hard_negs'] = q.hard_neg_samples\r\n else:\r\n ex['hard_negs'] = []\r\n data.append(ex)\r\n with open(data_dir,'w',encoding='utf8') as f:\r\n json.dump(data,f,indent=None,separators=(\",\\n\",\": \"))\r\n\r\nif __name__ == \"__main__\":\r\n process_bio()","repo_name":"bhushank/biqe","sub_path":"scripts/process_bio.py","file_name":"process_bio.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"26799306620","text":"# Read user input\ninput_count = int(input())\nsum_even = 0\nsum_odd = 0\n\n# Logic\nfor i in range(0, input_count):\n num = int(input())\n if i % 2 == 0:\n sum_even += num\n else:\n sum_odd += num\n\n# Output\nif sum_even == sum_odd:\n print(f\"Yes\\nSum = {abs(sum_even)}\")\nelse:\n print(f\"No\\nDiff = {abs(sum_even - sum_odd)}\")","repo_name":"NSkorchev/PythonBasicsEx","sub_path":"Lesson 7/OddEvenSum.py","file_name":"OddEvenSum.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10149686064","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\ncurr = 1\r\ngame = True\r\n\r\nwhile game:\r\n die = int(input())\r\n if curr + die <= 100:\r\n curr += die\r\n\r\n if curr == 54:\r\n curr = 19\r\n elif curr == 90:\r\n curr = 48\r\n elif curr == 99:\r\n curr = 77\r\n elif curr == 9:\r\n curr = 34\r\n elif curr == 40:\r\n curr = 64\r\n elif curr == 67:\r\n curr = 86\r\n\r\n if curr == 100:\r\n print(\"You are now on square 100\")\r\n print(\"You Win!\")\r\n game = False\r\n elif die == 0:\r\n print(\"You Quit!\")\r\n game = False\r\n else:\r\n print(\"You are now on square \" + str(curr))\r\n","repo_name":"DavidLoi/DMOJ","sub_path":"CCC/CCC '03 S1 - Snakes and Ladders.py","file_name":"CCC '03 S1 - Snakes and Ladders.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71344428321","text":"import heterocl as hcl\nimport heterocl.op.nn as nn\nimport sys\nimport os\nroot_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(root_dir)\nfrom utils.functions import linear, relu\n\n###############################################################################\n# BasicBlock for ResNet\n###############################################################################\n\ndef nn_basicblock(input,\n weight_conv1, \n gamma_1,beta_1,mean_1,var_1,\n weight_conv2,\n gamma_2,beta_2,mean_2,var_2,\n weight_conv_shortcut=None,\n gamma_s=None,beta_s=None,mean_s=None,var_s=None,\n stride=1,prefix=\"basicblock\"):\n # print(weight_conv1.shape, a_batch_norm1.shape, b_batch_norm1.shape,\n # weight_conv2.shape, a_batch_norm2.shape, b_batch_norm2.shape)\n # if weight_conv_shortcut != None:\n # print(weight_conv_shortcut.shape, a_batch_norm_shortcut.shape,\n # b_batch_norm_shortcut.shape)\n expansion = 1\n batch, in_channels, in_height, in_width = input.shape\n out_channels, channel, kernel_h, kernel_w = weight_conv1.shape\n conv1 = nn.conv2d_nchw(input,weight_conv1,strides=[stride,stride],name=f\"{prefix}_conv1\",padding=[1,1])\n batch_norm1, _, _ = nn.batch_norm(conv1,gamma_1,beta_1,mean_1,var_1,name=f\"{prefix}_bn1\")\n _relu = relu(batch_norm1, name=f\"{prefix}_relu1\")\n conv2 = nn.conv2d_nchw(_relu, weight_conv2, name=f\"{prefix}_conv2\",padding=[1,1])\n batch_norm2, _, _ = 
nn.batch_norm(conv2, gamma_2, beta_2, mean_2, var_2, name=f\"{prefix}_bn2\")\n if stride != 1 or in_channels != expansion * out_channels:\n shortcut = nn.conv2d_nchw(input, weight_conv_shortcut,\n name=f\"{prefix}_convs\", strides=[stride, stride], padding=[0, 0])\n identity, _, _ = nn.batch_norm(shortcut, gamma_s,beta_s,mean_s,var_s,name=f\"{prefix}_bns\")\n else:\n identity = input\n addition = hcl.compute(\n identity.shape, lambda *x: batch_norm2[x] + identity[x],name=f\"{prefix}_add\")\n out = relu(addition, name=f\"{prefix}_relu2\")\n return out\n\n###############################################################################\n# BottleNeck for ResNet\n###############################################################################\n\n\n# def bottleneck(input,\n# weights,\n# name=\"bottleneck\", stride=1\n# ):\n# expansion = 4\n# weight_conv1, a_batch_norm1, b_batch_norm1, weight_conv2, a_batch_norm2, b_batch_norm2, weight_conv3, a_batch_norm3, b_batch_norm3, weight_conv_shortcut, a_batch_norm_shortcut, b_batch_norm_shortcut = weights\n# batch, in_channels, in_height, in_width = input.shape\n# out_channels, channel, kernel_h, kernel_w = weight_conv1.shape\n# identity = input\n# conv1 = conv2d(input, weight_conv1, name=\"bottleneck_conv1\")\n# batch_norm1 = batchnorm2d(conv1, a_batch_norm1,\n# b_batch_norm1, name=\"bottleneck_bn1\")\n# _relu1 = relu(batch_norm1, name=\"bottleneck_relu1\")\n# conv2 = conv2d(_relu1, weight_conv2, name=\"bottleneck_conv2\",\n# stride=[stride, stride])\n# batch_norm2 = batchnorm2d(conv2, a_batch_norm2,\n# b_batch_norm2, name=\"bottleneck_bn2\")\n# _relu2 = relu(batch_norm2, name=\"bottleneck_relu2\")\n# conv3 = conv2d(_relu2, weight_conv3, name=\"bottleneck_conv3\")\n# batch_norm3 = batchnorm2d(conv3, a_batch_norm3,\n# b_batch_norm3, name=\"bottleneck_bn3\")\n# if stride != 1 or in_channels != out_channels * expansion:\n# identity = conv2d(identity, weight_conv_shortcut,\n# name=\"bottleneck_conv_shortcut\", stride=[stride, stride])\n# identity = batchnorm2d(identity, a_batch_norm_shortcut,\n# b_batch_norm_shortcut, name=\"bottleneck_bn_shortcut\")\n\n# out = batch_norm3 + identity\n# out = relu(out, name=\"bn_output_relu\")\n\n# return out\n\n\ndef make_layer(input, num_blocks, stride, weights, type):\n \"\"\"make resnet layers (by 'layer' I didn't mean this 'layer' was the\n same as a neural network layer, e.g. 
conv layer), one layer may\n contain more than one residual block\n\n Args:\n block: block type, basic block or bottle neck block\n out_channels: output depth channel number of this layer\n num_blocks: how many blocks per layer\n stride: the stride of the first block of this layer\n weights = [(weight_conv1, a_batch_norm1, b_batch_norm1,\n weight_conv2, a_batch_norm2, b_batch_norm2,\n weight_conv_shortcut, a_batch_norm_shortcut, b_batch_norm_shortcut),...]\n\n Return:\n return a resnet layer\n \"\"\"\n\n # we have num_block blocks per layer, the first block\n # could be 1 or 2, other blocks would always be 1\n strides = [stride] + [1] * (num_blocks - 1)\n for i, stride in enumerate(strides):\n if type == 0: # 0 is basicblock\n input = nn_basicblock(input, weights[i], stride=stride)\n elif type == 1: # 1 is bottleneck\n pass\n # input = bottleneck(input, weights[i], stride=stride)\n\n return input\n\n\ndef nn_resnet18(input_image,\n weight_conv1, conv1_gamma_1,conv1_beta_1,conv1_mean_1,conv1_var_1,\n conv2_0_conv1, conv2_0_bn_gamma_1, conv2_0_bn_beta_1,conv2_0_bn_mean_1,conv2_0_bn_var_1, conv2_0_conv2, conv2_0_bn_gamma_2, conv2_0_bn_beta_2,conv2_0_bn_mean_2,conv2_0_bn_var_2,\n conv2_1_conv1, conv2_1_bn_gamma_1, conv2_1_bn_beta_1,conv2_1_bn_mean_1,conv2_1_bn_var_1, conv2_1_conv2, conv2_1_bn_gamma_2, conv2_1_bn_beta_2,conv2_1_bn_mean_2,conv2_1_bn_var_2,\n conv3_0_conv1, conv3_0_bn_gamma_1, conv3_0_bn_beta_1,conv3_0_bn_mean_1,conv3_0_bn_var_1, conv3_0_conv2, conv3_0_bn_gamma_2, conv3_0_bn_beta_2,conv3_0_bn_mean_2,conv3_0_bn_var_2, conv3_0_conv_s, conv3_0_bn_gamma_s, conv3_0_bn_beta_s, conv3_0_bn_mean_s, conv3_0_bn_var_s,\n conv3_1_conv1, conv3_1_bn_gamma_1, conv3_1_bn_beta_1,conv3_1_bn_mean_1,conv3_1_bn_var_1, conv3_1_conv2, conv3_1_bn_gamma_2, conv3_1_bn_beta_2,conv3_1_bn_mean_2,conv3_1_bn_var_2,\n conv4_0_conv1, conv4_0_bn_gamma_1, conv4_0_bn_beta_1,conv4_0_bn_mean_1,conv4_0_bn_var_1, conv4_0_conv2, conv4_0_bn_gamma_2, conv4_0_bn_beta_2,conv4_0_bn_mean_2,conv4_0_bn_var_2, conv4_0_conv_s, conv4_0_bn_gamma_s, conv4_0_bn_beta_s, conv4_0_bn_mean_s, conv4_0_bn_var_s,\n conv4_1_conv1, conv4_1_bn_gamma_1, conv4_1_bn_beta_1,conv4_1_bn_mean_1,conv4_1_bn_var_1, conv4_1_conv2, conv4_1_bn_gamma_2, conv4_1_bn_beta_2,conv4_1_bn_mean_2,conv4_1_bn_var_2,\n conv5_0_conv1, conv5_0_bn_gamma_1, conv5_0_bn_beta_1,conv5_0_bn_mean_1,conv5_0_bn_var_1, conv5_0_conv2, conv5_0_bn_gamma_2, conv5_0_bn_beta_2,conv5_0_bn_mean_2,conv5_0_bn_var_2, conv5_0_conv_s, conv5_0_bn_gamma_s, conv5_0_bn_beta_s, conv5_0_bn_mean_s, conv5_0_bn_var_s,\n conv5_1_conv1, conv5_1_bn_gamma_1, conv5_1_bn_beta_1,conv5_1_bn_mean_1,conv5_1_bn_var_1, conv5_1_conv2, conv5_1_bn_gamma_2, conv5_1_bn_beta_2,conv5_1_bn_mean_2,conv5_1_bn_var_2,\n weight_fc, bias_fc\n ):\n ''' params: basicblock, [2, 2, 2, 2] '''\n conv1 = nn.conv2d_nchw(input_image, weight_conv1,name=\"conv1_x_0_conv1\",padding=[1,1])\n batch_norm1, _, _ = nn.batch_norm(conv1, conv1_gamma_1, conv1_beta_1,conv1_mean_1,conv1_var_1,name=\"conv1_x_0_bn1\")\n _relu = relu(batch_norm1,name=\"conv1_x_0_relu\")\n\n conv2_x_0 = nn_basicblock(_relu, conv2_0_conv1, conv2_0_bn_gamma_1, conv2_0_bn_beta_1,conv2_0_bn_mean_1,conv2_0_bn_var_1, conv2_0_conv2,\n conv2_0_bn_gamma_2, conv2_0_bn_beta_2,conv2_0_bn_mean_2,conv2_0_bn_var_2,prefix=\"conv2_x_0\")\n conv2_x_1 = nn_basicblock(conv2_x_0, conv2_1_conv1, conv2_1_bn_gamma_1, conv2_1_bn_beta_1,conv2_1_bn_mean_1,conv2_1_bn_var_1, conv2_1_conv2,\n conv2_1_bn_gamma_2, conv2_1_bn_beta_2,conv2_1_bn_mean_2,conv2_1_bn_var_2,prefix=\"conv2_x_1\")\n\n conv3_x_0 = 
nn_basicblock(conv2_x_1, conv3_0_conv1, conv3_0_bn_gamma_1, conv3_0_bn_beta_1,conv3_0_bn_mean_1,conv3_0_bn_var_1, conv3_0_conv2,\n conv3_0_bn_gamma_2, conv3_0_bn_beta_2,conv3_0_bn_mean_2,conv3_0_bn_var_2, conv3_0_conv_s, conv3_0_bn_gamma_s, conv3_0_bn_beta_s,conv3_0_bn_mean_s,conv3_0_bn_var_s, stride=2,prefix=\"conv3_x_0\")\n conv3_x_1 = nn_basicblock(conv3_x_0, conv3_1_conv1, conv3_1_bn_gamma_1, conv3_1_bn_beta_1,conv3_1_bn_mean_1,conv3_1_bn_var_1, conv3_1_conv2,\n conv3_1_bn_gamma_2, conv3_1_bn_beta_2,conv3_1_bn_mean_2,conv3_1_bn_var_2,prefix=\"conv3_x_1\")\n\n conv4_x_0 = nn_basicblock(conv3_x_1, conv4_0_conv1, conv4_0_bn_gamma_1, conv4_0_bn_beta_1,conv4_0_bn_mean_1,conv4_0_bn_var_1, conv4_0_conv2,\n conv4_0_bn_gamma_2, conv4_0_bn_beta_2,conv4_0_bn_mean_2,conv4_0_bn_var_2, conv4_0_conv_s, conv4_0_bn_gamma_s, conv4_0_bn_beta_s,conv4_0_bn_mean_s,conv4_0_bn_var_s, stride=2,prefix=\"conv4_x_0\")\n conv4_x_1 = nn_basicblock(conv4_x_0, conv4_1_conv1, conv4_1_bn_gamma_1, conv4_1_bn_beta_1,conv4_1_bn_mean_1,conv4_1_bn_var_1, conv4_1_conv2,\n conv4_1_bn_gamma_2, conv4_1_bn_beta_2,conv4_1_bn_mean_2,conv4_1_bn_var_2, prefix=\"conv4_x_1\")\n\n conv5_x_0 = nn_basicblock(conv4_x_1, conv5_0_conv1, conv5_0_bn_gamma_1, conv5_0_bn_beta_1,conv5_0_bn_mean_1,conv5_0_bn_var_1, conv5_0_conv2,\n conv5_0_bn_gamma_2, conv5_0_bn_beta_2,conv5_0_bn_mean_2,conv5_0_bn_var_2, conv5_0_conv_s, conv5_0_bn_gamma_s, conv5_0_bn_beta_s, conv5_0_bn_mean_s, conv5_0_bn_var_s, stride=2,prefix=\"conv5_x_0\")\n conv5_x_1 = nn_basicblock(conv5_x_0, conv5_1_conv1, conv5_1_bn_gamma_1, conv5_1_bn_beta_1,conv5_1_bn_mean_1,conv5_1_bn_var_1, conv5_1_conv2,\n conv5_1_bn_gamma_2, conv5_1_bn_beta_2,conv5_1_bn_mean_2,conv5_1_bn_var_2, prefix=\"conv5_x_1\")\n\n avg_pool = nn.avg_pool2d_nchw(conv5_x_1,stride=[1,1],pooling=[conv5_x_1.shape[2],conv5_x_1.shape[3]],padding=[0,0],name=\"avg\")\n avg_view = hcl.compute(\n (avg_pool.shape[0], avg_pool.shape[1]), lambda b, c: avg_pool[b, c, 0, 0],name=\"avg_view\")\n fc = linear(avg_view, weight_fc, bias_fc,name=\"linear\")\n return fc","repo_name":"Yang-Qirui/CNN-Acceleration","sub_path":"examples/my_resnet/resnet_model.py","file_name":"resnet_model.py","file_ext":"py","file_size_in_byte":10272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37657757995","text":"# coding=utf-8\n\nimport os\nimport datetime\nimport time\nimport utils\nimport random\nimport feedparser\n\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext import webapp\nfrom google.appengine.api.channel import channel\n\nfrom models import *\n\nclients = []\n\nclass IndexHandler(webapp.RequestHandler):\n def get(self): \n self.response.headers['Content-Type'] = 'text/html'\n \n client_id = str(random.random())\n clients.append(client_id)\n \n path = os.path.join(os.path.dirname(__file__) + '/../templates/', 'index.html')\n self.response.out.write(template.render(path, {\"token\": channel.create_channel(client_id)}))\n \nclass UpdateHandler(webapp.RequestHandler):\n def get(self):\n \n count = 20\n updates = Update.all().order(\"-date\").fetch(count, int(self.request.get(\"page\")) * count)\n \n self.response.headers['Content-Type'] = 'application/json; charset=utf-8' \n self.response.out.write(utils.updates_to_json(updates))\n \nclass CronHandler(webapp.RequestHandler):\n def get(self): # every 1 minute\n feeds = {\"nrg\": \"http://rss.nrg.co.il/newsflash/\",\n \"וואלה!\": \"http://rss.walla.co.il/?w=/1/22/0/@rss\",\n \"ynet\": 
\"http://www.ynet.co.il/Integration/StoryRss1854.xml\",\n \"mako\": \"http://rcs.mako.co.il/rss/news-israel.xml\"}\n \n updates = []\n\n for name, url in feeds.iteritems():\n for entry in feedparser.parse(url).entries:\n if Update.get_by_key_name(entry.title) is None:\n entry = Update(key_name=entry.title, \n content=utils.force_unicode(entry.title), \n date=datetime.datetime.fromtimestamp(time.mktime(entry.date_parsed)),\n source=utils.force_unicode(name),\n description=utils.force_unicode(entry.description));\n \n entry.put()\n updates.append(entry)\n \n updates.sort(key=lambda item:item.date, reverse=True)\n \n for client_id in clients:\n try:\n channel.send_message(client_id, utils.updates_to_json(updates))\n except:\n clients.remove(client_id)","repo_name":"alongubkin/karmelupdates","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38651314876","text":"from typing import List, Optional\nfrom settings import Settings, settings as base_settings\nfrom . import Idbdriver\nfrom . redis import get_connection\nfrom models.ticker import Tick, Ticker\n\n\nclass Storage:\n\n def __init__(\n self, \n db_client: Idbdriver = get_connection(),\n settings: Settings = base_settings\n ) -> None:\n self.settings = settings\n self.client = db_client\n\n async def get_last_tick(self, ticker: str) -> Optional[Tick]:\n tickets = await self.client.get_ticks(ticker, 1)\n if tickets:\n return tickets[0]\n \n async def get_ticker_history(self, ticker: str, limit: int) -> List[Tick]:\n return await self.client.get_ticks(ticker, limit)\n\n async def update_ticker(self, ticker: Ticker) -> None:\n await self.client.add_tick(ticker.name, ticker.last_tick.dict())\n\n async def save_active_ticker(self, tickers: List[Ticker]) -> None:\n payload = {\n \"active\": list(map(lambda x: x.name, tickers))\n }\n await self.client.set(\"active_tickers\", payload)\n","repo_name":"jzz30owner/test_task","sub_path":"worker/clients/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26776933832","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Project : MeUtils.\n# @File : Bert\n# @Time : 2020/11/20 2:59 下午\n# @Author : yuanjie\n# @Email : yuanjie@xiaomi.com\n# @Software : PyCharm\n# @Description :\n\nfrom meutils.pipe import *\n\nos.environ['TF_KERAS'] = '1'\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow as tf\n\ntf.get_logger().setLevel(40) # logging.ERROR\n\nfrom meutils.np_utils import normalize\nfrom meutils.path_utils import get_module_path\n\nimport zipfile\nfrom bert4keras.backend import keras\nfrom bert4keras.models import build_transformer_model\nfrom bert4keras.tokenizers import Tokenizer\nfrom bert4keras.snippets import sequence_padding\n\n\nclass Bert4Vec(object):\n\n def __init__(self, bert_dir=None):\n\n if bert_dir is None:\n bert_dir = 'chinese_roformer-sim-char-ft_L-6_H-384_A-6'\n\n if not Path(bert_dir).is_dir():\n logger.info(\"下载预训练模型\")\n url = f'https://open.zhuiyi.ai/releases/nlp/models/zhuiyi/{bert_dir}.zip'\n filename = wget.download(url)\n\n assert zipfile.is_zipfile(filename)\n zipfile.ZipFile(filename).extractall()\n logger.info(bert_dir)\n\n self.dict_path = f\"{bert_dir}/vocab.txt\"\n self.config_path = f\"{bert_dir}/bert_config.json\"\n self.checkpoint_path = 
f\"{bert_dir}/bert_model.ckpt\"\n\n self.tokenizer = Tokenizer(self.dict_path, do_lower_case=True)\n\n # 建立加载模型\n logger.info(\"BuildingModel\")\n model_name = 'roformer' if 'roformer' in bert_dir else 'bert'\n\n self._bert = build_transformer_model(\n self.config_path,\n self.checkpoint_path,\n model=model_name,\n with_pool='linear',\n application='unilm',\n return_keras_model=False # True: bert.predict([np.array([token_ids]), np.array([segment_ids])])\n )\n\n self.encoder = keras.models.Model(self._bert.model.inputs, self._bert.model.outputs[0])\n # self._seq2seq = keras.models.Model(self._bert.model.inputs, self._bert.model.outputs[1])\n\n def encode(self, sentences='万物皆可embedding', maxlen=256, batch_size=1000, decimals=6, return_list=True):\n \"\"\"自行设计缓存\"\"\"\n if isinstance(sentences, str):\n sentences = [sentences]\n\n assert isinstance(sentences, (tuple, list))\n\n data = self.sentences2seq(sentences=map(str, sentences), maxlen=maxlen)\n vecs = normalize(np.round(self.encoder.predict(data, batch_size=batch_size), decimals))\n return vecs.tolist() if return_list else vecs\n\n def sentences2seq(self, sentences, maxlen=64):\n batch_token_ids, batch_segment_ids = [], []\n for s in sentences:\n token_ids, segment_ids = self.tokenizer.encode(s, maxlen=maxlen)\n batch_token_ids.append(token_ids)\n batch_segment_ids.append(segment_ids)\n batch_token_ids = sequence_padding(batch_token_ids)\n batch_segment_ids = sequence_padding(batch_segment_ids)\n return batch_token_ids, batch_segment_ids\n\n\nif __name__ == '__main__':\n BERT_HOME = \"/Users/yuanjie/Downloads/chinese_roformer-sim-char-ft_L-6_H-384_A-6\"\n s2v = Bert4Vec(BERT_HOME)\n print(s2v.encode(['万物皆向量']))\n\n from appzoo import App\n\n app = App()\n app.add_route('/simbert', s2v.encode, result_key='vectors', method='POST')\n app.run(access_log=False)\n","repo_name":"yuanjie-ai/x2embedding","sub_path":"x2embedding/zoo.py","file_name":"zoo.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"20461206630","text":"from django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom restshop.api.product.models import Product\nfrom restshop.api.unit.models import Unit\nfrom restshop.api.user.models import Seller\n\n\nclass OrderTestCase(APITestCase):\n order_data = {\n 'name': 'Temp',\n 'phone': '+375555555',\n 'address': '5th Str 55'\n }\n\n def setUp(self):\n self.order_url = reverse('restshop:order-list')\n self.cart_url = reverse('restshop:cart')\n\n User.objects.create_user('temp', 'temp@gmail.com', '123123')\n self.client.login(username='temp', password='123123')\n\n seller_user = User.objects.create_user('nike', 'nike@gmail.com', '123123')\n seller = Seller.objects.create(user=seller_user, name='Nike', address='California')\n\n product = Product.objects.create(seller=seller, title='Nike Huarache')\n Unit.objects.create(product=product, sku='000000', price=95, num_in_stock=10)\n Unit.objects.create(product=product, sku='000001', price=95, num_in_stock=10)\n\n seller_user = User.objects.create_user('adidas', 'adidas@gmail.com', '123123')\n seller = Seller.objects.create(user=seller_user, name='Adidas', address='California')\n\n product = Product.objects.create(seller=seller, title='Adidas Yeezy Boost')\n Unit.objects.create(product=product, sku='100000', price=95, num_in_stock=10)\n Unit.objects.create(product=product, sku='100001', 
price=95, num_in_stock=10)\n Unit.objects.create(product=product, sku='100002', price=95, num_in_stock=0)\n\n def add_to_cart_and_assert(self, unit=None, expected_status=status.HTTP_201_CREATED):\n \"\"\"Add to cart and check if response has the same status as expected.\"\"\"\n if unit is None:\n unit = {'sku': '000000', 'quantity': 2}\n\n response = self.client.post(self.cart_url, unit)\n self.assertEqual(response.status_code, expected_status)\n\n def assert_cart_length(self, expected_length, quantity_of=None):\n \"\"\"Check cart length and, if passed, quantity of the item.\n quantity_of is a 2-item tuple where quantity_of[0] is the index of item\n and quantity_of[1] is the expected quantity.\"\"\"\n response = self.client.get(self.cart_url)\n self.assertEqual(len(response.data['data']), expected_length)\n\n if quantity_of is not None:\n self.assertEqual(response.data['data'][quantity_of[0]]['quantity'], quantity_of[1])\n\n return response\n\n def order_and_assert(self, data=None, expected_status=status.HTTP_201_CREATED):\n if data is None:\n data = self.order_data\n\n response = self.client.post(self.order_url, data)\n self.assertEqual(response.status_code, expected_status)\n\n def assert_orders_length(self, expected_length):\n response = self.client.get(self.order_url)\n self.assertEqual(len(response.data['data']), expected_length)\n\n def test_add_to_cart(self):\n \"\"\"Test adding to cart by a common user.\"\"\"\n self.add_to_cart_and_assert()\n self.assert_cart_length(1)\n\n def test_add_same_unit(self):\n \"\"\"Test adding to cart the same unit.\n Cart should not change the number of elements.\n But quantity should be updated if passed.\"\"\"\n self.add_to_cart_and_assert({'sku': '000000'})\n self.assert_cart_length(1, quantity_of=(0, 1))\n\n self.add_to_cart_and_assert({'sku': '000000', 'quantity': 2})\n self.assert_cart_length(1, quantity_of=(0, 2))\n\n def test_add_nonexistent_unit(self):\n \"\"\"Test adding a nonexistent unit to cart.\"\"\"\n self.add_to_cart_and_assert({'sku': '999'}, expected_status=status.HTTP_400_BAD_REQUEST)\n\n def test_add_too_many_items(self):\n \"\"\"Test adding more items of unit than available.\"\"\"\n self.add_to_cart_and_assert({'sku': '000000', 'quantity': 15},\n expected_status=status.HTTP_400_BAD_REQUEST)\n\n def test_add_to_cart_anonymously(self):\n \"\"\"Test adding unit to cart not being logged in.\"\"\"\n self.client.logout()\n self.add_to_cart_and_assert()\n self.assert_cart_length(1)\n\n def test_order(self):\n \"\"\"Test creating order as a common user.\"\"\"\n self.add_to_cart_and_assert()\n self.order_and_assert()\n self.assert_orders_length(1)\n\n def test_order_with_empty_cart(self):\n \"\"\"Test creating order with no units in cart.\"\"\"\n self.order_and_assert(expected_status=status.HTTP_400_BAD_REQUEST)\n self.assert_orders_length(0)\n\n def test_order_anonymously(self):\n \"\"\"Test order not being logged in.\"\"\"\n self.client.logout()\n self.add_to_cart_and_assert()\n self.order_and_assert()\n\n\nclass UserTestCase(APITestCase):\n def setUp(self):\n self.url = reverse('restshop:auth')\n User.objects.create_user('temp', 'temp@gmail.com', '123123')\n\n def test_login(self):\n \"\"\"Log in as a common user.\"\"\"\n response = self.client.post(self.url, {'username': 'temp', 'password': '123123'})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.client.get(reverse('restshop:order-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.client.logout()\n\n response = 
self.client.get(reverse('restshop:order-list'))\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n def test_logout(self):\n \"\"\"Log out as a common user.\"\"\"\n response = self.client.post(self.url, {'username': 'temp', 'password': '123123'})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.client.get(reverse('restshop:order-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.client.delete(self.url)\n\n response = self.client.get(reverse('restshop:order-list'))\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n","repo_name":"StasDeep/Rest-Shop","sub_path":"restshop_project/restshop/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6108,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"54"} +{"seq_id":"10002855165","text":"# 1. Напишите программу, удаляющую из текста все слова, содержащие \"абв\". В тексте используется разделитель пробел.\n\nimport os\nclear = lambda: os.system('clear')\nclear()\n\nimport random\nfrom random import sample\n\nprint(\"Задача 1. Напишите программу, удаляющую из текста все слова, содержащие 'абв'. В тексте используется разделитель пробел.\")\n\ndef new_list_string ():\n num = int(input(\"Введите количество слов для генерации случайной строки: \"))\n text_temp = str(input(\"Введите символы для генерации случайных слов (и удаления этой последовательности): \"))\n new_list = []\n for i in range (num):\n text = sample (text_temp, k=len(text_temp))\n new_list.append(''.join(text))\n print(' '.join(new_list))\n \n new_list2 = new_list.copy()\n if text_temp not in new_list2:\n print(f'Сгенерированный список не содержит значение: {text_temp}')\n else:\n while text_temp in new_list2:\n new_list2.remove(text_temp)\n print(' '.join(new_list2))\n\nnew_list_string()","repo_name":"SolnechnayaS/GB-Python-HomeWork","sub_path":"HomeWork5/task5_1.py","file_name":"task5_1.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16196671627","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 24 19:21:30 2019\n\n@author: rljahn\n\"\"\"\n\n'''\n# Q13 “2” versus “not 2”\n# plot |w| versus C = { 1e-5, 1e-3, 1e-1, 10, 30}. \n# Describe your findings\n'''\n\nfrom sklearn import svm\nimport util\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\n# load datasets\ntrain, test = util.load_data()\n\n# range of parameter C\nC = [ 1e-5, 1e-3, 1e-1, 1e1, 1e3 ]\n\n# preprocessing \nX,y = util.preprocessing(train, 2.0)\n\n# traing svm and store the L2-norm of w\nw_norm = []\n\nfor c in C:\n print(\">>>>> C = {} >>>\".format(c))\n clf = svm.SVC(C = c, kernel = 'linear')\n clf.fit(X, y)\n print('w = ',clf.coef_)\n print('b = ',clf.intercept_)\n norm = np.linalg.norm(clf.coef_)\n print(\"|w|\", norm)\n w_norm.append(norm) # default L-2 norm\n\n# plot result (|w| v.s. log(C))\nplt.plot(np.log10(C),w_norm)\nplt.scatter(np.log10(C),w_norm)\nplt.xlabel(\"log10(C)\")\nplt.ylabel(\"|w|\")\nplt.savefig(\"hw1_13_log.png\")\nplt.show()\n'''\n# plot result (|w| v.s. C)\nplt.plot(C,w_norm)\nplt.scatter(C,w_norm)\nplt.xlabel(\"C\")\nplt.ylabel(\"|w|\")\nplt.savefig(\"hw1_13.png\")\nplt.show()\n'''\n# Findings QQ? \nprint(\"As stated in the lecture, C is a trade-off of large margin & margin violation.\") \nprint(\"From the plots, we can see a positive trend between log10(C) and |w|. 
With 'larger C',\n the optimization problem is less constrained/regularized, therefore the resulting |w| could be larger.\")\nprint(\"It is also noted that there is a decrease in |w| at C = 10, but the general trend is as stated above.\")\n","repo_name":"skyshine102/NTUCS_ML2019_Tech","sub_path":"hw1/b02901043/hw1_13.py","file_name":"hw1_13.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"30397722248","text":"import numpy as np\nfrom read_img import read_img\nfrom time import time\nimport sys\n\ndef get_outputs(img_name = 'wordle.png'):\n\n '''\n Get outputs for the input image\n \n :param: img_name\n :type: str\n '''\n\n t1 = time()\n\n COLORS = [np.array([[120, 124, 126]]), \\\n np.array([[201, 182, 95]]), \\\n np.array([[106, 172, 105]])]\n\n gray, yellow, green = set(), set(), set()\n poss = [gray, yellow, green]\n\n poss = read_img(poss, COLORS, img_name)\n print (poss)\n\n res, res_full = [], []\n\n # check commonly used words\n with open('words_five.txt', 'rt') as f:\n for line in f: \n judge = True\n\n # check gray \n for c, idx in gray:\n if line[idx] == c: \n judge = False\n break\n\n if judge:\n # check green\n for c, idx in green:\n if line[idx] != c: \n judge = False\n break\n\n if judge:\n # check yellow\n for c, idx in yellow:\n if c not in line:\n judge = False\n break\n elif line[idx] == c:\n judge = False\n break\n if judge: \n res.append(line[: -1])\n \n t2 = time()\n\n # check all words if there are not enough words produced by reading the most frequent word list\n if len(res) <= 10: \n with open('words_five_full.txt', 'rt') as f:\n for line in f: \n judge = True\n\n # check gray \n for c, idx in gray:\n if line[idx] == c: \n judge = False\n break\n \n if judge:\n # check green\n for c, idx in green:\n if line[idx] != c: \n judge = False\n break\n if judge:\n # check yellow\n for c, idx in yellow:\n if c not in line:\n judge = False\n break\n elif line[idx] == c:\n judge = False\n break\n if judge: \n res_full.append(line[: -1])\n \n t3 = time()\n\n # print out results\n if len(res) > 10:\n print ('Some possible commonly used words are ' + ', '.join(res))\n print ('Got these words for you in just %.2f seconds' % (t2 - t1))\n elif len(res_full) < 1:\n print ('No words found based on your input!')\n elif len(res_full) == 1:\n print ('It has got to be {}!'.format(res_full[0]))\n print ('Got this unique word for you in just %.2f seconds' % (t3 - t1))\n else: \n print ('Some possible words are ' + ', '.join(res_full))\n print ('Got these words for you in just %.2f seconds' % (t3 - t1))\n\nif __name__ == '__main__':\n\n if len(sys.argv) > 1: get_outputs(sys.argv[1])\n else: get_outputs()","repo_name":"kagaminearia/wordle_helper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"955397871","text":"
9.1.2\r\ndef file_options(file_name, task):\r\n\tif task == 'sort':\r\n\t\tedit_file = open(file_name, 'r')\r\n\t\tfile_list = edit_file.read().split(' ') \r\n\t\tprint(sorted(file_list))\r\n\t\tedit_file.close()\r\n\r\n\telif task == 'rev':\r\n\t\tedit_file = open(file_name, 'r')\r\n\t\tfor line in edit_file:\r\n\t\t\tprint(line[::-1])\r\n\t\tedit_file.close()\r\n\r\n\telif task == 'last':\r\n\t\tn = int(input('Enter a number: '))\r\n\t\tn = n * (-1)\r\n\t\tfile_list = []\r\n\t\tedit_file = open(file_name, 'r')\r\n\t\tfor line in edit_file:\r\n\t\t\tfile_list += [line]\r\n\t\tfile_list = file_list[n:]\r\n\t\tfor line in file_list:\r\n\t\t\tprint(line)\r\n\t\t#print(file_list[n:], sep=\"\\n\")\r\n\r\ndef main():\r\n\tfile_name = \"C:\\\\Users\\\\user\\\\Desktop\\\\sampleFile.txt\"\r\n\ttask = input('Enter a task: ')\r\n\tfile_options(file_name, task)\r\n\r\nif __name__ == '__main__':\r\n\tmain()","repo_name":"aviorkahalani/self.py","sub_path":"unit 9/exc. 9.1.2.py","file_name":"exc. 9.1.2.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16144843332","text":"class WordsGame:\n last_char = 'Р'\n error_message = 'Тебе на Р'\n dictionary = {'а': 'Ахуеть ты пидар', 'б': 'бля, ну ты и пидар',\n 'в': 'вот жеж ты пидар', 'г': 'Галя, глянь какой пидар',\n 'д': 'друг, извини, но ты пидар', 'е': 'ебать пидар',\n 'ё': 'ёмае ну и пидар', 'ж': 'жаль, но ты пидар',\n 'з': 'заяйка, прости, но ты пидар', 'и': 'и опять же, ты пидар',\n 'к': 'какой же пидар', 'л': 'лох, пидар',\n 'м': 'мне желаь, но ты пидар', 'н': 'ну и пидар',\n 'о': 'охуеть, мисье, но вы пидар', 'п': 'ПИДАР',\n 'р': 'румяный пидар', 'с': 'сука, да ты же пидар',\n 'т': 'тьфу ты пидар', 'у': 'ух сука, да ты же пидар',\n 'ф': 'фу ты пидар', 'х': 'хуясе ты пидар',\n 'ц': 'ц.. 
не придумал, но ты пидар', 'ш': 'шо сука, ты пидар',\n 'щ': 'що сука, ти пiдор', 'э': 'эээ да ты же пидар',\n 'ю': 'Юра не пидар, а ты пидар', 'я': 'я хуею с того, какой ты пидар',\n 'ь': 'не выебывайся', 'ъ': 'не выебывайся',\n 'ы': 'не выебывайся'}\n\n def get_answer(self, message):\n first_char = message[0]\n if first_char.lower() != self.last_char.lower():\n return self.error_message\n else:\n last_char = message[-1]\n return self.dictionary[last_char.lower()]\n","repo_name":"yuridolenko/made_bot","sub_path":"words_game.py","file_name":"words_game.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30993414676","text":"#!/usr/bin/python3\r\nfrom Cleaning import *\r\nfrom News import *\r\nimport pandas as pd\r\nfrom Color import *\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom XML2News import *\r\nimport nltk\r\nimport time\r\nimport random\r\nimport pandas as pd\r\nimport numpy as np\r\n# from sklearn.cluster import DBSCAN\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom onnxmltools import convert_sklearn\r\nfrom onnxmltools.utils import save_model\r\nfrom skl2onnx.common.data_types import FloatTensorType,StringTensorType\r\nimport warnings\r\nwarnings.simplefilter(action='ignore', category=FutureWarning)\r\n\r\nclass Model(object):\r\n \"\"\"docstring for Model.\"\"\"\r\n def show_most_informative_features(self,vectorizer, clf, n=20):\r\n feature_names = vectorizer.get_feature_names()\r\n coefs_with_fns = sorted(zip(clf.coef_[0], feature_names))\r\n top = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1])\r\n for (coef_1, fn_1), (coef_2, fn_2) in top:\r\n predict = self.pipe.predict([fn_2])\r\n if predict=='mostly false' or predict==0:\r\n display(\"\\t%.4f\\t%-15s\\t\\t%.4f\\t%-15s\"%(coef_1, fn_1, coef_2, fn_2),'red')\r\n if predict=='mostly true' or predict==1:\r\n display(\"\\t%.4f\\t%-15s\\t\\t%.4f\\t%-15s\"%(coef_1, fn_1, coef_2, fn_2),'yellow')\r\n def __init__(self,eval,save,evaldiff):\r\n super(Model, self).__init__()\r\n\r\n self.vectorizer = TfidfVectorizer()\r\n self.model = MultinomialNB(alpha=0,fit_prior=False)\r\n # model = SVC(gamma=2, C=1)\r\n # model = MLPClassifier(alpha=1)\r\n\r\n # self.model = SVC(kernel=\"linear\", C=0.025)\r\n self.pipe = []\r\n list_text = []\r\n list_target = []\r\n\r\n # The list is created by the file XML2News.py, it's a list of News object, the parameters is 1 or 2\r\n news_list = createNews(2)\r\n display(\"=> List OK\",'yellow')\r\n # First shuffle of the list, just to mix the false and true data from the creation.\r\n random.shuffle(news_list)\r\n # To avoid a calculation time to long I use only a part of the total list.\r\n news_list = news_list[:]\r\n # Treatement of the news. 
We only need to do it once, so it is not done in the preprocessing;\r\n        # a dedicated function for this could be added in the future\r\n        for index,news in enumerate(news_list):\r\n            # This doesn't return anything; only the getters return the value\r\n            news.clean_text()\r\n            # Skip news items whose cleaned text is empty\r\n            if (len(news.getCleanedText())>0):\r\n                list_text.append(news.getCleanedText())\r\n                list_target.append(news.getVeracity())\r\n            if index%1000==0 :\r\n                print(\"News n{} tagged\".format(index))\r\n        display(\"Clean : OK\",'yellow')\r\n        # Split the corpus into learning and testing sets with a 2/3 : 1/3 ratio\r\n        # \"Corpus\" refers to the text and \"target\" to the veracity label\r\n        mid = 2*round((len(news_list)/3))\r\n        self.appCorpus = list_text[:mid]\r\n        self.testCorpus = list_text[mid:]\r\n        self.appTarget = list_target[:mid]\r\n        self.testTarget = list_target[mid:]\r\n\r\n        self.preprocessing()\r\n        display(\" Preprocessing : OK\",'yellow')\r\n        self.process()\r\n        display(\" Process : OK\",'yellow')\r\n        if eval:\r\n            self.eval()\r\n            display(\" Evaluation : OK\",'yellow')\r\n        if save:\r\n            self.save()\r\n        if evaldiff:\r\n            self.evaldiff()\r\n\r\n    def changeModel(self,model):\r\n        old_model = str(self.model)\r\n        self.model = model\r\n        display(\"Model change from {} to {}\".format(old_model,str(model)),\"yellow\")\r\n        self.preprocessing()\r\n        self.process()\r\n        display(\"New model trained\",\"yellow\")\r\n    def preprocessing(self):\r\n        \"\"\"\r\n        Function that shuffles the corpus for the different models\r\n        Shape :\r\n        appCorpus : List of token lists\r\n        appTarget : List of symbols {'mostly true','mostly false'}\r\n        testCorpus : List of token lists\r\n        testTarget : List of symbols {'mostly true','mostly false'}\r\n        \"\"\"\r\n        # Group the two lists into a list of tuples\r\n        appBoth = list(zip(self.appCorpus,self.appTarget))\r\n        # Shuffle\r\n        random.shuffle(appBoth)\r\n        # Split back into two lists\r\n        self.appCorpus = [x[0] for x in appBoth]\r\n        self.appTarget = [x[1] for x in appBoth]\r\n\r\n    def process(self):\r\n        \"\"\"\r\n        Function that trains the model.\r\n        The parameters have obvious names.\r\n\r\n        \"\"\"\r\n        # Transform the arrays of token arrays into arrays of sentences so the Tfidf vectorizer can work\r\n        joinedTestCorpus = []\r\n        joinedAppCorpus = []\r\n        for array in self.appCorpus:\r\n            joinedAppCorpus.append(' '.join(array))\r\n        for array in self.testCorpus:\r\n            joinedTestCorpus.append(' '.join(array))\r\n        joinedAppCorpus=np.array(joinedAppCorpus)\r\n        joinedTestCorpus=np.array(joinedTestCorpus)\r\n        self.appTarget=np.array(self.appTarget)\r\n        print(joinedAppCorpus.shape)\r\n        print(self.appTarget.shape)\r\n        # The vectorizer is defined at the top of the file; it is a TfidfVectorizer without any customization\r\n        # To ease saving the model, a sklearn pipeline containing a TfidfVectorizer and a Bayesian model is used.\r\n        # The Bayesian model is set without any prior probability to avoid a bias due to a huge gap in the number of samples of each class\r\n        # model = MultinomialNB(alpha=0,fit_prior=False)\r\n        # model = SVC(gamma=2, C=1)\r\n        # model = MLPClassifier(alpha=1)\r\n        self.pipe = make_pipeline(self.vectorizer,self.model)\r\n        self.pipe.fit(joinedAppCorpus,self.appTarget)\r\n        print(type(self.pipe.steps[1][1]))\r\n        self.show_most_informative_features(self.vectorizer, self.pipe.steps[1][1], 20)\r\n\r\n    def save(self):\r\n        # ONNX save (can't save a list of models for now)\r\n\r\n        onx = convert_sklearn(self.pipe, 'Pipe',\r\n                     [('input', StringTensorType([1, 1]))])\r\n\r\n        save_model(onx, \"Model.onnx\")\r\n\r\n        print(\"Model saved\")
(\"Model saved\")\r\n def eval(self):\r\n \"\"\"\r\n Evaluation of the model\r\n Parameters:\r\n - ev: boolean that mean evaluation or not\r\n \"\"\"\r\n joinedTestCorpus = [] # Array of sentence\r\n model_list = [] # List of model\r\n list_text = [] # temporary list of token\r\n list_target = [] # temporary list of veracity\r\n\r\n # Here you can choose the number of model you want to train. In the eventuality of a bagging.\r\n nmodel =1\r\n for i in range(0,nmodel):\r\n\r\n model_list.append(self.pipe)\r\n display(\"Model \"+ str(i) +\" : OK\",'yellow')\r\n display(\"=>DONE Start of the evaluation \",\"yellow\")\r\n\r\n # Evaluation of the model.\r\n\r\n # Creation of the array of sentences fo the test\r\n for array in self.testCorpus:\r\n joinedTestCorpus.append(' '.join(array))\r\n self.testTarget = np.array(self.testTarget)\r\n self.testTarget[self.testTarget=='mostly false']=int(0)\r\n self.testTarget[self.testTarget=='mostly true']=int(1)\r\n self.testTarget = [int(item) for item in self.testTarget]\r\n resList =[]\r\n\r\n # Prediction for each model\r\n for index,model in enumerate(model_list):\r\n print(len(joinedTestCorpus))\r\n predicted = model.predict(np.array(joinedTestCorpus))\r\n predicted = np.array(predicted)\r\n predicted[predicted == 'mostly false']=0\r\n predicted[predicted == 'mostly true']=1\r\n predicted = [int(item) for item in predicted]\r\n resList.append(predicted)\r\n print(\"Model n\"+str(index)+\" used\")\r\n resList = np.array(resList)\r\n # Vertical sum of the result.\r\n pred = list(map(sum,zip(*list(resList))))\r\n\r\n display(\"Accuracy of the combined model = \"+str(accuracy_score(self.testTarget,pred)),'yellow')\r\n print(np.unique(pred))\r\n print(np.unique(self.testTarget))\r\n if len(np.unique(self.testTarget))!=2:\r\n return((accuracy_score(self.testTarget,pred),1,1))\r\n # Creation of the confusion matrix\r\n confusion=confusion_matrix(self.testTarget, pred)\r\n\r\n matrice_confusion = pd.DataFrame(confusion, [\"0\",\"1\"],\r\n [\"0\",\"1\"])\r\n precisionFalse = confusion[0][0]/(np.sum(confusion[0]))\r\n precisionTrue = confusion[1][1]/(np.sum(confusion[1]))\r\n display(\"Precision for False = \"+str(precisionFalse),'yellow')\r\n display(\"Precision for True = \"+str(precisionTrue),'yellow')\r\n pprint(matrice_confusion)\r\n # return((accuracy_score(self.testTarget,pred),precisionFalse,precisionTrue))\r\n\r\n def evaldiff(self):\r\n corpus=createNews(1)\r\n joinedCorpus=[]\r\n list_text = []\r\n list_target = []\r\n for index,news in enumerate(corpus):\r\n # This don't return anything, only the getters return the value\r\n news.clean_text()\r\n # I don't take the text without texts\r\n if (len(news.getCleanedText())>0):\r\n list_text.append(news.getCleanedText())\r\n list_target.append(news.getVeracity())\r\n if index%1000==0 :\r\n print(\"News n{} tagged\".format(index))\r\n display(\"Clean : OK\",'yellow')\r\n for array in list_text:\r\n\r\n joinedCorpus.append(' '.join(array))\r\n corpusTarget = list_target\r\n print(np.unique(corpusTarget))\r\n print(corpusTarget)\r\n newcorpus = []\r\n for item in corpusTarget:\r\n if item == 'mostly false':\r\n newcorpus.append(0)\r\n else:\r\n newcorpus.append(1)\r\n corpusTarget = newcorpus\r\n\r\n corpusTarget = [int(item) for item in corpusTarget]\r\n predicted = self.pipe.predict(np.array(joinedCorpus))\r\n predicted[predicted == 'mostly false']=int(0)\r\n predicted[predicted == 'mostly true']=int(1)\r\n predicted = [int(item) for item in predicted]\r\n print(predicted)\r\n\r\n display(\"Accuracy of 
the combined model = \"+str(accuracy_score(corpusTarget, predicted)),'yellow')\r\n confusion=confusion_matrix(corpusTarget, predicted)\r\n matrice_confusion = pd.DataFrame(confusion, [\"0\",\"1\"],\r\n [\"0\",\"1\"])\r\n precisionFalse = confusion[0][0]/(np.sum(confusion[0]))\r\n precisionTrue = confusion[1][1]/(np.sum(confusion[1]))\r\n display(\"Precision for False = \"+str(precisionFalse),'yellow')\r\n display(\"Precision for True = \"+str(precisionTrue),'yellow')\r\n pprint(matrice_confusion)\r\n return((accuracy_score(corpusTarget, predicted),precisionFalse,precisionTrue))\r\n\r\n\r\nclassifiers = [MultinomialNB(alpha=0,fit_prior=False),SVC(gamma=2, C=1),MLPClassifier(alpha=1),SVC(kernel=\"linear\", C=0.025)]\r\nres = []\r\nmodels = []\r\nmodels.append(Model(False,True,False))\r\nfor model in models:\r\n res.append(model.eval())\r\n # try:\r\n # res.append(model.eval())\r\n # except IndexError as e:\r\n # pass\r\nfor line in res:\r\n\r\n display(\"Total accuracy = {}\".format(line[0]),\"cyan\")\r\n display(\"False class accuracy = {}\".format(line[1]),\"magenta\")\r\n display(\"True accuracy = {}\".format(line[2]),\"cyan\")\r\n","repo_name":"msvaillant/fake-news","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":11821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39980233991","text":"import json\nimport os\n\ndef files_creation():\n if not os.path.exists('data'):\n os.mkdir('data')\n if not os.path.exists('data/contests_id.txt'):\n os.mknod('data/contests_id.txt')\n if not os.path.exists('data/nicknames.txt'):\n os.mknod('data/nicknames.txt')\n if not os.path.exists('data/meta.txt'):\n os.mknod('data/meta.txt')\n\ndef specify_meta_information():\n m = {}\n\n print('Specify meta information:')\n print(\"Input name of the group:\")\n m['Group'] = input().strip()\n print('Input desirable name of the excel file:')\n m['FILE_NAME_DISK'] = input().strip()\n print('Input your Codeforces API key:')\n m['API_KEY'] = input().strip()\n print('Input your Codeforces Secret key:')\n m['SECRET'] = input().strip()\n with open('data/meta.txt', 'w') as file:\n json.dump(m, file, ensure_ascii=False)\n\nif __name__ == '__main__':\n files_creation()\n specify_meta_information()\n\n","repo_name":"dingearteom/Codeforces_rating","sub_path":"specify_meta_information.py","file_name":"specify_meta_information.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38956202103","text":"# coding: utf-8\n# 给定一个二叉树,它的每个结点都存放一个 0-9 的数字,每条从根到叶子节点的路径都代表一个数字。\n# 例如,从根到叶子节点路径 1->2->3 代表数字 123。\n# 计算从根到叶子节点生成的所有数字之和。\n# 说明: 叶子节点是指没有子节点的节点。\n#\n# 示例 1:\n# 输入: [1,2,3]\n# 1\n# / \\\n# 2 3\n# 输出: 25\n# 解释:\n# 从根到叶子节点路径 1->2 代表数字 12.\n# 从根到叶子节点路径 1->3 代表数字 13.\n# 因此,数字总和 = 12 + 13 = 25.\n# 示例 2:\n# 输入: [4,9,0,5,1]\n# 4\n# / \\\n# 9 0\n#  / \\\n# 5 1\n# 输出: 1026\n# 解释:\n# 从根到叶子节点路径 4->9->5 代表数字 495.\n# 从根到叶子节点路径 4->9->1 代表数字 491.\n# 从根到叶子节点路径 4->0 代表数字 40.\n# 因此,数字总和 = 495 + 491 + 40 = 1026.\n\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def sumNumbers(self, root):\n \"\"\"\n 递归\n :type root: TreeNode\n :rtype: int\n \"\"\"\n return self.calSum(root,0)\n\n def calSum(self, root, cur_sum):\n if root is None:\n return 0\n else:\n cur_sum = cur_sum * 10 + root.val\n if root.left is None and root.right is None:\n return cur_sum\n else:\n return 
self.calSum(root.left, cur_sum) + self.calSum(root.right, cur_sum)\n","repo_name":"yunzhongETian/leetcode_python","sub_path":"二叉树操作/129_求根到叶子节点数字之和/sum_root_to_leaf_numbers.py","file_name":"sum_root_to_leaf_numbers.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"12846551008","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 24 14:01:44 2021\r\n\r\n@author: ZR_YL\r\n\"\"\"\r\n\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn import metrics\r\nfrom scipy.optimize import leastsq\r\nimport math\r\n#import predict_economic_LDM as predict\r\nimport algorithms.predict_inputdata as predict\r\nfrom dao.interface import getData\r\nimport json\r\nfrom algorithms.evaluation import RMSE,MAPE\r\n\r\n# Note: the pretype and city defaults are Chinese database keys (total electricity consumption; Yunnan Province) and must stay as-is.\r\ndef LDM(PreStartYear,PreEndYear,buildingarea,loaddensity,pretype=\"全社会用电量\",city=\"云南省\"):\r\n    \r\n    def Density(n,Dlist,Plist):\r\n        # n is the number of zones considered; Dlist holds the matching load densities, Plist the matching building areas\r\n        load=0\r\n        for i in range(n):\r\n            load=Dlist[i]*Plist[i]+load\r\n        \r\n        return load\r\n    \r\n    data1 = pd.read_csv(buildingarea, encoding=\"UTF-8\")\r\n    data2=pd.read_csv(loaddensity, encoding=\"UTF-8\")\r\n    columns=data1.columns\r\n    columns2=data2.columns\r\n    \r\n    if len(columns) != len(columns2):\r\n        raise ValueError(\"The load-density and building-area tables do not match; please upload them again\")\r\n    elif not (data1[columns[0]].values == data2[columns2[0]].values).all():\r\n        raise ValueError(\"The load-density and building-area tables do not match; please upload them again\")\r\n    else:\r\n        StartYear = str(data1[columns[0]].values[0])\r\n        EndYear = str(data1[columns[0]].values[-1])\r\n    # Forecast the building-area data\r\n    building=predict.pre(data1.loc[:,[columns[0],columns[1]]],columns[1],int(PreStartYear),int(PreEndYear))\r\n    for i in range(2,len(columns)):\r\n        c=predict.pre(data1.loc[:,[columns[0],columns[i]]],columns[i],int(PreStartYear),int(PreEndYear))\r\n        building=pd.merge(building,c,on=columns[0])\r\n    \r\n    # Forecast the load density\r\n    density=predict.pre(data2.loc[:,[columns2[0],columns2[1]]],columns2[1],int(PreStartYear),int(PreEndYear))\r\n    for i in range(2,len(columns2)):\r\n        c=predict.pre(data2.loc[:,[columns2[0],columns2[i]]],columns2[i],int(PreStartYear),int(PreEndYear))\r\n        density=pd.merge(density,c,on=columns2[0])\r\n    \r\n    \r\n    # Read the historical load data\r\n    period=int(EndYear)-int(StartYear)+1\r\n    finaldata=[]\r\n    name=[pretype]\r\n    datajson = getData(\"云南省_year_电力电量类\", pretype, StartYear, EndYear)\r\n    data=json.loads(datajson)\r\n    finaldata.append(data)\r\n    \r\n    final=pd.DataFrame(finaldata,index=name)\r\n    final=final.T\r\n    \r\n    trainx=[]\r\n    start=0 # start position of the training set\r\n    for i in range(start,period):\r\n        d=[building[columns[-1]].values[i]]\r\n        b=[density[columns[-1]].values[i]]\r\n        trainx.append(Density(1,d,b))\r\n    \r\n    trainy=[]\r\n    trainyear=[]\r\n    for j in range(period):\r\n        if int(final.index.values[j]) in data1[\"year\"].values[start:]:\r\n            trainy.append(final[pretype].values[j])\r\n            trainyear.append(final.index.values[j])\r\n    \r\n    prex=[]\r\n    \r\n    for a in range(period,len(building.values)):\r\n        d=[building[columns[-1]].values[a]]\r\n        b=[density[columns[-1]].values[a]]\r\n        prex.append(Density(1,d,b))\r\n    \r\n    trainx=np.array(trainx).reshape(-1,1)\r\n    trainy=np.array(trainy).reshape(-1,1)\r\n    prex=np.array(prex).reshape(-1,1)\r\n\r\n    # Train the model\r\n    reg = LinearRegression().fit(trainx, trainy)\r\n    prey = [x * reg.coef_[0][0] + reg.intercept_[0] for x in prex]\r\n    \r\n    pretrainy= [tx * reg.coef_[0][0] + reg.intercept_[0] for tx in trainx]\r\n    ypre=np.array(prey).reshape(1,-1).squeeze()\r\n    ytrain=np.array(pretrainy).reshape(1,-1)\r\n    \r\n    mape=MAPE(pretrainy,trainy) # compare the fitted values with the actual targets, not the inputs\r\n    rmse=RMSE(pretrainy,trainy)\r\n    \r\n    # Return the results\r\n    
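# The returned dict bundles the training window with its in-sample fit, the forecast window with its values, and the MAPE/RMSE fit metrics.\r\n    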
result={\"trainfromyear\":StartYear,\"traintoyear\":EndYear,\"trainresult\":ytrain.tolist(),\"prefromyear\":PreStartYear,\"pretoyear\":PreEndYear,\"preresult\":ypre.tolist(),\"MAPE\":mape,\"RMSE\":rmse}\n return result\n\nif __name__ == '__main__':\n\n PreStartYear = \"2020\"\n PreEndYear = \"2029\"\n buildingarea=\"D:/lab/Yunnan_Pre/data/yunnan_building.csv\"\n loaddensity=\"D:/lab/Yunnan_Pre/data/yunnan_loaddensity.csv\"\n result=LDM(PreStartYear,PreEndYear,buildingarea,loaddensity)","repo_name":"Lyanf/ynpowerbackend","sub_path":"src/algorithms/LDM.py","file_name":"LDM.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38606359467","text":"from multiprocessing import Process, Queue\nfrom content_generator_microservice import CG\n\nif __name__ == '__main__':\n\n q = Queue()\n # Life Generator sends data and appends it to Queue\n data = ['Portland','Oregon']\n q.put(data)\n\n # Set up Content Generator process\n p = Process(target=CG, args=(q,))\n\n # Content generator receives data, processes, appends result back in queue\n p.start()\n p.join()\n\n # Data is available in the queue for Life_Generator \n print(q.get())","repo_name":"Terencetang11/CS361_Microservices_Project","sub_path":"Life_Generator/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"880750369","text":"# coding: utf-8\n\nimport flask\nfrom coprs import app\nfrom coprs import db\nfrom coprs.exceptions import CoprHttpException\nfrom coprs.views.misc import backend_authenticated\nfrom . import stats_rcv_ns\nfrom ...logic.stat_logic import CounterStatLogic, handle_be_stat_message\n\n\n@stats_rcv_ns.route(\"/\")\ndef ping():\n return \"OK\", 200\n\n\n@stats_rcv_ns.route(\"///\", methods=['POST'])\n@backend_authenticated\ndef increment(counter_type, name):\n app.logger.debug(flask.request.remote_addr)\n\n CounterStatLogic.incr(name, counter_type)\n db.session.commit()\n return \"\", 201\n\n\n@stats_rcv_ns.route(\"/from_backend\", methods=['POST'])\n@backend_authenticated\ndef backend_stat_message_handler():\n try:\n handle_be_stat_message(flask.request.json)\n db.session.commit()\n except Exception as err:\n app.logger.exception(err)\n raise CoprHttpException from err\n\n return \"OK\", 201\n","repo_name":"fedora-copr/copr","sub_path":"frontend/coprs_frontend/coprs/views/stats_ns/stats_receiver.py","file_name":"stats_receiver.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"54"} +{"seq_id":"9954558842","text":"def main():\n while(True):\n s=input(\"Enter a string = \")\n if s==\"\":\n break\n else:\n print(\"Total vowles in string =\",count_vowels(s))\n\ndef count_vowels(s):\n s=s.lower()\n count=0\n for i in s:\n if i=='a' or i=='e' or i== 'i'\\\n or i=='o' or i=='u':\n count+=1\n return count \nmain()","repo_name":"Magrawal17/Jacobs-University-Coursework","sub_path":"Programming in Python/Homework5/P6.py","file_name":"P6.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"38625552618","text":"from random import randint\nprimos = media1 = media2 = 0\nnum = []\nmult3_maior10 = []\nentre10e30 = []\n\nfor i in range(10):\n num.append(randint(0, 100))\n cont = 0\n for j in range(1, num[i]+1):\n if num[i] % j == 
0:\n cont += 1\n if cont == 2: \n primos += num[i]\n\nfor i in range(10):\n if num[i] % 3 == 0 and num[i] > 10:\n mult3_maior10.append(num[i])\n\n if num[i] >= 10 and num[i] <= 30:\n entre10e30.append(num[i])\n\nif len(mult3_maior10) > 0: \n media1 = sum(mult3_maior10) / len(mult3_maior10)\nif len(entre10e30) > 0:\n media2 = sum(entre10e30) / len(entre10e30)\n\nprint(f'{num}\\n')\nprint('RESULTADO: ')\nprint(f'a) Soma dos números primos: {primos}')\nprint(f'b) Média dos nºs múltiplos de três e maiores que dez: {media1:.1f}')\nprint(f'c) Média dos nºs maiores ou iguais a dez e menores ou iguais a trinta: {media2:.1f}')\n ","repo_name":"rogeriofrsouza/Fatec-LP3","sub_path":"Listas de exercício/Lista 1/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25470204586","text":"import os\nimport torch\nimport random\nimport logging\nfrom tqdm import tqdm\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\nfrom typing import List, Dict, Any, Union, Optional\n\nfrom torch import nn, Tensor\nimport torch.nn.functional as F\nimport torch.distributed as dist\nfrom torch.utils.data import Dataset\n\nfrom transformers import AdapterTrainer, PreTrainedTokenizer, Trainer\n\nfrom .validate_utils import validate_during_training\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass FinetuneCollator:\n def __init__(self, tokenizer: PreTrainedTokenizer, max_query_len: int, max_doc_len: int, padding=True):\n self.tokenizer = tokenizer\n self.max_query_len = max_query_len\n self.max_doc_len = max_doc_len\n self.padding = padding\n\n def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:\n # tokenizing batch of text is much faster\n query_input = self.tokenizer(\n [x['query'] for x in features],\n padding=self.padding,\n return_tensors='pt',\n add_special_tokens=True,\n return_attention_mask=True,\n return_token_type_ids=True,\n truncation=True,\n max_length=self.max_query_len\n )\n query_input['position_ids'] = torch.arange(0, query_input['input_ids'].size(1))[None, :]\n doc_input = self.tokenizer(\n [x['doc'] for x in features],\n padding=self.padding,\n return_tensors='pt',\n add_special_tokens=True,\n return_attention_mask=True,\n return_token_type_ids=True,\n truncation=True,\n max_length=self.max_doc_len\n )\n doc_input['position_ids'] = torch.arange(0, doc_input['input_ids'].size(1))[None, :]\n # we have to prevent inbatch false negatives when gathering tensors in the trainer\n # because each distributed process has its own collators\n qids = torch.tensor([x['qid'] for x in features], dtype=torch.long)\n docids = torch.tensor([x['docid'] for x in features], dtype=torch.long)\n\n batch_data = {\n \"query_input\": query_input,\n \"doc_input\": doc_input,\n \"qids\": qids,\n \"docids\": docids,\n }\n\n if 'neg_docs' in features[0]:\n neg_doc_input = self.tokenizer(\n sum([x['neg_docs'] for x in features], []),\n padding=self.padding,\n return_tensors='pt',\n add_special_tokens=True,\n return_attention_mask=True,\n return_token_type_ids=True,\n truncation=True,\n max_length=self.max_doc_len\n ) \n neg_doc_input['position_ids'] = torch.arange(0, neg_doc_input['input_ids'].size(1))[None, :]\n neg_docids = torch.tensor(sum([x['neg_docids'] for x in features], []), dtype=torch.long) \n batch_data.update({\n \"neg_doc_input\": neg_doc_input,\n \"neg_docids\": neg_docids,\n }) \n return batch_data\n\n\nclass QDRelDataset(Dataset):\n def 
__init__(self, \n            tokenizer: PreTrainedTokenizer, \n            qrel_path: str, \n            query_path: str, \n            corpus_path: str, \n            max_query_len: int, \n            max_doc_len: int, \n            negative: str, \n            neg_per_query: int,\n            rel_threshold=1, \n            verbose=True):\n        '''\n        negative: either `random' or a path to a tab-separated file that maps \\\n            each qid to its space-separated negative doc ids \n        '''\n        super().__init__()\n        self.tokenizer = tokenizer\n        self.queries, qid2offset = [], dict()\n        for idx, line in enumerate(tqdm(open(query_path), disable=not verbose, mininterval=10)):\n            qid, query = line.split(\"\\t\")\n            qid2offset[qid] = idx\n            self.queries.append(query.strip())\n\n        self.corpus, docid2offset = [], dict()\n        for idx, line in enumerate(tqdm(open(corpus_path), disable=not verbose, mininterval=10)):\n            splits = line.split(\"\\t\")\n            if len(splits) == 2:\n                docid, body = splits\n            else:\n                raise NotImplementedError()\n            docid2offset[docid] = idx\n            self.corpus.append(body.strip())\n\n        self.qrels = defaultdict(list)\n        for line in tqdm(open(qrel_path), disable=not verbose, mininterval=10):\n            qid, _, docid, rel = line.split()\n            if int(rel) >= rel_threshold:\n                qoffset = qid2offset[qid]\n                docoffset = docid2offset[docid]\n                self.qrels[qoffset].append(docoffset)\n        \n        if os.path.exists(negative):\n            self.negative = {}\n            for line in tqdm(open(negative), disable=not verbose, mininterval=10, desc=\"read negatives\"):\n                qid, neg_docids = line.strip().split(\"\\t\")\n                neg_docids = neg_docids.split(\" \")\n                qoffset, neg_docoffsets = qid2offset[qid], [docid2offset[docid] for docid in neg_docids]\n                assert len(set(neg_docoffsets) & set(self.qrels[qoffset])) == 0, \"Negative docids and relevant docids should not overlap.\"\n                self.negative[qoffset] = neg_docoffsets\n            assert set(self.negative.keys()) == set(self.qrels.keys())\n        else:\n            self.negative = negative\n        self.neg_per_query = neg_per_query\n\n        self.qids = sorted(self.qrels.keys())\n        self.max_query_len = max_query_len\n        self.max_doc_len = max_doc_len\n        self.qrels = dict(self.qrels)\n\n    def get_qrels(self):\n        return self.qrels\n\n    def __len__(self):\n        return len(self.qids)\n    \n    def __getitem__(self, index):\n        '''\n        We do not tokenize text here and instead tokenize batch of text in the collator because\n        a. Tokenizing batch of text is much faster than tokenizing one by one\n        b. 
Usually, the corpus is too large and we cannot afford to use multiple num workers\n '''\n qid = self.qids[index]\n query = self.queries[qid]\n rel_docids = self.qrels[qid]\n docid = random.choice(rel_docids)\n doc = self.corpus[docid]\n data = {\n \"query\": query,\n \"doc\": doc,\n \"docid\": docid,\n \"qid\": qid\n }\n if self.neg_per_query > 0:\n if self.negative == \"random\":\n neg_docids = random.sample(range(len(self.corpus)), self.neg_per_query)\n elif isinstance(self.negative, Dict):\n neg_docids = random.sample(self.negative[qid], self.neg_per_query)\n else:\n raise NotImplementedError()\n neg_docs = [self.corpus[neg_docid] for neg_docid in neg_docids ]\n data.update({\"neg_docids\": neg_docids, \"neg_docs\": neg_docs})\n return data\n\n\nclass BaseContrastDenseFinetuner:\n def compute_loss(self, model, inputs, return_outputs=False):\n \"\"\"\n Compute contrastive loss.\n \"\"\"\n query_embeds = model(**inputs['query_input']) # Nq, dim\n doc_embeds = model(**inputs['doc_input']) # Nq, dim\n qids = self._prepare_input(inputs['qids']).contiguous() # _prepare_input to gpu\n docids = self._prepare_input(inputs['docids']).contiguous()\n \n if self.args.local_rank > -1:\n query_embeds = self._gather_tensor(query_embeds)\n doc_embeds = self._gather_tensor(doc_embeds)\n qids, docids = self._gather_tensor(qids), self._gather_tensor(docids) \n\n if 'neg_doc_input' not in inputs:\n loss = self.compute_inbatch_contrastive_loss(query_embeds, doc_embeds, qids, docids)\n return (loss, (query_embeds, doc_embeds)) if return_outputs else loss\n else:\n neg_doc_embeds = model(**inputs['neg_doc_input']) \n neg_docids = self._prepare_input(inputs['neg_docids']).contiguous()\n if self.args.local_rank > -1:\n neg_doc_embeds = self._gather_tensor(neg_doc_embeds)\n neg_docids = self._gather_tensor(neg_docids)\n loss = self.compute_contrastive_loss(query_embeds, doc_embeds, neg_doc_embeds, qids, docids, neg_docids)\n return (loss, (query_embeds, doc_embeds, neg_doc_embeds)) if return_outputs else loss\n\n def compute_inbatch_contrastive_loss(self, query_embeds, doc_embeds, qids, docids): \n labels = torch.arange(len(query_embeds), dtype=torch.long, device=query_embeds.device)\n all_doc_embeds = doc_embeds\n all_docids = docids\n negative_mask = self._compute_negative_mask(qids, all_docids)\n\n similarities = torch.matmul(query_embeds, all_doc_embeds.transpose(0, 1))\n similarities = similarities * self.args.inv_temperature\n similarities = similarities - 10000.0 * negative_mask\n contrast_loss = F.cross_entropy(similarities, labels) \n if self.args.local_rank > -1:\n contrast_loss = contrast_loss * dist.get_world_size()\n return contrast_loss\n\n def compute_contrastive_loss(self, query_embeds, doc_embeds, neg_doc_embeds, qids, docids, neg_docids): \n labels = torch.arange(len(query_embeds), dtype=torch.long, device=query_embeds.device)\n all_doc_embeds = torch.vstack((doc_embeds, neg_doc_embeds))\n all_docids = torch.hstack((docids, neg_docids))\n negative_mask = self._compute_negative_mask(qids, all_docids)\n\n similarities = torch.matmul(query_embeds, all_doc_embeds.transpose(0, 1))\n similarities = similarities * self.args.inv_temperature\n similarities = similarities - 10000.0 * negative_mask\n contrast_loss = F.cross_entropy(similarities, labels) \n if self.args.local_rank > -1:\n contrast_loss = contrast_loss * dist.get_world_size()\n return contrast_loss\n\n @torch.no_grad()\n def _compute_negative_mask(self, qids, docids):\n negative_mask = torch.zeros((len(qids), len(docids)), dtype=torch.bool, 
device=qids.device)\n for i, qid in enumerate(qids):\n for d in self.qrels[qid.item()]:\n negative_mask[i] = torch.logical_or(negative_mask[i], docids==d)\n negative_mask = negative_mask.type(torch.float32)\n negative_mask.fill_diagonal_(0)\n return negative_mask\n\n def _gather_tensor(self, t: Tensor):\n all_tensors = [torch.empty_like(t) for _ in range(dist.get_world_size())]\n dist.all_gather(all_tensors, t)\n all_tensors[self.args.local_rank] = t\n all_tensors = torch.cat(all_tensors)\n return all_tensors\n\n def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):\n return 0\n\n def evaluate(self, \n eval_dataset: Optional[Dataset] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",) -> Dict[str, float]:\n metrics = validate_during_training(self, eval_dataset, ignore_keys, metric_key_prefix)\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)\n return metrics\n\n\nclass BackboneContrastDenseFinetuner(BaseContrastDenseFinetuner, Trainer):\n def __init__(self, qrels, *args, **kwargs):\n super(BackboneContrastDenseFinetuner, self).__init__(*args, **kwargs)\n self.qrels = qrels # is used to compute negative mask\n\n\nclass AdapterContrastDenseFinetuner(BaseContrastDenseFinetuner, AdapterTrainer):\n def __init__(self, qrels, *args, **kwargs):\n super(AdapterContrastDenseFinetuner, self).__init__(*args, **kwargs)\n self.qrels = qrels # is used to compute negative mask\n\n\n","repo_name":"jingtaozhan/disentangled-retriever","sub_path":"src/disentangled_retriever/dense/finetune/contrast_utils.py","file_name":"contrast_utils.py","file_ext":"py","file_size_in_byte":11687,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"54"} +{"seq_id":"72593520482","text":"import numpy as np\nfrom enum import Enum\nfrom numpy.polynomial.polynomial import polyval\nfrom numpy.polynomial.legendre import legval, legvander\n\ndef legandr(x, n):\n if n == 0:\n return 1\n if n == 1:\n return x\n else:\n return ((2 * n - 1) / (n)) * x * legandr(x, n - 1) - ((n - 1) / n) * legandr(x, n - 2)\n\n\n\nclass ApproxType(Enum):\n algebraic = 0\n legendre = 1\n harmonic = 2\n\n\ndef func(x):\n \"\"\"\n this method should implement VECTORIZED target function\n \"\"\"\n return abs(3*x + np.sin(x))\n\n\ndef approx(X0, Y0, X1, approx_type: ApproxType, dim):\n \"\"\"\n this method should perform approximation on [-1; 1] interval\n :param X0: X-values (1 x N0)\n :param Y0: Y-values (1 x N0)\n :param X1: approximation points (1 x N1)\n :param approx_type:\n 0 - algebraic polynomes (1, x, x^2, ...)\n 1 - legendre polynomes\n 2 - harmonic\n :param dim: dimension\n :return Y1: approximated Y-values (1 x N1)\n :return a: vector (1 x dim) of approximation coefficients\n :return P: (for approx_type 0 and 1) coefficients of approximation polynome P (1 x dim)\n \"\"\"\n if approx_type is ApproxType.algebraic:\n Q = np.vander(X0, dim, increasing=True)\n mat = Q.T @ Q\n b = Q.T @ Y0\n P = np.linalg.solve(mat, b)\n y = polyval(X1, P)\n return y, [], P\n if approx_type is ApproxType.legendre:\n Q = legvander(X0, dim - 1)\n mat = Q.T @ Q\n b = Q.T @ Y0\n P = np.linalg.solve(mat, b)\n y = legval(X1, P)\n\n D = [[0 for i in range(dim)] for j in range(dim)]\n D[0][0] = 1\n D[0][1] = 0\n D[1][0] = 0 \n D[1][1] = 1 \n i = 2 \n while i < (dim):\n D[i][0] = -(i-1)/(i)*D[i-2][0] \n j = 0\n while j <= i:\n D[i][j] = (2*i-1)/i*D[i-1][j-1] - (i-1)/i*D[i-2][j]\n j+=1\n i+=1 \n D = np.array(D)\n G = P @ D\n\n return 
y, P, G\n    raise Exception(f'approximation of type {approx_type} not supported yet')\n\n    \n\n\n","repo_name":"BMaksim/Num-and-optim-methods","sub_path":"Num/Sem2/met3.py","file_name":"met3.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"73077372640","text":"text = 'this is book'\npattern = 'is'\n\n    # i iterates over the text\ndef func(text, pattern):\n    for i in range(len(text)-len(pattern)+1):\n        for j in range(len(pattern)): ## j iterates over the pattern\n            if text[i+j] != pattern[j]:\n                break\n        else:\n            return i\n    return -1\n\ndef func2(text, pattern):\n    m = len(pattern)\n    for i in range(len(text)- len(pattern)+1):\n        if text[i:i+m] == pattern:\n            return i\n    return -1\n\n\ndef func3(text, pattern):\n    i = 0 # i indexes the full text\n    j = 0 # j indexes the pattern being checked\n    M = len(pattern)\n    N = len(text)\n\n    while j < M and i < N:\n        if text[i] != pattern[j]:\n            i = i-j\n            j = -1\n        i = i +1\n        j = j +1\n    if j == len(pattern):\n        return i-len(pattern)\n    else:\n        return -1\n\n","repo_name":"cmkds/algo","sub_path":"수업/0812/BruteForce 고지식한알고리즘.py","file_name":"BruteForce 고지식한알고리즘.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"27005487120","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport cv2\nimport json\nimport pickle\nimport argparse\nimport itertools\nimport pandas as pd\nfrom tqdm import tqdm\nimport xgboost as xgb\nimport lightgbm as lgbm\nfrom sklearn.preprocessing import StandardScaler\n\nfrom utils.boxes import *\nfrom config.cfg import Configuration as cfg\nfrom modules.kv_embedding import KVEmbedding\n\nfrom sklearn.metrics import recall_score, precision_score, f1_score\n\n\nclass Trainer(object):\n    def __init__(self, args) -> None:\n        self.kv_embed = KVEmbedding()\n        self.dirname = cfg.dataset.image_path\n        self.cols = ['k_id', 'k_text', 'k_box', 'v_id', 'v_text', 'v_box', 'k_embed', 'v_embed', 'width', 'height', 'fname']\n        self.lang = args.lang\n        \n    def load_data(self, type_data='train'):\n        df = []\n        label_path = os.path.join(cfg.dataset.data_path, f\"{self.lang}.{type_data}.json\")\n        with open(label_path, 'r') as f_json:\n            data = json.load(f_json)\n        for doc in tqdm(data['documents']):\n            f_name = doc['img']['fname']\n            h, w = doc['img']['height'], doc['img']['width']\n            df_label = pd.DataFrame(doc['document'])\n            # Make linking data\n            re = []\n            for links in df_label[df_label.label=='question'].linking:\n                if not links: continue\n                k_id = links[0][0]\n                v_ids = [l[1] for l in links]\n                k = df_label[df_label.id==k_id].iloc[0]\n                v = df_label[df_label.id.isin(v_ids)]\n                re.append({\n                    'k_id': k_id,\n                    'k_text': str(k.text),\n                    'k_box': k.box,\n                    'k_embed': self.kv_embed.embedding(str(k.text)),\n                    'v_id': v.id.tolist(),\n                    'v_text': v.text.tolist(),\n                    'v_box': [p for p in v.box],\n                    'v_embed': [self.kv_embed.embedding(str(t)) for t in v.text.tolist()],\n                    'width': w,\n                    'height': h,\n                    'fname': f_name\n                })\n            re = pd.DataFrame(re)\n            if re.shape[0] == 0: continue\n            re = re.explode(['v_text', 'v_box', 'v_id', 'v_embed']).reset_index(drop=True)\n            non_re = []\n            # Make non-linking data\n            for (i, k_id, k_box, k_text, k_embed, w, h, fname), (j, v_id, v_box, v_text, v_embed) in itertools.product(\n                re[['k_id', 'k_box', 'k_text', 'k_embed', 'width', 'height', 'fname']].to_records(index=True),\n                re[['v_id', 'v_box', 'v_text', 'v_embed']].to_records(index=True)):\n                if i==j: continue\n                
non_re.append({\n 'k_box': k_box,\n 'v_box': v_box,\n 'k_id': k_id,\n 'v_id': v_id,\n 'k_text': k_text,\n 'v_text': v_text,\n 'k_embed': k_embed,\n 'v_embed': v_embed,\n 'width': w,\n 'height': h,\n 'fname': fname\n })\n non_re = pd.DataFrame(non_re).reset_index(drop=True)\n non_re['label'] = 0.0\n \n re = re[self.cols].copy()\n re['label'] = 1.0\n re_total = pd.concat([re, non_re])\n df.append(re_total)\n return pd.concat(df)\n \n def make_features(self, df:pd.DataFrame):\n print(\"Making features ...\")\n df.k_box = df.apply(lambda x: normalize_scale_bbox(x.k_box, x.width, x.height), axis=1)\n df.v_box = df.apply(lambda x:normalize_scale_bbox(x.v_box, x.width, x.height), axis=1)\n k_features = pd.DataFrame(df.k_box.tolist(), index=df.index, columns=['k_' + s for s in ['x1', 'y1', 'x2', 'y2']])\n v_features = pd.DataFrame(df.v_box.tolist(), index=df.index, columns=['v_' + s for s in ['x1', 'y1', 'x2', 'y2']])\n \n df = pd.concat([k_features, v_features, df[self.cols], df['label']], axis=1)\n \n df['k_cx'] = df.k_x1.add(df.k_x2).div(2)\n df['k_cy'] = df.k_y1.add(df.k_y2).div(2)\n \n df['v_cx'] = df.v_x1.add(df.v_x2).div(2)\n df['v_cy'] = df.v_y1.add(df.v_y2).div(2)\n \n df['fe1'] = abs(df.v_x1 - df.k_x1)\n df['fe2'] = abs(df.v_y1 - df.k_y1)\n df['fe3'] = abs(df.v_x1 - df.k_x2)\n df['fe4'] = abs(df.v_y1 - df.k_y2)\n df['fe5'] = abs(df.v_x2 - df.k_x1)\n df['fe6'] = abs(df.v_y2 - df.k_y1)\n df['fe7'] = abs(df.v_x2 - df.k_x2)\n df['fe8'] = abs(df.v_y2 - df.k_y2)\n df['fe9'] = abs(df.v_x2 - df.v_x1)\n df['fe10'] = abs(df.v_y2 - df.v_y1)\n df['fe11'] = abs(df.k_x2 - df.k_x1)\n df['fe12'] = abs(df.k_y2 - df.k_y1)\n \n df['fe13'] = df.apply(lambda x: cal_degrees([x.k_x1, x.k_y1], [x.v_x1, x.v_y1]), axis=1)\n df['fe14'] = df.apply(lambda x: cal_degrees([x.k_x2, x.k_y1], [x.v_x2, x.v_y1]), axis=1)\n df['fe15'] = df.apply(lambda x: cal_degrees([x.k_x2, x.k_y2], [x.v_x2, x.v_y2]), axis=1)\n df['fe16'] = df.apply(lambda x: cal_degrees([x.k_x1, x.k_y2], [x.v_x1, x.v_y2]), axis=1)\n df['fe17'] = df.apply(lambda x: cal_degrees([x['k_cx'], x['k_cy']], [x['v_cx'], x['v_cy']]), axis=1)\n\n df['fe18'] = df.apply(lambda x: boxes_distance([x.k_x1-x.v_x2, x.k_y2-x.v_y1],[x.v_x1-x.k_x2, x.v_y2-x.k_y1]), axis=1)\n df['fe19'] = df.apply(lambda x: dist_points([x.k_cx, x.k_cy], [x.v_cx, x.v_cy]), axis=1)\n\n df['fe20'] = df['k_embed']\n df['fe21'] = df['v_embed']\n \n cols = [c for c in df.columns if c.startswith('fe')] + ['label']\n\n return df[cols], df[self.cols]\n \n def post_process(self, df: pd.DataFrame, pred_prob):\n # one value only links to one key but one key can link to many value\n df['pred_prob'] = pred_prob\n df['is_linking'] = 0.0\n \n fnames = df.fname.unique().tolist()\n for fname in fnames:\n df_fname = df[df.fname==fname]\n v_ids = df_fname.v_id.unique().tolist()\n for v_id in v_ids:\n df_vid = df_fname[df_fname.v_id==v_id]\n idx_max = df_vid.pred_prob.idxmax()\n df.loc[(df.fname==fname)&(df.v_id==v_id)&(df.index==idx_max), 'is_linking'] = 1.0\n return df\n\n def preprocess_data(self):\n os.makedirs(os.path.join(cfg.dataset.features_path, self.lang), exist_ok=True)\n os.makedirs(os.path.join(cfg.dataset.scaler_path, self.lang), exist_ok=True)\n train_feat_pth = os.path.join(cfg.dataset.features_path, self.lang, 'train.pkl')\n val_feat_pth = os.path.join(cfg.dataset.features_path, self.lang, 'val.pkl')\n train_org_pth = os.path.join(cfg.dataset.features_path, self.lang, 'train_df.pkl')\n val_org_pth = os.path.join(cfg.dataset.features_path, self.lang, 'val_df.pkl')\n scaler_pth = 
os.path.join(cfg.dataset.scaler_path, self.lang, 'scaler.pkl')\n        scaler = StandardScaler()\n        \n        if os.path.exists(train_feat_pth):\n            print(\"Loading features training data ...\")\n            features_train = pickle.load(open(train_feat_pth, 'rb'))\n            df_train = pickle.load(open(train_org_pth, 'rb'))\n        else:\n            print('Loading training data ...')\n            df_train = self.load_data(type_data='train')\n            features_train, __ = self.make_features(df_train)\n            pickle.dump(df_train, open(train_org_pth, 'wb'))\n            pickle.dump(features_train, open(train_feat_pth, 'wb'))\n        \n        if os.path.exists(val_feat_pth):\n            print(\"Loading features valid data...\")\n            features_val = pickle.load(open(val_feat_pth, 'rb'))\n            df_val = pickle.load(open(val_org_pth, 'rb'))\n        else:\n            print('Loading valid data ...')\n            df_val = self.load_data(type_data='val')\n            features_val, __ = self.make_features(df_val)\n            pickle.dump(df_val, open(val_org_pth, 'wb'))\n            pickle.dump(features_val, open(val_feat_pth, 'wb'))\n        \n        X_train, y_train = features_train.values[:, :-1], features_train.values[:, -1].astype(int)\n        if os.path.exists(scaler_pth):\n            scaler = pickle.load(open(scaler_pth, 'rb'))\n            X_train = scaler.transform(X_train)\n        else:\n            X_train = scaler.fit_transform(X_train)\n            pickle.dump(scaler, open(scaler_pth, 'wb'))\n        \n        X_val, y_val = features_val.values[:, :-1], features_val.values[:, -1].astype(int)\n        X_val = scaler.transform(X_val)\n        \n        return (X_train, y_train), (X_val, y_val), (df_train.reset_index(drop=True), df_val.reset_index(drop=True))\n\n    \n    def train(self):\n        os.makedirs(os.path.join(cfg.dataset.model_path, self.lang), exist_ok=True)\n        train_data, val_data, data_df = self.preprocess_data()\n        X_train, y_train = train_data\n        X_val, y_val = val_data\n        train_df, val_df = data_df\n        \n        print(f'Shape of X_train: {X_train.shape}')\n        print(f'Shape of X_val: {X_val.shape}')\n        \n        if not os.path.exists(os.path.join(cfg.dataset.model_path, self.lang, 'clf.pkl')):\n            print(\"============================= TRAINING =================================\")\n            print('Training model ...')\n            \n            if os.path.exists(cfg.dataset.params):\n                print(\"Loading tuning params ...\")\n                params = json.load(open(cfg.dataset.params, 'r', encoding='utf-8'))\n            else:\n                print(\"Loading default params ...\")\n                params = {\n                    'random_state': 1997,\n                    'n_estimators': 200,\n                    'n_jobs': 15,\n                    'max_depth': 10,\n                }\n            # clf = xgb.XGBClassifier(objective=\"binary:logistic\", **params)\n            clf = lgbm.LGBMClassifier(objective='binary', **params)\n            clf.fit(X_train, y_train)\n\n            print('Saving model ...')\n            with open(os.path.join(cfg.dataset.model_path, self.lang, 'clf.pkl'), 'wb') as f_cls:\n                pickle.dump(clf, f_cls, protocol=pickle.HIGHEST_PROTOCOL)\n        else:\n            # Load from the same language-specific directory the model was saved to above.\n            clf = pickle.load(open(os.path.join(cfg.dataset.model_path, self.lang, 'clf.pkl'), 'rb'))\n        pred_prob = clf.predict_proba(X_val)[:, 1]\n        y_preds = self.post_process(val_df, pred_prob).is_linking.values.astype(int)\n        y_val = y_val.astype(int)\n        print(\"============================= EVALUATION =================================\")\n        print(f\"Recall: {recall_score(y_val, y_preds)}\")\n        print(f\"Precision: {precision_score(y_val, y_preds)}\")\n        print(f\"F1-score: {f1_score(y_val, y_preds)}\")\n\n\ndef cli():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--lang', default=cfg.dataset.lang, type=str, help='Language specific for training')\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == \"__main__\":\n    args = cli()\n    trainer = Trainer(args)\n    
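# Runs preprocessing, fits (or loads) the LightGBM classifier, and prints recall/precision/F1 on the validation split.\n    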
trainer.train()\n","repo_name":"tuongtranngoc/Language-independent-Entity-Linking","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71586146721","text":"\"\"\"Filesystem utility functions.\"\"\"\nimport os\nimport errno\nimport contextlib\nfrom pathlib import Path\nimport tarfile\nimport zipfile\n\ndef makedirs(path):\n \"\"\"Create directory recursively if not exists.\n Similar to `makedir -p`, you can skip checking existence before this function.\n\n Parameters\n ----------\n path : str\n Path of the desired dir\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\ndef try_import(package, message=None, fromlist=None):\n \"\"\"Try import specified package, with custom message support.\n\n Parameters\n ----------\n package : str\n The name of the targeting package.\n message : str, default is None\n If not None, this function will raise customized error message when import error is found.\n\n\n Returns\n -------\n module if found, raise ImportError otherwise\n\n \"\"\"\n try:\n return __import__(package, fromlist=fromlist)\n except ImportError as e:\n if not message:\n raise e\n raise ImportError(message)\n\ndef try_import_cv2():\n \"\"\"Try import cv2 at runtime.\n\n Returns\n -------\n cv2 module if found. Raise ImportError otherwise\n\n \"\"\"\n msg = \"cv2 is required, you can install by package manager, e.g. 'apt-get', \\\n or `pip install opencv-python --user` (note that this is unofficial PYPI package).\"\n return try_import('cv2', msg)\n\ndef try_import_munkres():\n \"\"\"Try import munkres at runtime.\n\n Returns\n -------\n munkres module if found. Raise ImportError otherwise\n Munkres (Hungarian) algorithm for the Assignment Problem\n\n \"\"\"\n msg = \"munkres is required, you can install by `pip install munkres --user`. \"\n return try_import('munkres', msg)\n\ndef try_import_colorama():\n \"\"\"Try import colorama at runtime.\n\n Returns\n -------\n colorama module if found. Raise ImportError otherwise\n\n \"\"\"\n msg = \"colorama is required, you can install by `pip install colorama --user` \\\n (note that this is unofficial PYPI package).\"\n return try_import('colorama', msg)\n\ndef try_import_decord():\n \"\"\"Try import decord at runtime.\n\n Returns\n -------\n Decord module if found. Raise ImportError otherwise\n\n \"\"\"\n msg = \"Decord is required, you can install by `pip install decord --user` \\\n (note that this is unofficial PYPI package).\"\n return try_import('decord', msg)\n\ndef try_import_mmcv():\n \"\"\"Try import mmcv at runtime.\n\n Returns\n -------\n mmcv module if found. Raise ImportError otherwise\n\n \"\"\"\n msg = \"mmcv is required, you can install by first `pip install Cython --user` \\\n and then `pip install mmcv --user` (note that this is unofficial PYPI package).\"\n return try_import('mmcv', msg)\n\ndef try_import_rarfile():\n \"\"\"Try import rarfile at runtime.\n\n Returns\n -------\n rarfile module if found. 
Raise ImportError otherwise\n\n \"\"\"\n msg = \"rarfile is required, you can install by first `sudo apt-get install unrar` \\\n and then `pip install rarfile --user` (note that this is unofficial PYPI package).\"\n return try_import('rarfile', msg)\n\ndef import_try_install(package, extern_url=None):\n \"\"\"Try import the specified package.\n If the package not installed, try use pip to install and import if success.\n\n Parameters\n ----------\n package : str\n The name of the package trying to import.\n extern_url : str or None, optional\n The external url if package is not hosted on PyPI.\n For example, you can install a package using:\n \"pip install git+http://github.com/user/repo/tarball/master/egginfo=xxx\".\n In this case, you can pass the url to the extern_url.\n\n Returns\n -------\n \n The imported python module.\n\n \"\"\"\n import tempfile\n import portalocker\n lockfile = os.path.join(tempfile.gettempdir(), package + '_install.lck')\n with portalocker.Lock(lockfile):\n try:\n return __import__(package)\n except ImportError:\n try:\n from pip import main as pipmain\n except ImportError:\n from pip._internal import main as pipmain\n from types import ModuleType\n # fix for pip 19.3\n if isinstance(pipmain, ModuleType):\n from pip._internal.main import main as pipmain\n\n # trying to install package\n url = package if extern_url is None else extern_url\n pipmain(['install', '--user', url]) # will raise SystemExit Error if fails\n\n # trying to load again\n try:\n return __import__(package)\n except ImportError:\n import sys\n import site\n user_site = site.getusersitepackages()\n if user_site not in sys.path:\n sys.path.append(user_site)\n return __import__(package)\n return __import__(package)\n\ndef try_import_dali():\n \"\"\"Try import NVIDIA DALI at runtime.\n \"\"\"\n try:\n dali = __import__('nvidia.dali', fromlist=['pipeline', 'ops', 'types'])\n dali.Pipeline = dali.pipeline.Pipeline\n except (ImportError, RuntimeError) as e:\n if isinstance(e, ImportError):\n msg = \"DALI not found, please check if you installed it correctly.\"\n elif isinstance(e, RuntimeError):\n msg = \"No CUDA-capable device is detected ({}).\".format(e)\n class dali:\n class Pipeline:\n def __init__(self):\n raise NotImplementedError(msg)\n return dali\n\ndef try_import_html5lib():\n \"\"\"Try import html5lib at runtime.\n\n Returns\n -------\n html5lib module if found. Raise ImportError otherwise\n\n \"\"\"\n msg = \"html5lib is required, you can install by package manager, \" \\\n \"e.g. pip install html5lib --user` (note that this is unofficial PYPI package).\"\n return try_import('html5lib', msg)\n\ndef try_import_gdfDownloader():\n \"\"\"Try import googleDriveFileDownloader at runtime.\n\n Returns\n -------\n googleDriveFileDownloader module if found. Raise ImportError otherwise\n\n \"\"\"\n msg = \"googleDriveFileDownloader is required, you can install by package manager, \" \\\n \"e.g. 
pip install googleDriveFileDownloader --user` \" \\\n \"(note that this is unofficial PYPI package).\"\n return try_import('googleDriveFileDownloader', msg)\n\ndef unzip(zip_file_path, root='./', strict=False):\n \"\"\"Unzips files located at `zip_file_path` into parent directory specified by `root`.\n \"\"\"\n root = os.path.expanduser(root)\n with zipfile.ZipFile(zip_file_path) as zf:\n if strict or not os.path.exists(os.path.join(root, zf.namelist()[-1])):\n zf.extractall(root)\n folder = os.path.commonprefix(zf.namelist())\n return os.path.join(root, folder)\n\ndef untar(tar_file_path, root='./', strict=False):\n \"\"\"Untars files located at `tar_file_path` into parent directory specified by `root`.\n \"\"\"\n root = os.path.expanduser(root)\n with tarfile.open(tar_file_path, 'r:gz') as zf:\n if strict or not os.path.exists(os.path.join(root, zf.getnames()[-1])):\n zf.extractall(root)\n folder = os.path.commonprefix(zf.getnames())\n return os.path.join(root, folder)\n\n@contextlib.contextmanager\ndef temporary_filename(suffix=None):\n \"\"\"Context that introduces a temporary file.\n\n Creates a temporary file, yields its name, and upon context exit, deletes it.\n (In contrast, tempfile.NamedTemporaryFile() provides a 'file' object and\n deletes the file as soon as that file object is closed, so the temporary file\n cannot be safely re-opened by another library or process.)\n\n Parameters\n ----------\n suffix: desired filename extension (e.g. '.mp4').\n\n Yields\n ----------\n The name of the temporary file.\n \"\"\"\n import tempfile\n try:\n f = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)\n tmp_name = f.name\n f.close()\n yield tmp_name\n finally:\n os.unlink(tmp_name)\n\nclass _DisplayablePath:\n \"\"\"A util class for displaying the tree structure of root path.\n\n Example:\n\n >>> paths = _DisplayablePath.make_tree(Path('doc'))\n >>> for path in paths:\n >>> print(path.displayable())\n\n Parameters\n ----------\n path : str\n The path.\n parent_path : str\n The parent parth.\n is_last : bool\n Whether it's the last node in this depth.\n\n \"\"\"\n display_filename_prefix_middle = '├──'\n display_filename_prefix_last = '└──'\n display_parent_prefix_middle = ' '\n display_parent_prefix_last = '│ '\n\n def __init__(self, path, parent_path, is_last):\n self.path = Path(str(path))\n self.parent = parent_path\n self.is_last = is_last\n if self.parent:\n self.depth = self.parent.depth + 1\n else:\n self.depth = 0\n\n # pylint: disable=inconsistent-return-statements\n @classmethod\n def make_tree(cls, root, parent=None, is_last=False, criteria=None, max_depth=1):\n \"\"\"Make tree structure from root.\n\n Parameters\n ----------\n root : str\n The root dir.\n parent : _DisplayablePath\n The parent displayable path.\n is_last : bool\n Whether it's the last in this level.\n criteria : function\n The criteria used to filter dir/path.\n max_depth : int\n Maximum depth for search.\n\n \"\"\"\n root = Path(str(root))\n criteria = criteria or cls._default_criteria\n\n displayable_root = cls(root, parent, is_last)\n if displayable_root.depth > max_depth:\n return displayable_root\n yield displayable_root\n\n children = sorted(list(path\n for path in root.iterdir()\n if criteria(path)),\n key=lambda s: str(s).lower())\n count = 1\n for path in children:\n is_last = count == len(children)\n if path.is_dir() and displayable_root.depth < max_depth - 1:\n yield from cls.make_tree(path,\n parent=displayable_root,\n is_last=is_last,\n criteria=criteria,\n max_depth=max_depth)\n else:\n 
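# Leaf node or depth limit reached: emit this entry without recursing further.\n                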
yield cls(path, displayable_root, is_last)\n count += 1\n\n @classmethod\n def _default_criteria(cls, path):\n _ = path\n return True\n\n @property\n def displayname(self):\n if self.path.is_dir():\n return self.path.name + '/'\n return self.path.name\n\n def displayable(self):\n \"\"\"Display string\"\"\"\n if self.parent is None:\n return self.displayname\n\n _filename_prefix = (self.display_filename_prefix_last\n if self.is_last\n else self.display_filename_prefix_middle)\n\n parts = ['{!s} {!s}'.format(_filename_prefix,\n self.displayname)]\n\n parent = self.parent\n while parent and parent.parent is not None:\n parts.append(self.display_parent_prefix_middle\n if parent.is_last\n else self.display_parent_prefix_last)\n parent = parent.parent\n\n return ''.join(reversed(parts))\n\n\nclass PathTree:\n \"\"\"A directory tree structure viewer.\n\n Parameters\n ----------\n root : str or pathlib.Path\n The root directory.\n max_depth : int\n Max depth for recursive sub-folders, please be conservative to not spam the filesystem.\n\n \"\"\"\n def __init__(self, root, max_depth=1):\n self._disp_path = _DisplayablePath.make_tree(Path(root), max_depth=max_depth)\n\n def __str__(self):\n s = '\\n'.join([p.displayable() for p in self._disp_path])\n return s\n\ndef try_import_skimage():\n \"\"\"Try import scikit-image at runtime.\n\n Returns\n -------\n scikit-image module if found. Raise ImportError otherwise\n\n \"\"\"\n msg = \"skimage is required, you can install by package manager, e.g. \" \\\n \"`pip install scikit-image --user` (note that this is unofficial PYPI package).\"\n return try_import('skimage', msg)\n","repo_name":"dmlc/gluon-cv","sub_path":"gluoncv/utils/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":12461,"program_lang":"python","lang":"en","doc_type":"code","stars":5662,"dataset":"github-code","pt":"54"} +{"seq_id":"39547980521","text":"import json\nfrom flask_restplus import Resource, reqparse, inputs\nfrom datetime import datetime\nfrom datetime import datetime, timezone\nfrom flask import current_app, request\nfrom werkzeug.exceptions import BadRequest, NotFound\n\nfrom app.api.mines.permits.permit.models.permit import Permit\nfrom app.api.mines.permits.permit_amendment.models.permit_amendment import PermitAmendment\nfrom app.api.mines.permits.permit_amendment.models.permit_amendment_document import PermitAmendmentDocument\nfrom app.api.mines.permits.permit_conditions.models.standard_permit_conditions import StandardPermitConditions\nfrom app.api.mines.permits.permit_conditions.models.permit_conditions import PermitConditions\nfrom app.api.now_applications.models.now_application import NOWApplication\nfrom app.api.now_applications.models.now_application_identity import NOWApplicationIdentity\nfrom app.api.now_applications.models.application_type_code import ApplicationTypeCode\nfrom app.api.mines.mine.models.mine import Mine\nfrom app.api.parties.party.models.party import Party\nfrom app.api.parties.party_appt.models.mine_party_appt import MinePartyAppointment\nfrom app.extensions import api, db\nfrom app.api.utils.access_decorators import requires_role_view_all, requires_role_edit_permit, requires_role_edit_securities, requires_role_mine_admin\nfrom app.api.utils.resources_mixins import UserMixin\nfrom app.api.mines.response_models import PERMIT_MODEL\nfrom app.api.mines.mine.resources.mine_type import MineType\nfrom app.api.mines.mine.models.mine_type_detail import MineTypeDetail\nfrom app.api.utils.helpers import 
generate_draft_permit_no_suffix, get_preamble_text\n\n\nclass PermitListResource(Resource, UserMixin):\n parser = reqparse.RequestParser(trim=True)\n parser.add_argument(\n 'now_application_guid',\n type=str,\n help='Returns any draft permit and draft permit amendments related to this application.')\n parser.add_argument(\n 'permit_no', type=str, help='Number of the permit being added.', location='json')\n parser.add_argument(\n 'permittee_party_guid',\n type=str,\n help='GUID of the party that is the permittee for this permit.',\n location='json')\n parser.add_argument(\n 'permit_status_code', type=str, location='json', help='Status of the permit being added.')\n parser.add_argument(\n 'received_date',\n type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,\n location='json')\n parser.add_argument(\n 'issue_date',\n type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,\n location='json')\n parser.add_argument(\n 'authorization_end_date',\n type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,\n location='json')\n parser.add_argument(\n 'now_application_guid',\n type=str,\n location='json',\n help='The now_application_guid this permit is related to.')\n parser.add_argument(\n 'issuing_inspector_title',\n type=str,\n location='json',\n help='Title of the Issuing Inspector for this permit.')\n parser.add_argument(\n 'regional_office', type=str, location='json', help='The regional office for this permit.')\n parser.add_argument(\n 'is_exploration',\n type=bool,\n location='json',\n help='Whether the permit is an exploration permit or not.')\n parser.add_argument('description', type=str, location='json', help='Permit description')\n parser.add_argument('uploadedFiles', type=list, location='json', store_missing=False)\n parser.add_argument(\n 'exemption_fee_status_code',\n type=str,\n help='Fee exemption status for the mine.',\n trim=True,\n store_missing=False,\n location='json')\n parser.add_argument(\n 'exemption_fee_status_note',\n type=str,\n help='Fee exemption status note for the mine.',\n trim=True,\n location='json')\n parser.add_argument(\n 'site_properties',\n type=json.dumps,\n store_missing=False,\n help='It includes object of string codes for mine_commodity_code and mine_disturbance_code.',\n location='json')\n parser.add_argument('liability_adjustment', type=str, location='json', store_missing=False)\n parser.add_argument(\n 'security_received_date',\n location='json',\n type=lambda x: inputs.datetime_from_iso8601(x) if x else None,\n store_missing=False)\n parser.add_argument('security_not_required', location='json', type=bool, store_missing=False)\n parser.add_argument(\n 'security_not_required_reason', location='json', type=str, store_missing=False)\n\n @api.doc(params={'mine_guid': 'mine_guid to filter on'})\n @requires_role_view_all\n @api.marshal_with(PERMIT_MODEL, envelope='records', code=200)\n def get(self, mine_guid):\n now_application_guid = request.args.get('now_application_guid')\n if now_application_guid:\n current_app.logger.info('Supplied now_application_guid: ' + str(now_application_guid))\n if now_application_guid is not None:\n permit = Permit.find_by_now_application_guid(now_application_guid)\n results = [permit] if permit else []\n else:\n results = Mine.find_by_mine_guid(mine_guid).mine_permit\n return results\n\n @api.doc(params={'permit_guid': 'Permit guid.'})\n @requires_role_edit_permit\n @api.marshal_with(PERMIT_MODEL, code=201)\n def post(self, mine_guid):\n data = self.parser.parse_args()\n permit_no = 
data.get('permit_no')\n data['site_properties'] = json.loads(data.get('site_properties', '{}'))\n\n mine = Mine.find_by_mine_guid(mine_guid)\n if not mine:\n raise NotFound('There was no mine found with the provided mine_guid.')\n\n identity = NOWApplicationIdentity.find_by_mine_guid(mine.mine_guid)\n application_type_description = None\n if identity:\n application_type = ApplicationTypeCode.find_by_application_type_code(\n identity.application_type_code)\n application_type_description = application_type.description if application_type else None\n\n permittee_party_guid = data.get('permittee_party_guid')\n if permittee_party_guid:\n party = Party.find_by_party_guid(permittee_party_guid)\n if not party:\n raise NotFound('Permittee party not found')\n\n if not permit_no:\n now_application_guid = data.get('now_application_guid')\n if not now_application_guid:\n raise NotFound('There was no Notice of Work found with the provided guid.')\n\n now_application_identity = NOWApplicationIdentity.find_by_guid(now_application_guid)\n now_application = now_application_identity.now_application\n notice_of_work_type_code = now_application.notice_of_work_type_code[0]\n\n permit_prefix = notice_of_work_type_code if notice_of_work_type_code != 'S' else 'G'\n if permit_prefix in ['M', 'C'] and data.get('is_exploration'):\n permit_prefix = permit_prefix + 'X'\n\n if now_application_identity.now_number is not None:\n permit_no = permit_prefix + '-DRAFT-' + str(now_application_identity.now_number)\n # Handle the situation where 'P-DRAFT-None' causes a non-unique error\n else:\n permit_no = permit_prefix + '-DRAFT-' + str(mine.mine_no)\n\n last_draft_permit = Permit.find_by_permit_no_deleted_in_draft(permit_no)\n if last_draft_permit:\n permit_no = generate_draft_permit_no_suffix(last_draft_permit.permit_no, permit_no)\n\n permit = Permit.find_by_permit_no(permit_no)\n if permit:\n raise BadRequest(\"That permit number is already in use.\")\n\n uploadedFiles = data.get('uploadedFiles', [])\n\n # we do not have permit yet so we will use the hybrid property logic at this point\n permit_prefix = permit_no[0]\n Permit.validate_exemption_fee_status(\n data.get('is_exploration'), data.get('permit_status_code'), permit_prefix,\n data.get('site_properties', {}).get('mine_disturbance_code'),\n data.get('site_properties', {}).get('mine_tenure_type_code'),\n data.get('exemption_fee_status_code'))\n\n permit = Permit.create(mine, permit_no, data.get('permit_status_code'),\n data.get('is_exploration'), data.get('exemption_fee_status_code'),\n data.get('exemption_fee_status_note'))\n\n is_generated_in_core = True if permit.permit_status_code == 'D' else False\n\n amendment = PermitAmendment.create(\n permit,\n mine,\n data.get('received_date'),\n data.get('issue_date'),\n data.get('authorization_end_date'),\n 'OGP',\n description='Initial permit issued.',\n issuing_inspector_title=data.get('issuing_inspector_title'),\n regional_office=data.get('regional_office'),\n now_application_guid=data.get('now_application_guid'),\n liability_adjustment=data.get('liability_adjustment'),\n security_received_date=data.get('security_received_date'),\n security_not_required=data.get('security_not_required'),\n security_not_required_reason=data.get('security_not_required_reason'),\n is_generated_in_core=is_generated_in_core)\n\n db.session.add(permit)\n db.session.add(amendment)\n\n now_application_guid = data.get('now_application_guid')\n if now_application_guid is not None and permit.permit_status_code == 'D':\n application_identity = 
NOWApplicationIdentity.find_by_guid(now_application_guid)\n\n application_type_description = None\n if application_identity:\n application_type = ApplicationTypeCode.find_by_application_type_code(\n application_identity.application_type_code)\n application_type_description = 'application' if application_type.application_type_code == 'ADA' else application_type.description\n amendment.preamble_text = get_preamble_text(\n application_type_description) if is_generated_in_core else None\n\n if application_identity.now_application:\n now_type = application_identity.now_application.notice_of_work_type_code\n\n standard_conditions = StandardPermitConditions.find_by_notice_of_work_type_code(\n now_type)\n for condition in standard_conditions:\n PermitConditions.create(condition.condition_category_code,\n condition.condition_type_code,\n amendment.permit_amendment_id, condition.condition,\n condition.display_order, condition.sub_conditions)\n db.session.commit()\n\n for newFile in uploadedFiles:\n new_pa_doc = PermitAmendmentDocument(\n document_name=newFile['fileName'],\n document_manager_guid=newFile['document_manager_guid'],\n mine_guid=mine.mine_guid,\n )\n amendment.related_documents.append(new_pa_doc)\n db.session.commit()\n\n if permittee_party_guid:\n permittee_start_date = data.get('issue_date')\n permittee = MinePartyAppointment.create(\n None,\n permittee_party_guid,\n mine_party_appt_type_code='PMT',\n start_date=permittee_start_date,\n processed_by=self.get_user_info(),\n permit=permit)\n permittee.assign_related_guid('PMT', permit.permit_guid)\n db.session.add(permittee)\n db.session.commit()\n\n #for marshalling\n permit._context_mine = mine\n return permit\n\n\nclass PermitResource(Resource, UserMixin):\n parser = reqparse.RequestParser(trim=True)\n parser.add_argument(\n 'permit_no', type=str, help='Number of the permit being added.', location='json')\n parser.add_argument(\n 'permittee_party_guid',\n type=str,\n help='GUID of the party that is the permittee for this permit.',\n location='json',\n store_missing=False)\n parser.add_argument(\n 'permit_status_code',\n type=str,\n location='json',\n help='Status of the permit being added.',\n store_missing=False)\n parser.add_argument(\n 'remaining_static_liability', type=str, location='json', store_missing=False)\n parser.add_argument(\n 'received_date',\n type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,\n location='json',\n store_missing=False)\n parser.add_argument(\n 'issue_date',\n type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,\n location='json',\n store_missing=False)\n parser.add_argument(\n 'authorization_end_date',\n type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,\n location='json',\n store_missing=False)\n parser.add_argument(\n 'permit_amendment_status_code',\n type=str,\n location='json',\n help='Status of the permit being added.',\n store_missing=False)\n parser.add_argument(\n 'description', type=str, location='json', help='Permit description', store_missing=False)\n parser.add_argument('uploadedFiles', type=list, location='json', store_missing=False)\n parser.add_argument(\n 'now_application_guid',\n type=str,\n help='GUID of the NoW application for the specified permit.',\n location='json',\n store_missing=False)\n parser.add_argument(\n 'exemption_fee_status_code',\n type=str,\n help='Fee exemption status for the mine.',\n trim=True,\n store_missing=False,\n location='json')\n parser.add_argument(\n 'exemption_fee_status_note',\n type=str,\n help='Fee exemption status 
note for the mine.',\n trim=True,\n store_missing=False,\n location='json')\n\n parser.add_argument(\n 'site_properties',\n type=json.dumps,\n location='json',\n store_missing=False,\n help='{ mine_commodity_code, mine_disturbance_code}.')\n\n @api.doc(params={'permit_guid': 'Permit guid.'})\n @requires_role_view_all\n @api.marshal_with(PERMIT_MODEL, code=200)\n def get(self, permit_guid, mine_guid):\n permit = Permit.find_by_permit_guid_or_no(permit_guid)\n if not permit:\n raise NotFound('Permit not found.')\n if mine_guid not in [str(m.mine_guid) for m in permit._all_mines]:\n raise BadRequest('Permit and mine_guid mismatch.')\n return permit\n\n @api.doc(params={'permit_guid': 'Permit guid.'})\n @requires_role_edit_securities\n @api.marshal_with(PERMIT_MODEL, code=200)\n def put(self, permit_guid, mine_guid):\n data = self.parser.parse_args()\n permit = Permit.find_by_permit_guid(permit_guid, mine_guid)\n data['site_properties'] = json.loads(data.get('site_properties', '{}'))\n if not permit:\n raise NotFound('Permit not found.')\n\n is_exploration = permit.permit_no[1] == \"X\" or permit.is_exploration\n Permit.validate_exemption_fee_status(\n is_exploration, data.get('permit_status_code'), permit.permit_prefix,\n data.get('site_properties', {}).get('mine_disturbance_code'),\n data.get('site_properties', {}).get('mine_tenure_type_code'),\n data.get('exemption_fee_status_code'))\n\n if data.get('site_properties') != {}:\n MineType.create_or_update_mine_type_with_details(\n mine_guid=mine_guid,\n permit_guid=permit_guid,\n mine_tenure_type_code=data.get('site_properties', {}).get('mine_tenure_type_code'),\n mine_disturbance_codes=data.get('site_properties',\n {}).get('mine_disturbance_code', []),\n mine_commodity_codes=data.get('site_properties', {}).get('mine_commodity_code', []))\n\n # If the permit status has changed, update the \"status changed\" timestamp.\n permit_status_code = data.get('permit_status_code')\n if permit_status_code and permit_status_code != permit.permit_status_code:\n permit.status_changed_timestamp = datetime.now(timezone.utc)\n\n for key, value in data.items():\n if key in ['permit_no', 'mine_guid', 'uploadedFiles', 'site_properties']:\n continue # non-editable fields from put or should be handled separately\n setattr(permit, key, value)\n\n permit.save()\n return permit\n\n @api.doc(params={'permit_guid': 'Permit guid.'})\n @requires_role_mine_admin\n @api.response(204, 'Successfully deleted.')\n def delete(self, permit_guid, mine_guid):\n permit = Permit.find_by_permit_guid(permit_guid, mine_guid)\n if not permit:\n raise NotFound('Permit not found.')\n\n try:\n permit.delete()\n except Exception as e:\n raise BadRequest(e)\n\n return None, 204\n\n @api.doc(params={'permit_guid': 'Permit guid.', 'now_application_guid': 'NoW application guid'})\n @requires_role_edit_permit\n @api.marshal_with(PERMIT_MODEL, code=200)\n def patch(self, permit_guid, mine_guid):\n permit = Permit.find_by_permit_guid(permit_guid, mine_guid)\n\n if not permit:\n raise NotFound('Permit not found.')\n\n now_application_guid = self.parser.parse_args()['now_application_guid']\n now_application = NOWApplication.find_by_application_guid(now_application_guid)\n\n if not now_application:\n raise NotFound('NoW application not found')\n\n if permit.permit_status_code == 'D':\n #assign permit_no\n permit.assign_permit_no(now_application.notice_of_work_type_code[0])\n\n permit.save()\n return 
permit","repo_name":"PinkDiamond1/mds","sub_path":"services/core-api/app/api/mines/permits/permit/resources/permit.py","file_name":"permit.py","file_ext":"py","file_size_in_byte":18186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"38735363863","text":"from kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\nfrom kivy.uix.image import Image\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.relativelayout import RelativeLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.config import Config\nimport random\n\n# Config.set('graphics', 'resizable', 0)\nConfig.set('graphics', 'width', 400)\nConfig.set('graphics', 'height', 650)\n\n\nclass ListTwo(Screen):#второй экран\n def __init__(self, **kw):\n super(ListTwo, self).__init__(**kw)\n\n def on_enter(self): # Будет вызвана в момент открытия экрана\n background = Image(source='background.png', allow_stretch=True, keep_ratio=False)\n\n label0 = Label(text='ВЫБОР ФИЛЬМА', font_size='20sp', halign='center')\n btn_test = Button(text='Рандом', size_hint_x=.17, on_press=lambda x: set_screen('rand'))\n\n button_next = Button(text='ВЫБРАТЬ', on_press=lambda x: set_screen('ListTree'),\n background_color=[.32, .85, .94, 1], size_hint=(0.4, .4), pos_hint={'x': 0.2, 'y': 0.2})\n\n box_next = BoxLayout(size_hint=(0.4, 0.2), pos_hint={'x': 0.3, 'y': 0})\n box_next.add_widget(button_next)\n\n box = BoxLayout(size_hint=(1, 0.05), pos_hint={'x': 0, 'y': 0.95}, orientation='horizontal')\n box.add_widget(label0)\n box.add_widget(btn_test)\n\n\n layout = GridLayout(cols=3, padding=[40, 0, 40, 0], spacing=5, size_hint=(1, 0.7),\n pos_hint={'x': 0, 'y': 0.2})\n\n btn1 = Button(background_normal='adven.jpg', on_press=lambda x: set_screen('adven'),\n background_down='blue.png')\n btn2 = Button(background_normal='bourn.jpg', on_press=lambda x: set_screen('bourn'),\n background_down='blue.png')\n btn3 = Button(background_normal='darkfields.jpg', on_press=lambda x: set_screen('darkfields'),\n background_down='blue.png')\n btn4 = Button(background_normal='expend.jpg', on_press=lambda x: set_screen('expend'),\n background_down='blue.png')\n btn5 = Button(background_normal='here.jpg', on_press=lambda x: set_screen('here'),\n background_down='blue.png')\n btn6 = Button(background_normal='imperator.jpg', on_press=lambda x: set_screen('imperator'),\n background_down='blue.png')\n btn7 = Button(background_normal='lovewith.jpg', on_press=lambda x: set_screen('lovewith'),\n background_down='blue.png')\n btn8 = Button(background_normal='noeye.jpg', on_press=lambda x: set_screen('noeye'),\n background_down='blue.png')\n btn9 = Button(background_normal='raid.jpg', on_press=lambda x: set_screen('raid'),\n background_down='blue.png')\n layout.add_widget(btn1)\n layout.add_widget(btn2)\n layout.add_widget(btn3)\n layout.add_widget(btn4)\n layout.add_widget(btn5)\n layout.add_widget(btn6)\n layout.add_widget(btn7)\n layout.add_widget(btn8)\n layout.add_widget(btn9)\n\n super_layout = FloatLayout()\n\n super_layout.add_widget(background)\n super_layout.add_widget(layout)\n super_layout.add_widget(box_next)\n\n ultra_super_layoyt = BoxLayout(orientation='vertical')\n ultra_super_layoyt.add_widget(box)\n ultra_super_layoyt.add_widget(super_layout)\n\n self.add_widget(ultra_super_layoyt)\n\n\n\ndef set_screen(name_screen):\n sm.current = name_screen\n\n\nsm = 
ScreenManager()\nsm.add_widget(ListTwo(name='two'))\n\n\n\nclass FoodOptionsApp(App):\n def build(self):\n return sm\n\n\nif __name__ == '__main__':\n FoodOptionsApp().run()\n","repo_name":"Victor101020/kivi-movie-selection-app","sub_path":"maincopy.py","file_name":"maincopy.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28910885","text":"import json as json\nimport os\n\n\ndef easy_new_Recipe():\n name = input(\"Name of Recipe\")\n ingredients = [input(\"ingredients:\").split(\"/\")]\n method = input(\"method\")\n link = input(\"link\")\n new_Recipe(\"id, name, ingredients, method, link=None\")\n\n\ndef new_Recipe(name, ingredients, method, link=None):\n '''\n Need to check if the file already exists, use name remove white space\n '''\n recipe_Dict = {\n \"id\":set_Recipe_ID(),\n \"name\":name,\n \"ingredients\":ingredients,\n \"method\":method,\n \"link\":link\n }\n\n filename = \"Meals/{} - {}.json\".format(str(recipe_Dict[\"id\"]), recipe_Dict[\"name\"])\n\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, \"w\") as outfile:\n outfile.write(json.dumps(recipe_Dict, indent=4))\n\ndef set_Recipe_ID():\n # go through folder, get largest num add one\n id = 1\n for file in os.listdir(\"Meals/\"):\n if file.startswith(str(id)):\n id += 1\n else:\n return id\n\n\ndef ingredients_reader(txt):\n return [tuple(item.split(\",\")) for item in txt.split(\"/\")]\n\n\n\ndef read_Recipe(id):\n for file in os.listdir(\"Meals/\"):\n if file.startswith(str(id)):\n filepath = \"Meals/\"+file\n \n with open(filepath, 'r') as openfile:\n json_Obj = json.load(openfile)\n \n print(json_Obj)\n","repo_name":"JAGOD123/MealPlanner","sub_path":"recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18011377841","text":"##################################################\n# Purpose: Script to set and run other scripts #\n# Author: Amy Andrews #\n# Resources used:\n# Pytorch documentation https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html\n# Pytorch documentation https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html\n##################################################\nimport torch.cuda\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader, Subset\nimport random\nimport pprint\n\nfrom load_data import load_data\nfrom set_args import get_args\nfrom utils import *\nfrom train_loop import train_model\n\n\ndef main(args):\n\n # seed setting for repro\n random.seed(args['seed'])\n torch.manual_seed(args['seed'])\n torch.cuda.manual_seed_all(args['seed'])\n cudnn.deterministic = True\n cudnn.benchmark = False\n\n device = torch.device(args['device'])\n\n # model_bt for model before training, just 'model' after\n if 'pretrained_model' not in args or args['pretrained_model'] == '':\n model_bt, checkpoint = load_model(args)\n elif args['pretrained_model'] != '':\n path = args['mod_dir'] + args['pretrained_model']\n model_bt, checkpoint = load_model(args, load_from_path=path)\n\n # training loop for different scenarios\n if args['train_loss'] == 'supervised':\n train_set, val_set, test_set = load_data(args)\n train = DataLoader(train_set, batch_size=args['batch_size'], shuffle=True, num_workers=args['num_workers'], pin_memory=True)\n val = 
DataLoader(val_set, batch_size=args['batch_size'], shuffle=False, num_workers=args['num_workers'], pin_memory=True)\n print('Train set length:', len(train_set))\n elif args['train_loss'] == 'fixmatch':\n supervised_set, unsupervised_set, val_set, test_set = load_data(args)\n # set batch size for unlabelled data using fixmatch arguments\n unsupervised_batch_size = args['batch_size'] * args['fm_ratio_mu']\n supervised = DataLoader(supervised_set, batch_size=args['batch_size'], shuffle=True, num_workers=args['num_workers'],\n drop_last=True, pin_memory=True)\n unsupervised = DataLoader(unsupervised_set, batch_size=unsupervised_batch_size, shuffle=True, num_workers=args['num_workers'],\n drop_last=True, pin_memory=True)\n val = DataLoader(val_set, batch_size=args['batch_size'], shuffle=False, num_workers=args['num_workers'], pin_memory=True)\n train = [supervised, unsupervised]\n\n opt = optim.SGD(model_bt.parameters(), lr=args['learning_rate'],\n weight_decay=args['weight_decay'], momentum=args['momentum'], nesterov=True)\n if args['scheduler'] == 'cosine':\n # from https://github.com/Celiali/FixMatch/blob/main/experiments/experiment.py\n scheduler = get_cosine_schedule_with_warmup(optimizer=opt, num_warmup_steps=0,\n num_training_steps=2**20) # hard coded rather than num iters for replication\n elif args['scheduler'] == 'plateau' or args['scheduler'] == 'plateau_train':\n scheduler = lr_scheduler.ReduceLROnPlateau(opt, 'min', verbose=True)\n\n # set paths and names\n args = assign_paths(args)\n\n # run\n model, ema_model, res = train_model(model_bt, opt, scheduler, train, val, args, checkpoint=checkpoint)\n\n # report where model and results are stored\n print(f'Model stored at {args[\"mod_path\"]} after {res[\"total_epochs_trained\"]} epochs')\n\n\n# run if running this file\nif __name__ == '__main__':\n\n args = get_args(bash_parser=True)\n\n print('\\n**********************************')\n print('Experiment :', args['train_loss'] + '_' + args['mod_type'] + '_' + args['scheduler'])\n print('Dataset :', args['dataset'])\n pprint.pprint(args)\n print('************************************')\n\n main(args)","repo_name":"myndrws/ssal_ct","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15440410340","text":"from datetime import datetime, timedelta\nfrom django.conf import settings\n\nfrom app.models.company import Company\nfrom app.dtos.notification.email_data import EmailData\n\nfrom .event_handler_base import EventHandlerBase\nfrom ..events.company_daily_employee_data_change_report_event import CompanyDailyEmployeeDataChangeReportEvent\nfrom app.service.send_email_service import SendEmailService\nfrom app.service.data_modification_service import DataModificationService\n\n\nclass CompanyDailyEmployeeDataChangeReportEventHandler(EventHandlerBase):\n\n def __init__(self):\n super(CompanyDailyEmployeeDataChangeReportEventHandler, self).__init__(CompanyDailyEmployeeDataChangeReportEvent)\n self._send_email_service = SendEmailService()\n self._data_modification_service = DataModificationService()\n \n def _internal_handle(self, event):\n if (not event.company_id):\n raise ValueError('The event is expected to provide company_id, which is missing!')\n\n emails = self._send_email_service.get_employer_emails_by_company(event.company_id)\n\n # Get employee data modification summery records\n mod_summaries = 
self._data_modification_service.employee_modifications_summary(event.company_id, 24 * 60)\n if (len(mod_summaries) <= 0):\n # No modifications detected. Do not send anything\n return\n\n email_data = self._get_email_data(event.company_id, mod_summaries)\n\n self._send_email_service.send_support_email(\n emails, email_data.subject, email_data.context_data,\n email_data.html_template_path, email_data.txt_template_path\n )\n\n def _get_email_data(self, company_id, data_modification_summaries):\n # Get display date\n date_text = self._get_display_date()\n\n # Now prepare the email content data\n subject = '[System Notification - {0}] Employee Data Change Notification'.format(date_text)\n\n html_template_path = 'email/user_data_change_notification.html'\n txt_template_path = 'email/user_data_change_notification.txt'\n\n company_users_collection = [{ \n 'company': Company.objects.get(pk=company_id),\n 'mod_summary_list': data_modification_summaries,\n }]\n\n context_data = { \n 'date': date_text\n }\n context_data = {\n 'context_data':context_data,\n 'company_users_collection':company_users_collection,\n 'site_url':settings.SITE_URL\n }\n\n return EmailData(subject, html_template_path, txt_template_path, context_data, False) \n\n def _get_display_date(self):\n now = datetime.now()\n date = now - timedelta(hours=12)\n return date.strftime('%m/%d/%Y')\n","repo_name":"smoothbenefits/BenefitMY_Python","sub_path":"app/service/event_bus/event_handlers/company_daily_employee_data_change_report_event_handler.py","file_name":"company_daily_employee_data_change_report_event_handler.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25825031662","text":"\"\"\"Config flow for Custom Plant integration.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nimport re\nfrom typing import Any\nimport urllib.parse\n\nimport voluptuous as vol\n\nfrom homeassistant import config_entries, data_entry_flow\nfrom homeassistant.components.sensor import SensorDeviceClass\nfrom homeassistant.const import (\n ATTR_DEVICE_CLASS,\n ATTR_DOMAIN,\n ATTR_ENTITY_PICTURE,\n ATTR_NAME,\n)\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.helpers import config_validation as cv\nfrom homeassistant.helpers.network import NoURLAvailableError, get_url\nfrom homeassistant.helpers.selector import selector\n\nfrom .const import (\n ATTR_ENTITY,\n ATTR_LIMITS,\n ATTR_OPTIONS,\n ATTR_SEARCH_FOR,\n ATTR_SELECT,\n ATTR_SENSORS,\n ATTR_SPECIES,\n CONF_MAX_CONDUCTIVITY,\n CONF_MAX_DLI,\n CONF_MAX_HUMIDITY,\n CONF_MAX_ILLUMINANCE,\n CONF_MAX_MOISTURE,\n CONF_MAX_TEMPERATURE,\n CONF_MIN_CONDUCTIVITY,\n CONF_MIN_DLI,\n CONF_MIN_HUMIDITY,\n CONF_MIN_ILLUMINANCE,\n CONF_MIN_MOISTURE,\n CONF_MIN_TEMPERATURE,\n DATA_SOURCE,\n DATA_SOURCE_PLANTBOOK,\n DOMAIN,\n DOMAIN_PLANTBOOK,\n DOMAIN_SENSOR,\n FLOW_CONDUCTIVITY_TRIGGER,\n FLOW_DLI_TRIGGER,\n FLOW_ERROR_NOTFOUND,\n FLOW_FORCE_SPECIES_UPDATE,\n FLOW_HUMIDITY_TRIGGER,\n FLOW_ILLUMINANCE_TRIGGER,\n FLOW_MOISTURE_TRIGGER,\n FLOW_PLANT_INFO,\n FLOW_PLANT_LIMITS,\n FLOW_RIGHT_PLANT,\n FLOW_SENSOR_CONDUCTIVITY,\n FLOW_SENSOR_HUMIDITY,\n FLOW_SENSOR_ILLUMINANCE,\n FLOW_SENSOR_MOISTURE,\n FLOW_SENSOR_TEMPERATURE,\n FLOW_STRING_DESCRIPTION,\n FLOW_TEMP_UNIT,\n FLOW_TEMPERATURE_TRIGGER,\n OPB_DISPLAY_PID,\n)\nfrom .plant_helpers import PlantHelper\n\n_LOGGER = logging.getLogger(__name__)\n\n\n@config_entries.HANDLERS.register(DOMAIN)\nclass 
PlantConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):\n \"\"\"Handle a config flow for Plants.\"\"\"\n\n VERSION = 1\n\n def __init__(self):\n self.plant_info = {}\n self.error = None\n\n @staticmethod\n @callback\n def async_get_options_flow(\n config_entry: config_entries.ConfigEntry,\n ) -> config_entries.OptionsFlow:\n \"\"\"Create the options flow.\"\"\"\n return OptionsFlowHandler(config_entry)\n\n async def async_step_import(self, import_input):\n \"\"\"Importing config from configuration.yaml\"\"\"\n _LOGGER.debug(import_input)\n # return FlowResultType.ABORT\n return self.async_create_entry(\n title=import_input[FLOW_PLANT_INFO][ATTR_NAME],\n data=import_input,\n )\n\n async def async_step_user(self, user_input=None):\n \"\"\"Handle the initial step.\"\"\"\n errors = {}\n if user_input is not None:\n _LOGGER.debug(\"User Input %s\", user_input)\n # Validate user input\n valid = await self.validate_step_1(user_input)\n if valid:\n # Store info to use in next step\n self.plant_info = user_input\n self.plant_info[ATTR_SEARCH_FOR] = user_input[ATTR_SPECIES]\n _LOGGER.debug(\"Plant_info: %s\", self.plant_info)\n\n # Return the form of the next step\n return await self.async_step_select_species()\n\n # Specify items in the order they are to be displayed in the UI\n if self.error == FLOW_ERROR_NOTFOUND:\n errors[ATTR_SPECIES] = self.error\n data_schema = {\n vol.Required(ATTR_NAME, default=self.plant_info.get(ATTR_NAME)): cv.string,\n vol.Optional(\n ATTR_SPECIES, default=self.plant_info.get(ATTR_SPECIES, \"\")\n ): cv.string,\n }\n\n data_schema[FLOW_SENSOR_TEMPERATURE] = selector(\n {\n ATTR_ENTITY: {\n ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,\n ATTR_DOMAIN: DOMAIN_SENSOR,\n }\n }\n )\n data_schema[FLOW_SENSOR_MOISTURE] = selector(\n {\n ATTR_ENTITY: {\n ATTR_DOMAIN: DOMAIN_SENSOR,\n }\n }\n )\n data_schema[FLOW_SENSOR_CONDUCTIVITY] = selector(\n {ATTR_ENTITY: {ATTR_DOMAIN: DOMAIN_SENSOR}}\n )\n data_schema[FLOW_SENSOR_ILLUMINANCE] = selector(\n {\n ATTR_ENTITY: {\n ATTR_DEVICE_CLASS: SensorDeviceClass.ILLUMINANCE,\n ATTR_DOMAIN: DOMAIN_SENSOR,\n }\n }\n )\n data_schema[FLOW_SENSOR_HUMIDITY] = selector(\n {\n ATTR_ENTITY: {\n ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,\n ATTR_DOMAIN: DOMAIN_SENSOR,\n }\n }\n )\n\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(data_schema),\n errors=errors,\n description_placeholders={\"opb_search\": self.plant_info.get(ATTR_SPECIES)},\n )\n\n async def async_step_select_species(self, user_input=None):\n \"\"\"Search the openplantbook\"\"\"\n errors = {}\n\n if user_input is not None:\n _LOGGER.debug(\"User Input %s\", user_input)\n # Validate user input\n valid = await self.validate_step_2(user_input)\n if valid:\n # Store info to use in next step\n self.plant_info[DATA_SOURCE] = DOMAIN_PLANTBOOK\n self.plant_info[ATTR_SPECIES] = user_input[ATTR_SPECIES]\n\n # Return the form of the next step\n _LOGGER.debug(\"Plant_info: %s\", self.plant_info)\n return await self.async_step_limits()\n plant_helper = PlantHelper(self.hass)\n search_result = await plant_helper.openplantbook_search(\n species=self.plant_info[ATTR_SEARCH_FOR]\n )\n if search_result is None:\n return await self.async_step_limits()\n dropdown = []\n for pid, display_pid in search_result.items():\n dropdown.append({\"label\": display_pid, \"value\": pid})\n _LOGGER.debug(\"Dropdown: %s\", dropdown)\n data_schema = {}\n data_schema[ATTR_SPECIES] = selector({ATTR_SELECT: {ATTR_OPTIONS: dropdown}})\n\n return self.async_show_form(\n 
step_id=\"select_species\",\n data_schema=vol.Schema(data_schema),\n errors=errors,\n description_placeholders={\n \"opb_search\": self.plant_info[ATTR_SPECIES],\n FLOW_STRING_DESCRIPTION: \"Results from OpenPlantbook\",\n },\n )\n\n async def async_step_limits(self, user_input=None):\n \"\"\"Handle max/min values\"\"\"\n\n plant_helper = PlantHelper(self.hass)\n if user_input is not None:\n _LOGGER.debug(\"User Input %s\", user_input)\n # Validate user input\n valid = await self.validate_step_3(user_input)\n if (\n plant_helper.has_openplantbook\n and self.plant_info.get(ATTR_SEARCH_FOR)\n and self.plant_info.get(DATA_SOURCE) == DOMAIN_PLANTBOOK\n and not user_input.get(FLOW_RIGHT_PLANT)\n ):\n return await self.async_step_select_species()\n if valid:\n self.plant_info[ATTR_ENTITY_PICTURE] = user_input.get(\n ATTR_ENTITY_PICTURE\n )\n self.plant_info[OPB_DISPLAY_PID] = user_input.get(OPB_DISPLAY_PID)\n if not self.plant_info[ATTR_SPECIES]:\n self.plant_info[ATTR_SPECIES] = self.plant_info[OPB_DISPLAY_PID]\n user_input.pop(ATTR_ENTITY_PICTURE)\n user_input.pop(OPB_DISPLAY_PID)\n if FLOW_RIGHT_PLANT in user_input:\n user_input.pop(FLOW_RIGHT_PLANT)\n self.plant_info[FLOW_PLANT_LIMITS] = user_input\n _LOGGER.debug(\"Plant_info: %s\", self.plant_info)\n # Return the form of the next step\n return await self.async_step_limits_done()\n\n data_schema = {}\n plant_config = await plant_helper.generate_configentry(\n config={\n ATTR_NAME: self.plant_info[ATTR_NAME],\n ATTR_SPECIES: self.plant_info[ATTR_SPECIES],\n ATTR_SENSORS: {},\n }\n )\n extra_desc = \"\"\n if plant_config[FLOW_PLANT_INFO].get(OPB_DISPLAY_PID):\n # We got data from OPB. Display a \"wrong plant\" switch\n data_schema[vol.Optional(FLOW_RIGHT_PLANT, default=True)] = cv.boolean\n\n display_pid = plant_config[FLOW_PLANT_INFO].get(OPB_DISPLAY_PID)\n else:\n if plant_helper.has_openplantbook:\n # We did not get any data from OPB. Show a warning\n if (\n not self.plant_info[ATTR_SEARCH_FOR]\n or self.plant_info[ATTR_SEARCH_FOR] == \"\"\n ):\n extra_desc = \"Skipping OpenPlantbook due to missing species. Using default values for thresholds.
<br /><br />\"\n                else:\n                    extra_desc = f\"Did not find **«{self.plant_info[ATTR_SEARCH_FOR]}»** in OpenPlantbook. Using default values for thresholds.<br /><br />
\"\n display_pid = self.plant_info[ATTR_SEARCH_FOR].title() or \"\"\n data_schema[\n vol.Optional(\n OPB_DISPLAY_PID,\n default=display_pid,\n )\n ] = cv.string\n data_schema[\n vol.Required(\n CONF_MAX_MOISTURE,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(\n CONF_MAX_MOISTURE\n ),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MIN_MOISTURE,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(\n CONF_MIN_MOISTURE\n ),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MAX_ILLUMINANCE,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(\n CONF_MAX_ILLUMINANCE\n ),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MIN_ILLUMINANCE,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(\n CONF_MIN_ILLUMINANCE\n ),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MAX_DLI,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(CONF_MAX_DLI),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MIN_DLI,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(CONF_MIN_DLI),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MAX_TEMPERATURE,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(\n CONF_MAX_TEMPERATURE\n ),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MIN_TEMPERATURE,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(\n CONF_MIN_TEMPERATURE\n ),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MAX_CONDUCTIVITY,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(\n CONF_MAX_CONDUCTIVITY\n ),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MIN_CONDUCTIVITY,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(\n CONF_MIN_CONDUCTIVITY\n ),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MAX_HUMIDITY,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(\n CONF_MAX_HUMIDITY\n ),\n )\n ] = int\n data_schema[\n vol.Required(\n CONF_MIN_HUMIDITY,\n default=plant_config[FLOW_PLANT_INFO][ATTR_LIMITS].get(\n CONF_MIN_HUMIDITY\n ),\n )\n ] = int\n\n data_schema[\n vol.Optional(\n ATTR_ENTITY_PICTURE,\n default=plant_config[FLOW_PLANT_INFO].get(ATTR_ENTITY_PICTURE),\n )\n ] = str\n entity_picture = plant_config[FLOW_PLANT_INFO].get(ATTR_ENTITY_PICTURE)\n if not entity_picture.startswith(\"http\"):\n try:\n entity_picture = f\"{get_url(self.hass, require_current_request=True)}{urllib.parse.quote(entity_picture)}\"\n except NoURLAvailableError:\n _LOGGER.error(\n \"No internal or external url found. Please configure these in HA General Settings\"\n )\n entity_picture = \"\"\n return self.async_show_form(\n step_id=\"limits\",\n data_schema=vol.Schema(data_schema),\n description_placeholders={\n ATTR_ENTITY_PICTURE: entity_picture,\n ATTR_NAME: plant_config[FLOW_PLANT_INFO].get(ATTR_NAME),\n FLOW_TEMP_UNIT: self.hass.config.units.temperature_unit,\n \"br\": \"
\",\n \"extra_desc\": extra_desc,\n },\n )\n\n async def async_step_limits_done(self, user_input=None):\n \"\"\"After limits are set\"\"\"\n return self.async_create_entry(\n title=self.plant_info[ATTR_NAME],\n data={FLOW_PLANT_INFO: self.plant_info},\n )\n\n async def validate_step_1(self, user_input):\n \"\"\"Validate step one\"\"\"\n _LOGGER.debug(\"Validating step 1: %s\", user_input)\n return True\n\n async def validate_step_2(self, user_input):\n \"\"\"Validate step two\"\"\"\n _LOGGER.debug(\"Validating step 2: %s\", user_input)\n\n if not ATTR_SPECIES in user_input:\n return False\n if not isinstance(user_input[ATTR_SPECIES], str):\n return False\n if len(user_input[ATTR_SPECIES]) < 5:\n return False\n _LOGGER.debug(\"Valid\")\n\n return True\n\n async def validate_step_3(self, user_input):\n \"\"\"Validate step three\"\"\"\n _LOGGER.debug(\"Validating step 3: %s\", user_input)\n\n return True\n\n async def validate_step_4(self, user_input):\n \"\"\"Validate step four\"\"\"\n return True\n\n\nclass OptionsFlowHandler(config_entries.OptionsFlow):\n \"\"\"Handling opetions for plant\"\"\"\n\n def __init__(\n self,\n entry: config_entries.ConfigEntry,\n ) -> None:\n \"\"\"Initialize options flow.\"\"\"\n\n entry.async_on_unload(entry.add_update_listener(self.update_plant_options))\n\n self.plant = None\n self.entry = entry\n\n async def async_step_init(\n self, user_input: dict[str, Any] | None = None\n ) -> data_entry_flow.FlowResult:\n \"\"\"Manage the options.\"\"\"\n if user_input is not None:\n if ATTR_SPECIES not in user_input or not re.match(\n r\"\\w+\", user_input[ATTR_SPECIES]\n ):\n user_input[ATTR_SPECIES] = \"\"\n if ATTR_ENTITY_PICTURE not in user_input or not re.match(\n r\"(\\/)?\\w+\", user_input[ATTR_ENTITY_PICTURE]\n ):\n user_input[ATTR_ENTITY_PICTURE] = \"\"\n if OPB_DISPLAY_PID not in user_input or not re.match(\n r\"\\w+\", user_input[OPB_DISPLAY_PID]\n ):\n user_input[OPB_DISPLAY_PID] = \"\"\n\n return self.async_create_entry(title=\"\", data=user_input)\n\n self.plant = self.hass.data[DOMAIN][self.entry.entry_id][\"plant\"]\n plant_helper = PlantHelper(hass=self.hass)\n data_schema = {}\n data_schema[\n vol.Optional(\n ATTR_SPECIES, description={\"suggested_value\": self.plant.species}\n )\n ] = cv.string\n if plant_helper.has_openplantbook and self.plant.species:\n data_schema[\n vol.Optional(FLOW_FORCE_SPECIES_UPDATE, default=False)\n ] = cv.boolean\n\n display_species = self.plant.display_species or \"\"\n data_schema[\n vol.Optional(\n OPB_DISPLAY_PID, description={\"suggested_value\": display_species}\n )\n ] = str\n entity_picture = self.plant.entity_picture or \"\"\n data_schema[\n vol.Optional(\n ATTR_ENTITY_PICTURE, description={\"suggested_value\": entity_picture}\n )\n ] = str\n\n data_schema[\n vol.Optional(\n FLOW_ILLUMINANCE_TRIGGER, default=self.plant.illuminance_trigger\n )\n ] = cv.boolean\n data_schema[\n vol.Optional(FLOW_DLI_TRIGGER, default=self.plant.dli_trigger)\n ] = cv.boolean\n\n data_schema[\n vol.Optional(FLOW_HUMIDITY_TRIGGER, default=self.plant.humidity_trigger)\n ] = cv.boolean\n data_schema[\n vol.Optional(\n FLOW_TEMPERATURE_TRIGGER, default=self.plant.temperature_trigger\n )\n ] = cv.boolean\n data_schema[\n vol.Optional(FLOW_MOISTURE_TRIGGER, default=self.plant.moisture_trigger)\n ] = cv.boolean\n data_schema[\n vol.Optional(\n FLOW_CONDUCTIVITY_TRIGGER, default=self.plant.conductivity_trigger\n )\n ] = cv.boolean\n\n # data_schema[vol.Optional(CONF_CHECK_DAYS, default=self.plant.check_days)] = int\n\n return 
self.async_show_form(step_id=\"init\", data_schema=vol.Schema(data_schema))\n\n async def update_plant_options(\n self, hass: HomeAssistant, entry: config_entries.ConfigEntry\n ):\n \"\"\"Handle options update.\"\"\"\n\n _LOGGER.debug(\n \"Update plant options begin for %s Data %s, Options: %s\",\n entry.entry_id,\n entry.options,\n entry.data,\n )\n entity_picture = entry.options.get(ATTR_ENTITY_PICTURE)\n\n if entity_picture is not None:\n if entity_picture == \"\":\n self.plant.add_image(entity_picture)\n else:\n try:\n url = cv.url(entity_picture)\n _LOGGER.debug(\"Url 1 %s\", url)\n # pylint: disable=broad-except\n except Exception as exc1:\n _LOGGER.warning(\"Not a valid url: %s\", entity_picture)\n if entity_picture.startswith(\"/local/\"):\n try:\n url = cv.path(entity_picture)\n _LOGGER.debug(\"Url 2 %s\", url)\n except Exception as exc2:\n _LOGGER.warning(\"Not a valid path: %s\", entity_picture)\n raise vol.Invalid(\n f\"Invalid URL: {entity_picture}\"\n ) from exc2\n else:\n raise vol.Invalid(f\"Invalid URL: {entity_picture}\") from exc1\n _LOGGER.debug(\"Update image to %s\", entity_picture)\n self.plant.add_image(entity_picture)\n\n new_display_species = entry.options.get(OPB_DISPLAY_PID)\n if new_display_species is not None:\n self.plant.display_species = new_display_species\n\n new_species = entry.options.get(ATTR_SPECIES)\n force_new_species = entry.options.get(FLOW_FORCE_SPECIES_UPDATE)\n if new_species is not None and (\n new_species != self.plant.species or force_new_species is True\n ):\n _LOGGER.debug(\n \"Species changed from '%s' to '%s'\", self.plant.species, new_species\n )\n plant_helper = PlantHelper(hass=self.hass)\n plant_config = await plant_helper.generate_configentry(\n config={\n ATTR_SPECIES: new_species,\n ATTR_ENTITY_PICTURE: entity_picture,\n OPB_DISPLAY_PID: new_display_species,\n FLOW_FORCE_SPECIES_UPDATE: force_new_species,\n }\n )\n if plant_config[DATA_SOURCE] == DATA_SOURCE_PLANTBOOK:\n self.plant.species = new_species\n self.plant.add_image(plant_config[FLOW_PLANT_INFO][ATTR_ENTITY_PICTURE])\n self.plant.display_species = plant_config[FLOW_PLANT_INFO][\n OPB_DISPLAY_PID\n ]\n for key, value in plant_config[FLOW_PLANT_INFO][\n FLOW_PLANT_LIMITS\n ].items():\n set_entity = getattr(self.plant, key)\n _LOGGER.debug(\"Entity: %s To: %s\", set_entity, value)\n set_entity_id = set_entity.entity_id\n _LOGGER.debug(\n \"Setting %s to %s\",\n set_entity_id,\n value,\n )\n\n self.hass.states.async_set(\n set_entity_id,\n new_state=value,\n attributes=self.hass.states.get(set_entity_id).attributes,\n )\n\n else:\n self.plant.species = new_species\n\n # We need to reset the force_update option back to False, or else\n # this will only be run once (unchanged options are will not trigger the flow)\n options = dict(entry.options)\n data = dict(entry.data)\n options[FLOW_FORCE_SPECIES_UPDATE] = False\n options[OPB_DISPLAY_PID] = self.plant.display_species\n options[ATTR_ENTITY_PICTURE] = self.plant.entity_picture\n _LOGGER.debug(\n \"Doing a refresh to update values: Data: %s Options: %s\",\n data,\n options,\n )\n\n hass.config_entries.async_update_entry(entry, data=data, options=options)\n _LOGGER.debug(\"Update plant options done for %s\", entry.entry_id)\n self.plant.update_registry()\n","repo_name":"Olen/homeassistant-plant","sub_path":"custom_components/plant/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":22173,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"54"} 
+{"seq_id":"26738259760","text":"import os\nimport numpy as np\nimport lumapi\n\nnp.random.seed(seed = 98765)\n\nfrom lumopt.utilities.wavelengths import Wavelengths\nfrom lumopt.utilities.materials import Material\nfrom lumopt.geometries.polygon import FunctionDefinedPolygon\nfrom lumopt.figures_of_merit.modematch import ModeMatch\nfrom lumopt.optimizers.generic_optimizers import ScipyOptimizers\nfrom lumopt.optimization import Optimization\n\n######## SPECTRAL RANGE #########\nwavelengths = Wavelengths(start = 1530.0e-9, stop = 1570.0e-9, points = 25)\n\n######## OPTIMIZABLE GEOMETRY ########\nn_grates = 20\nwg_height = 220.0e-9\nwg_length = 30.0e-6\netch_depth = 0.8\nx0 = -6.0e-6\ny0 = 0.0\ndef grate_function(params):\n y2 = y0+wg_height\n y1 = y2-etch_depth*wg_height\n x_begin = x0-wg_length\n verts = np.array( [ [x_begin,y0], [x_begin,y2], [x0,y2], [x0,y1] ] )\n xp = float(x0)\n for idx in range(n_grates):\n a = params[2*idx]*1e-6\n b = params[2*idx+1]*1e-6\n pitch = a+b\n verts = np.concatenate((verts, [[xp+a, y1], [xp+a, y2], [xp+pitch, y2], [xp+pitch, y1]] ), axis = 0)\n xp += pitch\n verts = np.concatenate((verts, [[xp, y1], [xp, y0]]), axis = 0)\n return verts\ninitial_params = np.zeros(2*n_grates)\nfor idx in range(n_grates):\n initial_params[2*idx] = 0.1+0.2*np.sin(np.pi/2.0*idx/n_grates)*np.random.random()\n initial_params[2*idx+1] = 0.7-initial_params[2*idx]\nbounds = [(0.1, 0.9)] * (2*n_grates)\nSi = Material(base_epsilon = 3.47668**2, mesh_order = 2)\ngeometry = FunctionDefinedPolygon(func = grate_function, \n initial_params = initial_params,\n bounds = bounds,\n z = 0.0,\n depth = wg_height,\n eps_out = 1.0 ** 2,\n eps_in = Si,\n edge_precision = 5,\n dx = 1.0e-5)\n\n######## FIGURE OF MERIT ########\nfom = ModeMatch(monitor_name = 'fom',\n mode_number = 'fundamental TM mode',\n direction = 'Backward',\n target_T_fwd = lambda wl: 0.5*np.ones(wl.size),\n norm_p = 1,\n target_fom = 0.0)\n\n######## OPTIMIZATION ALGORITHM ########\noptimizer = ScipyOptimizers(max_iter = 200,\n method = 'L-BFGS-B',\n scaling_factor = 1.0,\n pgtol = 1.0e-5,\n ftol = 1.0e-5,\n scale_initial_gradient_to = 0.0)\n\n######## BASE 2-D SIMULATION ########\nproj_path = os.path.join(os.path.dirname(__file__))\nbase_sim = os.path.join(proj_path, 'grating_coupler_base.fsp')\n\n######## PUT 2-D OPTIMIZATION TOGETHER ########\nopt = Optimization(base_script = base_sim,\n wavelengths = wavelengths,\n fom = fom,\n geometry = geometry,\n optimizer = optimizer,\n use_var_fdtd = False,\n hide_fdtd_cad = False,\n use_deps = True,\n plot_history = True,\n store_all_simulations = True,\n save_global_index = False,\n label = None)\n\n######## RUN THE 2-D OPTIMIZATION ########\nres = opt.run()\n\n######## 2-D SIMULATION WITH OPTIMIZED STRUCTURE ########\nfinal_sim = os.path.join(proj_path, 'optimized_grating_coupler_etch_0p8_bandwidth_40nm.fsp')\nwith lumapi.FDTD() as fdtd:\n fdtd.load(base_sim)\n fdtd.addpoly()\n fdtd.setnamed('polygon', 'vertices', grate_function(res[1]))\n fdtd.setnamed('polygon', 'material', Si.name)\n fdtd.setnamed('polygon', 'index', np.sqrt(Si.base_epsilon))\n fdtd.setnamed('polygon', 'x', x0)\n fdtd.setnamed('polygon', 'y', y0)\n fdtd.setnamed('back', 'enabled', True)","repo_name":"hiepdxphysics/Lumerical-Coding","sub_path":"grating_coupler_opt.py","file_name":"grating_coupler_opt.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"39848784517","text":"from tkinter import *\nimport 
math\n\nclass Calculator:\n def input_value(self, val):\n self.entry_value.insert(\"end\", val)\n\n def clear_all(self):\n self.entry_value.delete(0, \"end\")\n\n def get_result(self):\n try:\n return_value=eval(self.entry_value.get())\n f=open(\"calc_log.txt\", \"w\")\n f.write(self.entry_value.get())\n f.close()\n except SyntaxError or NameError:\n self.entry_value.delete(0, \"end\")\n self.entry_value.insert(0, 'Input Error, Press AC button')\n else:\n self.entry_value.delete(0, \"end\")\n self.entry_value.insert(0, return_value)\n\n def get_last_log(self):\n f=open(\"calc_log.txt\", \"r\")\n read_value=f.read()\n self.entry_value.delete(0, \"end\")\n self.entry_value.insert(0, read_value)\n f.close()\n\n def __init__(self, main):\n main.title(\"Calculator\")\n main.geometry()\n\n self.entry_value=Entry(main, width=40, justify=RIGHT)\n self.entry_value.grid(row=0, column=0, columnspan=3)\n self.entry_value.focus_set()\n\n Button(main, text=\"=\", width=20, command=lambda: self.get_result()).grid(row=5, column=2, columnspan=2)\n Button(main, text='AC', width=10, command=lambda: self.clear_all()).grid(row=4, column=0)\n Button(main, text=\"<\", width=10, command=lambda: self.get_last_log()).grid(row=0, column=3)\n Button(main, text=\"+\", width=10, command=lambda: self.input_value('+')).grid(row=1, column=3)\n Button(main, text=\"-\", width=10, command=lambda: self.input_value('-')).grid(row=2, column=3)\n Button(main, text=\"x\", width=10, command=lambda: self.input_value('*')).grid(row=3, column=3)\n Button(main, text=\"÷\", width=10, command=lambda: self.input_value('/')).grid(row=4, column=3)\n Button(main, text=\".\", width=10, command=lambda: self.input_value('.')).grid(row=4, column=2)\n Button(main, text=\"(\", width=10, command=lambda: self.input_value('(')).grid(row=5, column=0)\n Button(main, text=\")\", width=10, command=lambda: self.input_value(')')).grid(row=5, column=1)\n Button(main, text=\"7\", width=10, command=lambda: self.input_value(7)).grid(row=1, column=0)\n Button(main, text=\"8\", width=10, command=lambda: self.input_value(8)).grid(row=1, column=1)\n Button(main, text=\"9\", width=10, command=lambda: self.input_value(9)).grid(row=1, column=2)\n Button(main, text=\"4\", width=10, command=lambda: self.input_value(4)).grid(row=2, column=0)\n Button(main, text=\"5\", width=10, command=lambda: self.input_value(5)).grid(row=2, column=1)\n Button(main, text=\"6\", width=10, command=lambda: self.input_value(6)).grid(row=2, column=2)\n Button(main, text=\"1\", width=10, command=lambda: self.input_value(1)).grid(row=3, column=0)\n Button(main, text=\"2\", width=10, command=lambda: self.input_value(2)).grid(row=3, column=1)\n Button(main, text=\"3\", width=10, command=lambda: self.input_value(3)).grid(row=3, column=2)\n Button(main, text=\"0\", width=10, command=lambda: self.input_value(0)).grid(row=4, column=1)\n\n f=open(\"calc_log.txt\", \"w\")\n f.close()\n\nclass ScienceCalculator(Calculator):\n def get_sqrt(self):\n try:\n return_value=eval(self.entry_value.get())\n except SyntaxError or NameError:\n self.entry_value.delete(0, \"end\")\n self.entry_value.insert(0, 'Input Error, Press AC button')\n else:\n calc_value=math.sqrt(return_value)\n self.entry_value.delete(0, \"end\")\n self.entry_value.insert(0, calc_value)\n\n def get_pow(self):\n try:\n return_value=eval(self.entry_value.get())\n except SyntaxError or NameError:\n self.entry_value.delete(0, \"end\")\n self.entry_value.insert(0, 'Input Error, Press AC button')\n else:\n calc_value=math.pow(return_value, 2)\n 
self.entry_value.delete(0, \"end\")\n self.entry_value.insert(0, calc_value)\n\n def __init__(self, main):\n super().__init__(main)\n main.title(\"Science Calculator\")\n Button(main, text=\"√\", width=10, command=lambda :self.get_sqrt()).grid(row=5, column=0)\n Button(main, text=\"x²\", width=10, command=lambda: self.get_pow()).grid(row=5, column=1)\n\nmain = Tk()\ncalc=ScienceCalculator(main)\nmain.mainloop()","repo_name":"GitOfVitol/PythonBasic","sub_path":"OOP.py","file_name":"OOP.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5034516565","text":"import datetime\nimport re\n\nfrom pytest import mark\n\nfrom tests.factories import WalletBuilder\nfrom tests.utils import UUID_PATTERN\n\n\n# TODO: test error messages text\n\n\ndef test_create_expense(test_client, category_fixture, wallet, token_value_fixture):\n response = test_client.post(\n f'/accounting/wallets/{wallet.name}/expenses',\n json={\n 'date': '2022-03-27',\n 'amount': '100000',\n 'category': category_fixture.name,\n },\n headers={'Authorization': f'token {token_value_fixture}'}\n )\n response_data = response.json()\n\n assert response.headers['content-type'] == 'application/json'\n assert response.status_code == 201\n assert response_data['date'] == '2022-03-27'\n assert response_data['amount'] == '100000'\n assert response_data['category'] == category_fixture.name\n assert re.match(UUID_PATTERN, response_data['uuid'])\n\n\ndef test_create_expense_with_wrong_category_name(test_client, category_fixture, wallet, token_value_fixture):\n response = test_client.post(\n f'/accounting/wallets/{wallet.name}/expenses',\n json={\n 'date': '2022-03-27',\n 'amount': '100000',\n 'category': 'wrong category name'\n },\n headers={'Authorization': f'token {token_value_fixture}'}\n )\n\n assert response.headers['content-type'] == 'application/json'\n assert response.status_code == 400\n\n\ncreate_expense_data_params = (\n {'date': '2022-03-27'},\n {'amount': '1000'},\n {'date': 'wrong date', 'amount': '100'},\n {'date': '2022-03-29', 'amount': 'wrong amount'},\n)\n\n\n@mark.parametrize('data', create_expense_data_params)\ndef test_create_expense_with_missing_or_wrong_data(data, test_client, wallet, token_value_fixture):\n response = test_client.post(\n f'/accounting/wallets/{wallet.name}/expenses',\n json=data,\n headers={'Authorization': f'token {token_value_fixture}'}\n )\n assert response.status_code == 400\n\n\ndef test_create_expense_with_date_in_the_future(test_client, wallet, token_value_fixture):\n response = test_client.post(\n f'/accounting/wallets/{wallet.name}/expenses',\n json={\n 'date': (datetime.date.today() + datetime.timedelta(days=1)).isoformat(),\n 'amount': '100000'\n },\n headers={'Authorization': f'token {token_value_fixture}'}\n )\n\n assert response.status_code == 400\n\n\ndef test_create_expense_with_higher_amount_that_wallet_balance(test_client, category_fixture, wallet,\n token_value_fixture):\n wallet = WalletBuilder().balance('100_000').create()\n response = test_client.post(\n f'/accounting/wallets/{wallet.name}/expenses',\n json={\n 'date': '2022-03-27',\n 'amount': '102000',\n 'category': category_fixture.name,\n },\n headers={'Authorization': f'token {token_value_fixture}'}\n )\n response_data = response.json()\n\n assert response.status_code == 400\n assert response_data['error'] == 'expense amount must be lower than wallet balance'\n\n\ndef test_get_expense(test_client, category_fixture, wallet, 
token_value_fixture):\n post_response = test_client.post(\n f'/accounting/wallets/{wallet.name}/expenses',\n json={\n 'date': '2022-03-27',\n 'amount': '100000',\n 'category': category_fixture.name,\n },\n headers={'Authorization': f'token {token_value_fixture}'}\n )\n post_response_data = post_response.json()\n response = test_client.get(\n f'/accounting/wallets/{wallet.name}/expenses/{post_response_data[\"uuid\"]}',\n headers={'Authorization': f'token {token_value_fixture}'}\n )\n response_data = response.json()\n\n assert response.status_code == 200\n assert response.headers['content-type'] == 'application/json'\n assert response_data['date'] == post_response_data['date']\n assert response_data['amount'] == post_response_data['amount']\n assert response_data['category'] == post_response_data['category']\n assert response_data['uuid'] == post_response_data['uuid']\n\n\ndef test_get_non_existing_expense(expense_fixture, test_client, wallet, token_value_fixture):\n response = test_client.get(\n f'/accounting/wallets/{wallet.name}/expenses/1234',\n headers={'Authorization': f'token {token_value_fixture}'}\n )\n response_data = response.json()\n\n assert response.status_code == 404\n assert response.headers['content-type'] == 'application/json'\n assert response_data == {}\n\n\ndef test_get_all_expenses(test_client, token_value_fixture):\n wallet_builder = WalletBuilder() \\\n .add_expense(amount='100000') \\\n .add_expense(amount='50000') \\\n .add_expense(amount='20000')\n wallet = wallet_builder.create()\n response = test_client.get(\n f'/accounting/wallets/{wallet.name}/expenses',\n headers={'Authorization': f'token {token_value_fixture}'}\n )\n response_data = response.json()\n expected_expenses = [\n {\n 'date': expense.date.isoformat(),\n 'amount': f'{expense.amount:f}',\n 'uuid': expense.uuid,\n 'category': expense.category.name\n }\n for expense in wallet.expenses\n ]\n assert response.status_code == 200\n assert response.headers['content-type'] == 'application/json'\n assert response_data['expenses'] == expected_expenses\n","repo_name":"pity7736/odin","sub_path":"tests/unit_tests/accounting_tests/api/test_expense.py","file_name":"test_expense.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40289936381","text":"import os\nimport datetime\nimport time\nfrom typing import Optional\nimport re\n\nimport psycopg\nimport psycopg.sql as sql\nimport psycopg.types.composite as composite\nfrom psycopg.rows import namedtuple_row\n\nimport structrecon.model.types as types\nimport structrecon.model.standard_data_files as datafile\nimport structrecon.cache.populator as populator_base\n\nimport structrecon.cache.populators.bigg_populator as bigg_populator\nimport structrecon.cache.populators.chebi_populator as chebi_populator\nimport structrecon.cache.populators.ecmdb_populator as ecmdb_populator\nimport structrecon.cache.populators.metanetx_populator as metanetx_populator\nimport structrecon.cache.populators.pubchem_populator as pubchem_populator\n\n# TODO backup and restore using pg_dump\n\n# Default parameters for local database\n # (different when running in Docker than locally)\n# db_host: str = 'localhost'\n# db_port: str = '5455'\n# db_name: str = 'structrecon_db'\n# db_user: str = 'postgres'\n# db_pass: str = 'postgres'\n\n# Connect with:\n# postgres://user:pass@host:port/db_name\n\n# By default, on linux, may need to set password for 'postgres' user in postgres:\n# sudo -u postgres 
psql\n# \\password postgres\n# Set password to 'postgres'\n\nbasedir = '.'\nschema_file = f'{basedir}/structrecon/cache/database_schema.sql'\n\nmain_table_name = 'ids'\n\n# List of actual populators\npopulators: list[populator_base.Populator] = [\n bigg_populator.bigg_populator,\n chebi_populator.chebi_populator,\n ecmdb_populator.ecmdb_populator,\n metanetx_populator.mnx_populator,\n pubchem_populator.pubchem_populator\n]\n\n\n# Type info\ninfo_ref_id: composite.CompositeInfo\ninfo_ref_st: composite.CompositeInfo\n\n\n# Connect to the database, create if it does not exist\n # Requires running Postgres server\n# ======================================================================================================================\ndef connect(\n db_host: str,\n db_port: str,\n db_name: str,\n db_user: str,\n db_pass: str\n) -> psycopg.connection.Connection:\n \"\"\" Connect and create if it does not exist.\n Also create all relevant types and populators.\n \"\"\"\n\n # Connect to Server and create database if not exists\n # ==================================================================================================================\n try:\n print(f'Attempting to connect: postgres://{db_user}:{db_pass}@{db_host}:{db_port}')\n with psycopg.connect(f'postgres://{db_user}:{db_pass}@{db_host}:{db_port}') as conn:\n print(f'Connection succesful')\n\n # Attempt to create the database\n with conn.cursor() as c:\n try:\n # Create database\n conn.autocommit = True\n c.execute(\n sql.SQL(f'CREATE DATABASE {db_name}')\n )\n conn.autocommit = False\n conn.commit()\n print(f' - Database {db_name} created.')\n\n except psycopg.errors.DuplicateDatabase:\n print(f' - Database {db_name} exists.')\n # TODO check database version (in 'meta' table)\n\n except psycopg.OperationalError as e:\n print(f'Database connection failed:\\n\\t{e}')\n exit(1)\n\n\n # Connect to database\n # ==================================================================================================================\n print(f'Connecting to database: postgres://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}')\n try:\n conn = psycopg.connect(f'postgres://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}')\n c = conn.cursor()\n print(f'Connected to database {db_name}. 
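# The connect() flow above creates the database on first run and treats
# DuplicateDatabase as "already exists". A condensed sketch of that
# pattern with psycopg 3 (the DSN and database name are placeholders):
import psycopg
from psycopg import sql

def ensure_database(dsn: str, db_name: str) -> None:
    with psycopg.connect(dsn, autocommit=True) as conn:
        try:
            conn.execute(sql.SQL('CREATE DATABASE {}').format(sql.Identifier(db_name)))
        except psycopg.errors.DuplicateDatabase:
            pass  # created by an earlier run; nothing to do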
Applying schema')\n\n # Also create the populators\n c.execute(\"CREATE TABLE IF NOT EXISTS meta (field TEXT PRIMARY KEY, val INT);\")\n c.execute(\"INSERT INTO meta VALUES ('structrecon_version', 1) ON CONFLICT DO NOTHING;\")\n c.execute(\"INSERT INTO meta VALUES ('schema_version', 1) ON CONFLICT DO NOTHING;\")\n c.execute(\"CREATE TABLE IF NOT EXISTS sources (\"\n \"source TEXT PRIMARY KEY,\"\n \"last_updated TIMESTAMP\"\n \");\")\n conn.commit()\n\n # Create enumerated types for identifier types and structural types\n # ==============================================================================================================\n sqltypedef_id = sql.SQL(', ').join([\n sql.SQL(f\"'{repr_type.shortname}'\") for repr_type in types.standard_repr_types\n if isinstance(repr_type, types.IdentifierType)\n ])\n\n sqltypedef_st = sql.SQL(', ').join([\n sql.SQL(f\"'{repr_type.shortname}'\") for repr_type in types.standard_repr_types\n if isinstance(repr_type, types.StructureType)\n ])\n\n q = sql.SQL(\n \"CREATE TYPE {type_id} AS ENUM ({sqltypedef_id}, 'input');\"\n ).format(\n type_id = sql.Identifier('IDENTIFIER_TYPE'),\n sqltypedef_id = sqltypedef_id\n )\n try:\n c.execute(q)\n conn.commit()\n except psycopg.errors.DuplicateObject as e:\n print(e)\n conn.rollback()\n\n q = sql.SQL(\n \"CREATE TYPE {type_st} AS ENUM ({sqltypedef_st});\"\n ).format(\n type_st = sql.Identifier('STRUCTURE_TYPE'),\n sqltypedef_st=sqltypedef_st\n )\n try:\n c.execute(q)\n conn.commit()\n except psycopg.errors.DuplicateObject as e:\n print(e)\n conn.rollback()\n\n # Also create the combined identifier types\n q = sql.SQL(\n \"CREATE TYPE {REF_ID} AS (\"\n \"repr_type {IDENTIFIER_TYPE},\"\n \"id TEXT,\"\n \"src TEXT\"\n \");\"\n ).format(\n IDENTIFIER_TYPE = sql.Identifier('IDENTIFIER_TYPE'),\n REF_ID = sql.Identifier('REF_ID')\n )\n try:\n c.execute(q)\n conn.commit()\n except psycopg.errors.DuplicateObject as e:\n print(e)\n conn.rollback()\n\n q = sql.SQL(\n \"CREATE TYPE {REF_ST} AS (\"\n \"repr_type {STRUCTURE_TYPE},\"\n \"id TEXT,\"\n \"src TEXT\"\n \");\"\n ).format(\n STRUCTURE_TYPE=sql.Identifier('STRUCTURE_TYPE'),\n REF_ST = sql.Identifier('REF_ST')\n )\n try:\n c.execute(q)\n conn.commit()\n except psycopg.errors.DuplicateObject as e:\n print(e)\n conn.rollback()\n\n # Create main table\n # ==============================================================================================================\n q = sql.SQL(\n \"CREATE TABLE IF NOT EXISTS {main_table_name} (\"\n \"repr_type {IDENTIFIER_TYPE},\"\n \"id TEXT,\"\n \"refs {REF_ID} ARRAY,\"\n \"structs {REF_ST} ARRAY,\"\n \"PRIMARY KEY (repr_type, id)\"\n \");\"\n ).format(\n main_table_name = sql.Identifier(main_table_name),\n IDENTIFIER_TYPE = sql.Identifier('IDENTIFIER_TYPE'),\n REF_ID = sql.Identifier('REF_ID'),\n REF_ST = sql.Identifier('REF_ST')\n )\n c.execute(q)\n conn.commit()\n\n print(f'Schema applied.')\n conn.commit()\n\n except psycopg.OperationalError as e:\n print(f'Database connection failed:\\n\\t{e}')\n exit(1)\n\n # TODO return connection to the database\n return conn\n\n\n# Source table getters and setters\n# ======================================================================================================================\n\ndef get_source(conn: psycopg.connection.Connection, datafile: datafile.database_filetype) -> Optional[int]:\n # Return time last updated, or None if not present\n c = conn.cursor()\n\n q = sql.SQL(\n \"SELECT *\"\n \"FROM {table_sources}\"\n \"WHERE {col_source} = {source}\"\n ).format(\n table_sources = 
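# The module declares CompositeInfo globals (info_ref_id / info_ref_st)
# but never registers them; a sketch of how psycopg 3 could map the
# REF_ID/REF_ST composites to Python tuples instead of raw
# '(type,id,src)' strings. The DDL above creates the names quoted in
# uppercase, so the lookup here quotes them too (an assumption about how
# TypeInfo.fetch resolves case-sensitive names):
from psycopg.types.composite import CompositeInfo, register_composite

def register_ref_types(conn) -> None:
    for type_name in ('"REF_ID"', '"REF_ST"'):
        info = CompositeInfo.fetch(conn, type_name)
        if info is not None:
            register_composite(info, conn)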
sql.Identifier('sources'),\n col_source = sql.Identifier('source'),\n source = datafile.name\n )\n\n print(f' - Checking precense of {datafile.name} in ID table...')\n c.execute(q)\n result = c.fetchone()\n\n if result is None:\n print(f' {datafile.name} not present, inserting.')\n return None\n else:\n print(f' {datafile.name} present, last updated {result[1]}. Skipping.')\n return result[1]\n\n\ndef update_source(conn: psycopg.connection.Connection, datafile: datafile.database_filetype):\n # TODO\n # Update the corresponding entry in the sources table\n c = conn.cursor()\n\n q = sql.SQL(\n \"INSERT INTO {table_sources} \"\n \"VALUES ({source}, {last_updated})\"\n \"ON CONFLICT ({col_source})\"\n \" DO UPDATE SET {col_last_updated} = excluded.{col_last_updated}\"\n ).format(\n table_sources = sql.Identifier('sources'),\n col_source = sql.Identifier('source'),\n col_last_updated = sql.Identifier('last_updated'),\n source = datafile.name,\n last_updated = 'now'\n )\n\n print(f' Datafile {datafile.name} processed, inserting into sources table.')\n c.execute(q)\n conn.commit()\n print(f' - Source table update successful.')\n\n\n# Database interactions\n # These are done using just a cursor created by the populate script\n# ======================================================================================================================\n\n# Database output parser regex\n # Format: '(inchi,InChI=1S/C6H8O7/c7-3(8)1-6(13,5(11)12)2-4(9)10/h13H,1-2H2,(H,7,8)(H,9,10)(H,11,12)/p-3,ChEBI)'\n # '([TYPE],id,[SOURCE])\n # Get list of type strs and source strs\n\ntype_strs_re = [t.shortname for t in types.standard_repr_types]\nsrc_strs_re = ['BiGG', 'ChEBI', 'mnx depr', 'PubChem', 'MetaNetX', 'ECMDB']\nre_result_tuple = re.compile(\n r\"^\\((\" + r'|'.join(type_strs_re) + r\"),(\\S*),(\" + r'|'.join(src_strs_re) + r\")\\)$\"\n)\n\ndef add_cache_item(\n c: psycopg.Cursor,\n repr_type: types.IdentifierType,\n id: str\n) -> psycopg.Cursor:\n \"\"\" Add an item with no references. Assumes standardised ids\n \"\"\"\n\n # Ensure standardisation\n id = repr_type.stdfunc(id)\n\n q = sql.SQL(\n \"INSERT INTO {table_ids} ({col_repr_type}, {col_id}) \"\n \"VALUES ({repr_type}, {id})\"\n \"ON CONFLICT DO NOTHING;\"\n ).format(\n table_ids = sql.Identifier(main_table_name),\n col_repr_type = sql.Identifier('repr_type'),\n col_id = sql.Identifier('id'),\n repr_type = repr_type.shortname,\n id = id\n )\n c.execute(q)\n return c\n\n\ndef add_cache_item_with_connections(\n c: psycopg.Cursor,\n source: str,\n item: tuple[types.IdentifierType, str],\n refs: list[tuple[types.IdentifierType, str]] = None,\n structs: list[tuple[types.StructureType, str]] = None,\n oneway: bool = False\n) -> psycopg.Cursor:\n \"\"\" Add a 'row' from any database file. 
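# The upsert in update_source() relies on INSERT ... ON CONFLICT DO UPDATE.
# The same pattern sketched with bound parameters instead of string
# composition (table layout as created above: source TEXT PRIMARY KEY,
# last_updated TIMESTAMP):
import psycopg

def touch_source(conn: psycopg.Connection, source_name: str) -> None:
    with conn.cursor() as cur:
        cur.execute(
            "INSERT INTO sources (source, last_updated) VALUES (%s, now()) "
            "ON CONFLICT (source) DO UPDATE SET last_updated = excluded.last_updated;",
            (source_name,),
        )
    conn.commit()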
Note that all connections will be two-way.\n If the item already exist, just add the connections.\n The source is the string representing the source of these edges.\n If oneway enabled, do not attempt to add back-edges\n \"\"\"\n\n # Ensure standardised identifiers\n item = (item[0], item[0].stdfunc(item[1]))\n std_refs: list[tuple[types.IdentifierType, str]]\n if refs:\n std_refs = [\n (ref[0], ref[0].stdfunc(ref[1])) for ref in refs\n ]\n else:\n std_refs = []\n\n # Construct refs and structure SQLs from the parameters\n if refs:\n sql_refs_list = sql.SQL('ARRAY [') + sql.SQL(', ').join(\n [\n sql.SQL('(') +\n sql.SQL('{ref_list_type}, {ref_list_id}, {source}').format(\n ref_list_type = ref[0].shortname,\n ref_list_id = ref[1].strip(),\n source = source\n ) +\n sql.SQL(')::{REF_ID}').format(\n REF_ID = sql.Identifier('REF_ID')\n )\n for ref in std_refs\n ]\n ) + sql.SQL(']::{REF_ID}[]').format(\n REF_ID = sql.Identifier('REF_ID')\n )\n else:\n sql_refs_list = sql.SQL('ARRAY[]::{REF_ID}[]').format(\n REF_ID = sql.Identifier('REF_ID')\n )\n\n if structs:\n sql_structs_list = sql.SQL('ARRAY [') + sql.SQL(', ').join(\n [\n sql.SQL('(') +\n sql.SQL('{struct_list_type}, {struct_list_id}, {source}').format(\n struct_list_type = struct[0].shortname,\n struct_list_id = struct[1].strip(),\n source = source\n ) +\n sql.SQL(')::{REF_ST}').format(\n REF_ST = sql.Identifier('REF_ST')\n )\n for struct in structs\n ]\n ) + sql.SQL(']::{REF_ST}[]').format(\n REF_ST = sql.Identifier('REF_ST')\n )\n else:\n sql_structs_list = sql.SQL('ARRAY[]::{REF_ST}[]').format(\n REF_ST = sql.Identifier('REF_ST')\n )\n\n # Insert the primary item with references\n # If the primary item already exists, then add references and structures to the corresponding lists\n q = sql.SQL(\n \"INSERT INTO {table_ids}\"\n \"VALUES ({repr_type}, {id}, {sql_refs_list}, {sql_structs_list})\"\n \"ON CONFLICT ({col_repr_type}, {col_id}) DO UPDATE\"\n \" SET {col_refs} = {table_ids}.{col_refs} || excluded.{col_refs},\"\n \" {col_structs} = {table_ids}.{col_structs} || excluded.{col_structs};\"\n ).format(\n table_ids = sql.Identifier(main_table_name),\n col_repr_type = sql.Identifier('repr_type'),\n col_id = sql.Identifier('id'),\n col_refs = sql.Identifier('refs'),\n col_structs = sql.Identifier('structs'),\n repr_type = item[0].shortname,\n id = item[1].strip(),\n sql_refs_list = sql_refs_list,\n sql_structs_list = sql_structs_list\n )\n c.execute(q)\n\n # Process the referenced items. 
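# The ARRAY[...]::REF_ID[] expressions above are composed from plain
# strings; a sketch of the same construction using sql.Literal so the
# driver escapes the values (the type_name default is an assumption):
from psycopg import sql

def composite_array(rows, type_name: str = 'REF_ID'):
    t = sql.Identifier(type_name)
    if not rows:
        return sql.SQL('ARRAY[]::{t}[]').format(t=t)
    elements = sql.SQL(', ').join(
        sql.SQL('({}, {}, {})::{}').format(
            sql.Literal(repr_type), sql.Literal(ident), sql.Literal(src), t)
        for repr_type, ident, src in rows
    )
    return sql.SQL('ARRAY[') + elements + sql.SQL(']::{t}[]').format(t=t)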
If they don't exist, insert them with a reference to the primary item.\n # If they do exist, update with a reference to the primary item.\n if refs:\n for ref in std_refs:\n # TODO\n q = sql.SQL(\n \"INSERT INTO {table_ids}\"\n \"VALUES ({ref_repr_type}, {ref_id}, {sql_back_ref}, {empty_struct_list})\"\n \"ON CONFLICT ({col_repr_type}, {col_id}) DO UPDATE\"\n \" SET {col_refs} = {table_ids}.{col_refs} || {sql_back_ref};\"\n ).format(\n table_ids = sql.Identifier(main_table_name),\n col_repr_type = sql.Identifier('repr_type'),\n col_id = sql.Identifier('id'),\n col_refs = sql.Identifier('refs'),\n ref_repr_type = ref[0].shortname,\n ref_id = ref[1].strip(),\n\n # Insert empty list by default\n empty_struct_list = sql.SQL('ARRAY[]::{REF_ST}[]').format(\n REF_ST = sql.Identifier('REF_ST')\n ),\n\n # Also construct back-reference (single-element REF_ID array)\n sql_back_ref = sql.SQL(\n \"ARRAY [({primary_item_type}, {primary_item_id}, {source})]::{REF_ID}[]\"\n ).format(\n REF_ID = sql.Identifier('REF_ID'),\n primary_item_type = item[0].shortname,\n primary_item_id = item[1].strip(),\n source = source\n )\n )\n c.execute(q)\n\n return c\n\n\n# Getters\n# ======================================================================================================================\n\ndef get_connections(\n conn: psycopg.Connection,\n sources: list[(types.IdentifierType, str)]\n) -> dict[(types.IdentifierType, str), list[(types.RepresentationType, str, str)]]:\n \"\"\" Given a list of source IDs, for each ID return a list of connected IDs and the\n name of the associated edge.\n \"\"\"\n\n # Standardise sources\n sources_str = [\n (src[0], src[0].stdfunc(src[1])) for src in sources\n ]\n\n # Create cursor with namedtuple row factory\n c = conn.cursor(row_factory = namedtuple_row)\n\n # First construct the list of sources for the query\n sql_sources_list = sql.SQL('(') + sql.SQL(', ').join(\n [\n sql.SQL(\"({source_repr_type}, {source_id})\").format(\n source_repr_type = source_repr_type.shortname,\n source_id = source_id.strip()\n )\n for source_repr_type, source_id in sources\n ]\n ) + sql.SQL(')')\n\n # Main query\n q = sql.SQL(\n \"SELECT * \"\n \"FROM {table_ids} \"\n \"WHERE ({col_repr_type}, {col_id}) IN {sql_sources_list};\"\n ).format(\n table_ids = sql.Identifier('ids'),\n col_repr_type = sql.Identifier('repr_type'),\n col_id = sql.Identifier('id'),\n col_refs = sql.Identifier('refs'),\n col_structs = sql.Identifier('structs'),\n sql_sources_list = sql_sources_list\n )\n\n c.execute(q)\n result = c.fetchall()\n\n # Create map of result\n result_map: dict[(types.IdentifierType, str), list[(types.RepresentationType, str, str)]] = dict()\n\n result: psycopg.cursor.Row\n for result in result:\n from_tuple = (types.string_type_map[result.repr_type], result.id)\n result_map[from_tuple] = []\n\n # Process references\n refs = result.refs[1:-1]\n refs_list: list[str] = [\n s.strip('\"').replace('\\\\\"', '') for s in refs.split('\",\"')\n ]\n structs = result.structs[1:-1]\n structs_list: list[str] = [\n s.strip('\"').replace('\\\\\"', '') for s in structs.split('\",\"')\n ]\n\n # Match with the regex\n re_matches = [\n re.search(re_result_tuple, ref_str) for ref_str in refs_list + structs_list\n ]\n\n # Encode matches in the fields\n for match in re_matches:\n if match is None:\n continue\n result_map[from_tuple].append(\n (\n types.get_type_from_str(match.group(1)),\n match.group(2),\n match.group(3)\n )\n )\n\n return result_map\n\n\n# Population script\n# 
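# get_connections() below splits the composite-array text and matches each
# '(type,id,source)' tuple with re_result_tuple. A self-contained check of
# that round trip, using an equivalent simplified pattern with the same
# three capture groups:
import re

pattern = re.compile(r'^\((\w+),(\S*),([\w ]+)\)$')
m = pattern.match('(inchi,InChI=1S/CH4/h1H4,ChEBI)')
assert m is not None
assert m.groups() == ('inchi', 'InChI=1S/CH4/h1H4', 'ChEBI')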
======================================================================================================================\ndef populate(\n data_files_dir: str,\n conn: psycopg.connection.Connection,\n limit: int = 10e10 # Max entries per file to load into the database\n # Call with a lower limit to debug\n):\n \"\"\" Populate the database given files in the directory.\n \"\"\"\n print(f'Populating database from directory: {data_files_dir}')\n\n # Check present sources\n print(f'Sources present:')\n c = conn.cursor()\n c.execute(\n sql.SQL(\n \"SELECT * FROM {table_sources}\"\n ).format(\n table_sources = sql.Identifier('sources')\n )\n )\n results = c.fetchall()\n for row in results:\n print(f' - {row[0]}\\t{row[1]}')\n\n # Check files\n if os.path.isdir(data_files_dir):\n print(f\"Database directory '{data_files_dir}' exists. Found database files:\")\n for fname in os.listdir(data_files_dir):\n if fname in datafile.fname_datafile_map.keys():\n print(f' - {fname} [recognised: {datafile.fname_datafile_map[fname].name}]')\n datafile.fname_datafile_map[fname].fname_actual = fname # Set the actual filename as found in this step.\n else:\n print(f' - {fname} (NOT RECOGNISED)')\n\n\n # Populate the identifier table\n # Use populator functions in the 'populators' directory\n for populator in populators:\n print(f'* Starting populator: {populator.name}')\n populator.populate(\n conn,\n [f'{data_files_dir}/{data_file.fname_actual}' for data_file in populator.datafiles],\n limit\n )\n conn.commit()\n\n\n\n\n\n\n\n\n# Delete script\n# ======================================================================================================================\n\ndef cache_delete(\n db_host: str,\n db_port: str,\n db_name: str,\n db_user: str,\n db_pass: str,\n force: bool\n):\n \"\"\" Delete the database, should primarily be used for debugging.\n Apply the --force command-line parameter to ignore connected sessions.\n \"\"\"\n try:\n with psycopg.connect(\n f'postgres://{db_user}:{db_pass}@{db_host}:{db_port}'\n ) as conn:\n conn.autocommit = True\n with conn.cursor() as c:\n\n # List databases\n c.execute(\"SELECT datname FROM pg_database WHERE datistemplate = false;\")\n print(f'Databases found on server:')\n rows = c.fetchall()\n for row in rows:\n print(f' - {row[0]}')\n\n if db_name not in {row[0] for row in rows}:\n print(f'Database {db_name} does not exist.')\n return\n\n # Delete\n print(f'Deleting structrecon_db...')\n if force:\n c.execute(f\"DROP DATABASE {db_name} (FORCE);\")\n else:\n c.execute(f\"DROP DATABASE {db_name};\")\n\n # List again\n print(f'Databases found on server:')\n c.execute(\"SELECT datname FROM pg_database WHERE datistemplate = false;\")\n rows = c.fetchall()\n for row in rows:\n print(f' - {row[0]}')\n\n except psycopg.OperationalError as e:\n print(f'Database connection failed:\\n\\t{e}')\n exit(1)\n\n\n# Misc\n# ======================================================================================================================\n\n# Progress bar\ndef print_prog_bar(n: int, n_total: int, t_0):\n n = max(n, 1)\n width = 20\n percent = f'{100 * (n/n_total):5.1f}%'\n filled_chars = int(width * n // n_total)\n bar = '[' + '='*filled_chars + ' '*(width-filled_chars) + ']'\n t = time.time() - t_0\n eta = (n_total/n) * t - t\n\n if n == n_total:\n print(f'\\r {percent} {bar} ({n}/{n_total}) Done.')\n else:\n print(f'\\r {percent} {bar} ({n}/{n_total}) est. 
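# cache_delete() above uses DROP DATABASE ... (FORCE), which first
# terminates other sessions (PostgreSQL 13+) and must run outside a
# transaction block. A sketch with a safely quoted identifier:
import psycopg
from psycopg import sql

def drop_database(admin_conn: psycopg.Connection, name: str, force: bool = False) -> None:
    admin_conn.autocommit = True  # DROP DATABASE cannot run inside a transaction
    stmt = 'DROP DATABASE {} WITH (FORCE)' if force else 'DROP DATABASE {}'
    admin_conn.execute(sql.SQL(stmt).format(sql.Identifier(name)))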
{str(datetime.timedelta(seconds = round(eta)))}', end = \"\")\n\n","repo_name":"casbjorn/structrecon","sub_path":"structrecon/cache/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":22770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73435498083","text":"import keras.backend as K\nimport tensorflow as tf\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.layers import Input, Dense, concatenate, Lambda\nfrom keras.models import Model\nfrom keras.utils import plot_model\n\nfrom config import img_size, channel, embedding_size\n\n\ndef build_model():\n base_model = InceptionResNetV2(include_top=False, weights='imagenet', input_shape=(img_size, img_size, channel),\n pooling='avg')\n image_input = base_model.input\n x = base_model.layers[-1].output\n out = Dense(embedding_size)(x)\n image_embedder = Model(image_input, out)\n\n input_a = Input((img_size, img_size, channel), name='anchor')\n input_p = Input((img_size, img_size, channel), name='positive')\n input_n = Input((img_size, img_size, channel), name='negative')\n\n normalize = Lambda(lambda x: K.l2_normalize(x, axis=-1), name='normalize')\n\n x = image_embedder(input_a)\n output_a = normalize(x)\n x = image_embedder(input_p)\n output_p = normalize(x)\n x = image_embedder(input_n)\n output_n = normalize(x)\n\n merged_vector = concatenate([output_a, output_p, output_n], axis=-1)\n\n model = Model(inputs=[input_a, input_p, input_n],\n outputs=merged_vector)\n return model\n\n\nif __name__ == '__main__':\n with tf.device(\"/cpu:0\"):\n model = build_model()\n print(model.summary())\n plot_model(model, to_file='model.svg', show_layer_names=True, show_shapes=True)\n\n K.clear_session()\n","repo_name":"foamliu/FaceNet","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"54"} +{"seq_id":"72312664160","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nfrom distutils import sysconfig\n\nimport contextlib\nimport os\nimport subprocess\nimport tempfile\nfrom joblib import Parallel, delayed\n\nCXX_COMPILER = sysconfig.get_config_var('CXX')\n\nEVALUATE_FN_NAME = \"evaluate\"\nALWAYS_INLINE = \"__attribute__((__always_inline__))\"\n\n\nclass CodeGenerator(object):\n def __init__(self):\n self._file = tempfile.NamedTemporaryFile(prefix='compiledtrees_', suffix='.cpp', delete=True)\n self._indent = 0\n\n @property\n def file(self):\n self._file.flush()\n return self._file\n\n def write(self, line):\n self._file.write(\" \" * self._indent + line + \"\\n\")\n\n @contextlib.contextmanager\n def bracketed(self, preamble, postamble):\n assert self._indent >= 0\n self.write(preamble)\n self._indent += 1\n yield\n self._indent -= 1\n self.write(postamble)\n\n\ndef code_gen_tree(tree, evaluate_fn=EVALUATE_FN_NAME, gen=None):\n \"\"\"\n Generates C code representing the evaluation of a tree.\n\n Writes code similar to:\n ```\n extern \"C\" {\n __attribute__((__always_inline__)) float evaluate(float* f) {\n if (f[9] <= 0.175931170583) {\n return 0.0;\n }\n else {\n return 1.0;\n }\n }\n }\n ```\n\n to the given CodeGenerator object.\n \"\"\"\n if gen is None:\n gen = CodeGenerator()\n\n def recur(node):\n if tree.children_left[node] == -1:\n assert tree.value[node].size == 1\n gen.write(\"return 
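# The FaceNet-style model in the previous record concatenates anchor,
# positive and negative embeddings into one output vector; a minimal
# sketch of the matching triplet loss that splits the merged vector back
# apart (margin and embedding_size values are assumptions):
import keras.backend as K

def triplet_loss(margin: float = 0.2, embedding_size: int = 128):
    def loss(y_true, y_pred):
        a = y_pred[:, :embedding_size]
        p = y_pred[:, embedding_size:2 * embedding_size]
        n = y_pred[:, 2 * embedding_size:]
        pos = K.sum(K.square(a - p), axis=-1)  # squared anchor-positive distance
        neg = K.sum(K.square(a - n), axis=-1)  # squared anchor-negative distance
        return K.maximum(pos - neg + margin, 0.0)
    return loss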
{0}f;\".format(tree.value[node].item()))\n return\n\n branch = \"if (f[{feature}] <= {threshold}f) {{\".format(\n feature=tree.feature[node],\n threshold=tree.threshold[node])\n with gen.bracketed(branch, \"}\"):\n recur(tree.children_left[node])\n\n with gen.bracketed(\"else {\", \"}\"):\n recur(tree.children_right[node])\n\n with gen.bracketed('extern \"C\" {', \"}\"):\n fn_decl = \"{inline} float {name}(float* f) {{\".format(\n inline=ALWAYS_INLINE,\n name=evaluate_fn)\n with gen.bracketed(fn_decl, \"}\"):\n recur(0)\n return gen.file\n\ndef _gen_tree(i, tree):\n \"\"\"\n Generates cpp code for i'th tree.\n Moved out of code_gen_ensemble scope for parallelization.\n \"\"\"\n name = \"{name}_{index}\".format(name=EVALUATE_FN_NAME, index=i)\n gen_tree = CodeGenerator()\n return code_gen_tree(tree, name, gen_tree)\n\ndef code_gen_ensemble(trees, individual_learner_weight, initial_value,\n gen=None, n_jobs=1):\n \"\"\"\n Writes code similar to:\n\n ```\n extern \"C\" {\n __attribute__((__always_inline__)) float evaluate_partial_0(float* f) {\n if (f[4] <= 0.662200987339) {\n return 1.0;\n }\n else {\n if (f[8] <= 0.804652512074) {\n return 0.0;\n }\n else {\n return 1.0;\n }\n }\n }\n }\n extern \"C\" {\n __attribute__((__always_inline__)) float evaluate_partial_1(float* f) {\n if (f[4] <= 0.694428026676) {\n return 1.0;\n }\n else {\n if (f[7] <= 0.4402526021) {\n return 1.0;\n }\n else {\n return 0.0;\n }\n }\n }\n }\n\n extern \"C\" {\n float evaluate(float* f) {\n float result = 0.0;\n result += evaluate_partial_0(f) * 0.1;\n result += evaluate_partial_1(f) * 0.1;\n return result;\n }\n }\n ```\n\n to the given CodeGenerator object.\n \"\"\"\n\n if gen is None:\n gen = CodeGenerator()\n\n tree_files =[_gen_tree(i, tree) for i, tree in enumerate(trees)]\n\n with gen.bracketed('extern \"C\" {', \"}\"):\n # add dummy definitions if you will compile in parallel\n for i, tree in enumerate(trees):\n name = \"{name}_{index}\".format(name=EVALUATE_FN_NAME, index=i)\n gen.write(\"float {name}(float* f);\".format(name=name))\n\n fn_decl = \"float {name}(float* f) {{\".format(name=EVALUATE_FN_NAME)\n with gen.bracketed(fn_decl, \"}\"):\n gen.write(\"float result = {0}f;\".format(initial_value))\n for i, _ in enumerate(trees):\n increment = \"result += {name}_{index}(f) * {weight}f;\".format(\n name=EVALUATE_FN_NAME,\n index=i,\n weight=individual_learner_weight)\n gen.write(increment)\n gen.write(\"return result;\")\n return tree_files + [gen.file]\n\ndef _compile(cpp_f):\n o_f = tempfile.NamedTemporaryFile(prefix='compiledtrees_', suffix='.o', delete=True)\n _call([CXX_COMPILER, cpp_f, \"-c\", \"-fPIC\", \"-o\", o_f.name, \"-O3\"])\n return o_f\n\ndef _call(args):\n DEVNULL = open(os.devnull, 'w')\n subprocess.check_call(\" \".join(args),\n shell=True, stdout=DEVNULL, stderr=DEVNULL)\n\ndef compile_code_to_object(files, n_jobs=1):\n # if ther is a single file then create single element list\n # unicode for filename; name attribute for file-like objects\n if type(files) is unicode or hasattr(files, 'name'):\n files = [files]\n\n so_f = tempfile.NamedTemporaryFile(prefix='compiledtrees_', suffix='.so', delete=True)\n o_files = Parallel(n_jobs=n_jobs, backend='threading')(delayed(_compile)(f.name) for f in files)\n # link trees\n _call([CXX_COMPILER, \"-shared\"] + [f.name for f in o_files] + [\"-fPIC\",\n \"-flto\", \"-o\", so_f.name, \"-O3\"])\n return 
so_f\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/ajtulloch/sklearn-compiledtrees/compiledtrees/code_gen.py","file_name":"code_gen.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2427419735","text":"import sys\nimport time\n\nimport ignite\nimport torchvision\nimport torch\n\nimport kernel1cf7569c78 as dataset\nimport kernel73ed4cc67f as wheat_detector\n\n\ndef evaluate(train_engine, test_engine, train_loader, test_loader):\n print(f\"Evaluating\\n-----------------------------------------\")\n test_engine.run(test_loader)\n test_metrics = test_engine.state.metrics\n test_engine.run(train_loader)\n train_metrics = test_engine.state.metrics\n\n for name, value in train_metrics.items():\n print(f\"{name} {value}/{test_metrics[name]}\")\n\n\nclass WheatCriterion:\n def __init__(self, pixel_criterion, size_criterion, offset_criterion,\n sign_criterion):\n self.pixel_criterion = pixel_criterion\n self.size_criterion = size_criterion\n self.offset_criterion = offset_criterion\n self.sign_criterion = sign_criterion\n\n def __call__(self, predicted, target):\n pixel_loss = self.pixel_criterion(predicted[0], target[0])\n box_w_loss = self.size_criterion(predicted[1], target[1])\n box_h_loss = self.size_criterion(predicted[2], target[2])\n box_w_sign_loss = self.sign_criterion(predicted[3], target[3])\n box_h_sign_loss = self.sign_criterion(predicted[4], target[4])\n\n offset_w_loss = self.size_criterion(predicted[5], target[5])\n offset_h_loss = self.size_criterion(predicted[6], target[6])\n offset_w_sign_loss = self.sign_criterion(predicted[7], target[7])\n offset_h_sign_loss = self.sign_criterion(predicted[8], target[8])\n\n loss = (pixel_loss + box_w_loss + box_h_loss + box_w_sign_loss +\n box_h_sign_loss + offset_w_loss + offset_h_loss +\n offset_w_sign_loss + offset_h_sign_loss)\n\n if torch.isnan(loss):\n raise Exception(\"NaN LOSS\")\n\n return loss\n\n\ndef prepare_batch(batch, device, non_blocking):\n img, pixel_class, box_class, box_sign, offset_class, offset_sign = batch\n\n img = img.float().to(device)\n\n pixel_class = pixel_class[:, 0, :, :].long().to(device)\n\n box_height_class = box_class[:, 0, :, :].long().to(device)\n box_width_class = box_class[:, 1, :, :].long().to(device)\n box_height_sign_class = box_sign[:, 0, :, :].long().to(device)\n box_width_sign_class = box_sign[:, 1, :, :].long().to(device)\n\n box_h_offset_class = offset_class[:, 0, :, :].long().to(device)\n box_w_offset_class = offset_class[:, 1, :, :].long().to(device)\n box_h_offset_sign_class = offset_sign[:, 0, :, :].long().to(device)\n box_w_offset_sign_class = offset_sign[:, 1, :, :].long().to(device)\n\n return img, (pixel_class, box_width_class, box_height_class,\n box_width_sign_class, box_height_sign_class,\n box_w_offset_class, box_h_offset_class,\n box_w_offset_sign_class, box_h_offset_sign_class)\n\n\ndef main():\n if True and torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n print(device)\n\n raw_bboxes = dataset.read_bboxes(\"../input/global-wheat-detection/train.csv\")\n mean, std = dataset.bbox_stats(raw_bboxes)\n\n train_loader, validate_loader = dataset.get_wheat_loaders(\n \"../input/global-wheat-detection/train\",\n batch_size=4,\n split=0.9,\n num_workers=4,\n encodings_path=\"../input/wheatclasses/train_classes_half_size\",\n raw_bbox_path=\"../input/global-wheat-detection/train.csv\",\n gen_encodings=False,\n mean=mean,\n 
std=std)\n\n pixel_criterion = torch.nn.CrossEntropyLoss()\n size_criterion = torch.nn.CrossEntropyLoss()\n offset_criterion = torch.nn.CrossEntropyLoss()\n sign_criterion = torch.nn.CrossEntropyLoss()\n\n wheat_criterion = WheatCriterion(pixel_criterion, size_criterion,\n offset_criterion, sign_criterion)\n\n wheat_net = wheat_detector.WheatDetector()\n wheat_net.load_state_dict(torch.load(\"../input/boxclasses295/wheat_detector_model_265_295.pth\", map_location=device))\n\n optimizer = torch.optim.Adam(wheat_net.parameters(), lr=0.0001)\n\n trainer = ignite.engine.create_supervised_trainer(\n wheat_net,\n optimizer,\n wheat_criterion,\n device,\n prepare_batch=prepare_batch)\n\n metrics = {\"loss\": ignite.metrics.Loss(wheat_criterion, device=device)}\n evaluator = ignite.engine.create_supervised_evaluator(\n wheat_net, metrics, device, prepare_batch=prepare_batch)\n trainer.add_event_handler(\n ignite.engine.Events.EPOCH_COMPLETED(every=4),\n ignite.handlers.ModelCheckpoint('/kaggle/working/',\n 'wheat_detector',\n n_saved=4,\n require_empty=False,\n create_dir=True), {'model': wheat_net})\n trainer.add_event_handler(ignite.engine.Events.EPOCH_COMPLETED(every=4),\n evaluate, evaluator, train_loader,\n validate_loader)\n\n trainer.run(train_loader, max_epochs=8)\n #evaluate(trainer, evaluator, train_loader, validate_loader)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/global-wheat-detection/cosminga/kernel4ad6778a3d.py","file_name":"kernel4ad6778a3d.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"18234287442","text":"# Digital clock using python\n\nimport time\nimport datetime as dt\nimport turtle\n\n# using two turtle to create this clock.\n\t\n# create a turtle to display time\nt = turtle.Turtle()\n\n# create a turtle to create rectangle box\nt1 = turtle.Turtle()\n\n# create screen\nsrc = turtle.Screen()\n\n# set background color of the screen\nsrc.bgcolor(\"light green\")\n\n# we are using system time to display time\n# obtain current hour, minute and second\n# from the system\nsec = dt.datetime.now().second\nmin = dt.datetime.now().minute\nhr = dt.datetime.now().hour\nt1.pensize(3)\nt1.color('black')\nt1.penup()\n\n# set the position of turtle\nt1.goto(-110, 0)\nt1.pendown()\n\n# create rectangular box\nfor i in range(2):\n\tt1.forward(300)\n\tt1.left(90)\n\tt1.forward(90)\n\tt1.left(90)\n\t\n# hide the turtle\nt1.hideturtle()\n\nt.goto(-80,0)\n\n# displaying time in 24 hour format\n# running the while loop to display time while the screen is open\nwhile True:\n\tt.hideturtle()\n\tt.clear()\n\n\n\n\t# display the time\n\tt.write(str(hr).zfill(2)\n\t\t\t+\":\"+str(min).zfill(2)+\":\"\n\t\t\t+str(sec).zfill(2),\n\t\t\tfont =(\"Arial Narrow\", 50, \"bold\"))\n\ttime.sleep(1)\n\tsec+= 1\n\t\n\tif sec == 60:\n\t\tsec = 0\n\t\tmin+= 1\n\t\n\tif min == 60:\n\t\tmin = 0\n\t\thr+= 1\n\t\n\tif hr == 13:\n\t\thr = 1\n","repo_name":"rohitaswchoudhary/fun_with_python","sub_path":"Turtle/digital_clock.py","file_name":"digital_clock.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"69982248481","text":"from absl import logging\nimport numpy as np\nimport random\nimport time\nfrom src import interface, environment, heuristic, simulate\nfrom typing import Tuple, List, Optional\n\n\nclass 
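# The clock record above advances its own hour/minute/second counters with
# sleep(1), which drifts over a long run; an alternative sketch that
# re-reads the system clock each frame instead:
import datetime as dt

def current_hms() -> str:
    now = dt.datetime.now()
    return f'{now.hour:02d}:{now.minute:02d}:{now.second:02d}'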
RandomSelect(interface.Agent):\n \"\"\"Randomly select an action\n \"\"\"\n\n def act(self, state: np.ndarray, actions: List) -> Tuple:\n return random.choice(actions)\n\n def __str__(self):\n return 'RandomSelect()'\n\n\nclass GreedyMinimumCost(interface.Agent):\n \"\"\"Randomly select one if there are multiple actions with minimum cost\n\n this is a simplified version of Minimax(max_depth=1)\n \"\"\"\n\n def __init__(\n self,\n env: interface.Environment = environment.SubtractionRule(),\n ):\n self.env = env\n\n def act(self, state: np.ndarray, actions: List) -> Tuple:\n costs = []\n for action in actions:\n _, cost, _ = self.env.step(state, action)\n costs.append(cost)\n # if there are multiple actions with minimum cost, randomly select one\n min_cost = min(costs)\n min_cost_actions = [action for action, cost in zip(actions, costs) if cost == min_cost]\n return random.choice(min_cost_actions)\n\n def __str__(self):\n return 'GreedyMinimumCost(env={})'.format(self.env)\n\n\nclass Minimax(interface.Agent):\n \"\"\"minimax with alpha-beta pruning\n \"\"\"\n\n # TODO(shawn): add iterative deepening when time is enough\n\n class Node:\n def __init__(self, state: np.ndarray, depth: int, action: Tuple = None):\n self.state = state # state of this node\n self.depth = depth # depth of this node\n self.action = action # action to reach this node\n self.value = None # value (cost backpropagated by minimax) of this node\n self.path_cost = None # path cost (cost from root to this node) of this node\n self.children = [] # children of this node\n\n def __init__(\n self,\n max_depth: int = 3,\n heuristic: interface.Estimator = heuristic.Zero(),\n env: interface.Environment = environment.SubtractionRule(),\n epsilon_greedy: float = 0.0,\n ) -> None:\n \"\"\"\n Args:\n max_depth: max depth to search\n heuristic: heuristic function\n env: game rule\n \"\"\"\n self.max_depth = max_depth\n self.env = env\n self.heuristic = heuristic\n self.epsilon_greedy = epsilon_greedy\n\n def _minimax(\n self,\n node: Node,\n max_depth: int,\n alpha: float = -np.inf,\n beta: float = np.inf\n ) -> None:\n \"\"\"a DFS alpha beta pruning method\n\n if node.depth is even, it's player's turn (do min)\n if node.depth is odd, it's opponent's turn (do max)\n\n Args:\n node: current node\n max_depth: max depth to search\n alpha: alpha value\n beta: beta value\n\n Returns:\n None, value is saved to node.value\n \"\"\"\n # if terminal node\n if self.env.is_terminal(node.state):\n node.value = node.path_cost\n return\n # if reaches max_depth\n if node.depth == max_depth:\n h = self.heuristic.estimate(node.state)\n # NOTE(shawn): h is in the perspective of player\n # if the next step is player's turn (node.depth is even), h is positive\n node.value = h if node.depth % 2 == 0 else -h\n node.value += node.path_cost\n return\n # expand children\n for action in self.env.actions(node.state):\n child_state, cost, _ = self.env.step(node.state, action)\n child = self.Node(\n state=child_state,\n depth=node.depth + 1,\n action=action,\n )\n # NOTE(shawn): cost is in the perspective of player\n # if the next step is player's turn (node.depth is even), cost is positive\n child.path_cost = cost if node.depth % 2 == 0 else -cost\n child.path_cost += node.path_cost\n node.children.append(child)\n # if player's turn, try to minimize cost\n if node.depth % 2 == 0:\n node.value = np.inf\n for child in node.children:\n self._minimax(child, max_depth, alpha, beta)\n node.value = min(node.value, child.value)\n beta = min(beta, node.value)\n if beta < 
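# GreedyMinimumCost above breaks ties uniformly at random among all
# minimum-cost actions; the idiom in isolation:
import random

def argmin_random(actions, cost_fn):
    costs = [cost_fn(a) for a in actions]
    best = min(costs)
    return random.choice([a for a, c in zip(actions, costs) if c == best])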
alpha:\n break\n # if opponent's turn, try to maximize cost\n else:\n node.value = -np.inf\n for child in node.children:\n self._minimax(child, max_depth, alpha, beta)\n node.value = max(node.value, child.value)\n alpha = max(alpha, node.value)\n if beta < alpha:\n break\n\n def act(self, state: np.ndarray, actions: List) -> Tuple:\n # epsilon greedy\n if random.random() < self.epsilon_greedy:\n return random.choice(actions)\n\n # init first layer\n self.root = self.Node(state, depth=0)\n self.root.path_cost = 0\n # start search\n self._minimax(self.root, self.max_depth)\n # return action with min value\n min_value = min([child.value for child in self.root.children])\n min_value_actions = [child.action for child in self.root.children if child.value == min_value]\n return random.choice(min_value_actions)\n\n def __str__(self):\n return 'Minimax(max_depth={}, heuristic={}, env={}, epsilon_greedy={})'.format(\n self.max_depth, self.heuristic, self.env, self.epsilon_greedy)\n\n\nclass MinimaxV2(interface.Agent):\n \"\"\"minimax with alpha-beta pruning + map branch factor to max depth\n \"\"\"\n\n def __init__(\n self,\n max_computing_nodes: int = 1200000,\n heuristic: interface.Estimator = heuristic.OneStepLookAhead(),\n env: interface.Environment = environment.SubtractionRule(),\n timeout: float = 58.,\n ) -> None:\n \"\"\"\n Args:\n max_computing_nodes: to estimate max depth\n heuristic: heuristic function\n env: game rule\n \"\"\"\n self.max_computing_nodes = max_computing_nodes\n self.env = env\n self.heuristic = heuristic\n self.timeout = timeout\n self.act_start_time = None\n self.memory = {}\n\n def _minimax(\n self,\n state: np.ndarray,\n from_action: str,\n path_cost: int,\n depth: int,\n alpha: float = -np.inf,\n beta: float = np.inf\n ) -> Tuple[int, Tuple[int, int]]:\n \"\"\"a DFS alpha beta pruning method\n\n if node.depth is even, it's player's turn (do min)\n if node.depth is odd, it's opponent's turn (do max)\n\n Args:\n state: current state\n path_cost: path cost (cost from root to this node)\n depth: current depth\n alpha: alpha value\n beta: beta value\n\n Returns:\n value: value of this node\n move: action to reach this node\n \"\"\"\n # check if in memory\n #if depth == 2 and from_action in self.memory:\n # return self.memory[from_action], None\n # if terminal node\n # TODO(shawn): refactor environment API\n is_terminal, cost_terminal = self.env._is_terminal_and_cost(state)\n if is_terminal:\n if depth % 2 != 0:\n return path_cost + cost_terminal, None\n else:\n return path_cost - cost_terminal, None\n # if reaches max_depth\n if depth == self.max_depth:\n h = self.heuristic.estimate(state)\n # NOTE(shawn): h is in the perspective of player\n # if the next step is player's turn (node.depth is even), h is positive\n value = h if depth % 2 == 0 else -h\n value += path_cost\n return value, None\n # if player's turn, try to minimize cost\n if depth % 2 == 0:\n value = np.inf\n for action in self.env.actions(state):\n # take a step\n row_col_idx, subtract_val = action\n if row_col_idx < 3:\n state[row_col_idx, :] -= subtract_val\n else:\n state[:, row_col_idx - 3] -= subtract_val\n # go deeper if still have time\n if time.time() - self.act_start_time < self.timeout:\n child_v, _ = self._minimax(\n state=state,\n from_action=from_action + str(action),\n path_cost=path_cost + subtract_val,\n depth=depth + 1,\n alpha=alpha,\n beta=beta,\n )\n else:\n child_v = self.heuristic.estimate(state)\n logging.warning('run out of time during minimax, depth: {}, action: {}'.format(depth, 
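# A condensed reference form of the depth-limited alpha-beta scheme used
# by Minimax/MinimaxV2 above: even depth minimises (player's turn), odd
# depth maximises (opponent's turn). children/is_terminal/heuristic are
# assumed callbacks; the heuristic is expected to handle the sign flip:
import math

def alphabeta(state, depth, max_depth, children, is_terminal, heuristic,
              alpha=-math.inf, beta=math.inf):
    if is_terminal(state) or depth == max_depth:
        return heuristic(state, depth)
    if depth % 2 == 0:  # player's turn: minimise
        value = math.inf
        for child in children(state):
            value = min(value, alphabeta(child, depth + 1, max_depth,
                                         children, is_terminal, heuristic, alpha, beta))
            beta = min(beta, value)
            if beta <= alpha:
                break
        return value
    value = -math.inf   # opponent's turn: maximise
    for child in children(state):
        value = max(value, alphabeta(child, depth + 1, max_depth,
                                     children, is_terminal, heuristic, alpha, beta))
        alpha = max(alpha, value)
        if beta <= alpha:
            break
    return value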
action))\n # save to memory if depth == 0\n if depth == 0:\n self.memory[str(action)] = child_v\n # undo step\n if row_col_idx < 3:\n state[row_col_idx, :] += subtract_val\n else:\n state[:, row_col_idx - 3] += subtract_val\n # update value\n if child_v < value:\n value, move = child_v, action\n beta = min(beta, value)\n if value <= alpha:\n break\n return value, move\n # if opponent's turn, try to maximize cost\n else:\n value = -np.inf\n for action in self.env.actions(state):\n # take a step\n row_col_idx, subtract_val = action\n if row_col_idx < 3:\n state[row_col_idx, :] -= subtract_val\n else:\n state[:, row_col_idx - 3] -= subtract_val\n # go deeper if still have time\n if time.time() - self.act_start_time < self.timeout:\n child_v, _ = self._minimax(\n state=state,\n from_action=from_action + str(action),\n path_cost=path_cost - subtract_val,\n depth=depth + 1,\n alpha=alpha,\n beta=beta,\n )\n else:\n child_v = self.heuristic.estimate(state)\n logging.warning('run out of time during minimax, depth: {}, action: {}'.format(depth, action))\n # save to memory if depth == 1\n #if depth == 1 and not self.env.is_terminal(state):\n # # NOTE(shawn): sub_val_x here is to compensate the path_cost when being swapped\n # sub_val_first = int(from_action[1:-1].split(',')[1])\n # sub_val_second = action[1]\n # self.memory[str(action) + from_action] = child_v + (sub_val_second - sub_val_first) * 2\n # XXX(shawn): debug\n #if depth == 1:\n # if from_action + str(action) in self.memory:\n # if self.memory[from_action + str(action)] != child_v:\n # print('from_action: {}, action: {}, value: {}'.format(from_action, action, self.memory[from_action + str(action)]))\n # print('from_action: {}, action: {}, value: {}'.format(from_action, action, child_v))\n # print('state: {}'.format(state))\n # undo step\n if row_col_idx < 3:\n state[row_col_idx, :] += subtract_val\n else:\n state[:, row_col_idx - 3] += subtract_val\n # update value\n if child_v > value:\n value, move = child_v, action\n alpha = max(alpha, value)\n if value >= beta:\n break\n return value, move\n\n def act(self, state: np.ndarray, actions: List) -> Tuple:\n # init timer\n self.act_start_time = time.time()\n\n # estimate max depth\n num_branches = len(actions)\n self.max_depth = min(8,\n max(3,int(np.log(self.max_computing_nodes) / (np.log(num_branches) + 1e-6))))\n\n # start search\n self.memory.clear()\n _, action_mx = self._minimax(\n state=state,\n from_action=\"\",\n path_cost=0,\n depth=0,\n alpha=-np.inf,\n beta=np.inf,\n )\n return action_mx\n\n # TODO(shawn): select the action that subtracts to the min element in state\n # get all actions with min value from memory\n values = [self.memory[str(action)] for action in actions]\n min_value_actions = [action for action, value in zip(actions, values) if value == min(values)]\n # select the action that subtracts to the min element in state\n min_state_value = [np.min(self.env.step(state, action)[0], axis=1 if action[0] < 3 else 0)[action[0] % 3] for action in min_value_actions]\n #return min_value_actions[np.argmin(min_state_value)]\n\n def __str__(self):\n return 'MinimaxV2(max_computing_nodes={}, heuristic={}, env={}, timeout={})'.format(\n self.max_computing_nodes, self.heuristic, self.env, self.timeout)\n\n\nclass MonteCarloTreeSearch(interface.Agent):\n \"\"\"Monte Carlo Tree Search\n \"\"\"\n\n class Node(object):\n def __init__(\n self,\n state: np.ndarray,\n parent: 'Node' = None,\n action: Tuple = None,\n depth: int = 0,\n cost_player: int = 0,\n cost_opponent: int = 0,\n ):\n # 
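# MinimaxV2.act() above sizes the search depth so that roughly
# branching_factor ** depth stays inside a node budget, i.e.
# depth ~ log(budget) / log(branching_factor), clamped to [3, 8]:
import numpy as np

def depth_for_budget(num_branches: int, max_nodes: int = 1_200_000) -> int:
    return min(8, max(3, int(np.log(max_nodes) / (np.log(num_branches) + 1e-6))))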
static\n self.state = state\n self.parent = parent\n self.action = action\n self.depth = depth\n self.cost_player = cost_player\n self.cost_opponent = cost_opponent\n # dynamic\n self.children = []\n self.wins = 0 # in player's perspective\n self.visits = 0\n\n def __init__(\n self,\n max_num_simulations: int = 1000,\n max_time_seconds: float = 58.,\n coeffi_explore: float = np.sqrt(2),\n player: interface.Agent = RandomSelect(),\n env: interface.Environment = environment.SubtractionRule(),\n game: interface.GameSimulator = simulate.TwoPlayerGame(),\n handicapped: int = 0,\n run_only_when_min_val_less_than: int = -1,\n ):\n self.player = player\n self.env = env\n self.game = game\n self.max_num_simulations = max_num_simulations\n self.max_time_seconds = max_time_seconds\n self.coeffi_explore = coeffi_explore\n self.root = None\n self.run_condition = run_only_when_min_val_less_than\n self.handicapped = handicapped\n # MCTS tree\n self.hashmap = {}\n\n def act(self, state: np.ndarray, actions: List) -> Tuple:\n\n # debug\n print('state: {}'.format(state))\n\n\n _st = time.time()\n\n # init root\n state_hash = str(state)\n #if state_hash not in self.hashmap:\n if True:\n root = self.Node(state, action=(-1,-1), cost_player=self.handicapped, cost_opponent=0)\n #self.hashmap[state_hash] = root\n else:\n root = self.hashmap[state_hash]\n print(state_hash)\n print('wins:', root.wins)\n print('visits:', root.visits)\n print('state:', root.state)\n\n # NOTE(shawn): replace with min cost select if you doesn't believe in MCTS\n if self.run_condition > 0 and np.min(state) > self.run_condition:\n '''\n # find minimum cost\n min_cost = np.min([child.cost_player for child in root.children])\n # find most visited child with minimum cost\n min_cost_children = [child for child in root.children if child.cost_player == min_cost]\n child_most_visited = min_cost_children[np.argmax([child.visits for child in min_cost_children])]\n\n # debug print\n for child in child_most_visited.children:\n print('\\tstate: {}, action: {}, visits: {}, wins: {}'.format(child.state, child.action, child.visits, child.wins))\n\n return child_most_visited.action\n '''\n # random select action with minimum cost\n costs = []\n for action in actions:\n _, cost, _ = self.env.step(state, action)\n costs.append(cost)\n min_cost = min(costs)\n min_cost_actions = [action for action, cost in zip(actions, costs) if cost == min_cost]\n return random.choice(min_cost_actions)\n\n # start search\n num_simulations = 0\n while num_simulations < self.max_num_simulations and time.time() - _st < self.max_time_seconds:\n leaf = self._selection(root)\n child = self._expansion(leaf)\n win = self._simulation(child)\n self._backpropagation(child, win)\n num_simulations += 1\n\n # debug\n for child in root.children:\n print('action: {}, visits: {}, wins: {}, depth: {}'.format(child.action, child.visits, child.wins, child.depth))\n\n\n # return action with max wins\n child_robust = root.children[np.argmax([child.visits for child in root.children])]\n #child_max = root.children[np.argmax([child.wins for child in root.children])]\n return child_robust.action\n\n def _selection(self, x: Node) -> Node:\n \"\"\"select a node to expand\n \"\"\"\n while len(x.children) > 0: # loop until reaches leaf\n # calculate UCB1\n ucb1s = []\n for child in x.children:\n if child.visits == 0:\n ucb1 = np.inf if x.depth % 2 == 0 else -np.inf\n else:\n exploit = child.wins / child.visits\n explore = np.sqrt(np.log(x.visits) / child.visits)\n ucb1 = exploit + self.coeffi_explore * 
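# The selection rule being computed here is UCB1:
# exploit + c * sqrt(ln(N_parent) / n_child), with the exploration term's
# sign flipped on minimising plies. A standalone form:
import math

def ucb1(child_wins, child_visits, parent_visits, c=math.sqrt(2), maximise=True):
    if child_visits == 0:
        return math.inf if maximise else -math.inf  # force unvisited children first
    exploit = child_wins / child_visits
    explore = c * math.sqrt(math.log(parent_visits) / child_visits)
    return exploit + explore if maximise else exploit - explore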
explore if x.depth % 2 == 0 \\\n else exploit - self.coeffi_explore * explore\n ucb1s.append(ucb1)\n\n # debug\n #print('depth:', x.depth)\n #print('wins:', [child.wins for child in x.children])\n #print('visits:', [child.visits for child in x.children])\n #print('ucb1s:', ucb1s)\n\n # select child with max/min UCB1\n if x.depth % 2 == 0:\n x = x.children[np.argmax(ucb1s)] # if player's turn, select child with max ucb1\n else:\n x = x.children[np.argmin(ucb1s)] # if opponent's turn, select child with min ucb1\n return x\n\n def _expansion(self, x: Node) -> Node:\n \"\"\"expand a node\n \"\"\"\n if x.visits != 0:\n # expand and add children to tree\n for action in self.env.actions(x.state):\n child_state, cost, _ = self.env.step(x.state, action)\n _n = self.Node(\n state=child_state,\n parent=x,\n action=action,\n depth=x.depth + 1,\n cost_player=x.cost_player + (cost if x.depth % 2 == 0 else 0),\n cost_opponent=x.cost_opponent + (cost if x.depth % 2 != 0 else 0),\n )\n #self.hashmap[str(child_state)] = _n # add to hashmap\n x.children.append(_n)\n # return itself if no children\n if len(x.children) == 0:\n return x\n # random select a child\n x = random.choice(x.children)\n return x\n\n def _simulation(self, x: Node) -> int:\n \"\"\"run simulation\n \"\"\"\n self.game.reset()\n _, costs = self.game.run(\n agents=[self.player, self.player],\n env=self.env,\n initial_state=x.state,\n initial_costs=\\\n (x.cost_player, x.cost_opponent) if x.depth % 2 == 0 \\\n else (x.cost_opponent, x.cost_player)\n )\n cost_player = costs[0] if x.depth % 2 == 0 else costs[1]\n cost_opponent = costs[1] if x.depth % 2 == 0 else costs[0]\n win = 1 if cost_player < cost_opponent else -1\n return win\n\n def _backpropagation(self, x: Node, win: int):\n \"\"\"backpropagation\n \"\"\"\n while x is not None:\n x.wins += win\n x.visits += 1\n x = x.parent\n\n def __str__(self):\n return 'MonteCarloTreeSearch(player={}, env={}, game={}, num_simulations={}, time_limit={}, coeffi={}, run_condition={}, handicapped={})'.format(\n self.player, self.env, self.game,\n self.max_num_simulations, self.max_time_seconds,\n self.coeffi_explore, str(self.run_condition), self.handicapped)","repo_name":"kertansul/subtraction-game","sub_path":"src/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":21021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23496637541","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport can\n\nfrom canopen_301_402.constants import *\nfrom canopen_301_402.canopen_msgs.msg import CanOpenMessage\nfrom canopen_301_402.canopen_msgs.cob import CanOpenId\n\nclass CanOpenMessageNmtBootup(CanOpenMessage):\n \"\"\"docstring for CanOpenMessageNmtBootup\"\"\"\n def __init__(self, canopen, node_id, original_can_msg=None):\n self.canopen = canopen\n\n self.connection_set = self.canopen.connection_set\n service = CanOpenService.nmt_error_control\n function_code = self.connection_set.determine_function_code(service)\n \n data = [0]\n\n\n # initialize CanOpenMessage\n super(CanOpenMessageNmtBootup, self).__init__(function_code, node_id, service, data, original_can_msg = original_can_msg)\n \n \n @classmethod\n def try_from_canopen_msg(cls, msg, canopen):\n '''\n @summary: try to convert from canopen msg\n @param cls: CanOpenMessageNmtBootup\n @param msg: CanOpenMessage\n @param canopen: CanOpen\n @result: None, if not possible, CanOpenMessageNmtBootup instance\n '''\n\n if ((msg.service == CanOpenService.nmt_error_control) and\n 
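# MCTS backpropagation above walks parent pointers, adding the playout
# result to every node on the path back to the root. Minimal form:
def backpropagate(node, outcome: int) -> None:
    while node is not None:
        node.wins += outcome   # +1 player win, -1 loss, from the player's view
        node.visits += 1
        node = node.parent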
(msg.node_id > 0) and \n (len(msg.data) >= 1) and \n (msg.data[0] == 0)):\n\n return CanOpenMessageNmtBootup(canopen, msg.node_id, original_can_msg = msg)\n else:\n return None\n\n","repo_name":"xaedes/canopen_301_402","sub_path":"src/canopen_301_402/canopen_msgs/msg_nmt_bootup.py","file_name":"msg_nmt_bootup.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"38592029902","text":"# link: https://leetcode.com/problems/is-graph-bipartite/\n\nclass Solution:\n def isBipartite(self, graph: List[List[int]]) -> bool:\n \n # Two colors : A ~ 1, B ~ 2\n color = {}\n \n def dfs(node):\n \n # traversing the adjacency list of node and performing dfs recurison\n for nextNode in graph[node]:\n # if adjustend nextNode has same color then it is not bipartite\n if nextNode in color:\n if color[nextNode] == color[node]:\n return False\n # if not in color, we assign it opposite color\n else:\n color[nextNode] = 1 - color[node]\n # run dfs on it to check if it satify the condition\n if not dfs(nextNode):\n return False\n return True\n \n \n # traversing given graph\n for node in range(len(graph)):\n # if node not in color\n if node not in color:\n # color it as 1\n color[node] = 0\n # run dfs to check if it satisfy the condition for bipartite\n # if dfs shows False result that means not bipartite\n if not dfs(node):\n return False\n return True\n\n\"\"\"\nTime Complexity = O(V+E) as we are visiting each cell of matrix constant number of times\nSpace Complexity = O(V) in worst case recursion can touch all cells of matrix\n\"\"\"","repo_name":"techonair/Programming-Pathshala","sub_path":"Graphs/Assignment-2/Bipartite Graph.py","file_name":"Bipartite Graph.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"7304397802","text":"from pymongo import MongoClient\ntry:\n conn = MongoClient()\n print(\"Connected successfully!!!\")\nexcept: \n print(\"Could not connect to MongoDB\")\n \n# database\ndb = conn.database\n# Created or Switched to collection names: my_gfg_collection\ncollection = db.model_data\n\nimport torch\nfrom matplotlib import pyplot as plt\nimport cv2\nfrom PIL import Image\nimport numpy as np\ndef get_iou(bb1, bb2):\n \n \"\"\"\n Calculate the Intersection over Union (IoU) of two bounding boxes.\n\n Parameters\n ----------\n bb1 : dict\n Keys: {'x1', 'x2', 'y1', 'y2'}\n The (x1, y1) position is at the top left corner,\n the (x2, y2) position is at the bottom right corner\n bb2 : dict\n Keys: {'x1', 'x2', 'y1', 'y2'}\n The (x, y) position is at the top left corner,\n the (x2, y2) position is at the bottom right corner\n\n Returns\n -------\n float\n in [0, 1]\n \"\"\"\n assert bb1['x1'] < bb1['x2']\n assert bb1['y1'] < bb1['y2']\n assert bb2['x1'] < bb2['x2']\n assert bb2['y1'] < bb2['y2']\n\n # determine the coordinates of the intersection rectangle\n x_left = max(bb1['x1'], bb2['x1'])\n y_top = max(bb1['y1'], bb2['y1'])\n x_right = min(bb1['x2'], bb2['x2'])\n y_bottom = min(bb1['y2'], bb2['y2'])\n\n if x_right < x_left or y_bottom < y_top:\n return 0.0\n\n # The intersection of two axis-aligned bounding boxes is always an\n # axis-aligned bounding box\n intersection_area = (x_right - x_left) * (y_bottom - y_top)\n\n # compute the area of both AABBs\n bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])\n bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])\n\n # compute the 
intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = intersection_area / float(bb1_area + bb2_area - intersection_area)\n assert iou >= 0.0\n assert iou <= 1.0\n #print(iou)\n return iou\ndef try_overlapping(chairs,persons):\n if len(persons) == 0:\n return [0]*len(chairs)\n occupied = [0]*len(chairs)\n for i,row_chairs in chairs.iterrows():\n x1,x2,y1,y2 = row_chairs['xmin'] , row_chairs['xmax'] , row_chairs['ymin'], row_chairs['ymax']\n bb1 = {'x1':x1, 'x2':x2, 'y1':y1, 'y2':y2}\n for j,row in persons.iterrows():\n x1,x2,y1,y2 = row['xmin'] , row['xmax'] , row['ymin'], row['ymax']\n bb2 = {'x1':x1, 'x2':x2, 'y1':y1, 'y2':y2}\n percentage = get_iou(bb1,bb2)\n if percentage > 0.3:\n occupied[i] = 1\n return occupied\n#Model\ndef main():\n model = torch.hub.load('mostlyAditya/yolov5', 'yolov5s',device = 'cpu') # local repo\n # Images\n img = 'yolov5/data/videos/seating_data.mp4'\n img = cv2.VideoCapture(img)\n found = False\n chairs = []\n second = 0\n width = int(img.get(3))\n height = int(img.get(4))\n #vid = cv2.VideoWriter('save_video.avi',cv2.VideoWriter_fourcc(*'MJPG'),10,(width, height))\n\n while img.isOpened():\n ret, frame = img.read()\n for i in range(23):\n img.read()\n # Make detections \n results = None\n try:\n results = model(frame)\n except AttributeError:\n print(\"Completed\")\n break\n #results.save()\n #vid.write(np.squeeze(results.render()))\n cv2.imshow('YOLO', np.squeeze(results.render()))\n #results.print() \n #results.show() # or .show()\n #results = results.xyxy[0] # img1 predictions (tensor)\n boxes = results.pandas().xyxy[0]\n if not found:\n chairs = boxes[boxes[\"class\"] == 56].sort_values('xmin')\n found = True\n persons = boxes[boxes[\"class\"] == 0].sort_values('xmin')\n #chairs.to_csv('file.csv', mode='a', index=False, header=False)\n #persons.to_csv('file.csv', mode='a', index=False, header=False)\n \n seats = try_overlapping(chairs,persons)\n occupied = {\n 'second':second,\n 'seats':seats\n }\n collection.insert_one(occupied)\n print(occupied)\n print(second)\n second += 1\n #print(boxes)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n img.release()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main()\n\n#img = cv2.imread('yolov5/data/videos/VID_20220826_133818.mp4')\n#img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY )\n# Inference\n'''\nresults = model(img, size=328) # includes NMS\n\n# Results\nresults.print() \n#results.show() # or .show()\n\n#results = results.xyxy[0] # img1 predictions (tensor)\nboxes = results.pandas().xyxy[0]\nprint(boxes)\n'''","repo_name":"mostlyAditya/capstone","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29443331769","text":"import asyncio\nimport logging\n\nfrom pytoniq_core import Address\n\nfrom pytoniq import LiteBalancer, WalletV4R2, LiteClient\n\n\nasync def main():\n logging.basicConfig(level=logging.INFO)\n client = LiteBalancer.from_mainnet_config(trust_level=1)\n\n await client.start_up()\n\n \"\"\"wallet seqno\"\"\"\n result = await client.run_get_method(address='EQBvW8Z5huBkMJYdnfAEM5JqTNkuWX3diqYENkWsIL0XggGG', method='seqno', stack=[])\n print(result) # [242]\n wallet = await WalletV4R2.from_address(provider=client, address='EQBvW8Z5huBkMJYdnfAEM5JqTNkuWX3diqYENkWsIL0XggGG')\n print(wallet.seqno) # 242\n print(await wallet.get_seqno()) # 242\n 
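# Worked check of the IoU formula implemented in the previous record: two
# unit squares offset by half a side overlap in a 0.5 x 1 strip, so
# IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
inter = (1.0 - 0.5) * (1.0 - 0.0)          # intersection area of the strip
union = 1.0 * 1.0 + 1.0 * 1.0 - inter      # area(bb1) + area(bb2) - intersection
assert abs(inter / union - 1.0 / 3.0) < 1e-9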
print(await wallet.run_get_method(method='seqno', stack=[])) # [242]\n\n \"\"\"dex router get method\"\"\"\n result = await client.run_get_method(address='EQB3ncyBUTjZUA5EnFKR5_EnOMI9V1tTEAAPaiU71gc4TiUt', method='get_router_data', stack=[])\n print(result) # [0, 0 refs>, 1 refs>, 1 refs>, 1 refs>, 1 refs>]\n print(result[1].load_address()) # EQBJm7wS-5M9SmJ3xLMCj8Ol-JKLikGDj-GfDwL1_6b7cENC\n\n \"\"\"jetton wallets\"\"\"\n owner_address = Address('EQBvW8Z5huBkMJYdnfAEM5JqTNkuWX3diqYENkWsIL0XggGG')\n request_stack = [owner_address.to_cell().begin_parse()]\n result = await client.run_get_method(address='EQBynBO23ywHy_CgarY9NK9FTz0yDsG82PtcbSTQgGoXwiuA', method='get_wallet_address', stack=request_stack)\n print(result) # [ 0 refs>]\n jetton_wallet_address = result[0].load_address()\n print(jetton_wallet_address) # EQDapqw6EnsabFZO46A4nIUXXtT4IIcnjPuabomeT4m3paST\n\n result = await client.run_get_method(address='EQDapqw6EnsabFZO46A4nIUXXtT4IIcnjPuabomeT4m3paST', method='get_wallet_data', stack=[])\n print(result) # [2005472, 0 refs>, 0 refs>, 1 refs>]\n\n await client.close_all()\n\n \"\"\"can run get method for any block liteserver remembers\"\"\"\n client = LiteClient.from_mainnet_config(2, 2) # archive liteserver\n await client.connect()\n blk, _ = await client.lookup_block(wc=0, shard=-2**63, seqno=33000000)\n result = await client.run_get_method(address='EQBvW8Z5huBkMJYdnfAEM5JqTNkuWX3diqYENkWsIL0XggGG', method='seqno', stack=[], block=blk)\n await client.close()\n print(result)\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"yungwine/pytoniq","sub_path":"examples/get_methods.py","file_name":"get_methods.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"16059648402","text":"from homework.cs231n.data_utils import load_cifar10\nimport numpy as np\nfrom homework.cs231n.classifiers.neural_net import TwoLayerNet\nimport matplotlib.pyplot as plt\n\n\ndef get_cifar10_data(num_training = 49000, num_validation = 1000, num_test = 1000):\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n x_train, y_train, x_test, y_test = load_cifar10(cifar10_dir)\n\n #subsample the data\n mask = range(num_training, num_training + num_validation)\n x_val = x_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n x_train = x_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n x_test = x_test[mask]\n y_test = y_test[mask]\n\n #Normalize the data: subtract the mean image\n mean_image = np.mean(x_train, axis = 0)\n x_train -= mean_image\n x_val -= mean_image\n x_test -= mean_image\n\n #reshape data to rows\n x_train = x_train.reshape(num_training, -1)\n x_val = x_val.reshape(num_validation, -1)\n x_test = x_test.reshape(num_test, -1)\n\n return x_train, y_train, x_val, y_val, x_test, y_test\n\n# Invoke the above function to get our data.\nx_train, y_train, x_val, y_val, x_test, y_test = get_cifar10_data()\nprint('Train data shape: ', x_train.shape)\nprint('Train labels shape: ', y_train.shape)\nprint('Validation data shape: ', x_val.shape)\nprint('Validation labels shape: ', y_val.shape)\nprint('Test data shape: ', x_test.shape)\nprint('Test labels shape: ', y_test.shape)\n\ninput_size = 32 * 32 * 3\nhidden_size = 50\nnum_classes = 10\nnet = TwoLayerNet(input_size, hidden_size, num_classes)\n\n#Train the network use SGD\nstats = net.train(x_train, y_train, x_val, y_val,\n num_iters = 1000, batch_size = 200,\n learning_rate = 1e-4, learning_rate_decay = 0.95,\n reg = 
0.5, verbose = True)\n#Predict on the validation set\nval_acc = (net.predict(x_val) == y_val).mean()\nprint('validation accuracy: ', val_acc)\n\n#Plot the loss function and train / validation accuracies\nplt.subplot(2, 1, 1)\nplt.plot(stats['loss_history'])\nplt.title('Loss history')\nplt.xlabel('Iteration')\nplt.ylabel('Loss')\n\nplt.subplot(2, 1, 2)\nplt.plot(stats['train_acc_history'], label = 'train')\nplt.plot(stats['val_acc_history'], label = 'val')\nplt.title('Classification accuracy history')\nplt.xlabel('Epoch')\nplt.ylabel('Classification accuracy')\nplt.show()\n\nfrom homework.cs231n.vis_utils import visualize_grid\n\n# Visualize the weights of the network\n\ndef show_net_weights(net):\n    W1 = net.params['w1']\n    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)\n    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))\n    plt.gca().axis('off')\n    plt.show()\n\nshow_net_weights(net)\n\ninput_size = 32*32*3\nnum_classes = 10\nhidden_size = [75, 100, 125]\nresults = {}\nbest_val_acc = 0\nbest_net = None\n\nlearning_rates = np.array([0.7, 0.8, 0.9, 1.0, 1.1]) * 1e-3\nregularization_strengths = [0.75, 1.9, 1.25]\nprint('running')\nfor hs in hidden_size:\n    for lr in learning_rates:\n        for reg in regularization_strengths:\n            net = TwoLayerNet(input_size, hs, num_classes)\n\n            stats = net.train(x_train, y_train, x_val, y_val,\n                              num_iters = 1500, batch_size = 200,\n                              learning_rate = lr, learning_rate_decay = 0.95,\n                              reg = reg, verbose = False)\n            val_acc = (net.predict(x_val) == y_val).mean()\n            if val_acc > best_val_acc:\n                best_val_acc = val_acc\n                best_net = net\n            results[(hs, lr, reg)] = val_acc\n\nprint('finished')\nfor hs, lr, reg in sorted(results):\n    val_acc = results[(hs, lr, reg)]\n    print('hs %d lr %e reg %e val accuracy: %f' % (hs, lr, reg, val_acc))\n\nprint('best validation accuracy achieved during cross-validation: %f' % best_val_acc)\ntest_acc = (best_net.predict(x_test) == y_test).mean()\nprint('test_accuracy:', test_acc)\n\n","repo_name":"AndrewZhou924/Digital-Image-Processing-Course-Design","sub_path":"project-2/softmax_higher/two_layer_net.py","file_name":"two_layer_net.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"36747190307","text":"\nref_file = open(\"../matlab/results_matlab.txt\", 'r')\ndut_file = open(\"./results_modelsim.txt\", 'r')\n\nerror_flag = 0\nline = 1\n\nfor dut_line in dut_file:\n    dut_line = dut_line.strip()\n    input_line = ref_file.readline().strip()\n    \n    #dut_res = int(dut_line,'b')\n    #exp_res = int(input_line,'b')\n\n    if input_line != dut_line:\n        error_flag = 1\n        print(f\"Error at line: {line}\")\n        print(f\"dut result: {dut_line}\")\n        print(f\"exp result: {input_line}\")\n        print()\n    line += 1\n\nif error_flag == 1:\n    print(\"File check failed!\")\nelse:\n    print(\"File check successful!\")\n","repo_name":"Dragosk97/ISA-laboratories","sub_path":"lab2/MBE_multiplier/sim/output_check.py","file_name":"output_check.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"27114953976","text":"\"\"\"\nI don't know what this is; revisit later\n\"\"\"\n\nfrom scipy.interpolate import interp1d\nimport scipy.signal as sig\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#######First read data from files#######\n\ndirec = r\"C:/Users/Pedro/Dropbox/1-Three Omega with Ara/new_python/3omega-lockin-Pedro/TARA8/New folder\"\n\ntemp_data = 
pd.read_csv(direc + '/' + 'temp_28_39.csv')\nT = np.array(temp_data['Temp'])\ntime_T = np.array(temp_data['time'])\n\n\nvolt_data = pd.read_csv(direc +'/'+'avolt_28_39.csv')\nV = np.array(volt_data['volt'])\ntime_V = np.array(volt_data['time'])\n\nshunt_data = pd.read_csv(direc + '/' + 'avsh_28_39.csv')\nVsh = np.array(shunt_data['volt'])\ntime_Vsh = np.array(shunt_data['time'])\nVsh= Vsh[200:-1]*-1\nplt.plot(Vsh)\n#######Calculate Resistance from Volt and Shunt measurement#######\nVsh_avg = Vsh.mean()\nVsh = np.array([Vsh_avg for i in V])\nR_shunt = 0.099\nI = Vsh/R_shunt\nR = V/I\n\n#######Interpolate#######\nif len(time_T) > len(time_V):\n\ttime = time_V\nelse:\n\ttime = time_T\n\n#time = time[100:400]\nfT = interp1d(time_T, T,'quadratic')\nfR = interp1d(time_V, R,'quadratic')\ntemp = np.array([fT(t) for t in time])\nresist = np.array([fR(t) for t in time])\n\nr1 = resist[1:len(resist)]\nr2 = resist[:-1]\n\ndel_r = r1 - r2\n\nt1 = temp[1:len(temp)]\nt2 = temp[:-1]\n\ndel_t = t1 - t2\n\nT2 = sig.savgol_filter(T,window_length = len(T),polyorder = 3)\n\nfig1 = plt.figure()\nfig1.suptitle('Temp fitted data', fontsize=26)\nplt.plot(time_T,T2)\n\nfig2 = plt.figure()\nfig2.suptitle('Temp raw data', fontsize=26)\nplt.plot(time_T,T)\n\n\n\nfig3 = plt.figure()\nfig3.suptitle('Resist fitted data', fontsize=26)\nplt.plot(time,resist)\n\nfig4 = plt.figure()\nfig4.suptitle('Resist raw data', fontsize=26)\nplt.plot(time_V,R)\n\n\nfig5 = plt.figure()\nfig5.suptitle('del_r', fontsize=26)\nplt.plot(time[:-1],del_r)\n\nfig6 = plt.figure()\nfig6.suptitle('del_t', fontsize=26)\nplt.plot(time[:-1],del_t)\nplt.show()\n\n\n\n\nDRDT = del_r/del_t\nDRDT_org = DRDT\n\nfig2 = plt.figure()\nfig2.suptitle('raw drdt', fontsize=26)\nplt.plot(DRDT_org)\n\nfor i in range(20):\n loc_max = np.where(DRDT==DRDT.max())\n DRDT = np.delete(DRDT,loc_max)\n loc_min = np.where(DRDT==DRDT.min())\n DRDT = np.delete(DRDT,loc_min)\n fig3 = plt.figure()\n fig3.suptitle('fix drdt'+str(i) + ' , DRDT: ' +str(DRDT.mean()), fontsize=26)\n plt.plot(DRDT)\n\nplt.show()\n\n\nDRDT_avg = DRDT.mean()\n\n\nprint('DRDT_avg: ' + str(DRDT_avg))\nprint('raw DRDT: ' + str(DRDT_org.mean()))\nprint('DRDT_median: ' + str(np.median(DRDT)))\n'''\nfor i in range(20):\n ma = a.max()\n mi = a.min()\n m1 = np.where(a==ma)\n m1 = m1[0][0]\n m2 = np.where(a==mi)\n m2= m2[0][0]\n a=np.delete(a,m1)\n a=np.delete(a,m2)\n'''","repo_name":"PedroOliviera/3-omega","sub_path":"Current code/DRDT_calc_kinda_words.py","file_name":"DRDT_calc_kinda_words.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2836851123","text":"from pydantic import BaseModel\nfrom datetime import datetime\n\nclass Publication(BaseModel):\n title: str\n content_publication: str\n author_publication: str\n publication_date: datetime\n image: str\n\n class Config:\n schema_extra = {\n \"example\": {\n \"title\": \"Titulo de prueba\",\n \"content_publication\": \"Texto de prueba\",\n \"author_publication\": \"21312312412\",\n \"publication_date\": \"2022-05-01T10:00:00.000Z\",\n \"image\": \"https://example.com/image.jpg\"\n }\n }","repo_name":"julsanchezsa/UN_CampusConnect_bienestar","sub_path":"app/schemas/publication.py","file_name":"publication.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13036489280","text":"import numpy as np\nimport os\n\nfrom sklearn.metrics import 
roc_auc_score\nfrom simplestat import statinf\n\nimport json\n\nfns=[f\"results/{zw}/result.npz\" for zw in os.listdir(\"results\")]\nfns=[fn for fn in fns if os.path.isfile(fn)]\n\n\n\ny_true=None\ny_scores=[]\nfor fn in fns:\n f=np.load(fn)\n if y_true is None:\n y_true=f[\"y_true\"]\n y_scores.append(f[\"y_score\"]/f[\"div\"])\n\n\n\naucs=[roc_auc_score(y_true,y_score) for y_score in y_scores]\n\nprint(json.dumps(statinf(aucs),indent=2))\n\n\naucs=[]\nfor i in range(len(y_scores)):\n y_score=np.median(np.abs(y_scores[:i+1])**2,axis=0)\n auc=roc_auc_score(y_true,y_score)\n aucs.append(auc)\n\n\nnp.savez_compressed(\"forimpro.npz\",t=aucs)\n\nfrom plt import *\nplt.plot(aucs)\nplt.show()\n","repo_name":"psorus/RandNet","sub_path":"genimpro.py","file_name":"genimpro.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"3084258557","text":"from freeswitch import consoleLog\nimport shlex\n\n\ndef usage():\n res = \"Usage: python PCARI_Parse_SMS_Body\"\n return res\n\n\ndef parse(args):\n res = shlex.split(args)\n s = [\n 'GSEND',\n 'GCREATE',\n 'GDELETE',\n 'GADDMEM',\n 'GDELMEM',\n 'GUNJOIN',\n 'GROUP']\n if res[0].upper() in s:\n res[0] = res[0].upper()\n else:\n for i in range(0, len(res)):\n res[i] = res[i].upper()\n return (len(res), res)\n\n\ndef chat(message, args):\n (len, f) = parse(args)\n if (f):\n consoleLog('info', \"Returned Chat: \" + str(f) + \"\\n\")\n message.chat_execute('set', '_len=%d' % len)\n for i in range(0, len):\n value = str(f[i]).strip() # we know it is a string!\n # if (i == 0):\n # value = value.upper()\n varname = 'data_' + str(i)\n consoleLog('info', \"Return Chat: \" + varname + \"=\" + value + \"\\n\")\n message.chat_execute('set', '%s=%s' % (varname, value))\n else:\n consoleLog('info', usage())\n\n\ndef fsapi(session, stream, env, args):\n (len, f) = parse(args)\n if (f):\n consoleLog('info', \"Returned FSAPI: \" + str(len) + str(f) + \"\\n\")\n stream.write('len=' + str(len) + '\\n')\n stream.write('arguments=' + str(f))\n else:\n stream.write(usage())\n","repo_name":"pcarivbts/vbts-clientfiles","sub_path":"scripts/PCARI_Parse_SMS_Body.py","file_name":"PCARI_Parse_SMS_Body.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43379702602","text":"import boto3\nfrom botocore.exceptions import ClientError\n\nsm_client = boto3.client(\"sagemaker\")\nssm_client = boto3.client('ssm')\n\ndef get_approved_package(model_package_group_name):\n \"\"\"Gets the latest approved model package for a model package group.\n\n Args:\n model_package_group_name: The model package group name.\n\n Returns:\n The SageMaker Model Package ARN.\n \"\"\"\n try:\n # Get the latest approved model package\n response = sm_client.list_model_packages(\n ModelPackageGroupName=model_package_group_name,\n ModelApprovalStatus=\"Approved\",\n SortBy=\"CreationTime\",\n MaxResults=100,\n )\n approved_packages = response[\"ModelPackageSummaryList\"]\n\n # Fetch more packages if none returned with continuation token\n while len(approved_packages) == 0 and \"NextToken\" in response:\n response = sm_client.list_model_packages(\n ModelPackageGroupName=model_package_group_name,\n ModelApprovalStatus=\"Approved\",\n SortBy=\"CreationTime\",\n MaxResults=100,\n NextToken=response[\"NextToken\"],\n )\n approved_packages.extend(response[\"ModelPackageSummaryList\"])\n\n # Return error if no packages 
found\n        if len(approved_packages) == 0:\n            error_message = (\n                f\"No approved ModelPackage found for ModelPackageGroup: {model_package_group_name}\"\n            )\n            raise Exception(error_message)\n\n        # Return the model package arn\n        model_package_arn = approved_packages[0][\"ModelPackageArn\"]\n        return model_package_arn\n    except ClientError as e:\n        error_message = e.response[\"Error\"][\"Message\"]\n        raise Exception(error_message)\n\ndef handler(event, context):\n    print(event)\n    model_package_group_name = event[\"ModelPackageGroupName\"]\n    if event[\"invokationSource\"] == \"CodeBuild\":\n        latest_model_arn = get_approved_package(model_package_group_name)\n    else:\n        latest_model_arn = event[\"modelArn\"]\n    \n    model_details = sm_client.describe_model_package(ModelPackageName=latest_model_arn)\n    ssm_client.put_parameter(Name=\"/deployed-model/version\",Value=str(model_details[\"ModelPackageVersion\"]), Overwrite=True, Type=\"String\")\n    print(model_details)\n    return {\"ModelUrl\": model_details[\"InferenceSpecification\"][\"Containers\"][0][\"ModelDataUrl\"]}\n\n\nif __name__ == '__main__':\n    invoke_event = {\"ModelPackageGroupName\": \"TagQualityInspectionPackageGroup\", \"invokationSource\": \"CodeBuild\"}\n    result = handler(invoke_event, None)\n    print(result['ModelUrl'])\n","repo_name":"aws-samples/mlops-at-edge-for-quality-inspection","sub_path":"inference/lib/assets/model_version_helper/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"54"} {"seq_id":"1524362521","text":"import numpy as np\nimport pandas as pd\nfrom homemade.utils.features.prepare_for_training import prepare_for_training\n\n\nclass LinearRegression:\n    def __init__(self, data, labels, normalize_data=True):\n        (\n            data_processed,\n            features_mean,\n            features_deviation\n        ) = prepare_for_training(data, normalize_data)\n        self.data = data_processed\n        self.labels = labels\n        self.features_mean = features_mean\n        self.features_deviation = features_deviation\n        self.normalize_data = normalize_data\n\n        num_features = self.data.shape[1]\n        self.theta = np.zeros((num_features, 1))\n\n    def train(self, alpha, num_iteration=500):\n        cost_history = self.gradient_descent(alpha, num_iteration)\n        return self.theta, cost_history\n\n    def gradient_descent(self, alpha, num_iteration):\n        cost_history = []\n        for _ in range(num_iteration):\n            self.gradient_step(alpha)\n            cost_history.append(self.cost_function(self.data, self.labels))\n        return cost_history\n\n    def gradient_step(self, alpha):\n        num_examples = self.data.shape[0]\n        predictions = LinearRegression.hypothesis(self.data, self.theta)\n        delta = predictions - self.labels\n        theta = self.theta\n        theta = theta - alpha * (1 / num_examples) * (delta.T @ self.data).T\n        self.theta = theta\n\n    def get_cost(self, data, labels):\n        data_processed = prepare_for_training(\n            data,\n            self.normalize_data,\n        )[0]\n        return self.cost_function(data_processed, labels)\n\n    def predict(self, data):\n        data_processed = prepare_for_training(\n            data,\n            self.normalize_data,\n        )[0]\n        predictions = LinearRegression.hypothesis(data_processed, self.theta)\n        return predictions\n\n    def cost_function(self, data, labels):\n        num_examples = data.shape[0]\n        delta = LinearRegression.hypothesis(data, self.theta) - labels\n\n        cost = (1 / (2 * num_examples)) * (delta.T @ delta)\n\n        return cost[0][0]\n\n    @staticmethod\n    def hypothesis(data, theta):\n        predictions = data @ theta\n        return predictions\n\n\nif __name__ == '__main__':\n    data = 
pd.read_csv(\n '/Users/anhvietpham/Documents/Dev-Chicken/Machine-Learning/machine-learning-research/homemade/linear_regression/data/2019.csv')\n input_param_name = \"GDP per capita\"\n out_param_name = \"Score\"\n train_data = data.sample(frac=0.8)\n test_data = data.drop(train_data.index)\n x_train = train_data[[input_param_name]].values\n y_train = train_data[[out_param_name]].values\n x_test = test_data[[input_param_name]].values\n y_test = test_data[[out_param_name]].values\n num_iterations = 500\n regularization_param = 0\n learning_rate = 0.01\n linear_regression = LinearRegression(x_train, y_train)\n # Train linear regression.\n (theta, cost_history) = linear_regression.train(learning_rate, num_iterations)\n # Print training results.\n print('Initial cost: {:.2f}'.format(cost_history[0]))\n print('Optimized cost: {:.2f}'.format(cost_history[-1]))\n train_cost = linear_regression.get_cost(x_train, y_train)\n test_cost = linear_regression.get_cost(x_test, y_test)\n print(f'Train cost: {train_cost}')\n print(f'Test cost: {test_cost}')\n test_predictions = linear_regression.predict(x_test)\n test_predictions_table = pd.DataFrame({\n 'Economy GDP per Capita': x_test.flatten(),\n 'Test Happiness Score': y_test.flatten(),\n 'Predicted Happiness Score': test_predictions.flatten(),\n 'Prediction Diff': (y_test - test_predictions).flatten()\n })\n print(test_predictions_table.head(10))\n","repo_name":"AnhVietPham/Machine-Learning","sub_path":"homemade/linear_regression/univariate/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5224605412","text":"import pandas as pd\nimport scipy.stats\nimport ggplot\nfrom ggplot import *\nfrom sklearn import cross_validation\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels\n\ndata = pd.read_csv(\"turnstile_data_master_with_weather.csv\")\ndf_subway = pd.DataFrame(data)\nprint(df_subway.columns)\n\np1 = ggplot(aes('Hour', 'ENTRIESn_hourly', color = 'rain'), data = df_subway) + geom_point()\n#print(p1)\n\n#Null hypothesis: The hourly entries on rainy and non rainy days are the same.\n# Use Welch's t-test\n# P critical: 0.05\n\ndf_subway_rain = df_subway[df_subway.rain == 1]\ndf_subway_norain = df_subway[df_subway.rain == 0]\n\nttest = scipy.stats.ttest_ind(df_subway_rain.ENTRIESn_hourly, df_subway_norain.ENTRIESn_hourly, equal_var = False)\n\nprint(\"t-test: \", ttest)\n\nif ttest[1] < 0.05:\n print(\"Small p value, Null hypothesis rejected\")\n\n# Add day of the week to dataframe\ndf_subway.DATEn = pd.to_datetime(df_subway.DATEn)\ndf_subway['dayofweek'] = df_subway['DATEn'].apply(lambda x: x.strftime('%w'))\n\n\np1 = ggplot(aes(x = 'maxpressurei', y = 'ENTRIESn_hourly'), data = df_subway) + geom_point()\n#print(p1)\n\np2 = ggplot(aes(x = 'maxtempi', y = 'ENTRIESn_hourly'), data = df_subway) + geom_point()\n#print(p2)\n\np3 = ggplot(aes(x = 'meanwindspdi', y = 'ENTRIESn_hourly'), data = df_subway) + geom_point()\nprint(p3)\n\n# p1, p2, p3 show normal distribution\n\n# split dataframe into training and testing sets\n\ntrain, test = cross_validation.train_test_split(df_subway, test_size=0.3)\n\n\n#Regression:\nimport statsmodels.formula.api as sma\nlm = sma.ols(formula = 'ENTRIESn_hourly ~ maxtempi + maxpressurei + meanwindspdi + Hour', data = train).fit()\nprint(\"Regression intercepts and coefficients\", lm.params)\n\nprint(lm.summary(), lm.conf_int(), 
lm.pvalues)\n\n# p < 0.05 will denote a relationship between features and output\n\n\ntest_columns = ['maxtempi', 'maxpressurei', 'meanwindspdi', 'Hour']\ntest = pd.DataFrame(test, columns=test_columns)\ntest.head()\n\n#Predict using test set\npred = lm.predict(test)\n\n\n\n# Specific features/input columns for training\nfeature_columns = ['Hour', 'maxpressurei', 'maxdewpti', 'maxtempi', 'meanwindspdi', 'rain', 'fog', 'precipi', 'thunder', 'dayofweek' ]\n\nfeatures = df_subway[feature_columns]\ny = df_subway.ENTRIESn_hourly\n\nfrom sklearn import linear_model\n\nreg = linear_model.LinearRegression()\nreg.fit(features, y)\n\nprint(reg.intercept_)\nprint(reg.coef_)\n\n#calculate r squared\nreg.score(features, y)\n\n\n","repo_name":"nupur1492/Python_Projects","sub_path":"subway_ridership.py","file_name":"subway_ridership.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22433735280","text":"# -- coding: utf-8 --\nimport cv2\nfrom pathlib import Path\n\ndef mirror_padding(img_path):\n img1 = cv2.imread(img_path)\n padding_y = img1.shape[0] // 5\n padding_x = img1.shape[1] // 5\n img2 = cv2.copyMakeBorder(img1, padding_y, padding_y, padding_x, padding_x, cv2.BORDER_REFLECT_101)\n return img2\n\nif __name__ == '__main__':\n image_paths = Path(\"..\\\\data\\\\train\").glob(\"*\\\\*.jpg\")\n save_path = Path('..\\\\data\\\\train\\\\padded\\\\')\n save_path.mkdir(parents=True, exist_ok=True)\n for image_path in image_paths:\n img_name = image_path.with_suffix(\".png\")\n img = mirror_padding(str(image_path))\n cv2.imwrite(str(save_path / img_name.name), img)","repo_name":"Mayu14/pict_gen","sub_path":"given/give_margin.py","file_name":"give_margin.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37174208977","text":"import os\nfrom collections import Counter\nfrom pathlib import Path\n\nimport stanza\nimport spacy\n\nfrom coreference_resolution import coreference_resolution\nfrom utils import read_story\n\nnlp_stanza = stanza.Pipeline('en', processors='tokenize,ner')\nnlp_spacy = spacy.load('en_core_web_trf')\n\n\ndef NER(sentence, method):\n # perform ner\n if method == 'stanza':\n doc = nlp_stanza(sentence)\n name_entity = [ent.text for ent in doc.ents if ent.type == 'PERSON']\n else:\n doc = nlp_spacy(sentence)\n name_entity = [x for x in doc.ents if x.label_ in ['PERSON']]\n\n # convert all names to lowercase and remove 's in names\n name_entity = [str(x).lower().replace(\"'s\", \"\") for x in name_entity]\n\n # remove article words\n name_entity = [x.split(' ') for x in name_entity]\n name_entity = [[word for word in x if not word in ['the', 'an', 'a', 'and']] for x in name_entity]\n name_entity = [' '.join(x) for x in name_entity]\n\n return name_entity\n\n\ndef name_entity_recognition(doc, use_cor_res=True, method='stanza'):\n if use_cor_res:\n doc = coreference_resolution(doc)\n\n characters = NER(doc, method)\n counts = Counter(characters)\n characters = [x for x in counts]\n counts = [counts[x] for x in counts]\n\n return characters, counts, doc\n\n\nif __name__ == '__main__':\n USE_COR_RES = True\n\n data_folder = Path(os.getcwd()) / 'data/aesop/original'\n\n name = 'The_Cock_and_the_Pearl'\n short_story = read_story(name, data_folder)\n\n characters, counts, doc = name_entity_recognition(short_story)\n print(characters)\n 
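# note: with use_cor_res=True (the default) doc is the coreference-resolved text, not the raw story\n    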
print(doc)\n","repo_name":"anzemur/literacy-knowledge-base","sub_path":"src/characters/name_entity_recognition.py","file_name":"name_entity_recognition.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"3980889354","text":"#!/usr/bin/env python3\nfrom flask import (Flask,\n render_template,\n redirect,\n request,\n url_for,\n make_response,\n jsonify,\n abort,\n session as login_session)\n\nfrom sqlalchemy import (create_engine,\n desc)\nfrom sqlalchemy.orm import sessionmaker\nfrom model import (Base,\n User,\n Question,\n Dud,\n Code,\n Quiz,\n QuizJoin,\n Score)\n\nfrom json import loads\nimport bleach\nimport html\nimport re\n\napp = Flask(__name__)\napp.secret_key = \"super secret key\"\n\nengine = create_engine(\"postgresql+psycopg2://jakechorley@/js_quiz\")\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\ndb_session = DBSession()\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n\n\n@app.route(\"/allquizzes\")\ndef get_all_quizzes():\n quizzes = db_session.query(Quiz).all()\n all_quizzes = []\n for quiz in quizzes:\n print(\"Constructing a quiz\")\n quiz_dict = {\n \"id\": quiz.id,\n \"name\": quiz.name,\n \"description\": quiz.description,\n \"length\": db_session.query(QuizJoin).filter_by(quiz_id = quiz.id).count()\n }\n all_quizzes.append(quiz_dict)\n print('Finished constructing the quiz')\n return jsonify(all_quizzes), 200\n\n@app.route(\"/userscores\", methods=[\"POST\"])\ndef get_scores():\n # Takes a user id to get all scores pertaining to a user\n # This route needs to be authenticated\n data = request.data.decode(\"utf-8\")\n print(\"----\")\n print(data)\n\n data = loads(data)\n\n # print(login_session)\n # print(data.user_id)\n\n if \"user_id\" not in login_session or login_session[\"user_id\"] is not data[\"user_id\"]:\n return \"Forbidden\", 403\n\n scores = db_session.query(Score).filter_by(user_id=data[\"user_id\"]).all()\n\n all_scores = []\n for score in scores:\n score_dict = {\n \"quiz_id\": score.quiz_id,\n \"score\": score.score\n }\n all_scores.append(score_dict)\n return jsonify(all_scores), 200\n\n@app.route(\"/choosequiz/\")\ndef choose_quiz(quiz_id):\n quiz = db_session.query(Quiz).filter_by(id=quiz_id).first()\n\n quiz = {\n \"id\": quiz.id,\n \"name\": quiz.name,\n \"description\": quiz.description,\n \"timeLimit\": quiz.time_limit\n }\n\n question_ids = db_session.query(QuizJoin).filter_by(quiz_id=quiz[\"id\"]).all()\n question_ids = [x.question_id for x in question_ids]\n questions = db_session.query(Question).filter(Question.id.in_(question_ids)).all()\n all_questions = []\n for question in questions:\n duds = db_session.query(Dud).filter_by(question_id=question.id).all()\n all_duds = []\n for dud in duds:\n all_duds.append(html.unescape(dud.text))\n\n codes = db_session.query(Code).filter_by(question_id=question.id).all()\n all_codes = []\n for code in codes:\n all_codes.append({\n \"type\": html.unescape(code.type),\n \"sample\": html.unescape(code.sample)\n })\n\n if question.correct_replies == 0:\n if question.incorrect_replies == 0:\n difficulty = 1\n else:\n difficulty = 0\n else:\n difficulty = question.incorrect_replies / question.correct_replies\n\n question_dict = {\n \"id\": question.id,\n \"text\": html.unescape(question.text),\n \"answer\": html.unescape(question.answer),\n \"explanation\": html.unescape(question.explanation),\n \"duds\": all_duds,\n \"codes\": all_codes,\n \"difficulty\": 
difficulty\n }\n all_questions.append(question_dict)\n print(\"--------------\")\n print(\"Question\", question_dict[\"difficulty\"])\n\n print(\"---------\")\n all_questions = sorted(all_questions, key=lambda k: k[\"difficulty\"])\n print(all_questions)\n\n data = {\n \"quiz\": quiz,\n \"questionSet\": all_questions\n }\n\n return jsonify(data), 200\n\n\n@app.route(\"/quiz\")\ndef quiz():\n\n questions = db_session.query(Question).all()\n all_questions = []\n for question in questions:\n duds = db_session.query(Dud).filter_by(question_id=question.id).all()\n all_duds = []\n for dud in duds:\n all_duds.append(html.unescape(dud.text))\n\n codes = db_session.query(Code).filter_by(question_id=question.id).all()\n all_codes = []\n for code in codes:\n all_codes.append({\n \"type\": html.unescape(code.type),\n \"sample\": html.unescape(code.sample)\n })\n\n # TODO if statement needs testing\n if question.correct_replies == 0:\n if question.incorrect_replies == 0:\n difficulty = 1\n else:\n difficulty = 0\n else:\n difficulty = question.incorrect_replies / question.correct_replies\n\n question_dict = {\n \"id\": question.id,\n \"text\": html.unescape(question.text),\n \"answer\": html.unescape(question.answer),\n \"explanation\": html.unescape(question.explanation),\n \"duds\": all_duds,\n \"codes\": all_codes,\n \"difficulty\": difficulty\n }\n all_questions.append(question_dict)\n print(\"--------------\")\n print(\"Question\", question_dict[\"difficulty\"])\n\n print(\"---------\")\n all_questions = sorted(all_questions, key=lambda k: k[\"difficulty\"])\n print(all_questions)\n return jsonify(all_questions), 200\n\n\n@app.route(\"/difficulty\", methods=[\"POST\"])\ndef difficulty():\n data = request.data.decode(\"utf-8\")\n print(\"----\")\n print(data)\n print(\"----\")\n data = loads(data)\n question = db_session.query(Question).filter_by(id=data[\"id\"]).first()\n if data[\"correct\"] == \"correct\":\n question.correct_replies += 1\n elif data[\"correct\"] == \"incorrect\":\n question.incorrect_replies += 1\n db_session.commit()\n return \"OK\", 200\n\n@app.route(\"/score\", methods=[\"POST\"])\ndef score():\n data = request.data.decode(\"utf-8\")\n print(\"----\")\n print(data)\n\n data = loads(data)\n\n if \"user_id\" not in login_session or login_session[\"user_id\"] is not data[\"user_id\"]:\n print(type(login_session[\"user_id\"]))\n print(type(data[\"user_id\"]))\n return \"Forbidden\", 403\n\n current_score = db_session.query(Score) \\\n .filter_by(user_id=data[\"user_id\"]) \\\n .filter_by(quiz_id=data[\"quiz_id\"]) \\\n .first()\n\n print(current_score)\n if current_score == None:\n print('Creating new score')\n new_score = Score(score=data[\"score\"],\n user_id=data[\"user_id\"],\n quiz_id=data[\"quiz_id\"])\n\n db_session.add(new_score)\n\n else:\n print('Found current score')\n current_score.score = max(current_score.score, data[\"score\"])\n\n db_session.commit()\n\n\n return \"OK\", 200\n\n# Authentication\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n # TODO: Make sure to bleach this stuff\n if request.method == \"POST\":\n # if using json request.json.get('username')\n data = request.get_json()\n username = bleach.clean(data[\"username\"])\n password = bleach.clean(data[\"password\"])\n if username is None or password is None:\n print(\"Username or password missing\")\n abort(400)\n if (not re.match(r\"^[\\da-z]{6,32}$\", username)):\n abort(400)\n if (not re.match(r\".*[a-z].*[a-z].*\", username)):\n abort(400)\n if (not 
re.match(r\"[A-Za-z\\d@$!%*#?&\\-]{6,32}\", password)):\n print(\"Invalid password\")\n abort(400)\n else:\n print(\"Valid password\")\n\n # print(re.match(r\"^[\\da-z]{6,32}$\", username))\n if db_session.query(User).filter_by(username=username).first() is not None:\n print(\"Username already taken\")\n # This needs to inform the user\n abort(400)\n print(\"Creating user\")\n user = User(username = username)\n user.generate_salt()\n user.hash_password(password)\n db_session.add(user)\n db_session.commit()\n login_session[\"user\"] = user.username\n login_session[\"user_id\"] = user.id\n return jsonify({\"username\": user.username, \"user_id\": user.id}), 201\n\n\n@app.route(\"/login\", methods=[\"GET\",\"POST\"])\ndef login():\n if request.method == \"POST\":\n data = request.get_json()\n username = data[\"username\"]\n password = data[\"password\"]\n if username is None or password is None:\n print(\"Username or password missing\")\n abort(401)\n user = db_session.query(User).filter_by(username=username).first()\n if not user:\n print(\"User not found\")\n # This needs to inform the user\n abort(401)\n if not user.verify_password(password):\n print(\"Incorrect password\")\n abort(401)\n # This needs to inform the user\n login_session[\"user\"] = user.username\n login_session[\"user_id\"] = user.id\n return jsonify({\"username\": user.username, \"user_id\": user.id}), 201\n\n\n@app.route(\"/logout\", methods=[\"POST\"])\ndef log_out():\n print(\"Log Out being attempted\")\n if not login_session[\"user\"]:\n print(\"Not logged in\")\n abort(400)\n print(\"Destroying user session\")\n del login_session[\"user\"]\n\n return \"Logged out\", 200\n\n@app.route(\"/new\", methods=[\"POST\"])\ndef new():\n\n # Extract data from response\n data = request.data.decode(\"utf-8\")\n print(\"----\")\n print(data)\n data = loads(data)\n\n # Authenticate\n if \"user_id\" not in login_session or login_session[\"user_id\"] is not data[\"user_id\"]:\n # print(type(login_session[\"user_id\"]))\n # print(type(data[\"user_id\"]))\n # print(type(login_session))\n # print(login_session.keys())\n return \"Forbidden\", 403\n\n # Extract quiz from data\n quiz = data[\"quiz\"]\n questions = data[\"questions\"]\n\n\n ###########################\n # Data must be cleaned\n\n # Validate data\n\n ## Check questions length 3 or more\n if len(questions) < 3:\n return \"Bad Request\", 400\n\n ## Check there is a valid title\n if not isinstance(quiz[\"title\"], str) or len(quiz[\"title\"]) == 0:\n print(\"1\")\n return \"Bad Request\", 400\n\n ## Check there is a valid description\n if not isinstance(quiz[\"description\"], str) or len(quiz[\"description\"]) == 0:\n print(\"2\")\n return \"Bad Request\", 400\n\n ## Check there is a number for timer\n if not isinstance(quiz[\"timer\"], int) or quiz[\"timer\"] < 0 or quiz[\"timer\"] > 30:\n print(\"3\")\n return \"Bad Request\", 400\n\n ## For each question\n for question in questions:\n\n ### Check valid question\n if not isinstance(question[\"question\"], str) or len(question[\"question\"]) == 0:\n print(\"4\")\n return \"Bad Request\", 400\n\n ### Check codes are valid format\n if len(question[\"codes\"]) > 3:\n print(\"5\")\n return \"Bad Request\", 400\n # Need to confirm these\n valid_codes = [\"html\", \"css\", \"javascript\", \"python\"]\n\n for code in question[\"codes\"]:\n if not code[\"language\"] in valid_codes:\n print(\"6\")\n return \"Bad Request\", 400\n\n if not isinstance(code[\"contents\"], str) or len(code[\"contents\"]) == 0:\n print(\"7\")\n return \"Bad 
Request\", 400\n\n ### Check valid answer\n if not isinstance(question[\"answer\"], str) or len(question[\"answer\"]) == 0:\n print(\"8\")\n return \"Bad Request\", 400\n\n ### Check there is between 1 and 5 duds\n if len(question[\"duds\"]) < 1 or len(question[\"duds\"]) > 5:\n print(\"9\")\n return \"Bad Request\", 400\n ### Check that the duds are all valid strings\n for dud in question[\"duds\"]:\n if not isinstance(dud, str) or len(dud) == 0:\n print(\"10\")\n return \"Bad Request\", 400\n\n ### Check that there is a valid explanation\n if not isinstance(question[\"explanation\"], str) or len(question[\"explanation\"]) == 0:\n print(\"11\")\n return \"Bad Request\", 400\n\n # Add quiz to database\n print('Validated')\n\n new_quiz = Quiz(\n name=bleach.clean(quiz[\"title\"]),\n description=bleach.clean(quiz[\"description\"]),\n time_limit=quiz[\"timer\"],\n visible=True,\n creator=data[\"user_id\"]\n )\n\n db_session.add(new_quiz)\n db_session.flush()\n print(\"Added quiz\")\n\n for question in questions:\n\n new_question = Question(\n text=bleach.clean(question[\"question\"]),\n answer=bleach.clean(question[\"answer\"]),\n explanation=bleach.clean(question[\"explanation\"]),\n correct_replies=0,\n incorrect_replies=0\n )\n db_session.add(new_question)\n db_session.flush()\n print(\"Added question\")\n\n new_quiz_join = QuizJoin(\n question_id=new_question.id,\n quiz_id=new_quiz.id\n )\n db_session.add(new_quiz_join)\n print(\"Added quizjoin\")\n\n for dud in question[\"duds\"]:\n new_dud = Dud(question_id=new_question.id,\n text=bleach.clean(dud))\n db_session.add(new_dud)\n\n print(\"Added dud\", dud)\n\n for code in question[\"codes\"]:\n new_code = Code(question_id=new_question.id,\n type=bleach.clean(code[\"language\"]),\n sample=bleach.clean(code[\"contents\"])\n )\n db_session.add(new_code)\n\n print(\"Added code\", code)\n\n db_session.commit()\n print(\"Added to database\")\n\n return \"OK\", 200\n\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run(host=\"0.0.0.0\", port=5000)\n","repo_name":"jakec-github/codeQuiz","sub_path":"src/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22723087689","text":"import pandas as pd\nimport src.schema as S\nfrom src.core import BaseTransformer\nfrom collections import Counter\nimport nltk\n\n\nclass Text2SeqConvertor(BaseTransformer):\n \"\"\"\n Convert text to tokens and then to ids.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.UNK = \"UNK\"\n self.PAD = \"PAD\"\n self.min_count = 10\n self.vocab_size = 0\n self.tokens = None\n self.token_to_id = None\n self.UNK_IX = None\n self.PAD_IX = None\n\n def _fit_df(self, X: pd.DataFrame, y=None):\n \"\"\"\n Fit tokens and get ids.\n :param X: dataset\n :param y: (None) Ignored.\n :return: Fitted convertor.\n \"\"\"\n\n tokenizer = nltk.tokenize.WordPunctTokenizer()\n X[S.JOKE] = X[[S.JOKE]].applymap(lambda x: \" \".join(tokenizer.tokenize(x.lower())))\n\n token_counts = Counter()\n for line in X[S.JOKE].values:\n token_counts.update(line.split(\" \"))\n\n tokens = sorted(t for t, c in token_counts.items() if c >= self.min_count)\n tokens = [self.UNK, self.PAD] + tokens\n\n self.tokens = tokens\n self.vocab_size = len(tokens)\n self.token_to_id = {t: i for i, t in enumerate(tokens)}\n self.UNK_IX, self.PAD_IX = map(self.token_to_id.get, [self.UNK, self.PAD])\n\n def get_tokens(self):\n return self.tokens\n\n def get_vocab_size(self):\n 
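# vocab_size is set in _fit_df: tokens seen at least min_count times, plus the UNK and PAD entries\n        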
return self.vocab_size\n\n    def get_token_to_id(self):\n        return self.token_to_id\n\n    def get_unk_pad_ix(self):\n        return self.UNK_IX, self.PAD_IX\n\n    def _transform_df(self,\n                      X: pd.DataFrame\n                      ) -> pd.DataFrame:\n        pass\n","repo_name":"DoktaPola/NLP_humor_models","sub_path":"src/text2seq/convertor.py","file_name":"convertor.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} {"seq_id":"40067406070","text":"#!/usr/bin/env python3\n\n\"\"\" Extract introns from the gff file of the species\n\"\"\"\nimport sys\nimport os\n\n__author__ = \"Titouan Laessle\"\n__copyright__ = \"Copyright 2017 Titouan Laessle\"\n__license__ = \"MIT\"\n\n# gff file path of the species:\nspecies_table = str(sys.argv[1])\n# Output path:\noutput = str(sys.argv[2])\n\n\n###\n# Check if parent directory is present, if not create it\n###\ndef checking_parent(file_path):\n    # We don't need the file name, so we take everything but the last part\n    parent_directories = '/'.join(file_path.split('/')[0:(len(file_path.split('/')) - 1)])\n    # As we run in parallel, one thread may fail to see the directory and attempt to create it\n    # just as another thread does the same -> a \"The file already exists\" error that stopped everything...\n    try:\n        if not os.path.exists(parent_directories):\n            os.makedirs(parent_directories)\n    except:\n        pass\n\n\n###\n# Add the introns in between exons in a gff file\n# Inputs:\n#   - species_table : path to the gff file of features (tested only on NCBI assemblies gff files)\n#   - *_column : column number when separating feature line by \\t\n#   - outfile : path to the output file\n# Output: gff file + the introns\n###\ndef adding_introns(species_table, output, feature_column, strand_column, start_column, end_column):\n    # Check that the parent directory of the output is present\n    checking_parent(output)\n\n    with open(species_table, 'r') as feature_table, open(output, 'w') as outfile:\n        for each_line in feature_table:\n            actual_line = each_line.split('\\t')\n            # We will use the fact that there is no \\t in the comments to detect them\n            if len(actual_line) != 9:\n                outfile.write(each_line)\n                # We store this line for the next line as a phony variable as it is not with the right structure\n                previous_line = ['.'] * 9\n            # We also avoid all the lines which are not exons\n            elif actual_line[feature_column] != 'exon':\n                outfile.write(each_line)\n                # Here, no phony, as it is already at the right structure\n                previous_line = actual_line\n            # We have stored the previous line each time, we can thus check whether it is an exon or not\n            else:\n                if previous_line[feature_column] == 'exon':\n                    # We are in between two exons = intron! We will create the line to write:\n                    # NOTE: we take into account both strands, and when the strand is negative,\n                    # we must invert the start and end, as negative-strand genes have reverse coordinates\n                    if actual_line[strand_column] == '+':\n                        start = int(previous_line[end_column]) + 1  # +1 to take intron nucleotides only\n                        end = int(actual_line[start_column]) - 1\n                    else:\n                        start = int(actual_line[end_column]) + 1\n                        end = int(previous_line[start_column]) - 1\n                    # First 2 elements and last 4 elements are the same for exon/intron\n                    line_to_write = actual_line[0:2] + ['intron', start, end] + actual_line[5:]\n                    for each_element in range(len(line_to_write)):\n                        # Avoid the \\t at the last element\n                        outfile.write(str(line_to_write[each_element]) + '\\t') if each_element != 8 \\\n                            else outfile.write(str(line_to_write[each_element]))\n                        # Note: the last element always contains a \\n at the end\n                    # We also need the exon line written, and the previous line stored\n                    outfile.write(each_line)\n                    previous_line = actual_line\n                else:\n                    # If it is not, we are at the first exon -> just move on\n                    outfile.write(each_line)\n                    previous_line = actual_line\n\n\n# Particularities of the feature table (e.g. 
which column contains what information):\nfeature_column = 2\n# Indeed, introns are not directly annotated, but can be inferred through exons\nfeature_type = 'exon'\nstrand_column = 6\nstart_column = 3\nend_column = 4\n\nadding_introns(species_table, output, feature_column, strand_column, start_column, end_column)\n","repo_name":"UrsusSalificus/SignatureOrigene","sub_path":"scripts/add_introns_to_gff.py","file_name":"add_introns_to_gff.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} {"seq_id":"8005734672","text":"# defaultdict is a subclass of dict from collections in Python that provides a convenient way to define\n# a default value for any new key added to the dictionary\n\n# Read two integers n and m, create lista_a with n letters and lista_b with m letters; the task is to traverse\n# lista_b letter by letter and locate the positions where each letter appears in lista_a\n\"\"\"\n5 2   group A size n = 5, group B size m = 2\na     group A contains 'a', 'a', 'b', 'a', 'b'\na\nb\na\nb\na     group B contains 'a', 'b'\nb\"\"\"\n\nfrom collections import defaultdict\nd = defaultdict(list)\nn, m = map(int,input().split())\nfor i in range(n):\n    d[input()].append(str(i+1))\n#print(d['a'])\n#print(d['b'])\nfor j in range(m):\n    print(\" \".join(d[input()]) or -1)\n    ","repo_name":"AngelMasterr/Python","sub_path":"4_Ejercicio_HR_collections/defaultdict.py","file_name":"defaultdict.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} {"seq_id":"42824029366","text":"#14.\tCount the sum of digits in a real number\nn=input('enter a number ')\nsum=0\nfor digit in n:\n    if digit.isdigit():\n        sum+=int(digit)\nprint('Sum= ', sum)\n\n# num=float(input('enter a number '))\n# num=str(num)\n# #replace the dot with a space\n# num1=num.replace('.',' ')\n# num2=int(num1)\n# #if a negative value was entered\n# if num2<0:\n#     num2*=-1\n\n# sum_didg=0\n# while num2>0:\n#     ost=num2%10\n#     sum_didg+=ost\n#     num2//=10\n# print(sum_didg)","repo_name":"AlexandraKolomiec/Python2","sub_path":"Task014.py","file_name":"Task014.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"34860981075","text":"#Is There an Odd Bit?\n#https://www.codewars.com/kata/5d6f49d85e45290016bf4718\n\ndef any_odd(x):\n    binary_x = bin(x)[2:]\n    reversed = str(binary_x)[::-1]\n    for idx, val in enumerate(reversed):\n        if idx%2 == 1 and val=='1':\n            return 1\n\n    return 0\n","repo_name":"WinrichSy/Codewars_Solutions","sub_path":"Python/7kyu/IsThereAnOddBit.py","file_name":"IsThereAnOddBit.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"40857216973","text":"# redis client queue\n\nimport redis\n\nfrom redis_queue import SimpleQueue\nfrom tasks import get_word_counts\n\nNUMBER_OF_TASKS = 10\n\nif __name__ == '__main__':\n    red = redis.Redis(host='localhost', port=6379)\n    queue = SimpleQueue(red, 'sample')\n    count = 0\n    for n in range(NUMBER_OF_TASKS):\n        queue.enqueue('pride-and-prejudice.txt')\n        queue.enqueue('heart-of-darkness.txt')\n        queue.enqueue('frankenstein.txt')\n        queue.enqueue('dracula.txt')\n        count+= 4\n    print(f'Enqueued {count} 
tasks')\n","repo_name":"atomek88/redisQueueMultiProc","sub_path":"src/redis_queue_client.py","file_name":"redis_queue_client.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"37447816951","text":"#program that stores the matrix in a hash if the ratio of '0's in the matrix is below 30%\na=[[1,2,3,5],[4,5,6,2],[6,5,5,0],[4,0,0,0],[0,5,6,9]]\nrow=len(a)\ncolm=len(a[0])\nmax=row*colm\nzCount=0\nfor i in range(row):\n    for j in range(colm):\n        if(a[i][j]==0):\n            zCount+=1\n\npercent=(zCount*100)//max\nif(percent<30):\n    my_hash={}\n    for i in range(row):\n        for j in range(colm):\n            my_hash[(i,j)]=a[i][j]\n\n    for key in my_hash:\n        print(my_hash[key],end=\" \")\nelse:\n    print(\"ratio of 0s in the matrix: %\",percent)\n","repo_name":"sahinramazan/programming-lab","sub_path":"Vize-Örnek2.py","file_name":"Vize-Örnek2.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"37759996826","text":"\"\"\"\nImprove your Python script one last time, this new script should:\n1. connect to this CTF games' ip and on port 20003.\n2. after you connect to it, receive the prompt.\n3. respond to it to receive your flag.\n\"\"\"\n\n# !/usr/bin/python           # This is client.py file\nimport socket               # Import socket module\ns = socket.socket()         # Create a socket object\nhost = '1.2.3.4' # Remote Server IP\nport = 20003                # Remote Server Port\ns.connect((host, port))\nprint(s.recv(1024).decode())  # Calc the sum of the given numbers in 3 seconds, 5 pairs, \"begin\" to continue\ns.send(str.encode('begin'))\nfor x in range(0, 5):\n    myvar1 = s.recv(1024).decode()  # The numbers are: 4083 and 2224\n    print(myvar1)\n    z = int(myvar1[17:21]) + int(myvar1[26:30])\n    print('The total is:', z)\n    s.send(str.encode(str(z)))\nprint(s.recv(1024).decode())\ns.close()                     # Close the socket when done\n","repo_name":"Sambsamb/INF601b","sub_path":"CTF1-ListenPort-Reply2.py","file_name":"CTF1-ListenPort-Reply2.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"33860319839","text":"import nltk\nfrom nltk.corpus import wordnet\nimport re\nimport nltk\nimport pandas as pd\nimport re\n# nltk.download('stopwords')\n# nltk.download('wordnet')\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus.reader.wordnet import WordNetError\n# nltk.download('wordnet') \nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom scipy.sparse import coo_matrix\nfrom nltk.corpus import wordnet\nfrom transformers import pipeline\nfrom summarizer import Summarizer, TransformerSummarizer\n\nstop_words = set(stopwords.words(\"english\"))\n\ndef data_process(dataset):\n    corpus = []\n    text = re.sub('[^a-zA-Z]', ' ', dataset)\n    # text = text.lower()\n    text = re.sub(\"</?.*?>\",\" <> \",text)\n    text = re.sub(\"(\\\\d|\\\\W)+\",\" \",text)\n    text = text.split()\n    lem = WordNetLemmatizer()\n    text = [lem.lemmatize(word) for word in text if not word in  \n            stop_words] \n    text = \" \".join(text)\n    return text\n\ndef tfidf_Data (data,m, n,x,y):\n    \"\"\"\n    data: problem statement\n    m = document to pass through the tfidf transform\n    n = countvectorizer features (integer)\n    (x,y) = ngram range\n    \n    \"\"\"\n    corpus = data_process(data)\n    corpus = [corpus]\n    
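# CountVectorizer expects an iterable of documents, hence wrapping the single processed document in a list\n    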
cv=CountVectorizer(stop_words=stop_words, max_features=n, ngram_range=(x,y))\n X=cv.fit_transform(corpus)\n tfidf_transformer=TfidfTransformer(smooth_idf=True,use_idf=True)\n tfidf_transformer.fit(X)\n feature_names=cv.get_feature_names()\n doc=corpus[m]\n tf_idf_vector=tfidf_transformer.transform(cv.transform([doc]))\n return tf_idf_vector,feature_names\n\ndef sort_coo(coo_matrix):\n tuples = zip(coo_matrix.col, coo_matrix.data)\n return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)\n \ndef extract_topn_from_vector(feature_names, sorted_items, topn=10):\n \"\"\"get the feature names and tf-idf score of top n items\"\"\"\n \n #use only topn items from vector\n sorted_items = sorted_items[:topn]\n \n score_vals = []\n feature_vals = []\n \n # word index and corresponding tf-idf score\n for idx, score in sorted_items:\n \n #keep track of feature name and its corresponding score\n score_vals.append(round(score, 3))\n feature_vals.append(feature_names[idx])\n \n \n results= {}\n for idx in range(len(feature_vals)):\n results[feature_vals[idx]]=score_vals[idx]\n \n return results, feature_vals\n\ndef bart_summarizer(data):\n summarizer_bart = pipeline(task='summarization', model=\"bart-large-cnn\")\n summary_bart = summarizer_bart(data, min_length=30, max_length = 140)\n summary = summary_bart[0]['summary_text']\n \n return summary\n\n\ndef extract(body):\n\tkeywords = []\n\tprint('TODO extract keywords')\n\tKE, feature_names = tfidf_Data(body, 0, 1000, 1,3)\n\tsorted_items = sort_coo(KE.tocoo())\n\tKE, keywords = extract_topn_from_vector(feature_names,sorted_items,5)\n\n\treturn keywords\n\ndef extract_synonyms(keywords):\n\tsynonyms = []\n\tprint('TODO extract synonyms')\n\tfor i in range(len(keywords)):\n\t\tfor syn in wordnet.synsets(keywords[i]):\n\t\t\tfor l in syn.lemmas():\n\t\t\t\tsynonyms.append(l.name())\n\n\treturn keywords + synonyms\n\n\ndef extract_summarizer(body):\n\tprint('TODO extract summary')\n\tsummary = bart_summarizer(body)\n\n\treturn summary\n\ndef extract_keywords(body, add_synonyms = True, add_summarizer = True):\n\n\t# extract keywords from body\n\tkeywords = extract(body)\n\n\t# add synonyms\n\tif add_synonyms:\n\t\tkeywords = extract_synonyms(keywords)\n\n\t# add summarizer\n\tif add_summarizer:\n\t\tsummary = [extract_summarizer(body)]\n\n\treturn keywords + summary","repo_name":"rahulmadanraju/Semantic-Search-Engine","sub_path":"engine/preprocessing/keywords.py","file_name":"keywords.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"22204030731","text":"from typing import Optional\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n \"\"\"\n For both iterative and recursive approaches\n TC: O(N), N: number of nodes\n SC: O(N), N: number of nodes\n \"\"\"\n def isSymmetric(self, root: Optional[TreeNode]) -> bool:\n if not root:\n return False\n \n def is_mirror(l_node, r_node):\n if not l_node and not r_node:\n return True\n elif not l_node or not r_node:\n return False\n elif l_node.val == r_node.val:\n return is_mirror(l_node.left, r_node.right) and is_mirror(l_node.right, r_node.left)\n else:\n return False\n \n return is_mirror(root.left, root.right)\n\n\n def isSymmetric(self, root):\n if root is None:\n return True\n\n stack = [(root.left, root.right)]\n while stack:\n l_node, r_node = stack.pop()\n if not 
l_node and not r_node:\n                continue\n            elif not l_node or not r_node:\n                return False\n            elif l_node.val == r_node.val:\n                stack.append((l_node.left, r_node.right))\n                stack.append((l_node.right, r_node.left))\n            else:\n                return False\n        return True\n\n","repo_name":"aybu/online-judge","sub_path":"leetcode/101-symmetric-tree.py","file_name":"101-symmetric-tree.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"35443895999","text":"import math\njarijari=int(input())\ntinggi= int(input())\nPhi= 22/7\nVolume=Phi*jarijari*jarijari*tinggi\nLuas= (2*Phi*jarijari)*(jarijari+tinggi)\nKeliling= 2*Phi*jarijari\nprint(\"Volume = {:1.2f} cm\". format (Volume))\nprint(\"Area = {:1.2f} cm\". format (Luas))\nprint(\"Circumference = {:1.2f} cm\". format (Keliling))","repo_name":"Ajengdpr/Praktikkum-Pemrograman-1-Modul-2","sub_path":"PRAK204-2210817220001-AjengDiahPramesti.py","file_name":"PRAK204-2210817220001-AjengDiahPramesti.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"8097006372","text":"import re\n\n\ndef parse(markdown):\n    res = []\n    for line in markdown.split('\\n'):  # split by line\n        line = re.sub(r'__(.*?)__', r'<strong>\\\\1</strong>', line)\n        line = re.sub(r'_(.*?)_', r'<em>\\\\1</em>', line)\n        header_match = re.match(r'(#+) (.*)', line)  # match #### header\n        if header_match and len(header_match.group(1)) < 7:\n            res.append('<h{0}>{1}</h{0}>'.format(len(header_match.group(1)), header_match.group(2)))\n        elif line.startswith('* '):  # list\n            if res and res[-1] == '</ul>':  # if previous line was a list\n                res.pop()  # remove the last </ul>\n            else:  # if previous line was not a list\n                res.append('
    ') # start a new list\n res.append('
  • ' + line[2:] + '
  • ') # add list item\n res.append('
') # end list\n else: # if not header or list, but paragraph\n res.append('

' + line + '

') # add paragraph\n return ''.join(res) # join all lines into a string\n ","repo_name":"formidablae/Exercism","sub_path":"python/markdown/markdown.py","file_name":"markdown.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"21470779707","text":"#read two integer and print two lines. the first line should contained integer devision ,// and other line should contain float devision\na=input(\"enter first number = \")\nb=input(\"second number = \")\n\n # integer devision \ninteger_result=int(a)//int(b)\nprint(\"integer_result =\", integer_result)\n\n# float devision\nfloat_result=int(a)/int(b)\nprint(\"float_result =\",float_result)\n\n\n\n\n# same program in single line \na,b=(input(\"enter two number\")).split(\",\")\nc=int(a)//int(b)\nprint(\"integer\",c)\nc=int(a)/int(b)\nprint(\"float\",c)","repo_name":"AKHILESH1705/programing_questions","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15341006834","text":"import random\n\n#=================== TEST 1\n\n# exits = [\"east\", \"northeast\", \"south\"]\n# chosen = \"\"\n#\n# while chosen not in exits:\n# chosen = input(\"Choose a direction: \")\n# if chosen == \"q\":\n# print(\"GAME OVER\")\n# break;\n# else:\n# print(\"You got out!\")\n\n\n#=================== CHALLENGE\n\nhighest = 100\nanswer = random.randint(1, highest)\n\nprint(\"Please guess a number between 1 and {}: \".format(highest), end='')\nguess = 0\nwhile guess != answer:\n guess = int(input())\n if guess == 0:\n print(\"bye\")\n break\n if guess < answer:\n print(\"Please guess higher\")\n elif guess > answer:\n print(\"Please guess lower\")\n else:\n print(\"CORRECT!\")\n break\n","repo_name":"AbhishekNose/Playing-With-Python","sub_path":"Project003 - While Loops - Random Number Guessing/whileLooping.py","file_name":"whileLooping.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7541895216","text":"from pyspark import SparkConf, SparkContext\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql import Row\r\n\r\nimport os\r\nprint(os.environ['HADOOP_HOME'])\r\n\r\n\r\ndef process_line(line):\r\n # Splitting the line by delimiter and extracting required fields\r\n parts = line.split(\";\")\r\n try:\r\n # Check for missing values\r\n if '?' 
in parts or '\"\"' in parts:\r\n return (None, None, None, None)\r\n\r\n # Extracting the columns of interest and converting them to float\r\n global_active_power = float(parts[2].replace('\"', ''))\r\n global_reactive_power = float(parts[3].replace('\"', ''))\r\n voltage = float(parts[4].replace('\"', ''))\r\n global_intensity = float(parts[5].replace('\"', ''))\r\n return (global_active_power, global_reactive_power, voltage, global_intensity)\r\n except ValueError as e:\r\n print(f\"Error: {e}\")\r\n return (None, None, None, None)\r\n\r\n\r\ndef stats_map_function(record):\r\n return (\"stats\", (\r\n # For global_active_power\r\n (record[0], record[0], record[0], record[0]**2),\r\n # For global_reactive_power\r\n (record[1], record[1], record[1], record[1]**2),\r\n (record[2], record[2], record[2], record[2]**2), # For voltage\r\n # For global_intensity\r\n (record[3], record[3], record[3], record[3]**2),\r\n 1 # Count\r\n ))\r\n\r\n\r\ndef stats_reduce_function(x, y):\r\n \"\"\"Reduce function to calculate stats for each column.\"\"\"\r\n return (\r\n (min(x[0][0], y[0][0]), max(x[0][1], y[0][1]),\r\n x[0][2] + y[0][2], x[0][3] + y[0][3]),\r\n (min(x[1][0], y[1][0]), max(x[1][1], y[1][1]),\r\n x[1][2] + y[1][2], x[1][3] + y[1][3]),\r\n (min(x[2][0], y[2][0]), max(x[2][1], y[2][1]),\r\n x[2][2] + y[2][2], x[2][3] + y[2][3]),\r\n (min(x[3][0], y[3][0]), max(x[3][1], y[3][1]),\r\n x[3][2] + y[3][2], x[3][3] + y[3][3]),\r\n x[4] + y[4]\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n spark = SparkSession.builder \\\r\n .appName(\"PowerConsumptionStats\") \\\r\n .master(\"local\") \\\r\n .config(\"spark.hadoop.validateOutputSpecs\", \"false\") \\\r\n .config(\"spark.hadoop.home.dir\", \"C:/Users/Asus/Downloads/spark-3.5.0-bin-hadoop3/spark-3.5.0-bin-hadoop3/bin\") \\\r\n .getOrCreate()\r\n\r\n sc = spark.sparkContext\r\n\r\n # Load the dataset into an RDD\r\n data_rdd = sc.textFile(\r\n \"household_power_consumption.txt\").filter(lambda x: \"Global_active_power\" not in x)\r\n processed_rdd = data_rdd.map(process_line).filter(lambda record: None not in record)\r\n \r\n # For the stats\r\n stats_results = processed_rdd.map(stats_map_function).reduceByKey(\r\n stats_reduce_function).collect()[0][1]\r\n\r\n count = stats_results[4]\r\n print(\"Task 1: Minimum, Maximum, and Count\")\r\n print(\"Global Active Power - Min:\",\r\n stats_results[0][0], \", Max:\", stats_results[0][1], \", Count:\", count)\r\n print(\"Global Reactive Power - Min:\",\r\n stats_results[1][0], \", Max:\", stats_results[1][1], \", Count:\", count)\r\n print(\"Voltage - Min:\",\r\n stats_results[2][0], \", Max:\", stats_results[2][1], \", Count:\", count)\r\n print(\"Global Intensity - Min:\",\r\n stats_results[3][0], \", Max:\", stats_results[3][1], \", Count:\", count)\r\n\r\n print(\"\\nTask 2: Mean and Standard Deviation\")\r\n for i, name in enumerate([\"Global Active Power\", \"Global Reactive Power\", \"Voltage\", \"Global Intensity\"]):\r\n mean = stats_results[i][2] / count\r\n variance = (stats_results[i][3] / count) - (mean**2)\r\n stddev = variance**0.5\r\n print(name, \"- Mean:\", mean, \", Standard Deviation:\", stddev)\r\n\r\n\r\n # Task 3: Min-Max Normalization\r\n def normalize(record):\r\n return (\r\n (record[0] - stats_results[0][0]) /\r\n (stats_results[0][1] - stats_results[0][0]),\r\n (record[1] - stats_results[1][0]) /\r\n (stats_results[1][1] - stats_results[1][0]),\r\n (record[2] - stats_results[2][0]) /\r\n (stats_results[2][1] - stats_results[2][0]),\r\n (record[3] - stats_results[3][0]) 
/\r\n (stats_results[3][1] - stats_results[3][0])\r\n )\r\n\r\n\r\n normalized_rdd = processed_rdd.map(normalize)\r\n \r\n df = normalized_rdd.map(lambda record: Row(\r\n column1=record[0],\r\n column2=record[1],\r\n column3=record[2],\r\n column4=record[3]\r\n )).toDF()\r\n \r\n # Convert Spark DataFrame to Pandas DataFrame\r\n pandas_df = df.toPandas()\r\n\r\n # Define custom headers\r\n headers = [\"normalized global active power\",\r\n \"normalized global reactive power\", \"normalized voltage\", \"normalized global intensity\"]\r\n \r\n # Use Pandas to write the data to disk\r\n output_path = \"C:/Users/Asus/Desktop/112012049/normalized_data/normalized.csv\"\r\n pandas_df.to_csv(output_path, header=headers, index=False)\r\n \r\n # Stop the context\r\n sc.stop()","repo_name":"Yasar2019/BigDataMining-NTUT","sub_path":"112012049/hm00-112012049.py","file_name":"hm00-112012049.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12774230352","text":"import time\r\nimport configparser\r\nimport requests\r\nimport json\r\nimport tkinter as tk\r\nimport psycopg2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom configparser import RawConfigParser\r\nimport os\r\nfrom tkinter import ttk, Radiobutton, IntVar, filedialog,messagebox\r\nimport tkinter.messagebox as messagebox\r\nimport sys\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nimport threading\r\nimport signal\r\nimport urllib.request\r\nfrom PIL import Image\r\n\r\n\r\ndef restart_program():\r\n \"\"\"Restarts the current program, with file objects and descriptors cleanup.\"\"\"\r\n python = sys.executable\r\n os.execl(python, python, python, *sys.argv)\r\n\r\n\r\nif not os.path.exists('config.properties'):\r\n # create the config file with default values\r\n with open('config.properties', 'w') as f:\r\n f.write('[database]\\n'\r\n 'ip = 192.168.2.31\\n'\r\n 'user = test\\n'\r\n 'password = Penguineerstest\\n'\r\n 'name = PenIQ_Demo\\n'\r\n 'port = 5432\\n\\n'\r\n '[PE]\\n'\r\n 'url = https://sec.penguinin.com:9090/saad_pe/\\n'\r\n '[Server]\\n'\r\n 'url = http://192.168.2.250\\n'\r\n )\r\n\r\n# Continue with your next instructions here...\r\n\r\n\r\nconfig = RawConfigParser()\r\nconfig.read('config.properties')\r\nDB_IP = config.get('database', 'ip')\r\nDB_User = config.get('database', 'user')\r\nDB_PW = config.get('database', 'password')\r\nDB_name = config.get('database', 'name')\r\nDB_port = config.get('database', 'port')\r\nPE_Url = config.get('PE', 'url')\r\nServer_Url = config.get('Server', 'url')\r\nurl = PE_Url + 'PEService.svc/Initialize'\r\n\r\n\r\nheaders = {'Content-Type': 'application/json'}\r\n\r\n# Create a GUI window using tkinter with a title of \"CDF\"\r\nroot = tk.Tk()\r\nroot.title('CDF')\r\n\r\n# Create a cursor object to manipulate data in a database connection called conn\r\n# # rows_venues\r\njson_data = {\r\n 'DB_IP': DB_IP,\r\n 'DB_PW': DB_PW,\r\n 'DB_User': DB_User,\r\n 'DB_name': DB_name,\r\n 'DB_port': DB_port,\r\n 'PE_Url': PE_Url,\r\n 'Server_Url': Server_Url,\r\n}\r\ntry:\r\n rows_venues = requests.post(\r\n f'{Server_Url}/rows_venues', headers=headers, data=json.dumps(json_data), timeout=None)\r\n try:\r\n rows_venues = rows_venues.json()\r\n venue_names = [\"Select Venue\"]\r\n for row in rows_venues:\r\n venue_names.append(row[1])\r\n except:\r\n venue_names = [\"Select Venue\"]\r\n var = tk.StringVar(root)\r\n var.set(venue_names[0])\r\n # Create a list of floor 
names starting with the string \"Select Floor\"\r\n Floor_names = [\"Select Floor\"]\r\n floor_var = tk.StringVar(root)\r\n messagebox.showerror('Invalid IP',\r\n f'The specified IP address is invalid: {DB_IP} or The Database {DB_name} is not exist or mistyped or the Port Number {DB_port} is Wrong ')\r\n messagebox.showerror('Error',\r\n f'Check the Settings and Try Again ')\r\nexcept:\r\n messagebox.showerror('Error',\r\n f'Cannot connect to the server {Server_Url}, ')\r\n venue_names = [\"Select Venue\"]\r\n\r\n # Set the variable var to the first element of the venue_names list as the default option\r\n var = tk.StringVar(root)\r\n var.set(venue_names[0])\r\n\r\n # Create a list of floor names starting with the string \"Select Floor\"\r\n Floor_names = [\"Select Floor\"]\r\n floor_var = tk.StringVar(root)\r\n venue_names = [\"Select Venue\"]\r\n messagebox.showerror('Error',\r\n f'Check the Settings and Try Again ')\r\n # Set the variable floor_var to the first element of the Floor_names list as the default option\r\n\r\n\r\n # Set the variable var to the first element of the venue_names list as the default option\r\n\r\n\r\n\r\n# Store the venue names in a list called venue_names, starting with the string \"Select Venue\"\r\n\r\n# Set the variable var to the first element of the venue_names list as the default option\r\nvar = tk.StringVar(root)\r\nvar.set(venue_names[0])\r\n\r\n# Create a list of floor names starting with the string \"Select Floor\"\r\nFloor_names = [\"Select Floor\"]\r\nfloor_var = tk.StringVar(root)\r\n\r\n# Set the variable floor_var to the first element of the Floor_names list as the default option\r\nfloor_var.set(Floor_names[0])\r\n\r\n\r\n# Define a function restart_program() which will be used to restart the program\r\n# Needed python and os modules\r\n\r\n# Define a function select_venue(var) which will be used to choose a venue and get its data\r\ndef select_venue(var):\r\n global floor_dropdown\r\n\r\n # Get the id and name of the selected venue by executing an SQL SELECT query\r\n # query = f\"SELECT id,venue_name FROM penguin.tblvenues WHERE venue_name = '{var}'\"\r\n # cursor.execute(query)\r\n json_data = {\r\n 'DB_IP': DB_IP,\r\n 'DB_PW': DB_PW,\r\n 'DB_User': DB_User,\r\n 'DB_name': DB_name,\r\n 'DB_port': DB_port,\r\n 'PE_Url': PE_Url,\r\n 'Server_Url': Server_Url,\r\n 'venue_name': var\r\n }\r\n rows_venues_0 = requests.post(\r\n f'{Server_Url}/rows_venues_0', headers=headers, data=json.dumps(json_data), timeout=None)\r\n rows_venues_0 = rows_venues_0.json()\r\n # If the result is not None, then the selected venue exist in the database\r\n if rows_venues_0 is not None:\r\n # Destroy any previously existing floor dropdown menu\r\n floor_dropdown.destroy()\r\n # Set the global variables venue_id and venue_name to match the values attained by the query.\r\n # Get all floor names for the selected venue\r\n global venue_id, venue_name\r\n\r\n venue_id, venue_name = rows_venues_0[0]\r\n\r\n json_data = {\r\n 'DB_IP': DB_IP,\r\n 'DB_PW': DB_PW,\r\n 'DB_User': DB_User,\r\n 'DB_name': DB_name,\r\n 'DB_port': DB_port,\r\n 'PE_Url': PE_Url,\r\n 'Server_Url': Server_Url,\r\n 'venue_id': venue_id\r\n }\r\n\r\n rows_floors = requests.post(\r\n f'{Server_Url}/rows_floors', headers=headers, data=json.dumps(json_data), timeout=None)\r\n rows_floors = rows_floors.json()\r\n\r\n # Create a list of floor names starting with the string \"Select Floor\"\r\n global selected_floor_id\r\n selected_floor_id = None\r\n Floor_names = [\"Select Floor\"]\r\n\r\n # Append each 
floor name to the Floor_names list and create a (floor_name, floor_id) couple which will be used later\r\n for row in rows_floors:\r\n Floor_names.append(row[1])\r\n\r\n # Store the list of (floor_name, floor_id) tuples in a list called floor_names_and_ids\r\n floor_names_and_ids = [(row[1], row[0]) for row in rows_floors]\r\n\r\n # Set the default value for the variable floor_var to be the first item in the list Floor_names\r\n floor_var.set(Floor_names[0])\r\n\r\n # Create a drop-down selection widget called floor_dropdown with the variable floor_var and containing all elements of the list Floor_names\r\n floor_dropdown = tk.OptionMenu(root, floor_var, *Floor_names)\r\n\r\n # Set its grid position\r\n floor_dropdown.grid(row=1, column=0)\r\n\r\n # Define a function select_floor(*args) that captures the user-selected floor and sets the global variable selected_floor_id to the corresponding floor_id\r\n def select_floor(*args):\r\n global selected_floor_id, selected_floor_name\r\n selected_floor_name = floor_var.get()\r\n\r\n for name, floorid in floor_names_and_ids:\r\n if name == selected_floor_name:\r\n selected_floor_id = floorid\r\n\r\n # Create a button to select the floor and get its data\r\n # When floor_var is modified (i.e. the user selects a value), call the function select_floor()\r\n floor_var.trace(\"w\", select_floor)\r\n\r\n else:\r\n selected_floor_id = None\r\n venue_id = None\r\n\r\n # Call the select_floor() function at the end of select_venue() to have the content displayed.\r\n select_floor()\r\n\r\n # Check if conn exists before closing cursor and connection\r\n\r\n\r\n# Save the configuration values to the config file\r\ndef save_config():\r\n # Get the path of config file and Open the config file for reading\r\n config_file = os.path.join(os.getcwd(), 'config.properties')\r\n config = configparser.ConfigParser()\r\n config.read(config_file)\r\n\r\n # Get the new values from respective text boxes in UI\r\n new_ip = ip_textbox.get() # New IP value\r\n new_db_name = db_name_textbox.get() # New Database Name value\r\n new_db_port = db_port_textbox.get()\r\n new_Pe = Pe_textbox.get() # New PE URL value\r\n new_Server = Server_textbox.get() # New Server URL value\r\n # Write/Update the above new values in config file\r\n # set function represents section, key and the updated value is passed\r\n config.set('database', 'ip', new_ip)\r\n config.set('database', 'name', new_db_name)\r\n config.set('database', 'port', new_db_port)\r\n config.set('PE', 'url', new_Pe)\r\n config.set('Server', 'url', new_Server)\r\n\r\n # Save the changes by writing them into the config file object\r\n with open(config_file, 'w') as f:\r\n config.write(f)\r\n\r\n # Update message and restart the program\r\n message_label.config(text=\"Config saved successfully!\")\r\n restart_program()\r\n\r\n\r\ndef save_file_fp():\r\n # Open file dialog to choose save location\r\n file_path = filedialog.asksaveasfilename(initialfile=f\"linesToBeRepeated_fp for {venue_name} floor{selected_floor_name}.txt\",\r\n defaultextension=\".txt\",\r\n title=\"Save File\",\r\n filetypes=[(\"Text files\", \"*.txt\"), (\"All files\", \"*.*\")])\r\n if file_path:\r\n with open(file_path, \"w\") as output:\r\n output.write(str(linesToBeRepeated_fp))\r\n\r\n\r\ndef main_saveFiles_fp(linesToBeRepeated):\r\n # Replace it with your actual data\r\n venue_name = \"Venue\"\r\n selected_floor_name = \"Floor\"\r\n global root4\r\n root4 = tk.Tk()\r\n root4.title(\"Save File\")\r\n\r\n # Create a label to display information\r\n 
info_label = tk.Label(\r\n root4, text=f\"This will save '{linesToBeRepeated}' to a file.\")\r\n info_label.pack()\r\n\r\n # Create save button\r\n save_button = tk.Button(\r\n root4, text=\"Save\", command=lambda: save_file_fp(linesToBeRepeated))\r\n save_button.pack(pady=10)\r\n\r\n # Create cancel button\r\n cancel_button = tk.Button(root4, text=\"Cancel\", command=root4.quit)\r\n cancel_button.pack()\r\n\r\n root4.mainloop()\r\n\r\n# Insert/Update the given parameters by executing SQL queries on the database connection\r\n\r\n\r\ndef save_file_sig():\r\n # Open file dialog to choose save location\r\n file_path = filedialog.asksaveasfilename(initialfile=f\"SigsToBeRepeated for {venue_name} floor{selected_floor_name}.txt\",\r\n defaultextension=\".txt\",\r\n title=\"Save File\",\r\n filetypes=[(\"Text files\", \"*.txt\"), (\"All files\", \"*.*\")])\r\n if file_path:\r\n with open(file_path, \"w\") as output:\r\n output.write(str(linesToBeRepeated_sig))\r\n return\r\n\r\n\r\ndef main_saveFiles_sig(linesToBeRepeated):\r\n # Replace it with your actual data\r\n venue_name = \"Venue\"\r\n selected_floor_name = \"Floor\"\r\n global root2\r\n root2 = tk.Tk()\r\n root2.title(\"Save File\")\r\n\r\n # Create a label to display information\r\n info_label = tk.Label(\r\n root2, text=f\"This will save '{linesToBeRepeated}' to a file.\")\r\n info_label.pack()\r\n\r\n # Create save button\r\n save_button = tk.Button(\r\n root2, text=\"Save\", command=lambda: save_file_sig(linesToBeRepeated))\r\n save_button.pack(pady=10)\r\n\r\n # Create cancel button\r\n cancel_button = tk.Button(root2, text=\"Cancel\", command=root2.quit)\r\n cancel_button.pack()\r\n root2.mainloop()\r\n return\r\n\r\n\r\ndef Draw_CDF(Diff, min_sum_key, venue_name, selected_floor_name, diff_maxerorr, diff_value_95th):\r\n # sort the difference data from smallest to largest\r\n sorted_data = np.sort(Diff)\r\n # compute the cumulative distribution function (CDF) values corresponding to each data point in the sorted data\r\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)\r\n # plot the CDF using the sorted data as x and CDF values as y, and label it with min_sum_key\r\n plt.plot(sorted_data, yvals, label=f\"{min_sum_key}\")\r\n plt.title(\r\n f\"max error : {diff_maxerorr:.2f}\\nvalue at 95th percentile: {diff_value_95th:.2f}\")\r\n # get the current figure instance\r\n fig1 = plt.gcf()\r\n # set the title of the figure with the venue_id and floor_id\r\n # show the legend of the figure\r\n plt.legend()\r\n # display the figure\r\n plt.show()\r\n # save the figure as a PNG file with the name based on the venue_id and floor_id\r\n fig1.savefig(\r\n f'Venue_Name_{venue_name} Floor_Name_{selected_floor_name}' + '.png', dpi=100)\r\n return\r\n\r\n# Define a function named calculate with no input parameter, which will use global variables to fetch values from previous operation\r\n# status_label = tk.Label(root, text=\"API is not working yet\", fg=\"red\")\r\n# status_label.grid(row=4, column=0)\r\n\r\n\r\ndef my_progress_bar():\r\n global root1\r\n root1 = tk.Tk()\r\n root1.title(\"Status\")\r\n root1.geometry(\"300x100\")\r\n\r\n # label to show the status\r\n # create progress bar\r\n global progress_bar\r\n progress_bar = ttk.Progressbar(\r\n root1, orient=\"horizontal\", length=200, mode=\"indeterminate\")\r\n progress_bar.pack(pady=20)\r\n progress_bar.start()\r\n\r\n # update progress bar value\r\n\r\n # set status message\r\n status_label = tk.Label(root1, text=\"processing...\",\r\n font=(\"Arial Bold\", 12))\r\n 
status_label.pack()\r\n\r\n root1.mainloop()\r\n\r\n\r\ndef calculate():\r\n\r\n global venue_id\r\n global selected_floor_id\r\n if venue_id != None and selected_floor_id != None:\r\n # Reassigning the same values for the venueID and floorID for new calculation.\r\n venue_id = venue_id\r\n\r\n floor_id = selected_floor_id\r\n # Assign values to Monte Carlo iteration, maximum error and 95 percent error; Get values from UI inputs or assign default value (10, 10, 5) in case of non-numeric inputs\r\n montecarloIter = int(E3.get()) if str(E3.get()).isnumeric() else 10\r\n max_error = int(E4.get()) if str(E4.get()).isnumeric() else 10\r\n ninety_five_percent_error = int(\r\n E5.get()) if str(E5.get()).isnumeric() else 5\r\n\r\n # Create a dictionary object named json_data, which includes venueID, floorID, Monte Carlo iteration, maximum error, and 95% error as key-value pairs.\r\n\r\n json_data = {\r\n \"venue_id\": venue_id,\r\n \"floor_id\": floor_id,\r\n \"montecarloIter\": montecarloIter,\r\n \"max_error\": max_error,\r\n \"ninety_five_percent_error\": ninety_five_percent_error,\r\n 'DB_IP': DB_IP,\r\n 'DB_PW': DB_PW,\r\n 'DB_User': DB_User,\r\n 'DB_name': DB_name,\r\n 'DB_port': DB_port,\r\n 'PE_Url': PE_Url,\r\n 'Server_Url': Server_Url\r\n }\r\n\r\n # Update message label with start message\r\n def make_api_request():\r\n response = requests.post(\r\n f'{Server_Url}/D_O', headers=headers, data=json.dumps(json_data), timeout=None)\r\n return response\r\n\r\n def CDF():\r\n json_data = {\r\n \"venue_id\": venue_id,\r\n \"floor_id\": floor_id,\r\n \"montecarloIter\": montecarloIter,\r\n \"max_error\": max_error,\r\n \"ninety_five_percent_error\": ninety_five_percent_error,\r\n 'DB_IP': DB_IP,\r\n 'DB_PW': DB_PW,\r\n 'DB_User': DB_User,\r\n 'DB_name': DB_name,\r\n 'PE_Url': PE_Url,\r\n 'DB_port': DB_port,\r\n 'Server_Url': Server_Url,\r\n 'Response': 'Yes'\r\n }\r\n response = requests.post(\r\n f'{Server_Url}/CDF', headers=headers, data=json.dumps(json_data), timeout=None)\r\n\r\n res_text = response.json()\r\n\r\n diff_all_str = res_text.get(\"response\", {}).get(\"DiffAll\", \"\")\r\n # create a list of float values from space-separated string \"diff_all_str\".\r\n diff_all = [float(x) for x in diff_all_str]\r\n\r\n # Get values of minimum difference, maximum error distance, 95th percentile value from JSON response and assign them to available variables.\r\n diff_min = res_text.get(\"response\", {}).get(\"min_sum_key\", \"\")\r\n diff_maxerorr = res_text.get(\"response\", {}).get(\"max_error_D\", \"\")\r\n diff_value_at_95th_percentile = res_text.get(\r\n \"response\", {}).get(\"value_at_95th_percentile\", \"\")\r\n # If calculated maximum error and 95% percentile error less than maximum configuration setting for the same, draw CDF using values computed in step 2, print Minimum key values, and selected floor ID.\r\n if float(diff_maxerorr) < max_error or float(diff_value_at_95th_percentile) < ninety_five_percent_error:\r\n Draw_CDF(diff_all, diff_min, venue_name, selected_floor_name, diff_maxerorr,\r\n diff_value_at_95th_percentile)\r\n # insert_min_key(diff_min, floor_id=floor_id)\r\n json_data = {\r\n \"diff_min\": diff_min,\r\n \"floor_id\": floor_id,\r\n 'DB_IP': DB_IP,\r\n 'DB_PW': DB_PW,\r\n 'DB_User': DB_User,\r\n 'DB_name': DB_name,\r\n 'PE_Url': PE_Url,\r\n 'Server_Url': Server_Url,\r\n 'Response': 'Yes'\r\n }\r\n response = requests.post(f'{Server_Url}/Upload_Keys', headers=headers, data=json.dumps(json_data),\r\n timeout=None)\r\n root1.withdraw()\r\n\r\n # The following code 
creates a confirmation window containing a plot with sorted data and y-values.\r\n # It also defines an action function that saves a figure based on the response to a button ('Yes' or 'No').\r\n # Finally, it destroys the confirmation window.\r\n\r\n else:\r\n # Sort the data\r\n sorted_data = np.sort(diff_all)\r\n # Create y-values\r\n yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)\r\n\r\n # Create the confirmation window\r\n window = tk.Tk()\r\n\r\n # Plot the sorted data and y-values\r\n fig, ax = plt.subplots()\r\n ax.plot(sorted_data, yvals, label=f\"{diff_min}\")\r\n plt.title(\r\n f\"max error: {diff_maxerorr:.2f}\\nvalue at 95th percentile: {diff_value_at_95th_percentile:.2f}\")\r\n plt.legend()\r\n\r\n # Turn the plot into a Tkinter widget\r\n canvas = FigureCanvasTkAgg(fig, master=window)\r\n canvas.draw()\r\n canvas.get_tk_widget().pack()\r\n plt.close(fig)\r\n\r\n # Define an action function for the buttons\r\n\r\n def action(button):\r\n if button == 'Yes':\r\n # Save the figure\r\n fig.savefig(\r\n f'Venue_Name {venue_name} Floor_Name {selected_floor_name}' + '.png', dpi=100)\r\n # Insert the min key based on the floor_id\r\n json_data = {\r\n \"diff_min\": diff_min,\r\n \"floor_id\": floor_id,\r\n 'DB_IP': DB_IP,\r\n 'DB_PW': DB_PW,\r\n 'DB_User': DB_User,\r\n 'DB_name': DB_name,\r\n 'PE_Url': PE_Url,\r\n 'Server_Url': Server_Url,\r\n 'Response': 'Yes'\r\n }\r\n fig.savefig(\r\n f'CDF for Venue_Name_{venue_name}_Floor_Name_{selected_floor_name}.png', dpi=100)\r\n response = requests.post(f'{Server_Url}/Upload_Keys', headers=headers, data=json.dumps(json_data),\r\n timeout=None)\r\n root1.withdraw()\r\n elif button == 'Check Design':\r\n window.withdraw()\r\n Design_Check()\r\n else:\r\n # Save the figure without adding a min key\r\n fig.savefig(\r\n f'CDF for Venue_Name {venue_name} Floor_Name {selected_floor_name}' + '.png', dpi=100)\r\n json_data = {\r\n \"floor_id\": floor_id,\r\n 'DB_IP': DB_IP,\r\n 'DB_PW': DB_PW,\r\n 'DB_User': DB_User,\r\n 'DB_name': DB_name,\r\n 'DB_port': DB_port,\r\n }\r\n result = messagebox.askyesno(\"Warning\",\r\n f\"The FingerPrint for {venue_name} floor {selected_floor_name} will be deleted. Are you Sure ? \")\r\n if result:\r\n response = requests.post(f'{Server_Url}/Delete_FP', headers=headers, data=json.dumps(json_data),\r\n timeout=None)\r\n window.destroy()\r\n root1.withdraw()\r\n else:\r\n pass\r\n\r\n # Creating the 'Yes' button\r\n # This code creates a tkinter label object, sets its text, color and font size, and adds it to the main window with some padding.\r\n label = tk.Label(window, text=\"Do you Approve this CDF ?\",\r\n fg=\"blue\", font=(\"Helvetica\", 16))\r\n label.pack(padx=10, pady=10)\r\n\r\n # This code creates a tkinter button object 'Yes', which when clicked calls the action() function with an argument 'Yes'. The button is added to the main window.\r\n yes_button = tk.Button(\r\n window, text=\"Yes\", command=lambda: action('Yes'))\r\n yes_button.pack()\r\n\r\n # This code creates a tkinter button object 'No', which when clicked calls the action() function with an argument 'No'. 
The button is also added to the main window.\r\n no_button = tk.Button(\r\n window, text=\"No\", command=lambda: action('No'))\r\n no_button.pack()\r\n Check_Design_button = tk.Button(\r\n window, text=\"Check Design\", command=lambda: action('Check Design'))\r\n Check_Design_button.pack()\r\n\r\n def Design_Check():\r\n json_data = {\r\n \"venue_id\": venue_id,\r\n \"floor_id\": floor_id,\r\n 'DB_IP': DB_IP,\r\n 'DB_PW': DB_PW,\r\n 'DB_User': DB_User,\r\n 'DB_name': DB_name,\r\n 'DB_port': DB_port,\r\n \"ninety_five_percent_error\": ninety_five_percent_error\r\n }\r\n response = requests.post(\r\n f'{Server_Url}/Design', headers=headers, data=json.dumps(json_data), timeout=None)\r\n res_text = response.json()\r\n map_url = res_text.get(\"response\", {}).get(\"map_url\", \"\")\r\n if len(map_url) != 0:\r\n map = np.array(Image.open(urllib.request.urlopen(map_url)))\r\n final = res_text.get(\"response\", {}).get(\"final\", \"\")\r\n if final is None:\r\n root1.withdraw()\r\n messagebox.showerror(\r\n '', f'Design for {venue_name} floor {selected_floor_name} is good ')\r\n return\r\n final = np.array(final)\r\n areasInd = res_text.get(\"response\", {}).get(\"areasInd\", \"\")\r\n areasInd = np.array(areasInd)\r\n if len(final) == 0 or len(areasInd) == 0:\r\n root1.withdraw()\r\n messagebox.showerror(\r\n '', f'Design for {venue_name} floor {selected_floor_name} is good ')\r\n return\r\n plt.ioff()\r\n clusters_labels = res_text.get(\r\n \"response\", {}).get(\"clusters_labels\", \"\")\r\n clusters_labels = np.array(clusters_labels)\r\n fig, ax3 = plt.subplots()\r\n ax3.imshow(map)\r\n ax3.set_title('Highlighted area need more beacon')\r\n scatter = ax3.scatter(\r\n final[:, 0], final[:, 1], cmap='viridis', marker='s', s=1)\r\n fig.savefig(\r\n f'Area with weak coverage to be repeated for {venue_name} floor {selected_floor_name}.png', dpi=300, bbox_inches='tight')\r\n plt.show()\r\n return\r\n\r\n def plot_Dens_orian():\r\n\r\n map_url_fp = res_text.get(\"response\", {}).get(\"map_url_fp\", \"\")\r\n data_sensors_fp = res_text.get(\r\n \"response\", {}).get(\"data_sensors_fp\", \"\")\r\n probPoints_fp = res_text.get(\"response\", {}).get(\"probPoints_fp\", \"\")\r\n if len(map_url_fp) != 0:\r\n map_image_fp = np.array(Image.open(urllib.request.urlopen(map_url_fp)))\r\n data_sensors_fp = np.array(data_sensors_fp)\r\n probPoints_fp = np.array(probPoints_fp)\r\n stop_fp = res_text.get(\"response\", {}).get(\"stop_fp\", \"\")\r\n data_sensors_sig = res_text.get(\r\n \"response\", {}).get(\"data_sensors_sig\", \"\")\r\n data_sensors_sig = np.array(data_sensors_sig)\r\n probPoints_sig = res_text.get(\"response\", {}).get(\"probPoints_sig\", \"\")\r\n probPoints_sig = np.array(probPoints_sig)\r\n map_url_sig = res_text.get(\"response\", {}).get(\"map_url_sig\", \"\")\r\n if len(map_url_sig) != 0:\r\n map_image_sig = np.array(Image.open(urllib.request.urlopen(map_url_sig)))\r\n global linesToBeRepeated_fp\r\n linesToBeRepeated_fp = res_text.get(\r\n \"response\", {}).get(\"linesToBeRepeated_fp\", \"\")\r\n linesToBeRepeated_fp = np.array(linesToBeRepeated_fp)\r\n global linesToBeRepeated_sig\r\n linesToBeRepeated_sig = res_text.get(\r\n \"response\", {}).get(\"linesToBeRepeated_sig\", \"\")\r\n linesToBeRepeated_sig = np.array(linesToBeRepeated_sig)\r\n stop_sig = res_text.get(\"response\", {}).get(\"stop_sig\", \"\")\r\n if len(map_url_sig) == 0 and len(map_url_fp) == 0:\r\n CDF()\r\n return\r\n if stop_sig == 'True' and choice == 2:\r\n messagebox.showerror(\r\n 'Warning', f'Signatures with not 
enough data and/or collected in a wrong direction were detected . Please repeate these Signatures to Continue')\r\n window = tk.Tk()\r\n fig, ax2 = plt.subplots()\r\n # Subplot for fp_result\r\n ax2.imshow(map_image_sig)\r\n ax2.scatter(data_sensors_sig[probPoints_sig == 1, 1],\r\n data_sensors_sig[probPoints_sig == 1, 2], marker='+')\r\n ax2.legend(['Sig Lines to be repeated'])\r\n ax2.set_title('Sig Lines to be repeated')\r\n\r\n root1.withdraw()\r\n # cv2.imwrite(\"full_resolution_map_image.jpg\", fig)\r\n plt.savefig(\r\n f'Sig Lines to be repeated for {venue_name} floor {selected_floor_name}.png', dpi=300, bbox_inches='tight')\r\n plt.close(fig)\r\n\r\n canvas = FigureCanvasTkAgg(fig, master=window)\r\n canvas.draw()\r\n canvas.get_tk_widget().pack()\r\n\r\n # Define the action function for the buttons\r\n\r\n def action(button):\r\n if button == 'Save':\r\n window.destroy()\r\n # response = requests.post(f'{Server_Url}/CDF', headers=headers, data=json.dumps(json_data), timeout=None)\r\n else:\r\n window.destroy()\r\n root1.withdraw()\r\n # insert_min_key(diff_min, floor_id=floor_id)\r\n\r\n # Create the 'Yes' button\r\n\r\n yes_button = tk.Button(window, text=\"Save Signatures\", command=lambda: (\r\n save_file_sig(), action('Save')))\r\n yes_button.pack()\r\n\r\n no_button = tk.Button(window, text=\"Cancel\",\r\n command=lambda: action('No'))\r\n\r\n no_button.pack()\r\n return\r\n if int(stop_fp) == 2 and choice == 2:\r\n messagebox.showerror(\r\n 'Error', f'Lines with not enough data and/or collected in a wrong direction were detected . Please repeate these Lines and export the FingerPrint again to Continue')\r\n window = tk.Tk()\r\n fig, ax1 = plt.subplots()\r\n # Subplot for fp_result\r\n ax1.imshow(map_image_fp)\r\n ax1.scatter(data_sensors_fp[probPoints_fp == 1, 1],\r\n data_sensors_fp[probPoints_fp == 1, 2], marker='+')\r\n ax1.legend(['Fp Lines to be repeated'])\r\n ax1.set_title('Fp Lines to be repeated')\r\n root1.withdraw()\r\n plt.savefig(\r\n f'Fp Lines to be repeated for {venue_name} floor {selected_floor_name}.png', dpi=300, bbox_inches='tight')\r\n canvas = FigureCanvasTkAgg(fig, master=window)\r\n canvas.draw()\r\n canvas.get_tk_widget().pack()\r\n\r\n # Define the action function for the buttons\r\n\r\n def action(button):\r\n if button == 'Yes':\r\n window.destroy()\r\n root1.withdraw()\r\n # response = requests.post(f'{Server_Url}/CDF', headers=headers, data=json.dumps(json_data), timeout=None)\r\n else:\r\n window.destroy()\r\n root1.withdraw()\r\n # insert_min_key(diff_min, floor_id=floor_id)\r\n\r\n # Create the 'Yes' button\r\n\r\n yes_button = tk.Button(window, text=\"Saved Lines\", command=lambda: (\r\n save_file_fp(), action('Yes')))\r\n yes_button.pack()\r\n\r\n no_button = tk.Button(window, text=\"Cancel\",\r\n command=lambda: action('No'))\r\n\r\n no_button.pack()\r\n return\r\n\r\n if len(probPoints_sig) == 0:\r\n pass\r\n else:\r\n window1 = tk.Tk()\r\n fig, ax2 = plt.subplots()\r\n # Subplot for fp_result\r\n ax2.imshow(map_image_sig)\r\n ax2.scatter(data_sensors_sig[probPoints_sig == 1, 1],\r\n data_sensors_sig[probPoints_sig == 1, 2], marker='+')\r\n ax2.legend(['Sig Lines to be repeated'])\r\n ax2.set_title('Sig Lines to be repeated')\r\n\r\n # cv2.imwrite(\"full_resolution_map_image.jpg\", fig)\r\n plt.savefig(f'Sig Lines to be repeated for {venue_name} floor {selected_floor_name}.png', dpi=300,\r\n bbox_inches='tight')\r\n plt.close(fig)\r\n\r\n canvas = FigureCanvasTkAgg(fig, master=window1)\r\n canvas.draw()\r\n 
canvas.get_tk_widget().pack()\r\n\r\n # Define the action function for the buttons\r\n\r\n def action(button):\r\n if button == 'Yes':\r\n window1.destroy()\r\n # response = requests.post(f'{Server_Url}/CDF', headers=headers, data=json.dumps(json_data), timeout=None)\r\n else:\r\n window1.destroy()\r\n # insert_min_key(diff_min, floor_id=floor_id)\r\n\r\n # Create the 'Yes' button\r\n\r\n yes_button = tk.Button(window1, text=\"Save Signatures\", command=lambda: (\r\n save_file_sig(), action('Yes')))\r\n yes_button.pack()\r\n\r\n no_button = tk.Button(window1, text=\"Cancel\",\r\n command=lambda: action('No'))\r\n\r\n no_button.pack()\r\n plt.close()\r\n window1.wait_window()\r\n\r\n if len(data_sensors_fp) == 0:\r\n pass\r\n else:\r\n window = tk.Tk()\r\n fig, ax1 = plt.subplots()\r\n # Subplot for fp_result\r\n ax1.imshow(map_image_fp)\r\n ax1.scatter(data_sensors_fp[probPoints_fp == 1, 1],\r\n data_sensors_fp[probPoints_fp == 1, 2], marker='+')\r\n ax1.legend(['Fp Lines to be repeated'])\r\n ax1.set_title('Fp Lines to be repeated')\r\n plt.savefig(f'Fp Lines to be repeated for {venue_name} floor {selected_floor_name}.png', dpi=300,\r\n bbox_inches='tight')\r\n canvas = FigureCanvasTkAgg(fig, master=window)\r\n canvas.draw()\r\n canvas.get_tk_widget().pack()\r\n\r\n # Define the action function for the buttons\r\n\r\n def action(button):\r\n if button == 'Yes':\r\n window.destroy()\r\n CDF()\r\n elif button == 'Saved and cancle':\r\n window.destroy()\r\n root1.withdraw()\r\n elif button == 'Continue Without Save':\r\n window.destroy()\r\n CDF()\r\n else:\r\n window.destroy()\r\n root1.withdraw()\r\n # insert_min_key(diff_min, floor_id=floor_id)\r\n\r\n # Create the 'Yes' button\r\n\r\n saved_continue_button = tk.Button(\r\n window, text=\"Continue and Save Lines id as text \", command=lambda: (save_file_fp(), action('Yes')))\r\n saved_continue_button.pack()\r\n continue_button = tk.Button(window, text=\"Continue Without Saving\",\r\n command=lambda: (action('Continue Without Save')))\r\n continue_button.pack()\r\n saved_cancle_button = tk.Button(window, text=\"Cancle and Save Lines id as text\",\r\n command=lambda: (save_file_fp(), action('Saved and cancle')))\r\n saved_cancle_button.pack()\r\n\r\n no_button = tk.Button(window, text=\"Cancel\",\r\n command=lambda: action('No'))\r\n\r\n no_button.pack()\r\n plt.close()\r\n if len(data_sensors_fp) == 0 and len(data_sensors_sig) == 0:\r\n CDF()\r\n return\r\n\r\n try:\r\n response = requests.get(url)\r\n # Check the status code of the response (200 = OK)\r\n if response.status_code == 200:\r\n try:\r\n api_thread = threading.Thread(target=make_api_request)\r\n bar_thread = threading.Thread(target=my_progress_bar)\r\n # start both threads\r\n # api_thread.start()\r\n bar_thread.start()\r\n\r\n # wait for both threads to complete before exiting\r\n res_text = make_api_request().json()\r\n plot_Dens_orian()\r\n\r\n except Exception as e:\r\n Check_Fp = requests.post(\r\n f'{Server_Url}/Check_Fp', headers=headers, data=json.dumps(json_data), timeout=None)\r\n Check_Fp = Check_Fp.json()\r\n if Check_Fp[0][0] == 0:\r\n messagebox.showerror(\r\n 'Error', f'There is no Fingerprint data for this {selected_floor_name} floor')\r\n progress_bar.stop()\r\n root1.withdraw()\r\n pass\r\n Check_Fp_sensors = requests.post(\r\n f'{Server_Url}/Check_Fp_sensors', headers=headers, data=json.dumps(json_data), timeout=None)\r\n Check_Fp_sensors = Check_Fp_sensors.json()\r\n if Check_Fp_sensors[0][0] == 0:\r\n messagebox.showerror(\r\n 'Error', f'There is no 
Fingerprint Sensors data for this {selected_floor_name} floor')\r\n progress_bar.stop()\r\n root1.withdraw()\r\n pass\r\n\r\n Check_Sig = requests.post(\r\n f'{Server_Url}/Check_Sig', headers=headers, data=json.dumps(json_data), timeout=None)\r\n Check_Sig = Check_Sig.json()\r\n if Check_Sig[0][0] == 0:\r\n messagebox.showerror(\r\n 'Error', f'There is no Signature data for this {selected_floor_name} floor')\r\n progress_bar.stop()\r\n root1.withdraw()\r\n pass\r\n Check_Sig_sensors = requests.post(\r\n f'{Server_Url}/Check_Sig_sensors', headers=headers, data=json.dumps(json_data), timeout=None)\r\n Check_Sig_sensors = Check_Sig_sensors.json()\r\n if Check_Sig_sensors[0][0] == 0:\r\n messagebox.showerror(\r\n 'Error', f'There is no Signature Sensors data for this {selected_floor_name} floor')\r\n progress_bar.stop()\r\n root1.withdraw()\r\n pass\r\n # Extract the \"DiffAll\" value string from the JSON response and then split into list.\r\n\r\n else:\r\n messagebox.showerror(\r\n 'PE Url Error ', 'The PE Url is Not Responding . Please Check Your PE Url.')\r\n except:\r\n pass\r\n\r\n\r\n# Use python library Requests to perform an HTTP post request to local host server running on port 5000 by providing header parameters, data as Request body. Get response in JSON format.\r\n\r\ndef show_choice():\r\n global choice\r\n choice = var_2.get()\r\n if choice == 1:\r\n pass\r\n elif choice == 2:\r\n pass\r\n\r\n\r\nchoice = 2\r\n\r\n# Creating a label for the number of repetitions\r\nL3 = tk.Label(root, text='Number of Repetions :', font=40)\r\n# Setting its position in row 2 and column 0 of the grid layout\r\nL3.grid(row=2, column=0)\r\n\r\n# Creating an Entry widget for inputting the number of repetitions\r\nE3 = tk.Entry(root, fg='red')\r\n# Setting default value to \"10\" using the insert() method\r\nE3.insert(tk.END, '10')\r\n# Setting its position in row 2 and column 1 of the grid layout\r\nE3.grid(row=2, column=1)\r\n\r\n# Creating a label for the maximum accepted error\r\nL4 = tk.Label(root, text='Max Accepted Error:', font=40)\r\n# Setting its position in row 3 and column 0 of the grid layout\r\nL4.grid(row=3, column=0)\r\n\r\n# Creating an Entry widget for inputing the maximum accepted error\r\nE4 = tk.Entry(root, fg='red')\r\n# Setting default value to \"10\" using the insert() method\r\nE4.insert(tk.END, '10')\r\n# Setting its position in row 3 and column 1 of the grid layout\r\nE4.grid(row=3, column=1)\r\n\r\n# Creating a label for the 95 percent error\r\nL5 = tk.Label(root, text='95 percent error:', font=40)\r\n# Setting its position in row 4 and column 0 of the grid layout\r\nL5.grid(row=4, column=0)\r\n\r\n# Creating an Entry widget for inputting the 95 percent error\r\nE5 = tk.Entry(root, fg='red')\r\n# Setting default value to \"5\" using the insert() method\r\nE5.insert(tk.END, '5')\r\n# Setting its position in row 4 and column 1 of the grid layout\r\nE5.grid(row=4, column=1)\r\n\r\n# Create a dropdown object with a root window and select a venue from a predefined list\r\ndropdown = tk.OptionMenu(root, var, *venue_names, command=select_venue)\r\ndropdown.grid(row=0, column=0)\r\n\r\n# Create another dropdown object to choose a floor\r\nfloor_dropdown = tk.OptionMenu(root, floor_var, *Floor_names)\r\nfloor_dropdown.grid(row=1, column=0)\r\n\r\n# Create a button object for submitting data to a given function\r\n\r\nsubmit_button = tk.Button(root, text=\"Submit\", command=calculate)\r\nsubmit_button.grid(row=5, column=0)\r\n\r\n# Create an output label widget\r\noutput_label = 
tk.Label(root, text=\"\")\r\noutput_label.grid(row=7, column=0, columnspan=2)\r\nvar_2 = IntVar(value=2)\r\nvar_2.set(2)\r\n\r\n\r\nyes_button = Radiobutton(root, text=\"Yes\", variable=var_2,\r\n value=1, command=show_choice)\r\n\r\nno_button = Radiobutton(root, text=\"No\", variable=var_2,\r\n value=2, command=show_choice)\r\n\r\n# Define and create label widgets and text entry fields\r\nip_label = tk.Label(root, text=\"IP Database :\")\r\nip_textbox = tk.Entry(root)\r\nip_textbox.insert(tk.END, DB_IP)\r\ndb_name_label = tk.Label(root, text=\"Database Name:\")\r\ndb_name_textbox = tk.Entry(root)\r\ndb_name_textbox.insert(tk.END, DB_name)\r\ndb_port_label = tk.Label(root, text=\"Database port:\")\r\ndb_port_textbox = tk.Entry(root)\r\ndb_port_textbox.insert(tk.END, DB_port)\r\nPe_label = tk.Label(root, text=\"Pe Url :\")\r\nPe_textbox = tk.Entry(root, width=50)\r\nPe_textbox.insert(tk.END, PE_Url)\r\nServer_label = tk.Label(root, text=\"Server Url :\")\r\nServer_textbox = tk.Entry(root, width=33)\r\nServer_textbox.insert(tk.END, Server_Url)\r\nquestion_label = tk.Label(root, text=\"Skip the Density and orientation test ?\")\r\n\r\n\r\n# Create two buttons for saving and canceling changes\r\nmessage_label = tk.Label(root)\r\nsave_button = tk.Button(root, text=\"Save\", command=save_config)\r\ncancel_button = tk.Button(root, text=\"Exit\", command=root.quit)\r\n\r\n# Place all the labeling widgets and button objects on their respective row and column values\r\nip_label.grid(row=6, column=0)\r\nip_textbox.grid(row=6, column=1)\r\ndb_name_label.grid(row=7, column=0)\r\ndb_name_textbox.grid(row=7, column=1)\r\ndb_port_label.grid(row=8, column=0)\r\ndb_port_textbox.grid(row=8, column=1)\r\nPe_label.grid(row=9, column=0)\r\nPe_textbox.grid(row=9, column=1)\r\nServer_label.grid(row=10, column=0)\r\nServer_textbox.grid(row=10, column=1)\r\nquestion_label.grid(row=11, column=0)\r\nyes_button.grid(row=11, column=1)\r\nno_button.grid(row=12, column=1)\r\nmessage_label.grid(row=14, columnspan=3)\r\nsave_button.grid(row=13, column=0)\r\ncancel_button.grid(row=13, column=1)\r\n\r\n# Create a list of all labels and text entry widgets to be toggled in \"Advanced Options\" button\r\nlabels = [L3, L4, L5, ip_label, ip_textbox,\r\n db_name_label, message_label, Pe_label, Server_label, db_port_label, yes_button]\r\nentries = [E3, E4, E5, save_button, cancel_button, db_name_textbox,\r\n Pe_textbox, Server_textbox, db_port_textbox, question_label, no_button]\r\n\r\n# Hide all the labels and text entries added recently by looping through them and calling grid_remove()\r\nfor label in labels:\r\n label.grid_remove()\r\nfor entry in entries:\r\n entry.grid_remove()\r\n\r\n\r\n# Define a function to toggle the hidden text entry fields on click event of Advanced Options button\r\n\r\n\r\ndef toggle_entries():\r\n for label in labels:\r\n if label.winfo_ismapped(): # check if the label widget is already displayed or not\r\n submit_button = tk.Button(root, text=\"Submit\", command=calculate)\r\n submit_button.grid(row=5, column=0)\r\n label.grid_remove()\r\n entries[labels.index(label)].grid_remove()\r\n else:\r\n label.grid()\r\n entries[labels.index(label)].grid()\r\n\r\n\r\ndef on_closing():\r\n os.kill(os.getpid(), signal.SIGTERM)\r\n root.destroy()\r\n\r\n\r\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\r\n# Create another button for changing advanced settings and then add it to the GUI\r\ntoggle_button = tk.Button(\r\n root, text=\"Advanced Options\", command=toggle_entries)\r\ntoggle_button.grid(row=14, 
columnspan=9)\r\nsignal.signal(signal.SIGTERM, on_closing)\r\n# Run the root window and GUI interface using mainloop() method.\r\nroot.mainloop()\r\n","repo_name":"nabilse01/Site-Calibration-Tool","sub_path":"GUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":42894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34219927238","text":"from flask import Flask\nimport config\n\n# bot = \"botlib.Bot(creds, config)\"\napp = Flask(__name__)\n\nasync def hello_world():\n return \"
<h1>Hello, World!</h1>
\"\n\ndef api_start(bot_obj):\n global bot\n\n bot = bot_obj\n \n app.add_url_rule('/hello', view_func=hello_world)\n\n app.run( \n host=config.SERVICE_HOST, \n port=config.SERVICE_PORT\n )","repo_name":"r3t4k3r/matrum-bridge-core","sub_path":"src/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43473909156","text":"from __future__ import division\nfrom __future__ import absolute_import\nimport torch\nfrom torch import nn\nfrom torch import distributions\nimport torch.nn.functional as F\nfrom .CaptioningModel import CaptioningModel\n\n\nclass BottomupTopdownAttention(CaptioningModel):\n def __init__(self, vocab_size, bos_idx, with_relu=False, with_visual_sentinel=False, det_feat_size=2048,\n input_encoding_size=1000, rnn_size=1000, att_size=512, ss_prob=.0):\n super(BottomupTopdownAttention, self).__init__()\n self.vocab_size = vocab_size\n self.bos_idx = bos_idx\n self.with_relu = with_relu\n self.with_visual_sentinel = with_visual_sentinel\n self.det_feat_size = det_feat_size\n self.input_encoding_size = input_encoding_size\n self.rnn_size = rnn_size\n self.att_size = att_size\n\n self.embed = nn.Embedding(vocab_size, input_encoding_size)\n self.lstm_cell_1 = nn.LSTMCell(det_feat_size + rnn_size + input_encoding_size, rnn_size)\n self.lstm_cell_2 = nn.LSTMCell(rnn_size + det_feat_size, rnn_size)\n self.att_va = nn.Linear(det_feat_size, att_size, bias=False)\n self.att_ha = nn.Linear(rnn_size, att_size, bias=False)\n self.att_a = nn.Linear(att_size, 1, bias=False)\n\n if self.with_visual_sentinel:\n self.W_sx = nn.Linear(det_feat_size + rnn_size + input_encoding_size, rnn_size)\n self.W_sh = nn.Linear(rnn_size, rnn_size)\n self.W_sah = nn.Linear(rnn_size, att_size, bias=False)\n self.W_sas = nn.Linear(rnn_size, att_size, bias=False)\n self.W_sa = nn.Linear(att_size, 1, bias=False)\n self.fc_sentinel = nn.Linear(rnn_size, det_feat_size)\n\n self.out_fc = nn.Linear(rnn_size, vocab_size)\n\n self.ss_prob = ss_prob\n self.init_weights()\n\n def init_weights(self):\n std = 0.00999999977648\n nn.init.normal_(self.embed.weight, std=std)\n nn.init.normal_(self.out_fc.weight, std=std)\n nn.init.constant_(self.out_fc.bias, 0)\n nn.init.normal_(self.att_va.weight, std=std)\n nn.init.normal_(self.att_ha.weight, std=std)\n nn.init.normal_(self.att_a.weight, std=std)\n nn.init.normal_(self.lstm_cell_1.weight_ih, std=std)\n nn.init.normal_(self.lstm_cell_1.weight_hh, std=std)\n nn.init.constant_(self.lstm_cell_1.bias_ih, 0)\n nn.init.constant_(self.lstm_cell_1.bias_ih[self.rnn_size:2*self.rnn_size], 1)\n nn.init.constant_(self.lstm_cell_1.bias_hh, 0)\n nn.init.constant_(self.lstm_cell_1.bias_hh[self.rnn_size:2*self.rnn_size], 1)\n nn.init.normal_(self.lstm_cell_2.weight_ih, std=std)\n nn.init.normal_(self.lstm_cell_2.weight_hh, std=std)\n nn.init.constant_(self.lstm_cell_2.bias_ih, 0)\n nn.init.constant_(self.lstm_cell_2.bias_ih[self.rnn_size:2 * self.rnn_size], 1)\n nn.init.constant_(self.lstm_cell_2.bias_hh, 0)\n nn.init.constant_(self.lstm_cell_2.bias_hh[self.rnn_size:2 * self.rnn_size], 1)\n\n if self.with_visual_sentinel:\n nn.init.normal_(self.W_sx.weight, std=std)\n nn.init.constant_(self.W_sx.bias, 0)\n nn.init.normal_(self.W_sh.weight, std=std)\n nn.init.constant_(self.W_sh.bias, 0)\n nn.init.normal_(self.W_sah.weight, std=std)\n nn.init.normal_(self.W_sas.weight, std=std)\n nn.init.normal_(self.W_sa.weight, std=std)\n nn.init.normal_(self.fc_sentinel.weight, 
std=std)\n nn.init.constant_(self.fc_sentinel.bias, 0)\n\n def init_state(self, b_s, device):\n h0_1 = torch.zeros((b_s, self.rnn_size), requires_grad=True).to(device)\n c0_1 = torch.zeros((b_s, self.rnn_size), requires_grad=True).to(device)\n h0_2 = torch.zeros((b_s, self.rnn_size), requires_grad=True).to(device)\n c0_2 = torch.zeros((b_s, self.rnn_size), requires_grad=True).to(device)\n return h0_1, c0_1, h0_2, c0_2\n\n def step(self, t, state, prev_output, detections, seq, *args, mode='teacher_forcing'):\n assert (mode in ['teacher_forcing', 'feedback'])\n device = detections.device\n b_s = detections.size(0)\n bos_idx = self.bos_idx\n state_1, state_2 = state[:2], state[2:]\n detections_mask = (torch.sum(detections, -1, keepdim=True) != 0).float()\n detections_mean = torch.sum(detections, 1) / torch.sum(detections_mask, 1)\n\n if mode == 'teacher_forcing':\n if self.training and t > 0 and self.ss_prob > .0:\n # Scheduled sampling\n coin = detections.data.new(b_s).uniform_(0, 1)\n coin = (coin < self.ss_prob).long()\n distr = distributions.Categorical(logits=prev_output)\n action = distr.sample()\n it = coin * action.data + (1 - coin) * seq[:, t - 1].data\n it = it.to(device)\n else:\n it = seq[:, t]\n elif mode == 'feedback': # test\n if t == 0:\n it = detections.data.new_full((b_s,), bos_idx).long()\n else:\n it = prev_output\n\n xt = self.embed(it)\n if self.with_relu:\n xt = F.relu(xt)\n input_1 = torch.cat([state_2[0], detections_mean, xt], 1)\n\n if self.with_visual_sentinel:\n g_t = torch.sigmoid(self.W_sx(input_1) + self.W_sh(state_1[0]))\n state_1 = self.lstm_cell_1(input_1, state_1)\n\n att_weights = torch.tanh(self.att_va(detections) + self.att_ha(state_1[0]).unsqueeze(1))\n att_weights = self.att_a(att_weights)\n\n if self.with_visual_sentinel:\n s_t = g_t * torch.tanh(state_1[1])\n fc_sentinel = self.fc_sentinel(s_t).unsqueeze(1)\n if self.with_relu:\n fc_sentinel = F.relu(fc_sentinel)\n detections = torch.cat([fc_sentinel, detections], 1)\n detections_mask = (torch.sum(detections, -1, keepdim=True) != 0).float()\n sent_att_weights = torch.tanh(self.W_sas(s_t) + self.att_ha(state_1[0])).unsqueeze(1)\n sent_att_weights = self.W_sa(sent_att_weights)\n att_weights = torch.cat([sent_att_weights, att_weights], 1)\n\n att_weights = F.softmax(att_weights, 1)\n att_weights = detections_mask * att_weights\n att_weights = att_weights / torch.sum(att_weights, 1, keepdim=True)\n att_detections = torch.sum(detections * att_weights, 1)\n input_2 = torch.cat([state_1[0], att_detections], 1)\n\n state_2 = self.lstm_cell_2(input_2, state_2)\n out = F.log_softmax(self.out_fc(state_2[0]), dim=-1)\n return out, (state_1[0], state_1[1], state_2[0], state_2[1])\n","repo_name":"aimagelab/speaksee","sub_path":"speaksee/models/bottomup_topdown_attention.py","file_name":"bottomup_topdown_attention.py","file_ext":"py","file_size_in_byte":6660,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"54"} +{"seq_id":"24718467881","text":"import sys\nimport os\nfrom parser_module import Parser\nfrom code_module import Code\nfrom symbols_module import SymbolTable\n\n\ndef main():\n input_filename = get_full_input_filename()\n\n if not input_filename:\n print('You need to specify an input filename.')\n return\n\n if not os.path.exists(input_filename):\n print('The file {} does not exists.'.format(input_filename))\n return\n\n output_filename = get_full_output_filename(input_filename)\n\n # parse labels\n st = SymbolTable()\n\n p0 = Parser(input_filename)\n 
position_address = 0\n\n while True:\n if not p0.has_more_commands():\n break\n\n p0.advance()\n command_type = p0.command_type()\n symbol = p0.symbol()\n\n if command_type == 'L_COMMAND':\n if not st.contains(symbol):\n st.add_entry(symbol, position_address)\n else:\n position_address += 1\n\n # parse addresses\n p1 = Parser(input_filename)\n variable_address = 16\n\n while True:\n if not p1.has_more_commands():\n break\n\n p1.advance()\n command_type = p1.command_type()\n symbol = p1.symbol()\n\n if command_type == 'A_COMMAND':\n if not st.contains(symbol) and not is_number(symbol):\n st.add_entry(symbol, variable_address)\n variable_address += 1\n\n # second pass\n p2 = Parser(input_filename)\n with open(output_filename, 'w') as fout:\n while True:\n if not p2.has_more_commands():\n break\n\n p2.advance()\n command = decode_command(p2, st)\n if command:\n fout.write(command + '\\n')\n\n\ndef decimal_to_binary_str(dec_value):\n return format(dec_value, '015b')\n\n\ndef is_number(string):\n try:\n int(string)\n return True\n except:\n return False\n\n\ndef decode_command(p, st):\n command_type = p.command_type()\n command_decoded = None\n c = Code()\n\n if command_type == 'A_COMMAND':\n symbol = p.symbol()\n if st.contains(symbol):\n symbol_value = st.get_address(symbol)\n command_decoded = '0' + decimal_to_binary_str(symbol_value)\n else:\n command_decoded = '0' + decimal_to_binary_str(int(symbol))\n\n elif command_type == 'C_COMMAND':\n command_decoded = '111' + \\\n c.comp(p.comp()) + c.dest(p.dest()) + c.jump(p.jump())\n\n return command_decoded\n\n\ndef get_full_input_filename():\n try:\n input_filename = sys.argv[1]\n return os.path.abspath(os.path.join(input_filename))\n except IndexError:\n return None\n\n\ndef get_full_output_filename(input_filename):\n return input_filename[:-3] + 'hack'\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"myroslav-tkachenko/nand2tetris-assembler","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"7393393701","text":"from tkinter import *\nimport time\nroot= Tk()\ncanvas= Canvas(root,width=500,height=300)\ncanvas.pack()\ncanvas.create_polygon(10,10,10,60,50,35)\ndef PressKey(event):\n canvas.move(1,10,0)#1 specify polygon,x amounnt move,yamount move\n root.update()#update screen or window\n\nroot.bind(\"\",PressKey)\nroot.mainloop()","repo_name":"vikesh343/tkinter","sub_path":"spaceToMove.py","file_name":"spaceToMove.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23561644083","text":"import os\nimport sys\nimport site\nimport time\n\nsim_hz = 333.0\nsim_rate = 1.0 / sim_hz\nsmooth_controls = False\nenable_second_sim = False\nasync_playground = False\n\nteleport_on_hit = False\nteleport_off_track = False\n\nauto_clutch = True\nauto_shift = True\nauto_blip = True\n\ntrack_name = 'driftplayground'\ncar_model = 'ks_toyota_ae86_drift'\n#car_model = 'ks_toyota_supra_mkiv_drift'\n\ncar_tunes = {\n \"ks_toyota_ae86_drift\" : {\n \"FRONT_BIAS\" : 55.0,\n \"DIFF_POWER\" : 30.0,\n \"DIFF_COAST\" : 30.0,\n \"FINAL_RATIO\" : 5.0,\n \"PRESSURE_LF\" : 28.0,\n \"PRESSURE_RF\" : 28.0,\n \"PRESSURE_LR\" : 28.0,\n \"PRESSURE_RR\" : 28.0,\n },\n \"ks_toyota_supra_mkiv_drift\" : {\n \"FRONT_BIAS\" : 55.0,\n \"DIFF_POWER\" : 90.0,\n \"DIFF_COAST\" : 90.0,\n \"FINAL_RATIO\" : 5.0,\n \"TURBO_0\" : 100.0,\n 
\"TURBO_1\" : 100.0,\n \"PRESSURE_LF\" : 28.0,\n \"PRESSURE_RF\" : 28.0,\n \"PRESSURE_LR\" : 28.0,\n \"PRESSURE_RR\" : 28.0,\n }\n}\n\nbase_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\nbin_dir = os.path.join(base_dir, 'bin')\nsite.addsitedir(bin_dir)\nos.add_dll_directory(bin_dir)\n\nimport PyProjectD as pd\npd.setLogFile(os.path.join(base_dir, 'projectd.log'), True);\n\nif async_playground:\n pd.launchPlaygroundInOwnThread(base_dir)\nelse:\n pd.initPlayground(base_dir)\n\nsim = pd.createSimulator(base_dir)\npd.loadTrack(sim, track_name)\ncar = pd.addCar(sim, car_model)\n\npd.teleportCarToSpline(sim, car, 0.01)\npd.setCarAutoTeleport(sim, car, teleport_on_hit, teleport_off_track, 1) # 0:Start, 1:Nearest, 2:Random\npd.setCarAssists(sim, car, auto_clutch, auto_shift, auto_blip)\n\nif car_model in car_tunes:\n for name, value in car_tunes[car_model].items():\n pd.setCarTune(sim, car, name, value)\n\nif enable_second_sim:\n sim2 = pd.createSimulator(base_dir)\n pd.loadTrack(sim2, track_name)\n car2 = pd.addCar(sim2, car_model)\n\n pd.teleportCarToSpline(sim2, car2, 0.5)\n pd.setCarAutoTeleport(sim2, car2, True, True, 1)\n\nwhile not pd.isPlaygroundInitialized():\n time.sleep(0.1)\n\npd.setRenderHz(int(sim_hz), not async_playground)\npd.setActiveSimulator(sim, not async_playground)\npd.setActiveCar(car, True, True)\n\nstate = pd.CarState()\ncontrols = pd.CarControls()\ncontrols.gas = 0.0\ncontrols.clutch = 1\n#controls.requestedGearIndex = 2 # 0=R, 1=N, 2=H1, 3=H2, 4=H3, 5=H4, 6=H5, 7=H6\n\npd.writeLog('MAIN LOOP')\nwhile not pd.isPlaygroundExited():\n active_sim = pd.getActiveSimulator()\n active_car = pd.getActiveCar()\n \n pd.setCarControls(active_sim, active_car, smooth_controls, controls)\n \n if async_playground:\n pd.stepSimulator(active_sim, sim_rate)\n time.sleep(sim_rate)\n \n pd.tickPlayground()\n pd.getCarState(active_sim, active_car, state)\n rpm = state.engineRPM\n\npd.shutAll()\n","repo_name":"wongfei/projectd-core","sub_path":"pyprojectd/projectd_demo.py","file_name":"projectd_demo.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"33065171984","text":"import tempfile\nimport random\nimport math\nimport time\nimport datetime\n\ntry:\n import tetrino\nexcept Exception as e:\n class TetrinoDump:\n @staticmethod \n def resolve(cmd):\n return \"TETRINO NOT ACCESSIBLE UNDER CONSTRUCTION ..:)...\"\n tetrino = TetrinoDump()\n\n\ntshape = [0xf000,\n 0xe200,\n 0xe400,\n 0xe800,\n 0xcc00,\n 0xc600,\n 0xc440,\n 0xc880,\n 0x8E00,\n 0x8c80,\n 0x8c40,\n 0x88c0,\n 0x8888,\n 0x6c00,\n 0x4E00,\n 0x4C80,\n 0x44C0,\n 0x4C40,\n 0x2E00]\n\ncharline = [\"....\", \"...#\", \"..#.\", \"..##\",\n \".#..\", \".#.#\", \".##.\", \".###\",\n \"#...\", \"#..#\", \"#.#.\", \"#.##\",\n \"##..\", \"##.#\", \"###.\",\"####\"]\n\nclass Tetrino(object):\n def __init__(self):\n self.data = []\n self.result = []\n\n def do_play(self):\n ''' run tetrino module and save in temp area\n '''\n if len(self.data) < 1:\n return\n afile = tempfile.mkstemp(prefix = 'fillit-',suffix=datetime.datetime.utcnow().strftime(\"-%Y-%m-%d-%H-%M\"))[1]\n tet = list()\n n = 65;\n with open(afile,\"w\") as f:\n for i in self.data:\n letters = list()\n for s in i:\n f.write(s)\n letters.append(s.replace(\"#\",\"%c\" % n))\n f.write('\\n')\n n += 1\n tet.append(letters)\n f.write('\\n')\n self.data = tet\n start = time.time()\n result = tetrino.resolve(afile)\n self.delta = time.time() - start\n rep = 
len(result)\n self.sqa = int(math.sqrt(rep))\n self.stat = 0\n for x in result:\n if x == '.':\n self.stat += 1\n if rep > 0:\n self.stat = 100 - (self.stat * 100 / rep)\n self.result = list(map(lambda i: result[i:i+self.sqa], range(0,rep,self.sqa)))\n with open(afile,\"a+\") as out:\n out.write(\"grid:\\n\")\n out.write('{}\\nsec:{}-{}%'.format('\\n'.join(self.result), self.delta, self.stat))\n\n def show_in_line(self, col = 4):\n cdata = self.data[:]\n all = []\n line = []\n for i,d in enumerate(self.data):\n if i % col == 0:\n aline = []\n for j in range(col):\n try:\n x = cdata.pop(0)\n aline.append(x)\n except IndexError:\n pass\n all.append(aline)\n\n all_line = ['Input:']\n for x in all:\n for i in range(4):\n aline = list()\n for xx in x:\n aline.append(xx[i])\n all_line.append(\" \".join(aline))\n all_line.append(\"-\")\n all_line = all_line[:-1]\n return all_line\n\n def build_random(self, nb_tetrino):\n self.data = []\n for i in range(nb_tetrino):\n self.data.append(Tetrino.as_char(tshape[random.randint(0,18)]))\n\n @staticmethod\n def as_char(shape):\n res = list()\n l1 = charline[(shape & 0xf000) >> 12]\n l2 = charline[(shape & 0x0f00) >> 8]\n l3 = charline[(shape & 0x00f0) >> 4]\n l4 = charline[shape & 0x000f]\n return [l1,l2,l3,l4]\n\nif __name__ == '__main__':\n t = Tetrino()\n t.build_random(14)\n t.do_play()\n print('\\n'.join(t.result))","repo_name":"erictexier/passeit","sub_path":"flask/webapp/flask_blog/fillit/do_tetrino.py","file_name":"do_tetrino.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"7276456770","text":"#!/usr/local/bin/python3\nimport sys\n\nfrom sklearn import datasets\nfrom sklearn.ensemble import (AdaBoostClassifier, ExtraTreesClassifier,\n RandomForestClassifier)\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\n\n\ndef getIrisData():\n print(\"Preparing data ...\")\n iris = datasets.load_iris()\n X = iris.data[:, :2]\n y = iris.target\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=100\n )\n print(\"Done\\n\")\n return X_train, y_train, X_test, y_test\n\n\ndef run(clf, X_train, y_train, X_test, y_test):\n clf.fit(X_train, y_train)\n acc = clf.score(X_test, y_test)\n print(f\"Accuracy of using {type(clf).__name__}: {acc}\\n\")\n\n\nif __name__ == \"__main__\":\n X_train, y_train, X_test, y_test = getIrisData()\n run(LogisticRegression(), X_train, y_train, X_test, y_test)\n run(SVC(), X_train, y_train, X_test, y_test)\n run(DecisionTreeClassifier(), X_train, y_train, X_test, y_test)\n run(AdaBoostClassifier(), X_train, y_train, X_test, y_test)\n run(ExtraTreesClassifier(), X_train, y_train, X_test, y_test)\n run(RandomForestClassifier(), X_train, y_train, X_test, y_test)\n","repo_name":"mbilab/AI-environment","sub_path":"iris_sklearn.py","file_name":"iris_sklearn.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"30590315555","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nimport numpy as np\nimport os\nimport argparse\nimport re\n\nfrom fastspeech2 import FastSpeech2\nfrom loss import FastSpeech2Loss\nfrom dataset import Dataset\nfrom text import text_to_sequence, sequence_to_text\nimport hparams as hp\nimport utils\nimport audio as Audio\n\n# device 
= torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef get_FastSpeech2(num):\n checkpoint_path = os.path.join(hp.checkpoint_path, \"checkpoint_{}.pth.tar\".format(num))\n model = nn.DataParallel(FastSpeech2())\n model.load_state_dict(torch.load(checkpoint_path)['model'])\n model.requires_grad = False\n model.eval()\n return model\n\ndef evaluate(model, step, vocoder=None):\n torch.manual_seed(0)\n \n # Get dataset\n dataset = Dataset(\"val.txt\", sort=False)\n loader = DataLoader(dataset, batch_size=hp.batch_size*4, shuffle=False, collate_fn=dataset.collate_fn, drop_last=False, num_workers=0, )\n \n # Get loss function\n Loss = FastSpeech2Loss().to(device)\n\n # Evaluation\n d_l = []\n f_l = []\n e_l = []\n if hp.vocoder=='WORLD':\n ap_l = []\n sp_l = []\n sp_p_l = []\n else:\n mel_l = []\n mel_p_l = []\n current_step = 0\n idx = 0\n for i, batchs in enumerate(loader):\n for j, data_of_batch in enumerate(batchs):\n # Get Data\n id_ = data_of_batch[\"id\"]\n condition = torch.from_numpy(data_of_batch[\"condition\"]).long().to(device)\n mel_refer = torch.from_numpy(data_of_batch[\"mel_refer\"]).float().to(device)\n if hp.vocoder=='WORLD':\n ap_target = torch.from_numpy(data_of_batch[\"ap_target\"]).float().to(device)\n sp_target = torch.from_numpy(data_of_batch[\"sp_target\"]).float().to(device)\n else:\n mel_target = torch.from_numpy(data_of_batch[\"mel_target\"]).float().to(device)\n D = torch.from_numpy(data_of_batch[\"D\"]).long().to(device)\n log_D = torch.from_numpy(data_of_batch[\"log_D\"]).float().to(device)\n #print(D,log_D)\n f0 = torch.from_numpy(data_of_batch[\"f0\"]).float().to(device)\n energy = torch.from_numpy(data_of_batch[\"energy\"]).float().to(device)\n src_len = torch.from_numpy(data_of_batch[\"src_len\"]).long().to(device)\n mel_len = torch.from_numpy(data_of_batch[\"mel_len\"]).long().to(device)\n max_src_len = np.max(data_of_batch[\"src_len\"]).astype(np.int32)\n max_mel_len = np.max(data_of_batch[\"mel_len\"]).astype(np.int32)\n \n with torch.no_grad():\n # Forward\n if hp.vocoder=='WORLD':\n# print(condition.shape,mel_refer.shape, src_len.shape, mel_len.shape, D.shape, f0.shape, energy.shape, max_src_len.shape, max_mel_len.shape)\n ap_output, sp_output, sp_postnet_output, log_duration_output, f0_output,energy_output, src_mask, ap_mask,sp_mask ,variance_adaptor_output,decoder_output= model(\n condition, src_len, mel_len, D, f0, energy, max_src_len, max_mel_len)\n \n ap_loss, sp_loss, sp_postnet_loss, d_loss, f_loss, e_loss = Loss(\n log_duration_output, D, f0_output, f0, energy_output, energy, ap_output=ap_output, \n sp_output=sp_output, sp_postnet_output=sp_postnet_output, ap_target=ap_target, \n sp_target=sp_target,src_mask=src_mask, ap_mask=ap_mask,sp_mask=sp_mask)\n total_loss = ap_loss + sp_loss + sp_postnet_loss + d_loss + f_loss + e_loss\n else:\n mel_output, mel_postnet_output, log_duration_output, f0_output,energy_output, src_mask, mel_mask, _ = model(\n condition,mel_refer, src_len, mel_len, D, f0, energy, max_src_len, max_mel_len)\n \n mel_loss, mel_postnet_loss, d_loss, f_loss, e_loss = Loss(\n log_duration_output, log_D, f0_output, f0, energy_output, energy, mel_output=mel_output,\n mel_postnet_output=mel_postnet_output, mel_target=mel_target, src_mask=~src_mask, mel_mask=~mel_mask)\n total_loss = mel_loss + mel_postnet_loss + d_loss + f_loss + e_loss\n \n t_l = total_loss.item()\n # accumulate per-batch losses in lists so they can be averaged after the loop\n if hp.vocoder=='WORLD':\n ap_l.append(ap_loss.item())\n sp_l.append(sp_loss.item())\n sp_p_l.append(sp_postnet_loss.item())\n else:\n mel_l.append(mel_loss.item())\n mel_p_l.append(mel_postnet_loss.item())\n d_l.append(d_loss.item())\n f_l.append(f_loss.item())\n e_l.append(e_loss.item())\n \n \n # Run vocoding and plotting spectrogram only when the vocoder is defined\n for k in range(len(mel_target)):\n basename = id_[k]\n gt_length = mel_len[k]\n out_length = mel_len[k] # teacher-forced output has the same length as the target\n\n mel_target_torch = mel_target[k:k+1, :gt_length].transpose(1, 2).detach()\n mel_postnet_torch = mel_postnet_output[k:k+1, :out_length].transpose(1, 2).detach()\n\n if hp.vocoder == 'melgan':\n utils.melgan_infer(mel_target_torch, vocoder, os.path.join(hp.eval_path, 'ground-truth_{}_{}.wav'.format(basename, hp.vocoder)))\n utils.melgan_infer(mel_postnet_torch, vocoder, os.path.join(hp.eval_path, 'eval_{}_{}.wav'.format(basename, hp.vocoder)))\n elif hp.vocoder == 'waveglow':\n utils.waveglow_infer(mel_target_torch, vocoder, os.path.join(hp.eval_path, 'ground-truth_{}_{}.wav'.format(basename, hp.vocoder)))\n utils.waveglow_infer(mel_postnet_torch, vocoder, os.path.join(hp.eval_path, 'eval_{}_{}.wav'.format(basename, hp.vocoder)))\n elif hp.vocoder=='WORLD':\n utils.world_infer(mel_postnet_torch.numpy(),f0_output, os.path.join(hp.eval_path, 'eval_{}_{}.wav'.format(basename, hp.vocoder)))\n utils.world_infer(mel_target_torch.numpy(),f0, os.path.join(hp.eval_path, 'ground-truth_{}_{}.wav'.format(basename, hp.vocoder)))\n np.save(os.path.join(hp.eval_path, 'eval_{}_mel.npy'.format(basename)), mel_postnet_torch.cpu().numpy())\n\n f0_ = f0[k, :gt_length].detach().cpu().numpy()\n energy_ = energy[k, :gt_length].detach().cpu().numpy()\n f0_output_ = f0_output[k, :out_length].detach().cpu().numpy()\n energy_output_ = energy_output[k, :out_length].detach().cpu().numpy()\n\n utils.plot_data([(mel_postnet_torch[0].cpu().numpy(), f0_output_, energy_output_), (mel_target_torch[0].cpu().numpy(), f0_, energy_)], \n ['Synthesized Spectrogram', 'Ground-Truth Spectrogram'], filename=os.path.join(hp.eval_path, 'eval_{}.png'.format(basename)))\n idx += 1\n \n current_step += 1 \n\n d_l = sum(d_l) / len(d_l)\n f_l = sum(f_l) / len(f_l)\n e_l = sum(e_l) / len(e_l)\n \n if hp.vocoder=='WORLD':\n ap_l = sum(ap_l) / len(ap_l)\n sp_l = sum(sp_l) / len(sp_l)\n sp_p_l = sum(sp_p_l) / len(sp_p_l) \n else:\n mel_l = sum(mel_l) / len(mel_l)\n mel_p_l = sum(mel_p_l) / len(mel_p_l) \n \n str1 = \"FastSpeech2 Step {},\".format(step)\n str2 = \"Duration Loss: {}\".format(d_l)\n str3 = \"F0 Loss: {}\".format(f_l)\n str4 = \"Energy Loss: {}\".format(e_l)\n str5 = \"Mel Loss: {}\".format(mel_l)\n str6 = \"Mel Postnet Loss: {}\".format(mel_p_l)\n\n print(\"\\n\" + str1)\n print(str2)\n print(str3)\n print(str4)\n print(str5)\n print(str6)\n\n with open(os.path.join(hp.log_path, \"eval.txt\"), \"a\") as f_log:\n f_log.write(str1 + \"\\n\")\n f_log.write(str2 + \"\\n\")\n f_log.write(str3 + \"\\n\")\n f_log.write(str4 + \"\\n\")\n f_log.write(str5 + \"\\n\")\n f_log.write(str6 + \"\\n\")\n f_log.write(\"\\n\")\n\n return d_l, f_l, e_l, mel_l, mel_p_l\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--step', type=int, default=30000)\n args = parser.parse_args()\n \n # Get model\n model = get_FastSpeech2(args.step).to(device)\n print(\"Model Has Been Defined\")\n num_param = utils.get_param_num(model)\n print('Number of FastSpeech2 Parameters:', num_param)\n \n # Load vocoder (stays None for vocoders such as WORLD that need no external model)\n vocoder = None\n if hp.vocoder == 'melgan':\n vocoder = utils.get_melgan()\n elif hp.vocoder == 'waveglow':\n vocoder = utils.get_waveglow()\n vocoder.to(device)\n \n # Init directories\n if not os.path.exists(hp.log_path):\n os.makedirs(hp.log_path)\n if not os.path.exists(hp.eval_path):\n 
os.makedirs(hp.eval_path)\n \n evaluate(model, args.step, vocoder)\n","repo_name":"xushengyuan/FastSing2","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":8972,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"54"}
+{"seq_id":"2894791090","text":"\"\"\"\nGarbage and convoluted code attempting to parse the formless REF data using web of science.\n\"\"\"\n\nimport glob\nimport difflib\nimport json\nfrom wos import WosClient\nimport wos.utils\nfrom xmljson import badgerfish as bf\n\nrefs = {}\nerrors = 0\ntotal_cit = 0\n\nwith WosClient() as client:\n    for infile in glob.glob(\"stage1/*.json\"):\n        total_cit += 1\n        with open(infile, \"r\") as infile_data:\n            data = json.load(infile_data)\n        \n        if data[\"valid\"] is False:\n            continue\n        \n        all_Z = set()\n        all_found = True\n        for cit in data[\"citations\"]:\n            if cit[\"valid\"] is False:\n                all_found = False\n                break\n            \n            # Parse cit\n            wos_query = \"\"\n            if cit[\"authors\"]:\n                wos_query += 'AU=\"'\n                for num, aut in enumerate(cit[\"authors\"]):\n                    if num >= 1:\n                        wos_query += \" AND \"\n                    tmp = aut.split()\n\n                    # Last, First Initial\n                    author = tmp[-1] + \", \" + \" \".join(tmp[:-1]) \n                    print(author)\n                    wos_query += author\n                wos_query += '\"'\n            \n            if cit[\"year\"]:\n                wos_query += ' AND PY=\"%d\"' % cit[\"year\"]\n            \n            if cit[\"title\"]:\n                wos_query += ' AND TI=\"%s\"' % cit[\"title\"].replace(\" and \", \"\")\n            \n            wos_query = wos_query.encode(\"UTF-8\") \n            print(wos_query) \n            raise Exception()\n            data = wos.utils.query(client, wos_query)\n            print(data)\n            data = bf.data(data)\n            print(data)\n            raise Exception()\n","repo_name":"MolSSI-BSE/basis_set_exchange-xmlconvert","sub_path":"references/wos_doi_parse_stage3.py","file_name":"wos_doi_parse_stage3.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"4832293183","text":"# quite a solid concept\n# will make a great interview question\ndef canPartitionKSubsets(self, arr: List[int], k: int) -> bool:\n s = sum(arr)\n N = len(arr)\n \n if s%k or N<k:\n return False\n \n target = s//k\n if max(arr)>target:\n return False\n \n arr.sort(reverse = True)\n seen = [False]*len(arr)\n \n @cache\n def magic(i,k,summ,t):\n if k == 0:\n return True\n \n if summ==target:\n if k == 1: return True\n else: return magic(0,k-1,0,t)\n \n for j in range(i, len(arr)):\n if seen[j] or summ + arr[j]>target:\n continue\n \n seen[j] = True\n temp = seen[:]\n if magic(j+1,k,summ+arr[j],tuple(temp)): return True\n\n seen[j] = False\n \n return False\n \n return magic(0,k,0,tuple(seen))","repo_name":"Bidipto/DSApedia","sub_path":"Leetcode/DP/[IMP]698. Partition to K Equal Sum Subsets.py","file_name":"[IMP]698. 
Partition to K Equal Sum Subsets.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"29395057158","text":"# -*- coding: utf-8 -*-\n\nfrom api.api import API\nfrom pages.android.common.super_page import SuperPage\nfrom pages.android.ffan.my_ffan_page_configs import MyFfanPageConfigs as MFPC\nfrom pages.logger import logger\n\n\nclass MyFfanPage(SuperPage):\n '''\n 作者 刘涛\n 首页=>我的页面\n '''\n def __init__(self, testcase, driver, logger):\n super(MyFfanPage, self).__init__(testcase, driver, logger)\n\n def validSelf(self):\n '''\n usage : 进入到应用首页,检查ffan logo\n '''\n logger.info(\"Check 我的页面 begin\")\n API().assertElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_member_card_bag,\n MFPC.verify_view_timeout)\n API().assertElementByResourceId(self.testcase,\n self.driver,\n self.logger,\n MFPC.resource_id_txt_user_nick_name_tv,\n MFPC.verify_view_timeout)\n logger.info(\"Check 我的页面 end\")\n\n def validSelfOK(self):\n '''\n usage : 进入到应用首页,检查ffan logo\n '''\n\n a = API().assertElementByTextOK(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_member_card_bag,\n MFPC.verify_view_timeout)\n if a == \"会员卡包\":\n return True\n else:\n return False\n\n\n def clickOnLogin(self):\n '''\n usage: 点击登录按钮\n '''\n logger.info(\"Click 登录 begin\")\n API().clickElementByResourceId(self.testcase,\n self.driver,\n self.logger,\n MFPC.resource_id_tv_login_tv,\n MFPC.click_view_timeout)\n logger.info(\"Click 登录 end\")\n\n def validLoginStatus(self):\n '''\n usage: 验证登录状态\n '''\n API().assertElementByResourceId(self.testcase,\n self.driver,\n self.logger,\n MFPC.resource_id_txt_user_nick_name_tv,\n 90)\n\n def clickOnSettings(self):\n '''\n usage: 点击设置\n '''\n API().scrollToText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_settins)\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_settins,\n MFPC.verify_view_timeout)\n\n def clickOnMyQueue(self):\n '''\n usage : 点击我的排队\n '''\n logger.info(\"Click 我的排队 begin\")\n API().scrollToText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_my_queue)\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_my_queue,\n MFPC.verify_view_timeout)\n logger.info(\"Click 我的排队 end\")\n\n def clickOnMyTicket(self):\n '''\n usage : 点击我的票券\n '''\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_my_ticket,\n MFPC.verify_view_timeout)\n\n def clickOnMyOrder(self):\n '''\n usage : 点击我的订单\n '''\n logger.info(\"Click 我的订单 begin\")\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_my_order,\n MFPC.click_view_timeout)\n logger.info(\"Click 我的订单 end\")\n\n def clickOnToBePaid(self):\n '''\n usage : 点击我的订单待付款\n '''\n logger.info(\"Click 待付款 begin\")\n# width = API().getWidthOfDevice(self.driver, self.logger)\n# hight = API().getHeightOfDevice(self.driver, self.logger)\n# for _ in range(3):\n# API().scroll(self.driver, self.logger, width / 2, hight / 2, width / 2, hight / 3)\n\n API().clickElementByXpath(self.testcase,\n self.driver,\n self.logger,\n MFPC.xpath_to_be_paid,\n MFPC.click_view_timeout)\n logger.info(\"Click 待付款 end\")\n\n def validSelfToBePaid(self):\n '''\n usage : 进入待付款页面,判断显示是否正确\n '''\n logger.info(\"Check 待付款页面 begin\")\n API().assertElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_to_be_paid,\n MFPC.verify_view_timeout)\n logger.info(\"Check 待付款页面 end\")\n\n 
def clickOnUse(self):\n '''\n usage : 点击我的订单可使用\n '''\n logger.info(\"Click 可使用 begin\")\n API().clickElementByXpath(self.testcase,\n self.driver,\n self.logger,\n MFPC.xpath_use,\n MFPC.click_view_timeout)\n logger.info(\"Click 可使用 end\")\n\n def validSelfUse(self):\n '''\n usage : 进入可使用页面,判断显示是否正确\n '''\n logger.info(\"Check 可使用页面 begin\")\n API().assertElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_use,\n MFPC.verify_view_timeout)\n logger.info(\"Check 可使用页面 end\")\n\n def clickOnComments(self):\n '''\n usage : 点击我的订单我的点评\n '''\n logger.info(\"Click 我的点评 begin\")\n API().clickElementByXpath(self.testcase,\n self.driver,\n self.logger,\n MFPC.xpath_comments,\n MFPC.click_view_timeout)\n logger.info(\"Click 我的点评 end\")\n\n def validSelfCommets(self):\n '''\n usage : 进入我的点评页面,判断显示是否正确\n '''\n logger.info(\"Check 我的点评页面 begin\")\n API().assertElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_comments,\n MFPC.verify_view_timeout)\n logger.info(\"Check 我的点评页面 end\")\n\n def clickOnReturnRefund(self):\n '''\n usage : 点击我的订单退货退款\n '''\n logger.info(\"Click 退货退款 begin\")\n width = API().getWidthOfDevice(self.driver, self.logger)\n hight = API().getHeightOfDevice(self.driver, self.logger)\n for _ in range(2):\n API().scroll(self.driver, self.logger, width / 2, hight / 2, width / 2, hight / 3)\n API().clickElementByXpath(self.testcase,\n self.driver,\n self.logger,\n MFPC.xpath_return_refund_scroll,\n MFPC.click_view_timeout)\n logger.info(\"Click 退货退款 end\")\n\n def validSelfReturnRefund(self):\n '''\n usage : 进入退货退款页面,判断显示是否正确\n '''\n logger.info(\"Check 退货退款页面 begin\")\n API().assertElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_return_refund,\n MFPC.verify_view_timeout)\n logger.info(\"Check 退货退款页面 end\")\n\n def clickOnMyLike(self):\n '''\n usage : 点击我的喜欢\n '''\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_my_like,\n MFPC.verify_view_timeout)\n\n def isLoginStatus(self):\n '''\n usage: 返回登录状态\n '''\n return API().validElementByResourceId(self.driver,\n self.logger,\n MFPC.resource_id_txt_user_nick_name_tv,\n MFPC.verify_view_timeout)\n\n def clickOnParkingPayment(self):\n '''\n usage : 点击停车缴费\n '''\n logger.info(\"Click 停车交费 begin\")\n API().scrollToText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_parking_payment)\n width = API().getWidthOfDevice(self.driver, self.logger)\n hight = API().getHeightOfDevice(self.driver, self.logger)\n API().scroll(self.driver, self.logger, width / 2, hight / 2, width / 2, hight / 3)\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_parking_payment,\n MFPC.verify_view_timeout)\n logger.info(\"Click 停车交费 end\")\n\n def getTicketNumber(self):\n '''\n usage : 获取我的票券数量\n '''\n logger.info(\"Get 我的票券数量 begin\")\n ticketNumber = API().getTextByResourceId(self.testcase,\n self.driver,\n self.logger,\n MFPC.resource_id_txt_ticket_number_tv,\n MFPC.verify_view_timeout)\n logger.info(\"Get 我的票券数量 end\")\n return ticketNumber\n\n def clickOnMyBill(self):\n '''\n usage : 点击我的零花钱\n '''\n logger.info(\"Click 我的零花钱 begin\")\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n MFPC.text_my_bill,\n MFPC.click_view_timeout)\n logger.info(\"Click 我的零花钱 
end\")\n","repo_name":"liu111xiao111/UItest","sub_path":"pages/android/ffan/my_ffan_page.py","file_name":"my_ffan_page.py","file_ext":"py","file_size_in_byte":11166,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"74323272161","text":"import torch.nn as nn\nimport numpy as np\nimport random\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport torch\n\nclass VilDataset(Dataset):\n def __init__(self, train=True, root='./data', transform=None):\n super().__init__()\n if train:\n npy = ['SEVIR_IR069_STORMEVENTS_2018_0101_0630.npy', 'SEVIR_IR069_STORMEVENTS_2018_0701_1231.npy']\n else:\n npy = ['SEVIR_IR069_RANDOMEVENTS_2018_0101_0430.npy']\n\n data = []\n for file in npy:\n data.append(np.load(f'{root}/{file}'))\n\n self.data = np.concatenate(data)\n #N, L, H, W = self.data.shape\n # self.data = self.data.reshape([N L, H, W])\n self.transform = transform\n self.mean = 0\n self.std = 1\n\n def __len__(self):\n return self.data.shape[0]\n\n def __getitem__(self, index):\n img = self.data[index].reshape(20, 1, 128, 128)\n if self.transform:\n img = self.transform(img)\n\n input_img = img[:10]\n output_img = img[10:]\n input_img = img[:10]\n output_img = img[10:]\n input_img = torch.from_numpy(input_img)\n output_img = torch.from_numpy(output_img)\n input_img = input_img.contiguous().float()\n output_img = output_img.contiguous().float()\n return input_img, output_img\n\n\ndef load_data(batch_size, val_batch_size,\n data_root, num_workers):\n train_set = VilDataset(train=True, root='./data', transform=None)\n test_set = VilDataset(train=True, root='./data', transform=None)\n\n dataloader_train = DataLoader(train_set, batch_size=batch_size, shuffle=True, pin_memory=True,\n num_workers=num_workers)\n dataloader_validation = DataLoader(test_set, batch_size=val_batch_size, shuffle=False,\n pin_memory=True, num_workers=num_workers)\n dataloader_test = DataLoader(test_set, batch_size=val_batch_size, shuffle=False, pin_memory=True,\n num_workers=num_workers)\n mean, std = 0, 1\n\n return dataloader_train, dataloader_validation, dataloader_test, mean, std\n\n\nif __name__ == '__main__':\n dataset = VilDataset(root='/root/Model_Phy/data')\n input_img, output_img = dataset[1]\n # Assuming `input_img` is a NumPy array of shape (10, 64, 64, 1)\n fig, axes = plt.subplots(nrows=1, ncols=10)\n\n for i in range(10):\n axes[i].imshow(input_img[i, :, :, 0], cmap=None)\n axes[i].axis('off')\n\n plt.show()\n\n fig, axes = plt.subplots(nrows=1, ncols=10)\n\n for i in range(10):\n axes[i].imshow(output_img[i, :, :, 0], cmap=None)\n axes[i].axis('off')\n\n plt.show()","repo_name":"easylearningscores/PastNet","sub_path":"API/dataloader_sevir.py","file_name":"dataloader_sevir.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"25895923865","text":"from Bollinger_strat import bollinger_strat\nfrom dictionary import settings\nfrom Candle import Candle\nimport statistics\n\ncandle_memory = 30\n\nbollinger_setup = 30\n\nstd_mult = 2\n\nclass Bot:\n def __init__(self):\n self.usdt_eth_candles = list()\n self.usdt_btc_candles = list()\n self.btc_eth_candles = list()\n\n def settings(self, command):\n if (command[0] == \"initial_stack\"):\n settings.update({\"USDT\" : command[1]})\n else:\n settings.update({command[0] : command[1]})\n\n def 
next_candles(self, command):\n candles = command.split(';')\n for strCandle in candles:\n candle = strCandle.split(',')\n if (candle[0] == \"BTC_ETH\"):\n self.btc_eth_candles.append(Candle(candle))\n if (len(self.btc_eth_candles) > candle_memory + 1):\n self.btc_eth_candles.pop(0)\n elif (candle[0] == \"USDT_BTC\"):\n self.usdt_btc_candles.append(Candle(candle))\n if (len(self.usdt_btc_candles) > candle_memory + 1):\n self.usdt_btc_candles.pop(0)\n elif (candle[0] == \"USDT_ETH\"):\n self.usdt_eth_candles.append(Candle(candle))\n if (len(self.usdt_eth_candles) > candle_memory + 1):\n self.usdt_eth_candles.pop(0)\n\n def stacks(self, command):\n stacks = command.split(\",\")\n for stack in stacks:\n money = stack.split(\":\")\n settings.update({money[0] : money[1]})\n\n def update(self, command):\n if (command[0] == \"next_candles\"):\n self.next_candles(command[1])\n elif (command[0] == \"stacks\"):\n self.stacks(command[1])\n\n def action(self, candles):\n node = bollinger_strat(candles, bollinger_setup)\n result = \"\"\n toSell = 0\n toBuy = 0\n\n pair = candles[-1].getPair().split(\"_\")\n acc1 = float(settings[pair[0]])\n acc2 = float(settings[pair[1]])\n\n if (node == 1):\n if (acc1 != 0):\n toBuy = acc1 / candles[-1].getClose()\n elif (node == -1):\n if (acc2 != 0):\n toSell = acc2\n if (toBuy > 0):\n result += \"buy \" + candles[-1].getPair() + \" \" + str(toBuy) + \";\"\n newAcc1 = acc1 - (toBuy * candles[-1].getClose())\n settings.update({pair[0] : str(newAcc1)})\n settings.update({pair[1] : str(acc2 + toBuy)})\n elif (toSell > 0):\n result += \"sell \" + candles[-1].getPair() + \" \" + str(toSell) + \";\"\n newAcc1 = acc1 + toSell * candles[-1].getClose()\n settings.update({pair[0] : str(newAcc1)})\n settings.update({pair[1] : str(acc2 - toSell)})\n return result\n\n def sendAction(self):\n usdt_btc = self.action(self.usdt_btc_candles)\n usdt_eth = self.action(self.usdt_eth_candles)\n btc_eth = self.action(self.btc_eth_candles)\n if (usdt_btc == \"\" and usdt_eth == \"\" and btc_eth == \"\"):\n print(\"pass\")\n else:\n print(usdt_btc + usdt_eth + btc_eth)\n\n def parser(self, command):\n if (command[0] == \"settings\"):\n self.settings(command[1:])\n elif (command[0] == \"update\"):\n self.update(command[2:])\n elif (command[0] == \"action\"):\n self.sendAction()\n else:\n print(\"KO\")\n exit()\n\n def run(self):\n while (True):\n string = input()\n string = string.split(\" \")\n self.parser(string)","repo_name":"hadi-ilies/tradingBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"21750816843","text":"import SimpleITK as sitk\nimport dicom2nifti as d2n\n\n# Convert a DICOM series (one scan) into a single nifti (.nii.gz) file\ndef convert_dicom_to_nifti(dicom_directory, output_file):\n # https://github.com/icometrix/dicom2nifti/issues/11\n # Download latest GDCM and install it: https://github.com/malaterre/GDCM/releases\n # d2n.settings.set_gdcmconv_path('C:/Program Files/GDCM 3.0/bin/gdcmconv.exe')\n d2n.dicom_series_to_nifti(dicom_directory, output_file, reorient_nifti=True)\n\n# Resample an image to have isotropic voxel sizes\ndef resample_image(in_file, out_file):\n image = sitk.ReadImage(in_file)\n\n # Desired iso-tropic voxel side length\n desired_spacing = 0.5\n\n # In slice size and spacing\n current_n_vox = image.GetWidth()\n current_spacing = image.GetSpacing()\n new_n_vox_in_slice = int(current_n_vox * current_spacing[0] / 
desired_spacing)\n\n # voxel size and number of voxels in the direction of the patient\n depth_spacing = current_spacing[2]\n n_vox_depth = image.GetDepth()\n\n new_n_vox_depth = int(n_vox_depth * depth_spacing / desired_spacing)\n\n new_volume_size = [new_n_vox_in_slice, new_n_vox_in_slice, new_n_vox_depth]\n print(f\"New volume size: {new_volume_size}\")\n\n # Create new image with desired properties\n new_image = sitk.Image(new_volume_size, image.GetPixelIDValue())\n new_image.SetOrigin(image.GetOrigin())\n new_image.SetSpacing([desired_spacing, desired_spacing, desired_spacing])\n new_image.SetDirection(image.GetDirection())\n\n # Make translation with no offset, since sitk.Resample needs this arg.\n translation = sitk.TranslationTransform(3)\n translation.SetOffset((0, 0, 0))\n\n interpolator = sitk.sitkLinear\n\n # Create final resampled image\n resampled_image = sitk.Resample(image, new_image, translation, interpolator)\n\n sitk.WriteImage(resampled_image, out_file)\n\nif __name__ == '__main__':\n dicom_directory = \"C:/Users/annae/OneDrive - Danmarks Tekniske Universitet/Bachelorprojekt/Data/Pancreas-data/PANCREAS_0009/11-24-2015-PANCREAS0009-Pancreas-12471/Pancreas-31748\"\n\n nifti_file = \"C:/Users/annae/OneDrive - Danmarks Tekniske Universitet/Bachelorprojekt/Data/Pancreas-data-slicer/Pancreas_0009-001/pancreas_0009-001.nii.gz\"\n nifti_isotropic_file = \"C:/Users/annae/OneDrive - Danmarks Tekniske Universitet/Bachelorprojekt/Data/Pancreas-data-slicer/Pancreas_0009-001/pancreas_0009-001_isotropic.nii.gz\"\n slicer_scan_dir = \"C:/Users/annae/OneDrive - Danmarks Tekniske Universitet/Bachelorprojekt/Data/Pancreas-data-slicer/Pancreas_0009-001\"\n crop_volume = \"C:/Users/annae/OneDrive - Danmarks Tekniske Universitet/Bachelorprojekt/Data/Pancreas-data-slicer/Pancreas_0009-001/pancreas_0009-001_isotropic_cropped.nii.gz\"\n\n # Choose what to do:\n convert_dicom_to_nifti(dicom_directory, nifti_file)\n print('Nifti file created!')\n resample_image(nifti_file, nifti_isotropic_file)\n print('Isotropic file created!')","repo_name":"annaekner/DRL-PathTracing","sub_path":"resample_data.py","file_name":"resample_data.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29395374578","text":"# -*- coding: utf-8 -*-\n\nfrom api.api import API\nfrom pages.android.common.super_page import SuperPage\nfrom pages.android.shanghu.xinzengyuangong_page_configs import XinZengYuanGongPageConfigs as XZYGPC\nfrom pages.logger import logger\n\n\nclass XinZengYuanGongPage(SuperPage):\n '''\n 作者 乔佳溪\n 员工管理\n '''\n def __init__(self, testcase, driver, logger):\n super(XinZengYuanGongPage, self).__init__(testcase, driver, logger)\n\n def clickOnChooseRole(self):\n '''\n usage: 选择角色\n '''\n logger.info(\"Click 选择角色操作 begin\")\n logger.info(\"Click 选择角色 begin\")\n API().clickElementByResourceId(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.resource_id_choose,\n XZYGPC.verify_timeout)\n API().waitBySeconds(2)\n API().screenShot(self.driver, \"xuanZeJueSe\")\n logger.info(\"Click 选择角色 end\")\n logger.info(\"Click 游客角色 begin\")\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.text_role,\n XZYGPC.verify_timeout)\n API().screenShot(self.driver, \"xuanZeJueSe\")\n logger.info(\"Click 游客角色 end\")\n logger.info(\"Click 确定 begin\")\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.text_confirm,\n XZYGPC.verify_timeout)\n logger.info(\"Click 确定 
end\")\n logger.info(\"Click 选择角色操作 end\")\n\n def inputUserName(self):\n '''\n usage: 输入用户名\n '''\n logger.info(\"Input 用户名 begin\")\n API().inputStringByResourceId(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.resource_id_name,\n XZYGPC.account_name,\n XZYGPC.verify_timeout)\n logger.info(\"Input 用户名 end\")\n\n def inputPhone(self):\n '''\n usage: 输入手机号\n '''\n logger.info(\"Input 手机号 begin\")\n API().inputStringByResourceId(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.resource_id_phone,\n XZYGPC.account_phone,\n XZYGPC.verify_timeout)\n logger.info(\"Input 手机号 end\")\n\n def clickOnSave(self):\n '''\n usage: 选择保存\n '''\n logger.info(\"Click 保存 begin\")\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.text_save,\n XZYGPC.verify_timeout)\n logger.info(\"Click 保存 end\")\n\n def clickOnChangeRole(self):\n '''\n usage: 变更角色\n '''\n logger.info(\"Click 角色 radio button begin\")\n API().clickElementByResourceId(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.resource_id_choose,\n XZYGPC.verify_timeout)\n\n API().waitBySeconds(3)\n\n API().screenShot(self.driver, \"xuanZeJueSe\")\n API().scrollToText(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.text_edit_role)\n\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.text_edit_role,\n XZYGPC.verify_timeout)\n\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.text_confirm,\n XZYGPC.verify_timeout)\n API().screenShot(self.driver, \"xuanZeJueSe\")\n logger.info(\"Click 角色 radio button end\")\n\n def inputEditName(self):\n '''\n usage: 编辑姓名\n '''\n logger.info(\"Input 姓名 begin\")\n name = API().getTextByResourceId(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.resource_id_name,\n XZYGPC.verify_timeout)\n\n name = name + \"ceshi\"\n if len(name) > 20:\n name = \"ceshi\"\n API().inputStringByResourceId(self.testcase,\n self.driver,\n self.logger,\n XZYGPC.resource_id_name,\n name,\n XZYGPC.verify_timeout)\n logger.info(\"Input 姓名 end\")\n","repo_name":"liu111xiao111/UItest","sub_path":"pages/android/shanghu/xinzengyuangong_page.py","file_name":"xinzengyuangong_page.py","file_ext":"py","file_size_in_byte":5452,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"34255612713","text":"class Solution:\n def combinationSum(self, candidates, target):\n def findNumbers(ans, arr, temp, su, index):\n if su == 0:\n ans.append(list(temp))\n for i in range(index, len(arr)):\n if(su - arr[i]) >= 0:\n temp.append(arr[i])\n findNumbers(ans, arr, temp, su - arr[i], i)\n temp.remove(arr[i])\n\n ans = []\n temp = []\n candidates = sorted(list(set(candidates)))\n findNumbers(ans, candidates, temp, target, 0)\n\n return ans\n\nsol = Solution()\n\nprint(sol.combinationSum([3,4, 5, 2], 7))","repo_name":"allyanna/leetcode-with-friends","sub_path":"python/39_Combination_Sum.py","file_name":"39_Combination_Sum.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36133907972","text":"from django.shortcuts import render\nfrom .serializers import UserSerializer, OrderSerializer, ProducerSerializer, CarSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework import generics, status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import AuthenticationFailed\nfrom .models import User, Item, Car, Order, Mark, 
Producer\nfrom django.forms.models import model_to_dict\nimport jwt\nimport datetime\nimport json\n\nimport base64\nfrom django.core.files.base import ContentFile\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom PIL import Image\nfrom io import BytesIO\n\n\nclass ItemViews(APIView):\n\n def post(self, request):\n\n item = Item.objects.filter(name=request.data['item']).first()\n marks = item.marks.all()\n pass\n\n\nclass RegisterUserView(APIView):\n\n def post(self, request):\n serializer = UserSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n responce = Response()\n responce.data = {\n 'message': 'success',\n 'data': serializer.data\n }\n return responce\n\n\nclass AllBrands(APIView):\n\n def get(self, request):\n\n producers = Producer.objects.filter().all()\n\n producersSerializers = ProducerSerializer(data=producers, many=True)\n producersSerializers.is_valid()\n responce = Response()\n responce.data = {\n 'brands': producersSerializers.data\n }\n\n return responce\n\n\ndef create_image_from_base64(base64_string):\n # Decode the base64 string\n decoded_image = base64.b64decode(base64_string)\n\n # Create an Image object from the decoded data\n image = Image.open(BytesIO(decoded_image))\n\n # Resizing or other image processing may be done here,\n # depending on your requirements\n\n # Get the image format\n image_format = image.format.lower()\n\n # Create a temporary InMemoryUploadedFile object\n image_io = BytesIO()\n image.save(image_io, format=image_format)\n image_file = InMemoryUploadedFile(\n image_io, None, \"image.png\", f'images/{image_format}', len(\n image_io.getvalue()), None\n )\n\n return image_file\n\n\nclass GetOrder(APIView):\n\n def post(self,request):\n\n responce = Response()\n\n idOrder = request.data['id']\n\n try:\n order = Order.objects.filter(id=idOrder).first()\n\n marks = Mark.objects.filter(order=order).all()\n\n images = []\n\n for item in marks:\n with open(item.photo.path, 'rb') as f:\n encoded = base64.b64encode(f.read())\n \n images.append({\n 'mark':item.mark,\n 'base64_file':encoded,\n 'number':item.mark,\n 'image':''\n })\n\n producer = {\n 'name':order.brand.name,\n 'truck':order.brand.truck,\n 'passenger': order.brand.passenger,\n 'condition': order.brand.condition\n }\n \n carNumber = {\n 'number':order.carNumber.number\n }\n\n responceOrder = {\n 'brand':producer,\n 'carNumber': carNumber,\n 'images':images\n }\n\n responce.data = {\n 'status': True,\n 'message': 'OK',\n 'data':responceOrder\n }\n\n return responce\n\n\n except Exception as e:\n responce.data = {\n 'status': False,\n 'message': str(e)\n }\n\n return responce\n\nclass CreateOrder(APIView):\n\n def post(self, request):\n\n responce = Response()\n\n try:\n carBody = request.data['car']\n brandBody = request.data['brand']\n userBody = request.data['user']\n marksBody = request.data['marks']\n\n newCar = Car()\n newCar.number = carBody['number']\n newCar.image = create_image_from_base64(carBody['base64_file'])\n newCar.save()\n\n brand = Producer.objects.filter(name=brandBody).first()\n user = User.objects.filter(id=userBody['id']).first()\n\n user.balance = user.balance + brand.summa\n\n user.save()\n\n newOrder = Order()\n newOrder.carNumber = newCar\n newOrder.brand = brand\n newOrder.user = user\n newOrder.save()\n\n idOrder = newOrder.id\n\n for item in marksBody:\n image_field = create_image_from_base64(item['base64_file'])\n\n newMark = Mark()\n newMark.mark = item['mark']\n 
newMark.photo = image_field\n newMark.order = newOrder\n newMark.save()\n\n responce.data = {\n 'status': True,\n 'message': 'OK'\n }\n\n return responce\n except Exception as e:\n responce.data = {\n 'status': False,\n 'message': str(e)\n }\n \n return responce\n\n\nclass CheckCar(APIView):\n\n def post(self, request):\n\n number = request.data['number']\n imageBase64 = request.data['base64_file']\n image_field = create_image_from_base64(imageBase64)\n\n car = Car.objects.filter(number=number).first()\n\n if car is None:\n responce = Response()\n\n responce.data = {\n 'status': True,\n 'message': 'OK'\n }\n\n return responce\n else:\n responce = Response()\n\n responce.data = {\n 'status': False,\n 'message': 'Такая машина уже есть!'\n }\n\n return responce\n\n\nclass CheckAccessView(APIView):\n\n def post(self, request):\n\n name = request.data['name']\n password = request.data['password']\n\n user = User.objects.filter(name=name).first()\n\n if user is None:\n raise AuthenticationFailed('User not found')\n\n if user.password != password:\n raise AuthenticationFailed('Incorrect password')\n\n payload = {\n 'id': user.id,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),\n 'iat': datetime.datetime.utcnow()\n }\n\n token = jwt.encode(payload, 'secret', algorithm='HS256')\n\n if user.district == \"\" or user.latitude == \"\" or user.longitude == \"\":\n responce = Response()\n\n responce.data = {\n 'status': False,\n }\n\n return responce\n else:\n serializer = UserSerializer(user)\n\n orders = Order.objects.filter(user=user).all()\n\n responce = Response()\n\n responce.set_cookie(key='jwt', value=token, httponly=True)\n\n responce.headers = {\n 'jwt': token,\n }\n orderSerializer = OrderSerializer(orders, many=True)\n responce.data = {\n 'status': True,\n 'jwt': token,\n 'user': serializer.data,\n 'orders': orderSerializer.data\n }\n\n return responce\n\n\nclass LoginView(APIView):\n\n def post(self, request):\n\n # a = Item.objects.first().marks.all()\n # for item in a:\n # print(item.mark)\n\n name = request.data['name']\n password = request.data['password']\n lat = request.data['latitude']\n lon = request.data['longitude']\n district = request.data['district']\n fio = request.data['fio']\n\n user = User.objects.filter(name=name).first()\n\n if user is None:\n raise AuthenticationFailed('User not found')\n\n if user.password != password:\n raise AuthenticationFailed('Incorrect password')\n\n user.latitude = lat\n user.longitude = lon\n user.district = district\n user.fio = fio\n\n user.save()\n\n payload = {\n 'id': user.id,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),\n 'iat': datetime.datetime.utcnow()\n }\n\n token = jwt.encode(payload, 'secret', algorithm='HS256')\n serializer = UserSerializer(user)\n responce = Response()\n\n orders = Order.objects.filter(user=user).all()\n orderSerializer = OrderSerializer(orders, many=True)\n\n responce.set_cookie(key='jwt', value=token, httponly=True)\n\n responce.headers = {\n 'jwt': token,\n }\n\n responce.data = {\n 'status': True,\n 'jwt': token,\n 'user': serializer.data,\n 'orders': orderSerializer.data\n }\n\n return responce\n\n\nclass LogoutView(APIView):\n\n def post(self, request):\n responce = Response()\n responce.delete_cookie('jwt')\n responce.data = {\n 'status': True\n }\n return 
responce\n","repo_name":"damir-dev-21/agromash_vulk_backend","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2439721165","text":"# coding: utf-8\r\n\r\nimport logging\r\nimport json\r\n\r\nimport pymysql\r\nimport scrapy\r\n\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import sessionmaker\r\n\r\nfrom qq.models import TencentArtist, TencentTable\r\n\r\nengine = create_engine(\"mysql+pymysql://root:root@192.168.0.13:3306/albums_from_web?charset=utf8\")\r\n\r\n\r\nclass TencentAlbum(scrapy.Spider):\r\n name = \"TencentAlbum\"\r\n # custom_settings = {\"DOWNLOAD_DELAY\": 0.5}\r\n album_url = \"https://y.qq.com/n/yqq/album/{}.html\"\r\n singer_album_url = \"https://c.y.qq.com/v8/fcg-bin/fcg_v8_singer_album.fcg?g_tk=5381&loginUin=2403635410&hostUin=0&format=jsonp&inCharset=utf8&outCharset=utf-8¬ice=0&platform=yqq&needNewCode=0&singermid={}&order=time&begin=0&num=133&exstatus=1\"\r\n headers = {\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\",\r\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\r\n }\r\n\r\n def start_requests(self):\r\n Session = sessionmaker(bind=engine)\r\n session = Session()\r\n artists = session.query(TencentArtist.artist_mid).all()\r\n session.close()\r\n for artist in artists[400000:]:\r\n Session = sessionmaker(bind=engine)\r\n session = Session()\r\n if_exist = session.query(TencentTable).filter(TencentTable.artist_mid == artist).first()\r\n session.close()\r\n if if_exist:\r\n continue\r\n album_url = self.singer_album_url.format(artist)\r\n yield scrapy.Request(album_url, headers=self.headers, callback=self.parse, meta=dict(artist_mid=artist))\r\n\r\n def parse(self, response):\r\n artist_mid = response.meta[\"artist_mid\"]\r\n json_response = json.loads(response.body)\r\n code = json_response[\"code\"]\r\n if code != 0:\r\n raise Exception\r\n album_list = json_response[\"data\"][\"list\"]\r\n for album in album_list:\r\n album_mid = album[\"albumMID\"]\r\n aurl = self.album_url.format(album_mid)\r\n name = album[\"albumName\"]\r\n album_id = album[\"albumID\"]\r\n artist_name = album[\"singerName\"]\r\n listen_count = album[\"listen_count\"]\r\n release_company = album[\"company\"]\r\n Session = sessionmaker(bind=engine)\r\n session = Session()\r\n if_exist = session.query(TencentTable).filter(TencentTable.album_id==album_id).first()\r\n if if_exist:\r\n session.close()\r\n continue\r\n aalbum = TencentTable(url=aurl, name=name, album_id=album_id, artist_name=artist_name, listen_count=listen_count, release_company=release_company)\r\n session.add(aalbum)\r\n session.commit()\r\n session.close()\r\n\r\n","repo_name":"tuchuanchuan/qq-spider","sub_path":"qq/spiders/qq_album_url.py","file_name":"qq_album_url.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71874119840","text":"import pandas as pd\r\nimport tushare as ts\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as ticker\r\nimport matplotlib.animation as animation\r\nfrom IPython.display import HTML\r\n\r\n#画图显示中文\r\nplt.rcParams['font.sans-serif'] = [u'SimHei']\r\nplt.rcParams['axes.unicode_minus'] = 
False\r\n\r\n#加载锂电池板块股票的数据(爬虫来自东方财富网板块数据)\r\nlist_data=pd.read_excel('D:\\pycharm\\pythonProject\\Reptile\\锂电池板块的股票.xlsx')\r\nlist_code=list_data['股票代码'].astype('str').apply(lambda x:x.zfill(6)).tolist()#因为深成的票有00几开头,用Zfill在左填充0\r\ndic=dict(zip(list_code,list_data['股票名称']))#将股票代码与股票名称封装为字典\r\n\r\n#通过Tushare获取股票数据\r\ndef get_Data(code):\r\n data=ts.get_hist_data(code,start='2021-01-01')\r\n return data\r\n\r\n#获取每只票成交量的函数\r\ndef creat_data(code):\r\n p_change_list=get_Data(code)['volume']\r\n D = pd.DataFrame(p_change_list).reset_index().sort_values(by='date')\r\n D['Code'] = code\r\n #循环以实现累加的目的\r\n i = 0\r\n while i < D.shape[0] - 1:\r\n D.iloc[i + 1, 1] += D.iloc[i, 1]\r\n i += 1\r\n return D\r\n\r\n#执行函数,获取想要的数据框,由时间,成交量,股票代码,股票名称几个维度组成\r\ndef get():\r\n #建立空表来装数据框\r\n XX=[]\r\n #遍历需要的股票代码\r\n for i in list_code:\r\n XX.append(creat_data(i))#调用封装函数组成数据框\r\n ALL=pd.concat(XX,axis=0)#按行进行合并\r\n\r\n #通过字典匹配股票代码对应的股票名称\r\n list_all=[dic[x]for x in ALL['Code']]\r\n ALL['股票名称']=list_all\r\n return ALL\r\n\r\n#获取数据\r\ndf=get()\r\nprint(df)\r\n#将字典写入,前面有个dic,现在需要弄个反向字典\r\ndic_2=dict(zip(list_data['股票名称'],list_code))#将股票代码与股票名称封装为字典\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Begins~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n#为了得到今年所有股市开市日期,用以下方法\r\nwant_date_list=ts.get_hist_data('601899',start='2021-01-01').reset_index().sort_values(by='date')#任意股票的历史数据,只要时间项\r\nDict_date=dict(zip(range(0,want_date_list.shape[0]),want_date_list['date']))#得到需要的字典,这样就能以该字典去索引日期赋值于函数\r\n\r\n#获取指定颜色\r\nColors=('#adb0ff', '#ffb3ff', '#90d595', '#e48381',\r\n '#aafbff', '#f7bb5f', '#eafb50')\r\n\r\n#指定绘图布局\r\nfig, ax = plt.subplots(figsize=(15, 8))\r\n\r\n#画图函数,单独运行即可成为图片\r\ndef draw_barchart(n):\r\n Date=Dict_date[n]\r\n dff = df[df['date'].eq(Date)].sort_values(by='volume', ascending=True).tail(10)\r\n ax.clear()\r\n ax.barh(dff['股票名称'], dff['volume'], color=Colors)\r\n dx = dff['volume'].max() / 200\r\n for i, (value, name) in enumerate(zip(dff['volume'], dff['股票名称'])):\r\n ax.text(value - dx, i, name, size=14, weight=600, ha='right', va='bottom')\r\n ax.text(value - dx, i - .25, dic_2[name], size=10, color='#444444', ha='right', va='baseline')\r\n ax.text(value + dx, i, f'{value:,.0f}', size=14, ha='left', va='center')\r\n # ... 
polished styles\r\n ax.text(1, 0.4, Date, transform=ax.transAxes, color='#777777', size=46, ha='right', weight=800)\r\n ax.text(0, 1.06, 'Volume (成交量:手)', transform=ax.transAxes, size=12, color='#777777')\r\n ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))#定制化刻度标签形式\r\n ax.xaxis.set_ticks_position('top')#设置该格式位置\r\n ax.tick_params(axis='x', colors='#777777', labelsize=12)#设置轴的格式\r\n ax.set_yticks([])#将y标签设为空值\r\n ax.margins(0, 0.01)#缩放坐标轴\r\n ax.grid(which='major', axis='x', linestyle='-')#表框格式\r\n ax.set_axisbelow(True)\r\n ax.text(0, 1.12, '锂电池板块累计成交量',\r\n transform=ax.transAxes, size=20, weight=600, ha='left')#设置标题\r\n plt.box(False)\r\n plt.show()\r\n\r\n#使用animation动态画图工具,生成gif文件\r\nanimator = animation.FuncAnimation(fig, draw_barchart,frames=range(0,4))#调用连续作图工具,frame为绘图函数的取值范围\r\nHTML(animator.to_jshtml())#生成HTML文件\r\n\r\nanimator.save(\"test3.gif\",writer='pillow')#保存为GIF","repo_name":"YjhUnique/Trends-picture","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13420349522","text":"from __future__ import print_function\n\n\"\"\"IMPORT DEPENDENCIES\"\"\"\nimport os\nimport math\nimport concurrent.futures\nfrom multiprocessing import cpu_count\nimport psutil\nimport resource\n\n\"\"\"\nFunc: Threshold number of workers if available RAM is insufficient with number of workers and file sizes\n@param args_dict: Argument dictionary\n@param file_list: List of file names used to check for max file size\n- Get max file size in list and total memory available\n- If it seems like the expansion of memory will kill the process, modify the number of workers allowed at a time to ensure RAM doesn't get overused at once\n- Assume incoming BAM files will expand a lot more than a normal, non-binary file\nNotes: If running into issues with OOM kills or a BrokenProcessPool, try upping the factor variable as a temporary fix until can make a better RAM-sensitive mod\n\"\"\"\ndef threshold_ram(\n args_dict,\n file_list):\n\n total = psutil.virtual_memory()[1] # Get available memory\n\n file_sizes = [] # Get max file size\n for file in file_list:\n file_sizes.append(os.path.getsize(str(args_dict['input']) + str(file)))\n\n _max = max(file_sizes)\n\n if file[-6:] == '.fastq':\n return cpu_count(), cpu_count() # records are read line by line without storage, low memory footprint\n\n elif file[-4:] == '.bam' or file[-4:] == '.sam':\n factor = 6 # Experimental factor\n\n else:\n factor = 1\n\n threshold_workers = int(math.floor((total * 1.5) / (_max * factor))) # Set threshold based on max file size in set\n if threshold_workers < 1:\n threshold_workers = 1\n\n if threshold_workers > cpu_count():\n threshold_workers = cpu_count()\n\n if threshold_workers < args_dict['workers']: # Modify if set # of workers is greater than memory threshold\n threshold_threads = int(math.floor(args_dict['threads'] / threshold_workers))\n print('Resetting parallelization specs based on max file size to be processed:\\nMax number of workers: ' + str(threshold_workers) + '\\nNumber of threads per worker (where available): ' + str(threshold_threads))\n return threshold_threads, threshold_workers\n else:\n return args_dict['threads'], args_dict['workers']\n\n\"\"\"\nFunc: Determine number of processors to use\n@param args_dict: Argument dictionary\n@param mod_workers: Call to allow number of workers be equal to number of processors, else process one file at a time with 
all available processors\n- Check number given as max processors and use that if not None\n- If None specified (no user input), set cores equal to number available on system\n- Determine number of workers to use per job based on user input\n - If modified, workers equal number of cores\n - If not modified, workers equal 1, so one worker is using all available cores\n\"\"\"\ndef get_cores(\n args_dict,\n mod_workers):\n\n if 'max_processors' in args_dict and args_dict['max_processors'] != None:\n cores = args_dict['max_processors']\n else:\n cores = cpu_count() #Number of CPU cores on your system\n\n if mod_workers == True:\n workers = cores\n else:\n workers = 1\n\n return cores, workers\n\n\"\"\"\nFunc: Run function and files on pools\n@param func: function name to be executed on every object passed to the pool\n@param args_iter: List of lists of file name and args_dict\n@param args_dict: Argument dictionary\n- Create batches of n args_iter objects. Each batch based on number of workers available at a given time\n- Concurrently execute each file within a batch, clean process memory, pass in next batch, etc.\n\"\"\"\ndef run_pools(\n func,\n args_iter,\n args_dict):\n\n pools = int(math.ceil(len(args_iter) / args_dict['workers']))\n\n if pools < 1:\n pools = 1\n\n it_list = []\n range_number = 0\n for x in range(pools):\n it_list.append([iter for iter in args_iter[range_number:range_number + args_dict['workers']]])\n range_number += args_dict['workers']\n\n batch_number = 1\n for batch in it_list:\n with concurrent.futures.ProcessPoolExecutor(max_workers=args_dict['workers']) as executor:\n for file in zip(batch, executor.map(func, batch)):\n print(file[0][0], \"has been processed.\")\n print('Processing of batch {0} of {1} complete...'.format(batch_number, pools))\n batch_number += 1\n\n\"\"\"\nFunc: Parallelize function on list of files\n@param func: function name to be executed on every object passed to the pool\n@param file_list: List of file names used to process\n@param args_dict: Argument dictionary\n@param mod_workers: Call to allow number of workers be equal to number of processors, else process one file at a time with all available processors\n\"\"\"\ndef parallelize(\n func,\n file_list,\n args_dict,\n mod_workers=False):\n\n args_iter = [[file, args_dict] for file in file_list]\n\n # Get number of cores\n args_dict['threads'], args_dict['workers'] = get_cores(\n args_dict,\n mod_workers)\n\n # Check and apply RAM threshold if necessary\n if mod_workers == True:\n args_dict['threads'], args_dict['workers'] = threshold_ram(\n args_dict,\n file_list)\n\n # Use all cores at once for processes that can not be multiprocessed themselves\n elif mod_workers == 'all':\n args_dict['threads'], args_dict['workers'] = cpu_count(), cpu_count()\n\n else:\n pass\n\n run_pools(\n func,\n args_iter,\n args_dict)\n\n\"\"\"\nFunc: Parallelize function on list of files for PE data\n@param func: function name to be executed on every object passed to the pool\n@param file_list: List of file names used to process\n@param args_dict: Argument dictionary\n@param mod_workers: Call to allow number of workers be equal to number of processors, else process one file at a time with all available processors\n\"\"\"\ndef parallelize_pe(\n func,\n file_list,\n args_dict,\n mod_workers=False):\n\n # Pair files for paired-end processing\n c1 = 0\n args_iter = []\n for c in range(int(len(file_list)/2)):\n c2 = c1 + 1\n args_iter.append([file_list[c1], file_list[c2], args_dict])\n c1 += 2\n\n args_iter = [[x[0], x[1], 
x[2]] for x in args_iter]\n\n args_dict['threads'], args_dict['workers'] = get_cores(\n args_dict,\n mod_workers)\n\n\n # Check and apply RAM threshold if necessary\n if mod_workers == True:\n args_dict['threads'], args_dict['workers'] = threshold_ram(\n args_dict,\n file_list)\n\n run_pools(\n func,\n args_iter,\n args_dict)\n","repo_name":"XPRESSyourself/XPRESSpipe","sub_path":"xpresspipe/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":6543,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"4076774466","text":"from CvPythonExtensions import *\nimport PyHelpers\nimport CvUtil\nimport ScreenInput\nimport CvScreenEnums\n\nPyPlayer = PyHelpers.PyPlayer\nPyInfo = PyHelpers.PyInfo\n\n# globals\ngc = CyGlobalContext()\nArtFileMgr = CyArtFileMgr()\nlocalText = CyTranslator()\n\nMOVIE_SCREEN_WONDER = 0\nMOVIE_SCREEN_RELIGION = 1\nMOVIE_SCREEN_PROJECT = 2\n\nclass CvWonderMovieScreen:\n\t\"Wonder Movie Screen\"\n\n\tdef __init__(self):\n\t\tself.fDelay = -1.0\n\t\tself.fTime = 0.0\n\t\tself.bDone = false\n\n\tdef interfaceScreen (self, iMovieItem, iCityId, iMovieType):\n\t\t# iMovieItem is either the WonderID, the ReligionID, or the ProjectID, depending on iMovieType\n\t\t\n\t\tif CyUserProfile().getGraphicOption(GraphicOptionTypes.GRAPHICOPTION_NO_MOVIES):\n\t\t\treturn\n\t\t\n\t\tself.Z_CONTROLS = -2.2\n\n\t\tself.X_SCREEN = 0\n\t\tself.Y_SCREEN = 0\n\t\tself.W_SCREEN = 1024\n\t\tself.H_SCREEN = 768\n\n\t\tself.X_WINDOW = 250\n\t\tself.Y_WINDOW = 40\n\t\tself.W_WINDOW = 760\n\t\tself.H_WINDOW = 590\n\t\tself.Y_TITLE = self.Y_WINDOW + 20\n\t\tself.iWonderId = iMovieItem\n\t\t\n\t\tself.X_EXIT = self.X_WINDOW + self.W_WINDOW/2 - 50\n\t\tself.Y_EXIT = self.Y_WINDOW + self.H_WINDOW - 50\n\t\tself.W_EXIT = 120\n\t\tself.H_EXIT = 30\n\t\t\n\t\tself.X_MOVIE = 20\n\t\tself.Y_MOVIE = 50\n\t\tself.W_MOVIE = 720\n\t\tself.H_MOVIE = 480\n\t\t\n\t\tself.iMovieType = iMovieType\n\t\tself.fTime = 0.0\n\t\tself.fDelay = 1.5\n\t\tself.bDone = false\n\t\t\n\t\t# not all projects have movies\n\t\tself.szMovieFile = None\n\t\tif self.iMovieType == MOVIE_SCREEN_PROJECT:\n\t\t\tszArtDef = gc.getProjectInfo(iMovieItem).getMovieArtDef()\n\t\t\tif (len(szArtDef) > 0):\n\t\t\t\tself.szMovieFile = CyArtFileMgr().getMovieArtInfo(szArtDef).getPath()\n\t\telif self.iMovieType == MOVIE_SCREEN_WONDER:\n\t\t\tself.szMovieFile = gc.getBuildingInfo(iMovieItem).getMovie()\n\t\telif self.iMovieType == MOVIE_SCREEN_RELIGION:\n\t\t\tself.szMovieFile = gc.getReligionInfo(iMovieItem).getMovieFile()\n\t\tif (self.szMovieFile == None or len(self.szMovieFile) == 0):\n\t\t\treturn\n\t\t\n\t\tplayer = PyPlayer(CyGame().getActivePlayer())\n\t\t\n\t\t# move the camera and mark the interface camera as dirty so that it gets reset - JW\n\t\tif self.iMovieType == MOVIE_SCREEN_WONDER:\n\t\t\tCyInterface().lookAtCityBuilding(iCityId, iMovieItem)\n\t\telse:\n\t\t\tCyInterface().lookAtCityBuilding(iCityId, -1)\n\t\tCyInterface().setDirty(InterfaceDirtyBits.SelectionCamera_DIRTY_BIT, True)\n\t\t\n\t\tscreen = CyGInterfaceScreen( \"WonderMovieScreen\" + str(iMovieItem), CvScreenEnums.WONDER_MOVIE_SCREEN )\n\t\tscreen.addPanel(\"WonderMoviePanel\", \"\", \"\", true, true,\n\t\t\tself.X_WINDOW, self.Y_WINDOW, self.W_WINDOW, self.H_WINDOW, PanelStyles.PANEL_STYLE_MAIN)\n\t\t\n\t\tscreen.showWindowBackground( True )\n\t\tscreen.setDimensions(screen.centerX(self.X_SCREEN), screen.centerY(self.Y_SCREEN), self.W_SCREEN, 
self.H_SCREEN)\n\t\tscreen.setRenderInterfaceOnly(False)\n\t\tscreen.showScreen(PopupStates.POPUPSTATE_IMMEDIATE, False)\n\t\tscreen.enableWorldSounds( false )\n\t\t \t\t\n\t\t# Header...\n\t\tszHeaderId = \"WonderTitleHeader\" + str(iMovieItem)\n\t\tif self.iMovieType == MOVIE_SCREEN_RELIGION:\n\t\t\tszHeader = localText.getText(\"TXT_KEY_MISC_REL_FOUNDED_MOVIE\", (gc.getReligionInfo(iMovieItem).getTextKey(), ))\n\t\telif self.iMovieType == MOVIE_SCREEN_WONDER:\n\t\t\tszHeader = gc.getBuildingInfo(iMovieItem).getDescription()\n\t\telif self.iMovieType == MOVIE_SCREEN_PROJECT:\n\t\t\tszHeader = gc.getProjectInfo(iMovieItem).getDescription()\n\n\t\tscreen.setLabel(szHeaderId, \"Background\", u\"\" + szHeader + \"\", CvUtil.FONT_CENTER_JUSTIFY,\n\t\t\t\tself.X_WINDOW + self.W_WINDOW / 2, self.Y_TITLE, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)\n\t\t\t\t\n\t\tscreen.hide(\"Background\")\n\n\t\tscreen.playMovie(\"\", 0, 0, 0, 0, 0) # dummy call to hide screen if no movies are supposed to be shown\n\n\tdef playMovie(self):\n\t\t\t\n\t\tscreen = CyGInterfaceScreen( \"WonderMovieScreen\" + str(self.iWonderId), CvScreenEnums.WONDER_MOVIE_SCREEN )\n\t\tscreen.setRenderInterfaceOnly(True)\n\t\tscreen.show(\"Background\")\n\n\t\t# Play the movie\n\t\tif self.iMovieType == MOVIE_SCREEN_RELIGION:\n\t\t\tscreen.addReligionMovieWidgetGFC( \"ReligionMovie\", self.szMovieFile, self.X_WINDOW + self.X_MOVIE, self.Y_WINDOW + self.Y_MOVIE, self.W_MOVIE, self.H_MOVIE, WidgetTypes.WIDGET_GENERAL, -1, -1)\n\t\t\tCyInterface().playGeneralSound(gc.getReligionInfo(self.iWonderId).getMovieSound())\t\t\n\t\telse:\n\t\t\tscreen.playMovie(self.szMovieFile, self.X_WINDOW + self.X_MOVIE, self.Y_WINDOW + self.Y_MOVIE, self.W_MOVIE, self.H_MOVIE, -2.3 )\n\t\t\t\n\t\tscreen.setButtonGFC(\"WonderExit\" + str(self.iWonderId), localText.getText(\"TXT_KEY_MAIN_MENU_OK\", ()), \"\", self.X_EXIT, self.Y_EXIT, self.W_EXIT, self.H_EXIT, WidgetTypes.WIDGET_CLOSE_SCREEN, -1, -1, ButtonStyles.BUTTON_STYLE_STANDARD )\n\n\t# Will handle the input for this screen...\n\tdef handleInput (self, inputClass):\n\t\tif (inputClass.getNotifyCode() == NotifyCode.NOTIFY_MOVIE_DONE):\n\t\t\tif (not self.bDone):\n\t\t\t\tscreen = CyGInterfaceScreen( \"WonderMovieScreen\" + str(self.iWonderId), CvScreenEnums.WONDER_MOVIE_SCREEN )\n\t\t\t\tif self.iMovieType == MOVIE_SCREEN_WONDER:\n\t\t\t\t\tszHelp = CyGameTextMgr().getBuildingHelp(self.iWonderId, False, False, False, None)\n\t\t\t\telif self.iMovieType == MOVIE_SCREEN_PROJECT:\n\t\t\t\t\tszHelp = CyGameTextMgr().getProjectHelp(self.iWonderId, False, None)\n\t\t\t\telse:\n\t\t\t\t\tszHelp = \"\"\n\t\t\t\t\n\t\t\t\tif len(szHelp) > 0:\n\t\t\t\t\tscreen.addPanel(\"MonkeyPanel\", \"\", \"\", true, true, self.X_WINDOW + self.X_MOVIE + self.W_MOVIE / 8 - 10, self.Y_WINDOW + self.Y_MOVIE + 90, 3 * self.W_MOVIE / 4 + 20, self.H_MOVIE - 180, PanelStyles.PANEL_STYLE_MAIN_BLACK50)\t\n\t\t\t\t\tscreen.addMultilineText(\"MonkeyText\", szHelp, self.X_WINDOW + self.X_MOVIE + self.W_MOVIE / 8, self.Y_WINDOW + self.Y_MOVIE + 100, 3 * self.W_MOVIE / 4, self.H_MOVIE - 200, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)\t\n\t\t\t\tself.bDone = true\n\n\t\treturn 0\n\n\tdef update(self, fDelta):\n\t\n\t\tif self.fDelay > 0:\n\t\t\tself.fTime += fDelta\n\t\t\tif self.fTime > self.fDelay:\n\t\t\t\tself.playMovie()\n\t\t\t\tself.fDelay = 
-1\n\t\treturn\n","repo_name":"max-zanko/civ4-beyond-the-sword-sdk","sub_path":"Assets/Python/Screens/CvWonderMovieScreen.py","file_name":"CvWonderMovieScreen.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"7678843110","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 17 12:17:45 2017\n\n@author: nberliner\n\"\"\"\nimport numpy as np\n\nfrom geopy.distance import vincenty\nfrom scipy.spatial.distance import cdist\n\n\nfrom data.data import load_krill_data, breeding_locations\n\n\n\nclass KrillBase():\n \n def __init__(self):\n \n self.df_krill = load_krill_data()\n self.df_breeding = breeding_locations()\n self.distMat = self._compute_distMat(self.df_krill, self.df_breeding)\n \n self.krillbase = None\n \n def _compute_distMat(self, df_krill, df_breeding):\n fname = '../data/interim/krill_distMat.npy'\n try:\n distMat = np.load(fname)\n print(\"Found krill pre-computed distance matrix in data/interim\")\n except IOError:\n print(\"Computing krill distMat and caching result in data/interim/\")\n print(\"This can take a while.. (apologies for computing this via brute force)\")\n # Extract the latitude and longitude values\n data_krill = df_krill[['LATITUDE', 'LONGITUDE']].values\n data_breeding = df_breeding[['latitude_epsg_4326', 'longitude_epsg_4326']].values\n\n # Define the distance function\n metric = lambda lat, lng: vincenty(lat, lng).meters / 1000. # in kilometers\n\n # Compute the full distance matrix\n distMat = cdist(data_breeding, data_krill, metric=metric)\n np.save(fname, distMat)\n\n return(distMat)\n \n def create(self, radius):\n \"\"\"\n Assemble the features that computes the average number of observed krill per location for the\n specified radius.\n \"\"\"\n self.krillbase = dict()\n for idx, site_id in enumerate(list(self.df_breeding.index)):\n \n krill_stations = np.where(self.distMat[idx,:] <= radius)[0]\n for year in range(1980,2017):\n if len(krill_stations) == 0:\n krill = np.nan\n else:\n # Select only those observations that are within the range and the year\n krill = self.df_krill.iloc[krill_stations,:].copy()\n krill = krill[(krill['SEASON'] == year)]['STANDARDISED_KRILL_UNDER_1M2']\n krill = krill.sum() / krill_stations.shape[0]\n \n self.krillbase[(site_id, year)] = krill\n \n def query(self, site_id, year, nan_value=0):\n \"\"\"\n Get the krill concentration for a given site and year. 
If no krill was observed, set the value\n to nan_value.\n \"\"\"\n val = self.krillbase[(site_id, year)]\n if np.isnan(val):\n val = nan_value\n \n return(val)","repo_name":"nberliner/Random-Walk-of-the-Penguins","sub_path":"src/features/krillbase.py","file_name":"krillbase.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33404372977","text":"import pika\nimport random\nimport json\n\n\nif __name__ == '__main__':\n credentials = pika.PlainCredentials('USERNAME', 'PASSWD')\n parameters = pika.ConnectionParameters(host='192.168.0.156',\n port=5672,\n virtual_host='/',\n credentials=credentials)\n connection = pika.BlockingConnection(parameters)\n\n channel = connection.channel()\n\n channel.exchange_declare(exchange='workers', exchange_type='direct')\n channel.queue_declare(queue='worker-q1-python', durable=True)\n channel.queue_bind(exchange='workers', queue='worker-q1-python')\n\n index = 0\n while True:\n index += 1\n if index > 1000:\n break\n\n message = {\n \"id\": index,\n \"first_arg\": random.randint(1, 100),\n \"second_arg\": random.randint(1, 100),\n }\n\n channel.basic_publish(\n exchange='workers',\n routing_key='worker-q1-python',\n body=json.dumps(message),\n properties=pika.BasicProperties(\n delivery_mode=2\n ))\n print(\" [x] Sent %r\" % message)\n\n connection.close()\n","repo_name":"revolman/configs","sub_path":"python/rabbitMQ/sender_with_payload.py","file_name":"sender_with_payload.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41153375207","text":"import matplotlib.pyplot as plt\nimport statistics\nimport numpy as np\nimport math\nimport hmm\ndef standart_instate(self,obs):\n\tmean=self.mean\n\tstdev= self.stdev\n\tif obs>mean+stdev:\n\t\treturn 0\n\telif obs>mean+0.5*stdev:\n\t\treturn 1\n\telif obs>mean-0.5*stdev:\n\t\treturn 2\n\telif obs>mean-stdev:\n\t\treturn 3\n\telse:\n\t\treturn 4\ndef goglobal(self,obs):\n\tt = 10 - int(obs*10-12)\n\treturn t\ndef getcolor(i):\n\tif i==0:\n\t\treturn '#cc0099'\n\telif i==1:\n\t\treturn '#3399ff'\n\telif i==2:\n\t\treturn '#99ff00'\n\telif i==3:\n\t\treturn 'grey'\n\telse:\n\t\treturn 'silver'\n\ndef yinstate(obs,y):\n\tmean = statistics.mean(y)\n\tstdev = statistics.stdev(y)\n\tif obs>mean+stdev:\n\t\treturn 0\n\telif obs>mean+0.5*stdev:\n\t\treturn 1\n\telif obs>mean-0.5*stdev:\n\t\treturn 2\n\telif obs>mean-stdev:\n\t\treturn 3\n\telse:\n\t\treturn 4\n\ndef getMargins(state, y):\n\ta = []\n\tres = []\n\ty = list(map(lambda x: yinstate(x,y)==state,y))\n\tfor i in range (1,len(y)):\n\t\tif y[i]:\n\t\t\tif i==1: a.append(i)\n\t\t\telif y[i-1]==False: a.append(i)\n\t\t\tif i==len(y)-1 or y[i+1]==False:\n\t\t\t\ta.append(i)\n\t\t\t\tres.append(a)\n\t\t\t\ta=[]\n\treturn res\n\nn = 88;\nx = range(n)\ny = np.loadtxt('raw_data/proc_bezrobitta.txt',delimiter='\\t')\nh= max(y)\nl= min(y)\nplt.plot(x, y,color='b')\nsecond_model=hmm.SHMM(5,5,y,standart_instate,standart_instate)\nsecond_model.show()\nsecond_model.Baum_Welch()\nsecond_model.show()\nsecond_model.print(\"article.txt\")\n#second_model.print(\"files/test_model2.txt\")\nprint(\"Prediction\")\nfor i in [0,1,2,3,4]:\n\tprint(\"State \", i,\"P(Crisis) = \", 
second_model.PredictCrisis(4,i))\n\nplt.show()\t","repo_name":"pustovitDmytro/hmm","sub_path":"article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37637639365","text":"\"\"\"Tests for xml_parser.py.\"\"\"\nfrom defusedxml import ElementTree\nimport pytest\n\nfrom radikopodcast.exceptions import XmlParseError\nfrom radikopodcast.radikoxml.xml_parser import XmlParserProgram, XmlParserStation\n\n\nclass TestXmlParserStation:\n \"\"\"Tests for XmlParserStation.\"\"\"\n\n @staticmethod\n @pytest.mark.parametrize(\n (\"xml\", \"expect\"),\n [(\"<radiko />\", \"Can't find title. XML: \"), (\"<radiko><title /></radiko>\", \"No title text. XML: \")],\n )\n def test_xml_parser_program_title_error(xml: str, expect: str) -> None:\n \"\"\"XmlParserProgram should raise XmlParseError when XML is invalid.\"\"\"\n # Reason: To omit coding fixture\n element_tree = ElementTree.fromstring(xml, forbid_dtd=True)\n parser = XmlParserProgram(element_tree, None, None, \"JP13\") # type: ignore[arg-type]\n with pytest.raises(XmlParseError) as excinfo:\n # Reason: Property has logic. pylint: disable=pointless-statement\n parser.title\n assert expect in str(excinfo.value)\n\n @staticmethod\n @pytest.mark.parametrize(\n (\"xml\", \"expect\"),\n [(\"<stations />\", \"Can't find id. XML: \"), (\"<stations><id /></stations>\", \"No id text. XML: \")],\n )\n def test_xml_parser_station_id_error(xml: str, expect: str) -> None:\n \"\"\"XmlParserStation should raise XmlParseError when XML is invalid.\"\"\"\n xml_parser_program = XmlParserStation(ElementTree.fromstring(xml, forbid_dtd=True))\n with pytest.raises(XmlParseError) as excinfo:\n # Reason: Property has logic. pylint: disable=pointless-statement\n xml_parser_program.id\n assert expect in str(excinfo.value)\n\n @staticmethod\n @pytest.mark.parametrize(\n (\"xml\", \"expect\"),\n [(\"<stations />\", \"Can't find name. XML: \"), (\"<stations><name /></stations>\", \"No name text. XML: \")],\n )\n def test_xml_parser_station_name_error(xml: str, expect: str) -> None:\n \"\"\"XmlParserStation should raise XmlParseError when XML is invalid.\"\"\"\n xml_parser_program = XmlParserStation(ElementTree.fromstring(xml, forbid_dtd=True))\n with pytest.raises(XmlParseError) as excinfo:\n # Reason: Property has logic. 
pylint: disable=pointless-statement\n xml_parser_program.name\n assert expect in str(excinfo.value)\n","repo_name":"road-master/radiko-podcast","sub_path":"tests/radikoxml/test_xml_parser.py","file_name":"test_xml_parser.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"6902401448","text":"def imageSmoother(M):\n m=len(M)\n n=len(M[0]) if m else 0\n for i in range(m):\n for j in range(n):\n summ=0\n cnt=0\n for ind_i in range(max(0,i-1),min(m,i+2)):\n for ind_j in range(max(0,j-1),min(n,j+2)):\n summ+=M[ind_i][ind_j]&0xFF\n cnt+=1\n \n M[i][j]|=(summ//cnt)<<8\n \n for i in range(m):\n for j in range(n):\n M[i][j]=(M[i][j]>>8)&0xFF\n return M\n\nM=[[1,1,1],\n [1,0,1],\n [1,1,1]]\nprint(imageSmoother(M))\n","repo_name":"Wanlingj/LeetCode","sub_path":"Google/Array and Strings/Image Smoother.py","file_name":"Image Smoother.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21294122096","text":"import os\nimport mygene\nimport json\nimport pandas as pd\nfrom biomedkg_utils import switch_dictset_to_dictlist, switch_dictlist_to_dictset, output_edgefile_onerel_noweight\ndef download_transcription_factor_data_from_grndb():\n grndb_link_prefix = \"http://www.grndb.com/download/txt?condition=\"\n tissues = ['Heart_GTEx','Adult-Heart', 'Fetal-Heart','whole_NeonatalHeart',\n 'Blood_Vessel_GTEx','Adipose_Tissue_GTEx', \n 'Blood_GTEx', 'Adrenal_Gland_GTEx', 'Breast_GTEx', 'Colon_GTEx', \n 'Esophagus_GTEx','Kidney_GTEx','Liver_GTEx','Lung_GTEx',\n 'Muscle_GTEx','Esophagus_GTEx', 'Nerve_GTEx', 'Ovary_GTEx', \n 'Pancreas_GTEx','Pituitary_GTEx', 'Prostate_GTEx', 'Salivary_Gland_GTEx', \n 'Skin_GTEx','Small_Intestine_GTEx', 'Spleen_GTEx', 'Stomach_GTEx', \n 'Testis_GTEx', 'Thyroid_GTEx', 'Uterus_GTEx', 'Vagina_GTEx']\n\n if not os.path.exists('input/GRNdb'):\n os.mkdir('input/GRNdb')\n\n for tissue in tissues:\n url = grndb_link_prefix+tissue\n os.system(f'wget -N -P input/GRNdb/ {url}') \n\n\ndef map_tf_name_to_target_name():\n # Collect gene names of all TFs and targets from each tissue\n target_gene_names = set()\n tf_gene_names = set()\n tf_gene_name_to_target_gene_name = dict()\n root = 'input/GRNdb/'\n\n for file in os.listdir(root):\n if 'condition' not in file:\n continue\n tissue_df = pd.read_table(os.path.join(root, file))\n try:\n high_conf_df = tissue_df[tissue_df['Confidence']=='High']\n except:\n print(f'Issue with {file}. 
Columns = {tissue_df.columns}')\n continue\n target_gene_names = target_gene_names.union(set(high_conf_df['gene']))\n tf_gene_names = tf_gene_names.union(set(high_conf_df['TF']))\n high_conf_dict = high_conf_df.groupby('TF')['gene'].apply(set).to_dict()\n\n # Add this file to the TF-to-Target dictionary\n for tf, target in high_conf_dict.items():\n tf_gene_name_to_target_gene_name.setdefault(tf, []).extend(target)\n\n gene_names = tf_gene_names.union(target_gene_names)\n tf_gene_name_to_target_gene_name = switch_dictset_to_dictlist(tf_gene_name_to_target_gene_name)\n json.dump(tf_gene_name_to_target_gene_name, open('output/gene2gene/tf_gene_name_to_target_gene_name.json','w'))\n json.dump(list(gene_names), open('output/gene2gene/gene_names_tf.json','w'))\n \n\ndef map_gene_name_to_id_via_mygeneinfo():\n # Use MyGeneInfo Python client to map gene names to IDs\n mg = mygene.MyGeneInfo()\n\n gene_names = json.load(open('output/gene2gene/gene_names_tf.json'))\n params = {'qterms':gene_names, \n 'species':9606,\n 'scopes':'symbol',\n 'entrezonly':True,\n 'as_dataframe':True}\n mg_df = mg.querymany(**params)\n mg_df = mg_df[mg_df['symbol'].isin(gene_names)]\n gene_name_to_gene_id = dict(zip(mg_df['symbol'], [int(id_) for id_ in mg_df['_id']]))\n json.dump(gene_name_to_gene_id, open('output/gene2gene/gene_name_to_gene_id.json','w'))\n \n \ndef map_tf_to_target_with_ids():\n # Import\n with open('output/gene2gene/tf_gene_name_to_target_gene_name.json') as fin:\n tf_gene_name_to_target_gene_name = json.load(fin)\n with open('output/gene2gene/gene_name_to_gene_id.json') as fin:\n gene_name_to_gene_id = json.load(fin)\n with open('output/protein2gene/all_entrez2uniprot.json') as fin:\n gene_id_to_protein_id = json.load(fin)\n\n #print(len(tf_gene_name_to_target_gene_name))\n #print(len(gene_name_to_gene_id))\n #print(len(gene_id_to_protein_id))\n\n tf_gene_id_to_target_gene_id = {}\n tf_protein_id_to_target_gene_id = {}\n for tf_name, target_names in tf_gene_name_to_target_gene_name.items():\n\n # Map TF gene ID & protein ID -to- target gene ID\n\n # TF gene ID and protein ID\n try:\n tf_id = str(gene_name_to_gene_id[tf_name])\n except:\n continue\n try:\n tf_protein_ids = gene_id_to_protein_id[tf_id]\n except:\n tf_protein_ids = -1\n\n # Target gene ID\n for target_name in target_names:\n try:\n target_id = gene_name_to_gene_id[target_name]\n except:\n continue\n\n # TF -to- Target\n tf_gene_id_to_target_gene_id.setdefault(tf_id, set()).add(str(target_id))\n if tf_protein_ids != -1:\n for tf_protein_id in tf_protein_ids:\n tf_protein_id_to_target_gene_id.setdefault(tf_protein_id, set()).add(str(target_id)) \n \n # Export\n tf_gene_id_to_target_gene_id = switch_dictset_to_dictlist(tf_gene_id_to_target_gene_id)\n with open('output/gene2gene/tf_gene_id_to_target_gene_id.json','w') as fout:\n json.dump(tf_gene_id_to_target_gene_id, fout)\n \n tf_protein_id_to_target_gene_id = switch_dictset_to_dictlist(tf_protein_id_to_target_gene_id)\n with open('output/protein2gene/tf_protein_id_to_target_gene_id.json','w') as fout:\n json.dump(tf_protein_id_to_target_gene_id, fout)\n \n \ndef export_transcription_factor_edges():\n tf_gene_id_to_target_gene_id = json.load(open('output/gene2gene/tf_gene_id_to_target_gene_id.json'))\n tf_protein_id_to_target_gene_id = json.load(open('output/protein2gene/tf_protein_id_to_target_gene_id.json'))\n\n output_edgefile_onerel_noweight(outpath='output/gene2gene/Gene_(Entrez)_targets_Gene_(Entrez).csv',\n columns=['Gene (Entrez)','Gene (Entrez)', 'Relationship'],\n 
dictionary=tf_gene_id_to_target_gene_id,\n rel='-genes_transcription_factor_targets->', \n prefix_col1='Entrez:', \n prefix_col2='Entrez:',\n edges_to_use_folder=False,\n )\n\n output_edgefile_onerel_noweight(outpath='output/protein2gene/Protein_(UniProt)_targets_Gene_(Entrez).csv',\n columns=['Protein (UniProt)','Gene (Entrez)', 'Relationship'],\n dictionary=tf_protein_id_to_target_gene_id,\n rel='-transcription_factor_targets->', \n prefix_col1='UniProt:', \n prefix_col2='Entrez:',\n )\n print('Exported transcription factor edges')\n \nif __name__ == '__main__': \n #download_transcription_factor_data_from_grndb()\n map_tf_name_to_target_name()\n map_gene_name_to_id_via_mygeneinfo()\n map_tf_to_target_with_ids()\n export_transcription_factor_edges()","repo_name":"Yijia-Xiao/Know2BIO","sub_path":"dataset/create_edge_files_utils/protein_to_gene_ie_transcription_factor_edges.py","file_name":"protein_to_gene_ie_transcription_factor_edges.py","file_ext":"py","file_size_in_byte":6714,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"44192182262","text":"import os\nimport numpy as np\n\n\nclass Decoder(object):\n \"\"\"\n Viterbi decoder for one-state HMMs.\n \"\"\"\n\n def __init__(self, feeder, language_model, bigram=False):\n \"\"\"\n Initialize decoder.\n :param feeder: (object) feeder object\n :param language_model: (dict) n-gram language model\n :param bigram: (boolean) bigram language model used\n \"\"\"\n self.network = self._get_network(feeder)\n self.language_model = language_model\n self.bigram = bigram\n\n self.transition = np.array([hmm[\"transition\"] for hmm in self.network])\n self.selfloop = np.array([hmm[\"selfloop\"] for hmm in self.network])\n\n def _get_network(self, feeder):\n \"\"\"\n Build HMMs network for decoding.\n :param feeder: (object) feeder object\n :return: (list) network structure with selfloop and transition probabilities\n \"\"\"\n leaves_path = os.path.basename(feeder.features_path).split(\"_\")[0] + \".csv\"\n\n # get correct order of HMMs according to OHE\n hmms = feeder.one_hot_decode(np.eye(len(feeder.encoder.classes_), dtype=int))\n hmms_order = {phoneme: i for i, phoneme in enumerate(hmms)}\n\n # build network structure\n with open(os.path.join(\"..\", \"data\", \"hmm\", leaves_path)) as fr:\n network = []\n for i, line in enumerate(fr.readlines()):\n params = line.strip().split(\",\")\n network.append((hmms_order[params[0]], {\n \"phoneme\": params[0],\n \"transition\": np.log10(float(params[1])),\n \"selfloop\": np.log10(float(params[2]))\n }))\n\n return [node[1] for node in sorted(network)]\n\n def _get_penalty(self, min_index=None):\n \"\"\"\n Get language model penalty.\n :param min_index: (int) index of last visited HMM\n :return: (ndarray) language model penalties per HMM\n \"\"\"\n fallback = np.log10(1e-7)\n\n if min_index and self.bigram:\n keys = [(self.network[min_index][\"phoneme\"], hmm[\"phoneme\"]) for hmm in self.network]\n else:\n keys = [(hmm[\"phoneme\"],) for hmm in self.network]\n\n return np.array([self.language_model.get(key, fallback) for key in keys])\n\n def _get_min_likelyhood(self, trellis, min_index):\n \"\"\"\n Calculate minimum likelyhood.\n :param trellis:\n :param min_index: (int) index of last visited HMM\n :return: (tuple) index and minimum likelyhood\n \"\"\"\n likelyhood = trellis - self.transition - self._get_penalty(min_index)\n min_index = np.argmin(likelyhood)\n\n return min_index, likelyhood[min_index]\n\n def decode(self, observations):\n \"\"\"\n 
Find and decode most likely path.\n :param observations: (ndarray) phoneme probabilities per timestep\n :return: (ndarray) decoded transcription\n \"\"\"\n observations = np.log10(observations)\n\n # initialize\n trellis_dim = (len(self.network), len(observations))\n\n trellis = np.zeros(trellis_dim)\n backpointer = np.ones(trellis_dim).astype(np.int32)\n min_index = None\n\n trellis[:, 0] = -observations[0] - self._get_penalty()\n\n # find most likely paths\n for t in range(1, len(observations)):\n previous_trellis = trellis[:, t - 1]\n\n # last emitting state is the one with lowest likelihood\n min_index, min_likelyhood = self._get_min_likelyhood(previous_trellis, min_index)\n\n previous_state = np.repeat(min_likelyhood, trellis_dim[0])\n same_state = trellis[:, t - 1] - self.selfloop\n\n concatenated_states = np.array([previous_state, same_state])\n\n trellis[:, t] = np.min(concatenated_states, axis=0) - observations[t]\n backpointer[:, t] = np.argmin(concatenated_states, axis=0)\n\n # decode best path\n tokens = [trellis[:, -1].argmin()]\n\n for t in range(len(observations) - 1, 0, -1):\n if backpointer[tokens[-1], t]:\n continue\n\n tokens.append(trellis[:, t - 1].argmin())\n\n return [self.network[token][\"phoneme\"] for token in tokens[::-1]]\n","repo_name":"MajerMartin/phoneme_classification","sub_path":"src/lib/decoders/Decoder.py","file_name":"Decoder.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"40142252757","text":"from random import randint\nfrom time import time\n\ndef random_array(n=10000, left=1, right=10000):\n\treturn [randint(left,right) for _ in range(n)]\n\ndef nearly_ordered_array(n,switch_num):\n\tmyarr = list(range(n))\n\tfor _ in range(switch_num):\n\t\ta = randint(0, n-1)\n\t\tb = randint(0, n-1)\n\t\tmyarr[a],myarr[b] = myarr[b],myarr[a]\n\treturn myarr\n\ndef is_sorted(array):\n\tsorted = True\n\tfor i in range(len(array)-1):\n\t\tif array[i] > array[i+1]:\n\t\t\tsorted = False\n\t\t\tbreak\n\treturn sorted\n\ndef test_sort(sort_name, sort,array):\n\ttemp_array = array[:]\n\tstart = time()\n\tsort(array)\n\tspend = time() - start\n\t# an AssertionError is raised if the result is not sorted correctly\n\ttry:\n\t\tassert(is_sorted(array))\n\texcept AssertionError:\n\t\tprint('the original array is : ',temp_array)\n\t\tprint('the result of array is : ',array)\n\n\telse:\n\t\tprint('[success] ' + sort_name + ' : '+ str(spend) + ' seconds')\n","repo_name":"Touchfl0w/Algorithm-Practices","sub_path":"Sort-Basic/sort_helper.py","file_name":"sort_helper.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70021891681","text":"'''\nProblem: Jump Game II\nDescription:\nGiven an array of non-negative integers, you start at the first position of the array.\n\nEach element in the array represents the maximum jump length from that position.\n\nYour goal is to reach the last position of the array using the minimum number of jumps.\n\nExample:\n\nInput: [2,3,1,1,4]\nOutput: 2\nExplanation: The minimum number of jumps to reach the last position is 2.\n Jump 1 step from index 0 to index 1, then jump 3 steps to reach the last position of the array.\nNote:\n\nAssume you can always reach the last position of the array.\n'''\n\n'''\nDynamic programming, computing every position from back to front, 96 ms\n'''\nclass Solution:\n def jump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) <= 1: return 0\n # steps[i] is the minimum number of jumps needed to reach the end from position i\n steps = {len(nums) - 1: 0}\n for i in range(len(nums) - 2, -1, -1):\n if nums[i] + i >= len(nums) - 1:\n steps[i] = 1\n continue\n\n steps[i] = len(nums)\n for j in range(nums[i], 0, -1):\n steps[i] = min(steps[i], 1 + steps[i + j])\n if steps[i] == 2:\n # steps[i] can be 2 at the smallest, no less, so we can break\n break\n\n return steps[0]\n\n\n'''\n52 ms\n'''\nclass Solution:\n def jump(self, 
nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n k=len(nums)-1\n j=0;maxs=0;result=0\n\n while j<k:\n if nums[j]==1:\n j+=1;result+=1\n\n elif nums[j]+j>=k:\n return result+1\n else:\n maxs=1+nums[j+1]\n sym=1\n for i in range(1,nums[j]+1):\n if i+nums[j+i]>maxs:\n maxs=i+nums[j+i]\n sym=i\n\n j+=sym;result+=1\n\n return result\n\n'''\n56 ms\n'''\nclass Solution:\n def jump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n farthest = 0\n end = 0\n res = 0\n for i in range(len(nums) - 1):\n farthest = max(farthest, i + nums[i])\n if i == end:\n res += 1\n end = farthest\n return res","repo_name":"txwjj33/leetcode","sub_path":"problems_100/045_jump.py","file_name":"045_jump.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39808894380","text":"\"\"\"\n 1、 The distributions of gas temperature (a), radial velocity (b), baryon number density (c) and O VI number density (d) along the LOS at galactic longitude l = 0o and different galactic latitudes b = 30o (black solid line), 60o (red solid line), 90o (blue solid line). Also the synthesized spectra of O VI absorption are shown in panel (e): the solid lines are the spectra from our simulation, and the dashed lines are the results of fitted spectra. The green dashed line in panel (c) is the mean baryon number density of the universe.\n 2、 The final figure produced is 光谱.pdf\n 3、 Note that Li Hui's simulation data 'snapshot_155.hdf5' is required to run this program\n\n\"\"\"\n\nimport gc # free memory; otherwise the supercomputer's memory is not enough\nimport yt\nimport os, sys, time\nimport numpy as np\nimport h5py\nimport numpy as np\nimport math\nimport scipy.interpolate\nfrom scipy.interpolate import interp1d\n#import select_snapshot_number\nfrom decimal import *\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom astropy.modeling import models, fitting \nfrom scipy import integrate\nfrom lmfit import Model\n\nPROTONMASS = 1.67262178e-24\nMSUN = 1.989e33\nMPC = 3.085678e24\nKPC = 3.085678e21\nECHARGE = 4.80320425e-10 # 3.0e9 ?\nEMASS = 9.10938215e-28\nCLIGHT = 2.99792458e10\nkB=1.3806505e-16 # CGS units are used: k=(1.38e-23)*(e3)*(e4). is the Boltzmann constant in CGS units \nKBev=(1.380649e-23)/(1.602176e-19) # expressed in electron volts, i.e. ev/K, used to plot all-sky maps of (K*T). is the Boltzmann constant\nh=4.1356676969e-15 # units: ev·s\nhubble_constant=70*1e5/MPC # the leading 70 is because Li Hui's reduced Hubble constant is 0.7; 1e5 converts km to cm; the Hubble constant is 70km s-1 Mpc-1, so all units here are CGS\n\nfrom scipy.interpolate import griddata\n\n\n\nbegintime=time.perf_counter() \n\n\n# plotting, see: https://matplotlib.org/stable/gallery/subplots_axes_and_figures/ganged_plots.html#sphx-glr-gallery-subplots-axes-and-figures-ganged-plots-py\n#fig, axs = plt.subplots(1, 1, sharex=True)# four rows, one column, sharing the x axis\n#fig.subplots_adjust(hspace=0)\nfig, axs = plt.subplots(1, 1)\n\n\n# Voigt function: x is the wavelength range of integration, lam the OVI resonance wavelength, bpar the Doppler parameter (including temperature and turbulent velocity), logn the log of the OVI column density, gam the Einstein A coefficient\ndef Abvoigt(x, lam, bpar, logn, z, fosc, gam):\n '''\n my voigt profile model\n '''\n\n x = np.array(x, dtype='float64')\n wave = x\n\n c = 2.99792e10 # cm/s\n m_e = 9.1094e-28 # g\n e = 4.8032e-10 # cgs units\n\n b = bpar*1e5\n C_a = np.sqrt(np.pi)*e**2*fosc*lam*1.e-8/m_e/c/b\n a = lam*1.e-8*gam/(4.*np.pi*b)\n\n dl_D = b/c*lam\n x = x/(z+1.)\n u = (x - lam)/dl_D + 0.00001\n #Voigt Profile Approximation from T. Tepper-Garcia 2006, 2007.\n P = u**2\n H0 = np.exp(-u**2)\n Q = 1.5/u**2\n H = H0 - a/np.sqrt(np.pi)/P * (H0*H0*(4.*P*P + 7.*P + 4. 
+ Q) - Q - 1) # similar to Eq. (16.106) on page 711 of Mo Houjun's textbook\n tau = np.float64(C_a) * 10 ** logn * H\n flux = np.exp(-tau)\n return flux\n\n# here (g1,g2) are the (gi,gk) from the document Parameters of Lines.pdf\ndef obgamma(lam, g1, g2, f):\n '''\n calculate the Einstein A coefficient\n '''\n return 0.6770e16*f*(g1/g2)/lam**2\n\n\n\n# interpolate to get the OVI ionization fraction at each temperature; for the interpolation method see “https://blog.csdn.net/weixin_44524040/article/details/95221757” code 1, using linear interpolation\n\n\ndef ion_fracZ(T,H_number_density_each_bin):\n ionization_data = np.loadtxt('collion.txt')\n values=10**(ionization_data[:,2])\n OVI_ionization_temperature_table=10**(ionization_data[:,1])\n H_number_density_table=10**(ionization_data[:,0])\n\n points=np.transpose(np.vstack((OVI_ionization_temperature_table,H_number_density_table)) ) # for np.transpose see https://www.cnblogs.com/sggggr/p/12192242.html\n grid_x=T\n grid_y=H_number_density_each_bin\n \n grid_z1 = griddata(points, values, (grid_x, grid_y), method='nearest')\n\n \n \n \n return grid_z1\n\n\n\n \n# compute the optical depth\ndef optical(nOVI,Doppler_T,Radial_velocity,Radius): \n\n\n f=fosc\n q=np.array(math.pi*ECHARGE**2/(EMASS*CLIGHT)*f)\n B=Doppler_T\n OVI_number_density=nOVI\n Velocity=Radial_velocity\n v0=CLIGHT/wavelength\n optical_depth=[]\n \n \n redshift1=Radius*hubble_constant/CLIGHT # the redshift here is the redshift of each grid point.\n \n for v in V:\n \n optical_depth.append( integrate.trapz(q*CLIGHT/(math.pi**(1/2)*v0*B)*OVI_number_density*np.exp( -((1+redshift1)*v/v0 -1+ Velocity/CLIGHT)**2 * CLIGHT**2/B**2)*CLIGHT/hubble_constant, redshift1 ) ) # Eq. (5) of Prof. Fang's 2002 paper (THE ASTROPHYSICAL JOURNAL, 564:604-623, 2002), where dl/dz=c/H, which follows from Hubble's law v=cz=Hl\n\n \n\n optical_depth=np.array(optical_depth)\n\n return optical_depth\n\n\n\n\n\n\n\n\n\n\nds = yt.load('snapshot_155.hdf5')\n\nlalo_each_step=5 # step size in longitude and latitude\n\n\n# whenever length_all_bin exceeds 218.808, the ray to l=270, b=-45 breaks\n\nlongitude=np.arange(0,1,1)\n#longitude=np.arange(0,365,lalo_each_step) # longitude range [0,360]; 360 is included for high-resolution interpolation.\n#longitude=np.arange(0,1,1) # longitude range\n\nlatitude=np.arange(30,91,30)\n#latitude=np.arange(-90,91,lalo_each_step) # latitude range\n#latitude=np.arange(0,1,1) # latitude range\n\n\nlatitude_length_list=np.arange(len(latitude))\nlongitude_length_list=np.arange(len(longitude))\n\n\nOVI_column_number_density=np.zeros((len(longitude),len(latitude)))\nEquivalent_width=np.zeros((len(longitude),len(latitude))) \nH_column_number_density=np.zeros((len(longitude),len(latitude)))\nDoppler=np.zeros((len(longitude),len(latitude)))\nlength_all_bin=260 # units are all kpc, so the KPC factor is needed when converting to CGS later\n\n\n\nx_center_of_mass=300.0\ny_center_of_mass=300.0\nz_center_of_mass=300.0\n\n\n \nx1= x_center_of_mass -8.2 # coordinates of the Earth's position.\n \ny1= y_center_of_mass\n \nz1= z_center_of_mass\n\n\n\n\n\n# outer loop over longitude, inner loop over latitude\nfor l,l_location in zip(longitude,longitude_length_list):\n\n\n \n \n print(\"current longitude: %d\"%l)\n \n Color=['black','red','blue']\n \n # loop over latitude\n for b,b_location,c in zip(latitude,latitude_length_list,Color) :\n\n \n\n x2= length_all_bin *math.cos(math.pi/180 * b)*math.cos(math.pi/180 *l) + x1 # formula converting to Cartesian coordinates, accounting for the observer not being at the origin: x=r*cos(b)*cos(l), with degrees converted to radians via math.pi/180\n\n y2= length_all_bin *math.cos(math.pi/180 * b)*math.sin(math.pi/180 *l) + y1 # y=r*cos(b)*sin(l) \n \n z2= length_all_bin *math.sin(math.pi/180 *b) + z1 # z=r*sin(b)\n \n \n \n ray = ds.ray([x1, y1, z1], [x2, y2, z2])\n \n \n rsort = np.argsort(ray[\"radius\"])\n \n \n radii = ray['radius'].in_units('kpc')[rsort]\n T = ray[('gas', 'temperature')].in_cgs()[rsort]\n \n OVI_metallicity=4.9e-04 # adopting, from Annu. Rev. Astron. Astrophys. 2009. 
47:481–522, the oxygen metallicity relative to the hydrogen number density, i.e. the O VI abundance from the solar abundances recommended by Li Hui,\n Radius=(radii.d)*KPC\n H_number_density_each_bin=ray[('gas', 'H_nuclei_density')].in_cgs()[rsort]\n H_column_number_density[l_location,b_location]=integrate.trapz(H_number_density_each_bin.d, Radius) \n\n \n \n nOxy = H_number_density_each_bin.d * OVI_metallicity\n \n \n ionfrac_OVI=ion_fracZ(T.d,H_number_density_each_bin.d)\n nOVI=nOxy*ionfrac_OVI\n \n \n columOVI= integrate.trapz(nOVI,(radii.d)*KPC)\n print(np.log10(columOVI))#14.807742996285528\n \n \n Doppler_T= (2*kB*T.d/(16*PROTONMASS) )**(1/2)\n \n\n \n \n Radial_velocity=ray[('gas', 'radial_velocity')].in_cgs()[rsort]\n Radial_velocity=Radial_velocity.d \n \n\n lam, fosc = 1031.9261, 1.33e-1 # OVI wavelength in angstroms, and oscillator strength (the fik from the document Parameters of Lines.pdf)\n gam = obgamma(lam, 2, 4, fosc) #calculate the Einstein A coefficient\n \n\n \n\n wavelength = lam * 1e-8 # OVI wavelength in cm\n v12 = CLIGHT/wavelength # OVI frequency\n \n\n\n dv=0.0001e15\n \n \n V = np.arange(2.898e15,2.910e+15,dv) \n \n E=h*V \n wave=CLIGHT/V *1e8 # multiply by 1e8 to convert units from cm to angstroms, so the resulting wavelengths lie in (1030.25003608,1034.4805314) angstroms \n\n \n optical_depth = optical(nOVI,Doppler_T,Radial_velocity,Radius) \n flux =np.exp(-optical_depth)\n \n\n\n \n \n\n\n mod = Model(Abvoigt) \n para = mod.make_params(lam=lam, bpar=100, logn=15.5, z=0, fosc=fosc, gam=gam)\n para['lam'].vary, para['fosc'].vary, para['gam'].vary = False, False, False\n para['bpar'].min, para['bpar'].max = 0, 3000\n para['bpar'].brute_step = 0.1\n para['logn'].min, para['logn'].max = 5, 30\n para['logn'].brute_step = 0.01\n para['z'].min, para['z'].max = -0.1, 0.1\n para['z'].brute_step = 1e-4\n\n out = mod.fit(flux, para, x=wave, method='leastsq') # fit using the Voigt function as template with least squares; para holds the fit parameters (lam, fosc and gam are fixed here, while bpar, logn and z are varied automatically to reach the best fit); x is the fitting range\n\n Doppler[l_location,b_location]=out.best_values['bpar'] # best-fit Doppler parameter \n OVI_column_number_density[l_location,b_location]=out.best_values['logn'] # log of the best-fit OVI column density\n redshit= out.best_values['z']\n \n # plug the best-fit bpar and logn back into the Voigt function to compute the optical depth of the fitted curve.\n flux_fitting=Abvoigt(x=wave, lam=lam, bpar=Doppler[l_location,b_location], logn=OVI_column_number_density[l_location,b_location], z=redshit, fosc=fosc, gam=gam)\n \n Equivalent_width[l_location,b_location]=(-integrate.trapz(1-flux_fitting,wave))*1e3 # the minus sign is because the wavelength spacing in the integration is negative; *1e3 converts to milli-angstroms.\n \n print(\"OVI_column_number_density__longitude=%d,latitude=%d is %f\"%(l,b,OVI_column_number_density[l_location, b_location])) #17.538821\n \n\n # plotting below\n\n axs.set_ylabel('exp(-τ)')\n axs.set_xlabel('Wave Length ($\\AA$)')\n #plt.xlim(1e-1,260)\n axs.plot(wave, flux,'-',lw=1, color=c,label=\"$l$=0$^{o}$, $b$=%d$^{o}$\"%b)\n axs.plot(wave, out.best_fit,'k:',lw=1, color=c)\n axs.tick_params(axis='both', which='major', direction='in', labelsize=10, pad=8)\n axs.tick_params(axis='both', which='minor', direction='in')\n axs.xaxis.set_ticks_position('both')\n axs.yaxis.set_ticks_position('both')\n axs.set_xlim(1030.5, 1033)\n axs.set_ylim(0,1.1)\n axs.set_xticks([1030.5, 1031.0, 1031.5, 1032.0, 1032.5, 1033.0],['1030.5','1031.0','1031.5','1032.0','1032.5','1033.0'])\n \n hl=axs.legend(loc='lower right',frameon=False,fontsize='large')\n\n\nplt.text(1032.8,1.03,\"(e)\",fontdict={'size':'16','color':'black'})\n\nplt.savefig(\"光谱.pdf\",format='pdf', dpi=1000)\n\n\n\n\n\"\"\"\n # plotting below\n plt.style.use('classic')\n \n \n ax=plt.subplot(111)\n plt.plot(wave, flux,'-',lw=1, color=c,label=\"$l$=0$^{o}$, $b$=%d$^{o}$\"%b)\n plt.plot(wave, out.best_fit,'k:',lw=1, color=c)\n plt.xlim(1030.5, 1033)\n plt.ylim(0,1.1)\n \n plt.xlabel(\"wave length 
($\\AA$)\")\n plt.ylabel(\"exp(-τ)\")\n #plt.title(\"l=0$^{o}$, b=180$^{o}$, logn=15.5 cm$^{-2}$, N_really=14.8 cm$^{-2}$, N_fitting=17.5 cm$^{-2}$\")\n \n plt.xticks([1030.5, 1031.0, 1031.5, 1032.0, 1032.5, 1033.0],['1030.5','1031.0','1031.5','1032.0','1032.5','1033.0'])\n hl=plt.legend(loc='lower right',frameon=False,fontsize='large')\n \n \n plt.text(1032.8,1.03,\"(e)\",fontdict={'size':'16','color':'black'})\n \n\"\"\"\n\n\n\n\n\n\n\n","repo_name":"zhangzhijiexmu/jupyter-notebook","sub_path":"T、V、n、n_{OVI } vs Distance and exp(τ) vs Wave Length/光谱.py","file_name":"光谱.py","file_ext":"py","file_size_in_byte":12461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7231671031","text":"from PIL import Image\r\nimport numpy as np\r\nfrom utils import *\r\nfrom Spacial import *\r\nimport time\r\nfrom load_model import *\r\n\r\n\r\n\r\n\"\"\"\r\ntest = np.zeros([100,100,3])\r\n\r\nfor i in range(100):\r\n\tfor j in range(100):\r\n\t\ttest[i][j][0] = (j+100-i)/200\r\n\t\ttest[i][j][1] = (j+100-i)/200\r\n\t\ttest[i][j][2] = (j+100-i)/200\r\n\r\nim = Matrix2Image(test)\r\n\r\nim.show()\r\n\"\"\"\r\n\r\narr = np.array\r\n\r\nmy_space = Spacial()\r\n\r\nmodel_list,max_index,min_index = get_model('horse.obj',detail=True)\r\n\r\nmy_space.AddObjects(model_list)\r\n\r\nLight = Parallel_light(orientation = arr([-1.,0.,-1.]))\r\n\r\nmy_space.AddLight(Light)\r\n\r\ncamera = Camera()\r\n\r\ncamera = auto_camera(camera,max_index,min_index,resolution_width = 20, resolution_height = 20, match_size = True, axis = 2)\r\n\r\nmy_space.setCamera(camera)\r\n\r\nrender = my_space.Render(max_iter = 2)\r\n\r\nim = Matrix2Image(render)\r\n\r\nim.show()\r\n\r\npath_to_save = 'my_space_final_horse.png'\r\n\r\nim.save(path_to_save)\r\n\r\n\"\"\"\r\nmy_circle = Circle(np.array([0,0,0]),2,np.array([1.,1.,1.]),np.array([1.,0.,0.]),np.array([1.,0.,0.]))\r\n\r\nmy_circle2 = Circle(np.array([0,0,6]),3,np.array([1.,1.,1.]),np.array([0.,1.,0.]),np.array([0.,1.,0.]))\r\n\r\nmy_Triangle = Polynominal([arr([-10,10,-10]),arr([-10,10,10]),arr([-10,-10,10]),arr([-10,-10,-10])],np.array([1.,1.,1.]),np.array([1.,1.,1.]),np.array([1.,1.,1.]))\r\n\r\nmy_cube = Cube(arr([0,0,0]), 10,10,10, rotation = arr([0.,0.90,0.]))\r\n\r\n#my_space.AddObject(my_circle)\r\n\r\nLight_1 = PointLight(arr([0.,0.,40.]))\r\nLight_2 = Parallel_light(orientation=arr([-1.,0.,-1.]))\r\n\r\nmy_space.AddLight(Light_2)\r\n\r\n#my_space.AddObject(my_circle2)\r\n\r\n#my_space.AddObjects(my_Triangle)\r\n\r\nmy_space.AddObjects(my_cube)\r\n\r\nbefore = time.time()\r\nrender = my_space.Render(orth = True, resolution_width = 200, resolution_height = 200, width = 20, height = 20)\r\nafter = time.time()\r\n\r\nprint(\"Total time used to render: {}\".format(after-before))\r\n\r\nim = Matrix2Image(render)\r\n\r\nim.show()\r\n\r\nim.save(path_to_save)\r\n\r\n#my_space.render_test(orth = True, y = 0, z = 9.5)\r\n\"\"\"\r\n\r\n\"\"\"\r\nmy_space = Spacial()\r\n\r\nplane1 = Cube(arr([0,0,30]),10,80,10)\r\nplane2 = Cube(arr([0,0,50]),10,60,10)\r\ncube1 = Cube(arr([0,-15,40]),10,10,10)\r\ncube2 = Cube(arr([0,15,40]),10,10,10)\r\ncube3 = Cube(arr([0,-20,12.5]),10,10,25)\r\ncube4 = Cube(arr([0,20,12.5]),10,10,25)\r\nsurface = Plane(arr([0.,0.,-5.]),arr([0.,0.,1.]),texture_s = arr([1.,1.,1.]), texture_d = arr([1.,1.,1.]), texture_a = arr([1.,1.,1.]), ratio_s = 0.2, ratio_d = 0.8, ratio_a = 0.0, specular = True, decay = 0.5)\r\n\r\n#new_triangle = 
CompleteTriangle(arr([0.,-20.,-20.]),arr([0.,20.,-20.]),arr([0.,0.,20.]),arr([1.,-1.,0.]),arr([1.,1.,0.]),arr([1.,0.,1.]),\r\n#\ttexture_s = arr([[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]]),texture_d = arr([[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]]),texture_a = arr([[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]]))\r\n\r\nci = Circle(position = arr([0.,0.,5.]),radius = 5,texture_a = arr([1.,0.,0.]), texture_d = arr([1.,0.,0.]))\r\n\r\nmy_camera = Camera(position = arr([50,0,10]))\r\n\r\nLight = PointLight(arr([70,0,70]))\r\n\r\nLight_2 = Parallel_light(orientation = arr([-1,0,-1]))\r\n\r\n#my_space.AddObject(ci)\r\n\r\n\r\n\r\nmy_space.AddObjects(plane1)\r\nmy_space.AddObject(surface)\r\nmy_space.AddObjects(plane2)\r\nmy_space.AddObjects(cube1)\r\nmy_space.AddObjects(cube2)\r\nmy_space.AddObjects(cube3)\r\nmy_space.AddObjects(cube4)\r\nmy_space.AddObject(ci)\r\n\r\n#my_space.AddObject(new_triangle)\r\nmy_space.AddLight(Light_2)\r\n\r\nmy_space.setCamera(my_camera)\r\n\r\n#my_space.render_test(orth = True, y = 0, z = 0)\r\n\r\nbefore = time.time()\r\nrender = my_space.Render(orth = False, resolution_width = 1920, resolution_height = 1080, width = 160, height = 90, number = [1,1], max_iter = 2)\r\nafter = time.time()\r\n\r\nprint(\"Total time used to render: {}\".format(after-before))\r\n\r\nim = Matrix2Image(render)\r\n\r\nim.show()\r\n\r\npath_to_save = 'my_space_spe.png'\r\n\r\nim.save(path_to_save)\r\n\r\n\"\"\"","repo_name":"Frozenmad/Ray-Tracing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"71396843041","text":"from ._button_lights import ButtonLights\nfrom ._view import View\nfrom ._display import Display\nfrom config import RelayConfig\n\n\nrelay_menu_view_name: str = \"relay_menu_view\"\n\n\nclass RelayMenuView(View):\n\n def __init__(self, button_ligts: ButtonLights, relay_config: RelayConfig):\n super().__init__(button_ligts)\n self._relay_config = relay_config\n\n def name(self):\n return relay_menu_view_name\n\n def esc(self, display: Display):\n display.switch_view(\"menu_view\")\n\n def ok(self, display: Display):\n self._switch_state()\n\n def draw(self):\n self.write_line(0, \"Relay Config\")\n line = \"> Active: \"\n if self._relay_config.get_active():\n line += \"yes\"\n else:\n line += \"no\"\n self.write_line(1, line)\n self.write_line(2, \"\")\n self.write_line(3, \"\")\n\n self.set_button_lights(ok=True, esc=True)\n\n self.flush()\n\n def _switch_state(self):\n self._relay_config.set_active(not self._relay_config.get_active())\n self.draw()\n","repo_name":"ihrigb/stagebuzzer","sub_path":"display/_relay_menu_view.py","file_name":"_relay_menu_view.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26111099776","text":"import utils\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport models\nimport math\n#DATA PREPROCESSING\ndata_by_paramters, data_parameters = utils.read_data(\"validationData.csv\") #Read in CSV data into two lists. First contains the data organized by different action potential firign paramters. 
Second contains a list of the firing parameters used.\ncropped_data_by_paramters = utils.crop_data(data_by_paramters,1000) #Crop data of constant voltage\n#rand_data_by_parameters, rand_data_parameters = utils.randomize_data(cropped_data_by_paramters, data_parameters) #Randomize the dataset in a way that keeps the parameters and firing data associated with those parameters at the same index\nscaled_rand_data_by_parameters = utils.scale_data_using_data_removal(cropped_data_by_paramters,2) #Shrink the random data to be smaller\nnormalized_data = utils.normalize_data(scaled_rand_data_by_parameters,(-1,1)) #Normalize the data to values between -1 and 1 so that high values don't prevent the model from training properly\nparameters_split = utils.prepare_tags(data_parameters) #Split string tags into numbers\ntensor_data = torch.from_numpy(normalized_data).float() #Convert normalized and preprocessed data into a tensor that can be fed into the network\ntensor_parameters = torch.from_numpy(parameters_split).float() #Convert parameters into a tensor that can be fed into the network\n\ndevice = torch.device('cuda')\ntime_series_dims = normalized_data[0].ndim #Get size of time series data recordings; in this case it is only voltage\nLSTM_hidden_layer = 400 #How many LSTMs are in this layer\nnum_of_parameters = parameters_split[0].size #Get number of parameters that need to be predicted\nlearning_rate = 0.00001 #Learning rate for model\nepochs=2000 #How many times will the model go through the data\nmodel_save_increments = 100 #How often will loss be measured and will the model be saved\ntime_series_length = len(normalized_data[0])\nhidden_layer_size = 200\nnum_of_layers = 1\n\n#model = ActionPotentialParamterPredictor(input_size=time_series_dims, hidden_layer_size=LSTM_hidden_layer, output_size=num_of_parameters)\n#model = models.FullyConnectedModel(input_size=time_series_length,hidden_size=hidden_layer_size,num_of_hidden_layers=num_of_layers,output_size=num_of_parameters)\nmodel = models.SingleLSTMLayer(input_size=time_series_dims, hidden_layer_size=LSTM_hidden_layer, output_size=2)\nmodel.to(device)\nsodiumErrors = []\npotassiumErrors = []\nfor epoch in range(4000):\n    if(epoch%100 == 0):\n        print(epoch)\n    model.load_state_dict(torch.load(\"/home/techgarage/ActionPotentialAnalysis/models/SingleLSTMLayer_400_1e-06/APModel\"+str(epoch)+\".pth\"))\n    model.eval()\n\n    sodium_pred = []\n    potassium_pred = []\n\n    sodium_truth = []\n    potassium_truth = []\n\n    for paramter, data in zip(tensor_parameters,tensor_data):\n        with torch.no_grad():\n            pred = model(data.to(device))\n            #print(pred)\n            sodium_pred.append(pred.cpu().numpy()[0])\n            potassium_pred.append(pred.cpu().numpy()[1])\n            sodium_truth.append(paramter.cpu().numpy()[0])\n            potassium_truth.append(paramter.cpu().numpy()[1])\n\n    averageError = 0\n    count = 0\n    for pred, truth in zip(sodium_pred, sodium_truth):\n        if truth != 0:\n            count += 1\n            error = abs(pred - truth)/truth\n            averageError += error\n    sodiumAverageError = averageError/(count+1)*100\n\n    averageError = 0\n    count = 0\n    for pred, truth in zip(potassium_pred, potassium_truth):\n        if truth != 0:\n            count += 1\n            error = abs(pred - truth)/truth\n            averageError += error\n    potassiumAverageError = averageError/(count+1)*100\n    print(potassiumAverageError)\n    print(sodiumAverageError)\n    sodiumErrors.append(sodiumAverageError)\n    potassiumErrors.append(potassiumAverageError)\n\nfig, axs = plt.subplots(2)\nfig.suptitle('Accuracy over training')\naxs[0].plot(sodiumErrors)\naxs[0].set(xlabel='num of epochs', ylabel='Sodium 
Pred Error (%)')\naxs[1].plot(potassiumErrors)\naxs[1].set(xlabel='num of epochs', ylabel='Potassium Pred Error (%)')\nplt.show()\n\n# fig, axs = plt.subplots(2)\n# fig.suptitle('Vertically stacked subplots')\n# axs[0].plot(sodium_pred, label=\"sodium_pred\")\n# axs[1].plot(potassium_pred, label=\"potassium_pred\")\n# axs[0].plot(sodium_truth, label=\"sodium_truth\")\n# axs[1].plot(potassium_truth, label=\"potassium_truth\")\n# axs[0].legend(loc=\"upper left\")\n# axs[1].legend(loc=\"upper left\")\n# plt.show()","repo_name":"gatordevin/ActionPotentialAnalysis","sub_path":"testModel.py","file_name":"testModel.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36877655183","text":"#!/usr/bin/env python3\n# -*- coding: utf-8, vim: expandtab:ts=4 -*-\n\nfrom NLPCanvas import *\nfrom EdgeTypeFilter import *\nfrom PyQt4 import QtGui\n\n\"\"\"\n * An EdgeTypeFilterPanel controls an EdgeTypeFilter and requests an update for an NLPCanvas whenever the filter is\n * changed.\n \"\"\"\nclass EdgeTypeFilterPanel:\n def __init__(self, gui, canvas=NLPCanvas, edgeTypeFilter=EdgeTypeFilter):\n self._nlpCanvas = canvas\n self._types = gui.edgeTypeListWidget\n self._types.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)\n self._listModel = []\n self._matches = gui.matchesCheckBox\n self._falsePositives = gui.falsePositiveCheckBox\n self._falseNegatives = gui.falseNegativeCheckBox\n self._justChanged = set()\n\n self._edgeTypeFilter = edgeTypeFilter\n self._nlpCanvas.addListener(listener=self)\n edgeTypeFilter.addListener(listener=self)\n\n self.updateTypeList()\n self.updateSelection()\n\n def valueChanged():\n print(\"Edge type widget selection changed\")\n self._justChanged.clear()\n for index in range(0,len(self._types)):\n t = str(self._listModel[index])\n self._justChanged.add(t)\n if self._types.isItemSelected(self._types.item(index)):\n self._edgeTypeFilter.addAllowedPrefixType(t)\n else:\n self._edgeTypeFilter.removeAllowedPrefixType(t)\n self._justChanged.clear()\n self._nlpCanvas.updateNLPGraphics()\n self._types.itemSelectionChanged.connect(valueChanged)\n\n def matchActionPerformed(value):\n #if self._matches.checkState() == 2: #Checked\n if value == 2: #Checked\n self._edgeTypeFilter.addAllowedPostfixType(\"Match\")\n else:\n self._edgeTypeFilter.removeAllowedPostfixType(\"Match\")\n self._justChanged.clear()\n self._nlpCanvas.updateNLPGraphics()\n self._matches.stateChanged.connect(matchActionPerformed)\n\n def negativeActionPerformed(value):\n if value == 2: #Checked\n self._edgeTypeFilter.addAllowedPostfixType(\"FN\")\n else:\n self._edgeTypeFilter.removeAllowedPostfixType(\"FN\")\n\n self._nlpCanvas.updateNLPGraphics()\n self._falseNegatives.stateChanged.connect(negativeActionPerformed)\n\n def positiveActionPerformed(value):\n if value == 2: # Checked\n self._edgeTypeFilter.addAllowedPostfixType(\"FP\")\n else:\n self._edgeTypeFilter.removeAllowedPostfixType(\"FP\")\n\n self._nlpCanvas.updateNLPGraphics()\n self._falsePositives.stateChanged.connect(positiveActionPerformed)\n\n \"\"\"\n * Separates the types in <code>usedTypes</code> into prefix and postfix types.\n *\n * @param usedTypes the types to separate.\n * @param prefixTypes the target set for prefix types.\n * @param postfixTypes the target set for postfix types.\n \"\"\"\n def separateTypes(self, usedTypes, prefixTypes, postfixTypes):\n for t in usedTypes:\n index = t.find(':')\n if index == -1:\n 
prefixTypes.add(t)\n else:\n prefixTypes.add(t[0:index])\n postfixTypes.add(t[index+1:])\n \"\"\"\n * Updates the set of selected (set to be visible) edge types.\n \"\"\"\n def updateSelection(self):\n #TODO: deselecting items?\n for index in range(0,len(self._types)):\n t = str(self._types.item(index))\n if self._edgeTypeFilter.allowsPrefix(t):\n self._types.setItemSelected(self._types.item(index), True)\n\n \"\"\"\n * Updates the list of available edge types and the set FP/FN/Match checkboxes.\n \"\"\"\n def updateTypeList(self):\n prefixTypes = set()\n postfixTypes = set()\n self.separateTypes(self._nlpCanvas.usedTypes, prefixTypes, postfixTypes)\n allTypes = []\n allTypes.extend(prefixTypes)\n\n self._falseNegatives.setEnabled(\"FP\" in postfixTypes)\n if self._edgeTypeFilter.allowsPostfix(\"FP\"):\n self._falseNegatives.setCheckState(2) #Checked\n else:\n self._falseNegatives.setCheckState(0) #Unchecked\n self._falsePositives.setEnabled(\"FN\" in postfixTypes)\n if self._edgeTypeFilter.allowsPostfix(\"FN\"):\n self._falsePositives.setCheckState(2) # Checked\n else:\n self._falsePositives.setCheckState(0) # Unchecked\n self._matches.setEnabled(\"Match\" in postfixTypes)\n if self._edgeTypeFilter.allowsPostfix(\"Match\"):\n self._matches.setCheckState(2) # Checked\n else:\n self._matches.setCheckState(0) # Unchecked\n\n self._listModel = []\n self._types.clear()\n for t in allTypes:\n self._listModel.append(t)\n self._types.addItem(t)\n \"\"\"\n * Updates the type list and the selection. Afterwards request for repaint is issued.\n \"\"\"\n def instanceChanged(self):\n self.updateTypeList()\n self.updateSelection()\n\n \"\"\"\n * Updates the selection.\n *\n * @param type type string that was allowed or disallowed.\n * @see com.googlecode.whatswrong.EdgeTypeFilter.Listener#changed(String)\n \"\"\"\n def changed(self, type):\n if type not in self._justChanged:\n self.updateSelection()","repo_name":"kalregi/What-sWrong_SVG","sub_path":"EdgeTypeFilterPanel.py","file_name":"EdgeTypeFilterPanel.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42304899710","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport tensorflow_datasets as tfds\nfrom tensorflow.keras import datasets, layers, models, utils\n\nimport statistics\nimport matplotlib.pyplot as plt\n\nfrom cleverhans.tf2.attacks.projected_gradient_descent import projected_gradient_descent\nfrom cleverhans.tf2.attacks.fast_gradient_method import fast_gradient_method\nfrom cleverhans.tf2.attacks.carlini_wagner_l2 import carlini_wagner_l2\nfrom cleverhans.tf2.attacks.momentum_iterative_method import momentum_iterative_method\nfrom cleverhans.tf2.attacks.spsa import spsa\n\ntfd = tfp.distributions\ntfpl = tfp.layers\n\nx = ['bicycle', 'bird']\n\ndef get_data_bob():\n test_dataset = utils.image_dataset_from_directory('./bob_dataset/test', label_mode='binary',\n image_size=(32, 32))\n\n normalization_layer = layers.Rescaling(1./255)\n test_dataset = test_dataset.map(lambda x, y: (normalization_layer(x), y))\n\n test_dataset = tfds.as_numpy(test_dataset)\n\n test_images = np.zeros((300, 32, 32, 3))\n test_labels = np.zeros(300)\n i = 0\n\n for element in test_dataset:\n j = len(element[0])\n test_images[i:i+j] = element[0]\n test_labels[i:i+j] = element[1].reshape(element[1].shape[0], )\n 
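# each 'element' yielded here is one (images, labels) batch from tfds.as_numpy; 'i' tracks the running write offset into the preallocated arrays and is advanced below by the batch size j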
i += j\n\n test_images = test_images.reshape(test_images.shape[0], 32, 32, 3)\n test_images = tf.constant(test_images)\n\n test_labels = tf.cast(test_labels, tf.int32)\n\n test_labels = utils.to_categorical(test_labels)\n\n return test_images, test_labels\n\nclass BOBSequence(tf.keras.utils.Sequence):\n\n def __init__(self, data=None, batch_size=128):\n if data:\n images, labels = data\n else:\n images, labels = BOBSequence.__generate_fake_data(\n num_images=128, num_classes=2)\n self.images, self.labels = BOBSequence.__preprocessing(images, labels)\n self.batch_size = batch_size\n\n @staticmethod\n def __preprocessing(images, labels):\n return images, labels\n\n def __generate_fake_data(num_images, num_classes):\n images = np.random.randint(low=0, high=256,\n size=(num_images, 32, 32, 3))\n labels = np.random.randint(low=0, high=num_classes,\n size=num_images)\n return images, labels\n\n def __len__(self):\n return int(tf.math.ceil(len(self.images) / self.batch_size))\n\n def __getitem__(self, idx):\n batch_x = self.images[idx * self.batch_size: (idx + 1) * self.batch_size]\n batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]\n return batch_x, batch_y\n\ndef main():\n\n batch_size = 128\n\n test_i, test_l = get_data_bob()\n\n heldout_seq = BOBSequence(data=(test_i, test_l), batch_size=batch_size)\n\n def create_model_det():\n\n model = models.Sequential()\n model.add(layers.Conv2D(32, kernel_size=3, activation='relu', input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(32, kernel_size=3, activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Dropout(0.2))\n model.add(layers.Conv2D(64, kernel_size=3, activation='relu'))\n model.add(layers.Conv2D(64, kernel_size=3, activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Dropout(0.2))\n model.add(layers.Conv2D(128, kernel_size=3, activation='relu'))\n model.add(layers.Conv2D(128, kernel_size=3, activation='relu'))\n model.add(layers.Flatten())\n model.add(layers.Dropout(0.2))\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dropout(0.3))\n model.add(layers.Dense(2, activation=tf.nn.softmax))\n\n optimizer = tf.keras.optimizers.Adam(lr=0.001)\n\n model.compile(optimizer,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n return model\n\n def create_model_bay():\n\n kl_divergence_fn = (lambda q, p, _: tfd.kl_divergence(q, p) / tf.cast(850, dtype=tf.float32))\n\n model = models.Sequential()\n model.add(tfp.layers.Convolution2DFlipout(64, kernel_size=11, kernel_divergence_fn=kl_divergence_fn,\n activation='relu', padding='same', strides=4, input_shape=(32, 32, 3)))\n model.add(layers.MaxPooling2D(pool_size=[2, 2], strides=[2, 2], padding='same'))\n model.add(tfp.layers.Convolution2DFlipout(64, kernel_size=5, kernel_divergence_fn=kl_divergence_fn,\n activation='relu', padding='same'))\n model.add(layers.MaxPooling2D(pool_size=[2, 2], strides=[2, 2], padding='same'))\n model.add(tfp.layers.Convolution2DFlipout(192, kernel_size=3, kernel_divergence_fn=kl_divergence_fn,\n activation='relu', padding='same'))\n model.add(tfp.layers.Convolution2DFlipout(384, kernel_size=3, kernel_divergence_fn=kl_divergence_fn,\n activation='relu', padding='same'))\n model.add(tfp.layers.Convolution2DFlipout(256, kernel_size=3, kernel_divergence_fn=kl_divergence_fn,\n activation='relu', padding='same'))\n model.add(layers.MaxPooling2D(pool_size=[2, 2], strides=[2, 2], padding='same'))\n model.add(layers.Flatten())\n model.add(tfp.layers.DenseFlipout(128, 
kernel_divergence_fn=kl_divergence_fn, activation='relu'))\n        model.add(tfp.layers.DenseFlipout(2, kernel_divergence_fn=kl_divergence_fn, activation=tf.nn.softmax))\n\n        #optimizer = tf.keras.optimizers.Adam(lr=0.001)\n        optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9, nesterov=False)\n\n        model.compile(optimizer,\n                      loss='categorical_crossentropy',\n                      metrics=['accuracy'], experimental_run_tf_function=False)\n\n        return model\n\n    model_det = create_model_det()\n    model_bay = create_model_bay()\n\n    model_det.load_weights('./model_weights/bob_det_sm').expect_partial()\n    model_bay.load_weights('./model_weights/bob_bay_sm').expect_partial()\n\n\n    def det_batch_spsa(batch_number=0, n_images=batch_size, thresholds=[0.0], figures=0, epsilon=0.1):\n        test_images, test_labels = heldout_seq[batch_number]\n\n        data_type = f\"spsa{epsilon}\"\n\n        res = []\n\n        x_32 = tf.cast(test_images, tf.float32)\n\n        spsas = []\n\n        for i in range(n_images):\n            x_spsa = spsa(model_fn=model_det, x=x_32[i][None, :, :, :], y=test_labels[i], spsa_iters=1, delta=0.01,\n                          eps=epsilon, nb_iter=100, clip_max=1.0, clip_min=-1.0, learning_rate=0.5, is_debug=False)\n            spsas.append(x_spsa)\n\n        for j in range(len(thresholds)):\n            acc = 0\n            pred_correct = 0.0\n            pred_incorrect = 0.0\n            guessed = 0\n\n            for i in range(n_images):\n                x_spsa = spsas[i]\n                pred = model_det.predict(x_spsa, verbose=0)\n                if np.max(pred[0]) >= thresholds[j]:\n                    guessed += 1\n                    if np.argmax(pred[0]) == np.argmax(test_labels[i]):\n                        acc += 1\n                        pred_correct += np.max(pred[0])\n                    else:\n                        pred_incorrect += np.max(pred[0])\n            pred_correct = pred_correct / acc if acc > 0 else -1\n            pred_incorrect = pred_incorrect / (guessed - acc) if guessed - acc > 0 else -1\n            acc = acc / guessed if guessed > 0 else -1\n\n            res.append((thresholds[j], acc, guessed / n_images, pred_correct, pred_incorrect))\n\n            if j == 0:\n                print(f\"\\n\\n\\nDeterministic model accuracy with {data_type} data:\\n\")\n\n            if thresholds[j] == 0.0:\n                print(f\"Accuracy using no confidence threshold:\\t\\t\\t\\t{res[j][1] * 100:4.1f}%\")\n            else:\n                print(f\"Accuracy using a confidence threshold of {res[j][0]}:\\t\\t\\t{res[j][1] * 100:4.1f}%\"\n                      f\"\\t({res[j][2] * 100:4.1f}% of images used)\")\n            print(f\"Prediction confidence among correctly classified images:\\t{res[j][3]:4.3f}\")\n            print(f\"Prediction confidence among incorrectly classified images:\\t{res[j][4]:4.3f}\\n\")\n\n        for i in range(min(n_images, figures)):\n            pred = model_det.predict(spsas[i], verbose=0)\n            plt.imshow(tf.reshape(spsas[i], (32, 32, 3)))\n            plt.title(f\"{data_type} image (det)\")\n            plt.savefig(f\"./images_bob/{data_type}_{i}_det_image.jpeg\", bbox_inches='tight')\n            plt.clf()\n            plt.bar(range(len(pred[0])), pred[0])\n            plt.title(\"prediction (det)\")\n            plt.ylim([0, 1])\n            plt.savefig(f\"./images_bob/{data_type}_{i}_prediction.jpeg\", bbox_inches='tight')\n            plt.clf()\n\n        return res\n\n
    def bay_batch_clean(batch_number=0, n_images=batch_size, n_rep=100):\n        test_images, test_labels = heldout_seq[batch_number]\n\n        prob_bay_batch = np.zeros((n_rep, n_images, 2))\n\n        for i in range(n_rep):\n            predictions = model_bay.predict_on_batch(test_images)\n            for j in range(n_images):\n                for k in range(2):\n                    prob_bay_batch[i][j][k] = predictions[j][k]\n\n        prob_bay_batch_median = np.zeros((n_images, 2))\n        prob_bay_batch_threshold = np.zeros((n_images, 2))\n\n        for k in range(2):\n            for i in range(n_images):\n                samples = []\n                for j in range(n_rep):\n                    samples.append(prob_bay_batch[j][i][k])\n                median = statistics.median(samples)\n                prob_bay_batch_median[i][k] = median\n                if median > 0.2:\n                    prob_bay_batch_threshold[i][k] = 1\n\n        n_wrong = 0.0\n        n_guessed = 0.0\n\n        for i in range(n_images):\n            summed = np.sum(prob_bay_batch_threshold[i])\n            if summed == 1:\n                n_guessed += 1\n                guess = np.argmax(prob_bay_batch_threshold[i])\n                actual = np.argmax(test_labels[i])\n                if guess != actual:\n                    n_wrong += 1\n\n        perc_abstained = (n_images - n_guessed) / n_images\n\n        accuracy = -1\n        if perc_abstained < 1:\n            accuracy = (n_guessed - n_wrong) / n_guessed\n\n        return accuracy, perc_abstained\n\n\n
    def bay_batch_spsa_fb(batch_number=0, n_images=batch_size, thresholds=[0.0], figures=0, iterations=100, epsilon=0.1):\n        data_type = f\"spsa{epsilon}\"\n\n        test_images, test_labels = heldout_seq[batch_number]\n\n        x_32 = tf.cast(test_images, tf.float32)\n\n        model_outputs = np.zeros((iterations, n_images, 2))\n\n        spsas = []\n\n        for i in range(n_images):\n            x_spsa = spsa(model_fn=model_bay, x=x_32[i][None, :, :, :], y=test_labels[i], spsa_iters=1, delta=0.01,\n                          eps=epsilon, nb_iter=100, clip_max=1.0, clip_min=-1.0, learning_rate=0.5, is_debug=False)\n            spsas.append(x_spsa)\n\n        for i in range(iterations):\n            for j in range(n_images):\n                prediction = model_bay.predict(spsas[j], verbose=0)\n                for k in range(2):\n                    model_outputs[i][j][k] = prediction[0][k]\n\n        means = np.mean(model_outputs, axis=0)\n        mean_max = means.max(axis=-1)\n        predictions = means.argmax(axis=-1)\n\n        res = []\n\n        for i in range(len(thresholds)):\n            acc = 0\n            mean_correct = 0.0\n            mean_incorrect = 0.0\n            guessed = 0\n\n            for j in range(n_images):\n                if mean_max[j] >= thresholds[i]:\n                    guessed += 1\n                    if predictions[j] == np.argmax(test_labels[j]):\n                        acc += 1\n                        mean_correct += mean_max[j]\n                    else:\n                        mean_incorrect += mean_max[j]\n\n            mean_correct = mean_correct/acc if acc > 0 else -1\n            mean_incorrect = mean_incorrect/(guessed-acc) if guessed - acc > 0 else -1\n            acc = acc / guessed if guessed > 0 else -1\n\n            res.append((thresholds[i], acc, guessed/n_images, mean_correct, mean_incorrect))\n\n            if i == 0:\n                print(f\"\\n\\n\\nBayesian model accuracy with {data_type} data:\\n\")\n\n            if thresholds[i] == 0.0:\n                print(f\"Accuracy using no confidence threshold:\\t\\t\\t\\t{res[i][1] * 100:4.1f}%\")\n            else:\n                print(f\"Accuracy using a confidence threshold of {res[i][0]}:\\t\\t\\t{res[i][1] * 100:4.1f}%\"\n                      f\"\\t({res[i][2] * 100:4.1f}% of images used)\")\n            print(f\"Prediction confidence among correctly classified images:\\t{res[i][3]:4.3f}\")\n            print(f\"Prediction confidence among incorrectly classified images:\\t{res[i][4]:4.3f}\\n\")\n\n        for i in range(min(n_images, figures)):\n            plt.imshow(tf.reshape(spsas[i], (32, 32, 3)))\n            plt.title(f\"{data_type} image (bay)\")\n            plt.savefig(f\"./images_bob/{data_type}_{i}_bay_image.jpeg\", bbox_inches='tight')\n            plt.clf()\n            plt.bar(range(len(means[i])), means[i])\n            plt.title(\"prediction (bay)\")\n            plt.ylim([0, 1])\n            plt.savefig(f\"./images_bob/{data_type}_{i}_bay_prediction.jpeg\", bbox_inches='tight')\n            plt.clf()\n\n        return res\n\n    n_images = 128\n    figures = 10\n    thresholds = [0.0, 0.65, 0.8]\n    epsilons = [0.05]\n\n    for e in epsilons:\n        det_batch_spsa(n_images=n_images, figures=figures, thresholds=thresholds, epsilon=e)\n\n    for e in epsilons:\n        bay_batch_spsa_fb(n_images=n_images, figures=figures, thresholds=thresholds, epsilon=e)\n\n\n\n\n\n\nif __name__ == \"__main__\": 
main()","repo_name":"Yaloron/thesis_code","sub_path":"clever_adv_bob.py","file_name":"clever_adv_bob.py","file_ext":"py","file_size_in_byte":13958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"343334164","text":"from mrjob.job import MRJob\r\n\r\nclass mapreduce(MRJob):\r\n def mapper(self, _, line):\r\n fields=line.split(\",\")\r\n if len(fields)==4:\r\n (userId, movieId, rating ,timestamp)=fields\r\n if movieId !='movieId':\r\n result=(movieId,('R',rating))\r\n yield result\r\n elif len(fields)==3:\r\n (movieId, title, genres)=fields\r\n if movieId !='movieId':\r\n result=(movieId, ('T', title))\r\n yield result\r\n\r\n def reducer(self, movieId, values):\r\n ratings = []\r\n title = None\r\n\r\n for vtype, value in values:\r\n if vtype == 'R':\r\n try:\r\n rating = float(value)\r\n ratings.append(rating)\r\n except ValueError:\r\n pass\r\n elif vtype == 'T':\r\n title = value\r\n\r\n if ratings:\r\n average_rating = sum(ratings) / len(ratings)\r\n yield title, average_rating\r\n\r\nif __name__ == '__main__':\r\n mapreduce.run()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"BartoszP37/BIGDATA","sub_path":"zadanie2-wersja2/mapreduce.py","file_name":"mapreduce.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43241423217","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\n\n\nclass L2Norm(nn.Module):\n \"\"\"\n Layer learns to scale the l2 normalized features from conv4_3\n source:\n https://github.com/amdegroot/ssd.pytorch/blob/5b0b77faa955c1917b0c710d770739ba8fbff9b7/layers/modules/l2norm.py#L7 \n \"\"\" \n def __init__(self,\n in_channels,\n scale):\n super(L2Norm, self).__init__()\n self.in_channels = in_channels\n self.gamma = scale\n self.eps = 1e-10\n self.weight = nn.Parameter(torch.Tensor(self.in_channels))\n self.reset_parameters()\n\n def reset_parameters(self):\n init.constant_(self.weight,self.gamma)\n\n def forward(self, x):\n # consider the l2norm along the axis 1\n norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()+self.eps\n # normalize data on l2norm\n x = torch.div(x,norm)\n # multiply by weights\n out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x\n return out","repo_name":"BloomBabe/SSD-pytorch","sub_path":"ssd/modules/l2norm.py","file_name":"l2norm.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37904912506","text":"def handler():\n list_of_tuples = [\n ('Russia', '25'),\n ('France', '132'),\n ('Germany', '132'),\n ('Spain', '178'),\n ('Italy', '162'),\n ('Portugal', '17'),\n ('Finland', '3'),\n ('Hungary', '2'),\n ('The Netherlands', '28'),\n ('The USA', '610'),\n ('The United Kingdom', '95'),\n ('China', '83'),\n ('Iran', '76'),\n ('Turkey', '65'),\n ('Belgium', '34'),\n ('Canada', '28'),\n ('Switzerland', '26'),\n ('Brazil', '25'),\n ('Austria', '14'),\n ('Israel', '12')\n ]\n\n new_list_of_tuples = list()\n for i in list_of_tuples:\n new_list_of_tuples.append((i[0], int(i[1])))\n output = dict(new_list_of_tuples)\n output = {k: v for k, v in sorted(output.items(), key=lambda item: item[0])}\n sorted_output = {k: v for k, v in sorted(output.items(), key=lambda item: item[1], reverse=True)}\n for k, v in sorted_output.items():\n print(k)\n return\n\ndef main():\n handler()\n return\n\nif __name__ == '__main__':\n 
main()","repo_name":"andydardgallard/21_School_Python_Data_Science","sub_path":"Day01/ex06/dict_sorter.py","file_name":"dict_sorter.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28549391038","text":"#this script handles all functions neccessaray for the training part\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms, models\nimport glob, os\nimport argparse\nfrom os import listdir\nimport json\n\ndef set_data_dir(data_dir):\n\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n\n return (train_dir,valid_dir,test_dir)\n \ndef load_data(train_dir, valid_dir, test_dir):\n \t# Setting Transformations\n data_transforms_training = transforms.Compose([transforms.RandomRotation(25),\n \t transforms.RandomHorizontalFlip(),\n transforms.RandomResizedCrop(224),\n \t transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n\n data_transforms_validation = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n\n data_transforms_testing = transforms.Compose([transforms.RandomResizedCrop(224),\n \t transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n\n\t# Load the datasets with ImageFolder\n image_datasets_training = datasets.ImageFolder(train_dir, transform=data_transforms_training)\n image_datasets_validation = datasets.ImageFolder(valid_dir, transform=data_transforms_validation)\n image_datasets_testing = datasets.ImageFolder(test_dir, transform=data_transforms_testing)\n\n\t# Using the image datasets and the trainforms, define the dataloaders\n trainloader = torch.utils.data.DataLoader(image_datasets_training, batch_size=64, shuffle=True)\n validationloader = torch.utils.data.DataLoader(image_datasets_validation, batch_size=32)\n testloader = torch.utils.data.DataLoader(image_datasets_testing, batch_size=32) \n \n return (trainloader,validationloader,testloader, image_datasets_training)\n\ndef load_model(model_from_input):\n # Loading resnet18/alexnet/densenet121 Model\n if model_from_input == 'resnet18':\n model = models.resnet18(pretrained=True)\n elif model_from_input == 'alexnet':\n model = models.alexnet(pretrained=True)\n else:\n model = models.densenet121(pretrained=True)\n \n return model\n\ndef define_classifier(model, hidden_layers, architecture):\n \t# Freeze parameters so we don't backprop through them\n for param in model.parameters():\n param.requires_grad = False\n\t\n #Create Classifier for densenet121\n if architecture == \"densenet121\":\n from collections import OrderedDict\n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(1024, hidden_layers)),\n ('relu', nn.ReLU()),\n ('fc2', nn.Linear(hidden_layers, 102)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n\n model.classifier = classifier\n \n else:\n from collections import OrderedDict\n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(9216, hidden_layers)),\n ('relu', nn.ReLU()),\n ('fc2', nn.Linear(hidden_layers, 102)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n\n model.classifier = classifier\n\n return (classifier, param)\n \ndef training_network(model,classifier,trainloader,validationloader, cuda, epochs):\n\n epochs = epochs\n steps = 0\n 
running_loss = 0\n print_every = 40\n for e in range(epochs):\n criterion = nn.NLLLoss()\n # Only train the classifier parameters, feature parameters are frozen\n optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)\n \n if cuda == 'gpu':\n # Move model parameters to the GPU\n model.cuda()\n \n else:\n model.cpu()\n \n for ii, (inputs, labels) in enumerate(trainloader):\n inputs, labels = Variable(inputs), Variable(labels)\n steps+=1\n if cuda == 'gpu':\n # Move input and label tensors to the GPU\n inputs, labels = inputs.cuda(), labels.cuda()\n\n optimizer.zero_grad()\n outputs = model.forward(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n \n running_loss += loss.data[0]\n\n if steps % print_every == 0:\n # Model in inference mode, dropout is off\n model.eval()\n\n accuracy = 0\n validation_loss = 0\n for ii, (inputs, labels) in enumerate(validationloader):\n\n #images = images.resize_(images.size()[0], 784)\n # Set volatile to True so we don't save the history\n inputs = Variable(inputs, volatile=True)\n labels = Variable(labels, volatile=True)\n if cuda == 'gpu':\n # Move input and label tensors to the GPU\n inputs, labels = inputs.cuda(), labels.cuda()\n\n output = model.forward(inputs)\n validation_loss += criterion(output, labels).data[0]\n\n ## Calculating the accuracy \n # Model's output is log-softmax, take exponential to get the probabilities\n ps = torch.exp(output).data\n # Class with highest probability is our predicted class, compare with true label\n equality = (labels.data == ps.max(1)[1])\n # Accuracy is number of correct predictions divided by all predictions, just take the mean\n accuracy += equality.type_as(torch.FloatTensor()).mean()\n\n print(\"Epoch: {}/{}.. \".format(e+1, epochs),\n \"Training Loss: {:.3f}.. \".format(running_loss/print_every),\n \"Validation Loss: {:.3f}.. 
\".format(validation_loss/len(validationloader)),\n \"Validation Accuracy: {:.3f}\".format(accuracy/len(validationloader)))\n\n running_loss = 0\n\n # Make sure dropout is on for training\n model.train()\n \n if ii==3:\n break\n\n return (model, epochs, optimizer) \n\ndef save_checkpoint(image_datasets_training, epochs, model, optimizer, filepath_checkpoint):\n #saving the trained model_state_dict\n torch.save({\n 'epochs': epochs,\n 'state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'model_class_to_idx': model.class_to_idx,\n },filepath_checkpoint)\n \n return(filepath_checkpoint)\n\ndef load_checkpoint(filepath_checkpoint, model):\n #loading the saved model\n print(\"=> loading checkpoint\")\n checkpoint = torch.load(filepath_checkpoint, map_location=lambda storage, loc: storage)\n epochs = checkpoint['epochs']\n model.load_state_dict(checkpoint['state_dict'])\n model.class_to_idx = checkpoint['model_class_to_idx']\n print(\"=> loaded checkpoint\")\n \n return(epochs,model.load_state_dict, model.class_to_idx)","repo_name":"chrisy-d1989/udacityaipython","sub_path":"Part2/model_functions.py","file_name":"model_functions.py","file_ext":"py","file_size_in_byte":7980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8143712778","text":"\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\"\n def maxDepth(self, root): \n '''\n 层次遍历\n '''\n # write your code here\n Depth = 0\n stact = [root]\n if root:\n while stact:\n for i in range(len(stact)):\n cur = stact.pop(0)\n if cur.left:\n stact.append(cur.left)\n if cur.right:\n stact.append(cur.right)\n Depth += 1\n return Depth\n\n def maxDepth2(self, root, comp=lambda x,y: x if x>y else y):\n '''\n 递归\n '''\n if root is None:\n return 0\n return comp(self.maxDepth2(root.left),self.maxDepth2(root.right)) + 1\n \n\n\nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n tmp = root.right\n tmp.left = TreeNode(4)\n tmp.right = TreeNode(5)\n\n tmp = Solution()\n res = tmp.maxDepth2(root)\n print(res)\n","repo_name":"2048JiaLi/Lint-Code","sub_path":"97.二叉树最大深度.py","file_name":"97.二叉树最大深度.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70318836641","text":"# 242: Valid anagram\n# neetcode: https://youtu.be/9UtInBqnCgA\n# Time: O(S + T)\n# Space: O(S + T)\n\nclass Solution(object):\n def isAnagram(self, s, t):\n\n if len(s) != len(t):\n return False\n\n count_s, count_t = {}, {}\n\n # create a map for each string to store the count of characters\n\n for i in range(len(s)):\n count_s[s[i]] = 1 + count_s.get(s[i], 0) # 0 is default if not in the map\n count_t[t[i]] = 1 + count_t.get(t[i], 0)\n\n # compare each pair in the maps\n # the number of each character should be the same if they are anagrams\n\n for char in count_s:\n if count_s[char] != count_t.get(char, 0):\n return False\n\n return True\n\n\ns = 'anagram'\nt = 'nagaram'\n\nprint(Solution().isAnagram(s, t))\n","repo_name":"allguitars/leetcode-python","sub_path":"01_arrays_and_hashing/242_valid_anagram/242_1_hashmap.py","file_name":"242_1_hashmap.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"72015524640","text":"import sys\nimport math\nimport numpy as np\n# Minimum distance classifier as a linear machine\n\ndef readFile(file, classes, features):\n global n_classes, n_features, n_objects\n f = open(file, \"r\")\n header = f.readline()\n n_classes = int(header.split()[0])\n n_features = int(header.split()[1])\n n_objects = int(header.split()[2])\n for i in range(n_objects):\n feature = []\n line = f.readline()\n classes.append(line.split()[0])\n for j in range(n_features): feature.append(line.split()[j+1])\n features.append(feature)\n f.close()\n\ndef elementsClass(file, classes):\n number = np.zeros(n_classes)\n types = list(set(classes))\n f = open(file, \"r\")\n header = f.readline()\n for i in range(n_objects):\n line = f.readline()\n clase = int(line.split()[0])\n number[clase - 1] = number[clase - 1]+1\n f.close()\n return number\n\ndef gravityCenters(classes, features, elements_for_class):\n p = [[0.0 for i in range(n_features)] for j in range(n_classes)]\n for i in range(n_objects):\n clase = int(classes[i]) - 1\n for j in range(n_features): p[clase][j] = p[clase][j] + float(features[i][j])\n for i in range(n_classes):\n for j in range(n_features): p[i][j] = p[i][j] / elements_for_class[i]\n return p\n\ndef printGravityCenters(output_file, gravity_centers):\n index = 1\n for i in gravity_centers:\n output_file.write(\"P\"+str(index)+\"\\t\")\n for j in i: output_file.write(str(\"%.3f\" % j)+\"\\t\")\n index = index + 1\n output_file.write(\"\\n\")\n\ndef printWeights(output_file, weights):\n index = 1\n for i in weights:\n output_file.write(\"w\"+str(index)+\"\\t\")\n for j in i: output_file.write(str(\"%.3f\" % j)+\"\\t\")\n index = index + 1\n output_file.write(\"\\n\")\n\ndef printValues(output_file, mean_values, desviations):\n output_file.write(\"\\nmv\\t\")\n for i in mean_values: output_file.write(str(\"%.3f\" % i)+\"\\t\")\n output_file.write(\"\\nsd\\t\")\n for i in desviations: output_file.write(str(\"%.3f\" % i)+\"\\t\")\n output_file.write(\"\\n\")\n\ndef standardise(p, mean_values, desviations):\n for i in range(n_classes):\n for j in range(n_features):\n p[i][j] = (p[i][j] - mean_values[j])/desviations[j];\n\ndef calculateValues(mean_values, desviations, features):\n for j in range(n_features):\n mean_values.append(0)\n for i in range(n_objects): mean_values[j] = mean_values[j] + float(features[i][j])\n mean_values[j] = mean_values[j] / n_objects\n desviations.append(0)\n for i in range(n_objects): desviations[j] = desviations[j] + (float(features[i][j])-mean_values[j])**2\n desviations[j] = math.sqrt(desviations[j] / n_objects)\n\ndef calculateWeights(output_file, gravity_centers, mean_values, desviations):\n w = [[0.0 for i in range(n_features+1)] for j in range(n_classes)]\n for i in range(n_classes):\n for j in range(n_features): w[i][j] = 2*gravity_centers[i][j]\n for k in range(n_features): w[i][j+1] = w[i][j+1] + gravity_centers[i][k]**2\n w[i][j+1] = w[i][j+1]*-1\n output_file.write(\"\\nWeights before standardisation:\\n\")\n printWeights(output_file, w)\n for i in range(n_classes):\n suma = 0.0\n for j in range(n_features):\n w[i][j] = w[i][j] / desviations[j]\n suma = suma + mean_values[j]*w[i][j]\n w[i][j+1] = w[i][j+1] - suma\n output_file.write(\"\\nWeights after standardisation:\\n\")\n printWeights(output_file, w)\n return w\n\ndef printStatistics(output_file, matrix):\n #Confussion matrix\n output_file.write(\"\\nConfussion matrix:\\n\")\n output_file.write(\"\\t\")\n for i in range(n_classes): output_file.write(\" 
\"+str(i+1)+\"\\t\")\n output_file.write(\"\\n\")\n for i in range(n_classes):\n output_file.write(str(i+1)+\"\\t\")\n for j in range(n_classes): output_file.write(\" \"+str(\"%.1f\" % matrix[i][j])+\"\\t\")\n output_file.write(\"\\n\")\n #Probabilities a priori\n output_file.write(\"\\nProbabilities a priori:\\n\")\n output_file.write(\"\\t\")\n for i in range(n_classes): output_file.write(\" \"+str(i+1)+\"\\t\")\n output_file.write(\"\\n\")\n for i in range(n_classes):\n total = sum(matrix[i])\n output_file.write(str(i+1)+\"\\t\")\n for j in range(n_classes):\n result = matrix[i][j] / total\n output_file.write(str(\"%.4f\" % result)+\"\\t\")\n output_file.write(\"\\n\")\n #Probabilities a posteriori\n output_file.write(\"\\nProbabilities a posteriori:\\n\")\n output_file.write(\"\\t\")\n for i in range(n_classes): output_file.write(\" \"+str(i+1)+\"\\t\")\n output_file.write(\"\\n\")\n column = [sum([row[i] for row in matrix]) for i in range(0,len(matrix[0]))] #columns sum\n for i in range(n_classes):\n output_file.write(str(i+1)+\"\\t\")\n for j in range(n_classes):\n result = matrix[j][i] / column[i]\n output_file.write(str(\"%.4f\" % result)+\"\\t\")\n output_file.write(\"\\n\")\n\ndef test(output_file, test_file, weights):\n f = open(test_file, \"r\")\n header = f.readline()\n n_classes = int(header.split()[0])\n n_features = int(header.split()[1])\n n_objects = int(header.split()[2])\n error = 0\n cm = [[0 for i in range(n_classes)] for j in range(n_classes)]\n output_file.write(\"\\nObject\\t True class \\t Assigned class\\n\")\n for i in range(n_objects):\n feature = []\n line = f.readline()\n real_class = int(line.split()[0])\n for j in range(n_features): feature.append(float(line.split()[j+1]))\n g = []\n for j in range(n_classes):\n g.append(0.0)\n for k in range(n_features): g[j] = g[j] + feature[k]*weights[j][k]\n g[j] = g[j] + weights[j][k+1]\n assigned_class = g.index(max(g)) + 1\n # print(\"Max \", str(round(max(g), 2)))\n if i < 9: output_file.write(\" \"+str(i+1)+\"\\t\\t\\t\\t\\t\"+str(real_class)+\"\\t\\t\\t\\t\\t\\t\"+str(assigned_class)+\"\\n\")\n else: output_file.write(str(i+1)+\"\\t\\t\\t\\t\\t\"+str(real_class)+\"\\t\\t\\t\\t\\t\\t\"+str(assigned_class)+\"\\n\")\n if(assigned_class != real_class): error = error + 1\n cm[real_class-1][assigned_class-1] = cm[real_class-1][assigned_class-1]+1\n error = (100*error)/n_objects\n output_file.write(\"\\nError rate: \"+str(\"%.1f\" % error)+\" %\\n\")\n printStatistics(output_file, cm)\n f.close()\n\ndef train(output_file, train_file, classes, features):\n elements = elementsClass(train_file, classes)\n p = gravityCenters(classes, features, elements)\n output_file.write(\"Class gravity centers before standardisation:\\n\")\n printGravityCenters(output_file, p)\n mean_values = []\n desviations = []\n calculateValues(mean_values, desviations, features)\n printValues(output_file, mean_values, desviations)\n standardise(p, mean_values, desviations)\n output_file.write(\"\\nClass gravity centers after standardisation:\\n\")\n printGravityCenters(output_file, p)\n return calculateWeights(output_file, p, mean_values, desviations)\n\ndef main():\n train_file = input(\"Enter train file: \")\n test_file = input(\"Enter test file: \")\n output_file = input(\"Enter output file: \")\n output = open(output_file, \"w\")\n\n classes = []\n features = []\n readFile(train_file, classes, features)\n weights = train(output, train_file, classes, features)\n test(output, test_file, weights)\n 
output.close()\n\nmain()\n","repo_name":"ines-sanluis/statistical-patterns","sub_path":"minimum_distance_classifier_linear_machine.py","file_name":"minimum_distance_classifier_linear_machine.py","file_ext":"py","file_size_in_byte":7421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13758520117","text":"import sys\nimport urllib2\nimport simplejson\nfrom animations import FadeAnimation\n\nURL = \"http://sitesquares.herokuapp.com/colors?tail=24\"\nif __name__ == \"__main__\":\n import time\n out = FadeAnimation()\n out.FADERATE = 8.0\n out.start()\n\n while True:\n f = urllib2.urlopen(URL)\n data = f.read()\n data = data[1:-2]\n data = simplejson.loads(data)\n pix = [(0.0,0.0,0.0)]*24\n for i in range(24):\n item = data[i]\n color = item[\"color\"]\n r = int('0x'+color[0:2],16)\n g = int('0x'+color[2:4],16)\n b = int('0x'+color[4:6],16)\n pix[i]=(r*4.0,g*4.0,b*4.0)\n out.write(pix)\n time.sleep(2.7)\n","repo_name":"acm-uiuc/chroma-scripts","sub_path":"animations/sitesquares/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"54"} +{"seq_id":"70825907043","text":"from neunet.autograd import Tensor\r\nimport numpy as np\r\n\r\n\r\nclass _BatchNorm2dTensor(Tensor): # tensor for static backpropagation\r\n def __init__(self, data, args, op):\r\n super().__init__(data, args, op)\r\n\r\n def backward(self, grad=1):\r\n X, weight, bias, X_centered, stddev_inv, affine = self.args\r\n \r\n batch_size = X.data.shape[0] * X.data.shape[2] * X.data.shape[3]\r\n\r\n axis = (0, 2, 3)\r\n # _axis = list(axis) if isinstance(axis, tuple) else axis\r\n X_hat = X_centered * stddev_inv[..., None, None]\r\n\r\n weight_data = weight.data[..., None, None] if affine else 1\r\n\r\n dX_hat = weight_data * grad\r\n dstddev_inv = -0.5 * np.power(stddev_inv[..., None, None], 3) * np.sum(dX_hat * X_centered, axis = axis, keepdims = True)\r\n dvar = np.ones_like(X.data) * dstddev_inv * 2 * X_centered / batch_size #np.prod(np.array(X.shape)[_axis])\r\n dmean = np.ones_like(X.data) * np.sum(dX_hat * stddev_inv[..., None, None], axis = axis, keepdims = True) * (-1) / batch_size #np.prod(np.array(X.shape)[_axis])\r\n grad_X = dX_hat * stddev_inv[..., None, None] + dvar + dmean\r\n\r\n if affine:\r\n grad_weight = np.sum(grad * X_hat, axis = (0, 2, 3), keepdims = True).reshape(weight.data.shape)\r\n grad_bias = np.sum(grad, axis = (0, 2, 3), keepdims = True).reshape(bias.data.shape)\r\n\r\n X.backward(grad_X)\r\n if affine:\r\n weight.backward(grad_weight)\r\n bias.backward(grad_bias)\r\n\r\n\r\n\r\nclass BatchNorm2d(): # layer with static backpropagation\r\n def __init__(self, num_features, eps = 1e-5, momentum = 0.1, affine = True):\r\n self.num_features = num_features\r\n self.eps = eps\r\n self.momentum = momentum\r\n self.affine = affine\r\n\r\n self.running_mean = Tensor(np.zeros((1, num_features)), dtype=np.float32)\r\n self.running_var = Tensor(np.ones((1, num_features)), dtype=np.float32)\r\n\r\n if affine:\r\n self.weight = Tensor(np.ones((1, num_features)), dtype=np.float32)\r\n self.bias = Tensor(np.zeros((1, num_features)), dtype=np.float32)\r\n else:\r\n self.weight = None\r\n self.bias = None\r\n\r\n self.train = True\r\n\r\n def forward(self, X):\r\n\r\n if self.train:\r\n mean = np.mean(X.data, axis = (0, 2, 3))\r\n var = np.var(X.data, axis = (0, 2, 3))\r\n \r\n\r\n self.running_mean = self.momentum * self.running_mean + (1 - 
self.momentum) * mean\r\n self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var\r\n else:\r\n mean = self.running_mean\r\n var = self.running_var\r\n\r\n X_centered = X.data - mean[..., None, None]\r\n stddev_inv = 1 / np.sqrt(var + self.eps)\r\n\r\n O = X_centered * stddev_inv[..., None, None]\r\n\r\n if self.affine:\r\n O = self.weight.data[..., None, None] * O + self.bias.data[..., None, None]\r\n\r\n \r\n return _BatchNorm2dTensor(O, [X, self.weight, self.bias, X_centered, stddev_inv, self.affine], \"batchnorm2d\")\r\n\r\n def __call__(self, X):\r\n return self.forward(X)\r\n\r\n\r\n\r\n# class BatchNorm2d(): #layer with dynamic backpropagation\r\n# def __init__(self, num_features, eps = 1e-5, momentum = 0.1, affine = True):\r\n# self.num_features = num_features\r\n# self.eps = eps\r\n# self.momentum = momentum\r\n# self.affine = affine\r\n\r\n# self.running_mean = Tensor(np.zeros((1, num_features)), dtype=np.float32)\r\n# self.running_var = Tensor(np.ones((1, num_features)), dtype=np.float32)\r\n\r\n# if affine:\r\n# self.weight = Tensor(np.ones((1, num_features)), dtype=np.float32)\r\n# self.bias = Tensor(np.zeros((1, num_features)), dtype=np.float32)\r\n# else:\r\n# self.weight = None\r\n# self.bias = None\r\n\r\n# self.train = True\r\n\r\n# def forward(self, X):\r\n\r\n# if self.train:\r\n# mean = X.mean(axis = (0, 2, 3))\r\n# var = X.var(axis = (0, 2, 3))\r\n \r\n\r\n# self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * mean.data\r\n# self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var.data\r\n# else:\r\n# mean = self.running_mean\r\n# var = self.running_var\r\n\r\n# X_centered = X - mean[..., None, None]\r\n\r\n# stddev_inv = 1 / Tensor.sqrt(var + self.eps)\r\n\r\n# O = X_centered * stddev_inv[..., None, None]\r\n\r\n# if self.affine:\r\n# O = self.weight[..., None, None] * O + self.bias[..., None, None]\r\n\r\n \r\n# return O\r\n\r\n# def __call__(self, X):\r\n# return self.forward(X)\r\n\r\n\r\n# x_rand = np.random.randn(2, 3, 2, 2)\r\n# x_rand = np.arange(0, 24).reshape(2, 3, 2, 2)\r\n# x = Tensor(x_rand)\r\n# bn = BatchNorm2d(3)\r\n\r\n# bn.train = True\r\n# y = bn(x)\r\n\r\n# print(y.data)\r\n\r\n# y.backward(np.ones_like(y.data))\r\n\r\n# print(x.grad)\r\n\r\n# import torch\r\n# import torch.nn as nn\r\n\r\n\r\n# x = torch.tensor(x_rand, requires_grad=True, dtype=torch.float32)\r\n# bn = nn.BatchNorm2d(3)\r\n\r\n# bn.train()\r\n# y = bn(x)\r\n# # print(y)\r\n\r\n# y.backward(torch.ones_like(y))\r\n\r\n# print(x.grad)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"AkiRusProd/numpy-nn-model","sub_path":"neunet/nn/layers/batchnorm2d.py","file_name":"batchnorm2d.py","file_ext":"py","file_size_in_byte":5246,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"54"} +{"seq_id":"27626696759","text":"\nfrom scipy.integrate import odeint\nfrom math import cos, sin, pi\nimport numpy as np \n\ndef robot_dynamic(y, t, Mode):\n\tdesired_velocity = [sin(Mode * pi/4), cos(Mode * pi/4)]\n\tpx, py, vx, vy = y\n\tdifvx = vx-desired_velocity[0]\n\tdifvy = vy-desired_velocity[1]\n\tvx_dot = -1.8 * difvx + 0.1*difvy\n\tvy_dot = 0.1 * difvx - 1.8*difvy\n\tpx_dot = vx\n\tpy_dot = vy\n\tdydt = [px_dot, py_dot, vx_dot, vy_dot]\n\treturn dydt\n\ndef TC_Simulate(Mode,initialCondition,time_bound):\n\ttime_step = 0.01;\n\ttime_bound = float(time_bound)\n\t\n\tnumber_points = int(np.ceil(time_bound/time_step))\n\tt = [i*time_step for i in range(0,number_points)]\n\tif t[-1] != 
time_step:\n\t\tt.append(time_bound)\n\tnewt = []\n\tfor step in t:\n\t\tnewt.append(float(format(step, '.2f')))\n\tt = newt\n\tlabel = {\n\t\t\"0\":0,\n\t\t\"UP\":0,\n\t\t\"1\":1,\n\t\t\"UPRIGHT\":1,\n\t\t\"2\":2,\n\t\t\"RIGHT\":2,\n\t\t\"3\":3,\n\t\t\"DOWNRIGHT\":3,\n\t\t\"4\":4,\n\t\t\"DOWN\":4,\n\t\t\"5\":5,\n\t\t\"DOWNLEFT\":5,\n\t\t\"6\":6,\n\t\t\"LEFT\":6,\n\t\t\"7\":7,\n\t\t\"UPLEFT\":7\n\t}\n\n\tsol = odeint(robot_dynamic,initialCondition,t,args=(label[Mode],),hmax = time_step)\n\n\t# Construct the final output\n\ttrace = []\n\tfor j in range(len(t)):\n\t\t#print t[j], current_psi\n\t\ttmp = []\n\t\ttmp.append(t[j])\n\t\ttmp.append(float(sol[j,0]))\n\t\ttmp.append(float(sol[j,1]))\n\t\ttmp.append(float(sol[j,2]))\n\t\ttmp.append(float(sol[j,3]))\n\t\ttrace.append(tmp)\n\treturn trace\n\nif __name__ == \"__main__\":\n\tsol = TC_Simulate('7',[0.0,0.0,1.0,0.0],1)\n\tfor s in sol:\n\t\tprint(s)","repo_name":"qibolun/DryVR_0.2","sub_path":"examples/carinmaze/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"30165153051","text":"\"\"\"User View tests.\"\"\"\r\n\r\n# run these tests like:\r\n#\r\n# FLASK_ENV=production python -m unittest test_user_views.py\r\n\r\n\r\nimport os\r\nfrom unittest import TestCase\r\n\r\nfrom models import db, Message, User, Playlist, PlaylistUser, Friend, Song, Like, PlaylistSong\r\nimport json\r\n\r\nos.environ['DATABASE_URL'] = \"postgresql:///tabular-test\"\r\n\r\nfrom app import app, CURR_USER_KEY\r\n# Don't have WTForms use CSRF at all, since it's a pain to test\r\n\r\napp.config['WTF_CSRF_ENABLED'] = False\r\n\r\n\r\nclass PlaylistViewTestCase(TestCase):\r\n \"\"\"Test views for messages.\"\"\"\r\n\r\n def setUp(self):\r\n \"\"\"Create test client, add sample data.\"\"\"\r\n\r\n db.drop_all()\r\n db.create_all()\r\n\r\n self.client = app.test_client()\r\n\r\n # Add sample users\r\n\r\n testuser = User.register(username=\"testuser\",\r\n email=\"test@test.com\",\r\n password=\"testuser\",\r\n first_name='testuser',\r\n last_name=None)\r\n testuser.id = 8989\r\n self.testuser_id = 8989\r\n\r\n user2 = User.register(username=\"user2\",\r\n email=\"user2@test.com\",\r\n password=\"testuser\",\r\n first_name='user2',\r\n last_name=None)\r\n user2.id = 9546\r\n self.user2_id = 9546\r\n\r\n user3 = User.register(username=\"user3\",\r\n email=\"user3@test.com\",\r\n password=\"testuser\",\r\n first_name='user3',\r\n last_name=None)\r\n user3.id = 6745\r\n self.user3_id = 6745\r\n\r\n user4 = User.register(username=\"user4\",\r\n email=\"user4@test.com\",\r\n password=\"testuser\",\r\n first_name='user4',\r\n last_name=None)\r\n user4.id = 76845\r\n self.user4_id = 76845\r\n\r\n nathan = User.register(username='nathan', email='nathan@test.com',\r\n password='nathan', first_name='nathan', last_name=None)\r\n nathan.id = 12647\r\n self.nathan_id = 12647\r\n\r\n db.session.commit()\r\n\r\n # add user created playlists\r\n\r\n testuserp1 = Playlist(id=1111, name='testuser Jams',\r\n user_id=self.testuser_id)\r\n testuserp2 = Playlist(\r\n id=2222, name='testuser Favorites', user_id=self.testuser_id)\r\n\r\n user2p1 = Playlist(id=3333, name='user2 Jams', user_id=self.user2_id)\r\n user2p2 = Playlist(id=4444, name='user2 Favorites',\r\n user_id=self.user2_id)\r\n\r\n user3p1 = Playlist(id=5555, name='user3 Jams', user_id=self.user3_id)\r\n\r\n db.session.add_all([testuserp1, testuserp2, user2p1, user2p2, user3p1])\r\n 
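        # Note: the Playlist rows are committed first so that the PlaylistUser
        # rows added below reference playlist ids that already exist.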
db.session.commit()\r\n\r\n # Add playlist/user relationship for created playlists\r\n pu1 = PlaylistUser(user_id=self.testuser_id, playlist_id=1111)\r\n pu2 = PlaylistUser(user_id=self.testuser_id, playlist_id=2222)\r\n pu3 = PlaylistUser(user_id=self.user2_id, playlist_id=3333)\r\n pu4 = PlaylistUser(user_id=self.user2_id, playlist_id=4444)\r\n pu5 = PlaylistUser(user_id=self.user3_id, playlist_id=5555)\r\n db.session.add_all([pu1, pu2, pu3, pu4, pu5])\r\n db.session.commit()\r\n\r\n # add some iked playlists\r\n liked_playlist1 = PlaylistUser(\r\n user_id=self.testuser_id, playlist_id=3333)\r\n liked_playlist2 = PlaylistUser(\r\n user_id=self.nathan_id, playlist_id=5555)\r\n db.session.add_all([liked_playlist1, liked_playlist2])\r\n db.session.commit()\r\n\r\n # Add friend relationships\r\n f1 = Friend(user_1=self.testuser_id, user_2=self.user2_id)\r\n f2 = Friend(user_1=self.testuser_id, user_2=self.user3_id)\r\n\r\n db.session.add_all([f1, f2])\r\n db.session.commit()\r\n\r\n # Add a few songs\r\n\r\n s1 = Song(id=4444, title='Jammin',\r\n artist='Bob Marley', tab_url='placeholder')\r\n s2 = Song(id=5555, title='Rock and Roll',\r\n artist='Led Zeppelin', tab_url='placeholder')\r\n s3 = Song(id=6666, title='Angie',\r\n artist='The Rolling Stones', tab_url='placeholder')\r\n db.session.add_all([s1, s2, s3])\r\n db.session.commit()\r\n\r\n # Add some songs to testuser's likes\r\n l1 = Like(user_id=self.testuser_id, song_id=4444)\r\n l2 = Like(user_id=self.testuser_id, song_id=5555)\r\n db.session.add_all([l1, l2])\r\n db.session.commit()\r\n\r\n # Add some songs to playlists\r\n ps1 = PlaylistSong(song_id=4444, playlist_id=1111)\r\n ps2 = PlaylistSong(song_id=5555, playlist_id=1111)\r\n ps3 = PlaylistSong(song_id=6666, playlist_id=3333)\r\n ps4 = PlaylistSong(song_id=4444, playlist_id=5555)\r\n db.session.add_all([ps1, ps2, ps3, ps4])\r\n db.session.commit()\r\n\r\n self.testuser = User.query.get(8989)\r\n self.user2 = User.query.get(9546)\r\n self.user3 = User.query.get(6745)\r\n self.user4 = User.query.get(76845)\r\n self.nathan = User.query.get(12647)\r\n self.testuserp1 = Playlist.query.get(1111)\r\n self.testuserp2 = Playlist.query.get(2222)\r\n self.user2p1 = Playlist.query.get(3333)\r\n self.user2p2 = Playlist.query.get(4444)\r\n self.user3p1 = Playlist.query.get(5555)\r\n self.song1 = Song.query.get(4444)\r\n self.song2 = Song.query.get(5555)\r\n self.song3 = Song.query.get(6666)\r\n\r\n def test_own_playlist_page(self):\r\n \"\"\"\r\n Test that a user can view their own playlist page if logged in,\r\n with options to add or remove songs, or delete playlist\r\n \"\"\"\r\n\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n resp = c.get(f'/playlists/{self.testuserp1.id}')\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn('Jammin', str(resp.data))\r\n self.assertIn('Rock and Roll', str(resp.data))\r\n self.assertNotIn('Angie', str(resp.data))\r\n self.assertIn('Delete Playlist', str(resp.data))\r\n self.assertIn('Add Songs', str(resp.data))\r\n self.assertIn('Remove From Playlist', str(resp.data))\r\n\r\n def test_other_playlist_page(self):\r\n \"\"\"\r\n Test that a user can view another user's playlist page if logged in,\r\n but does not have the option to add songs or delete playlist\r\n \"\"\"\r\n\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n resp = c.get(f'/playlists/{self.user2p1.id}')\r\n\r\n 
self.assertEqual(resp.status_code, 200)\r\n self.assertNotIn('Jammin', str(resp.data))\r\n self.assertNotIn('Rock and Roll', str(resp.data))\r\n self.assertIn('Angie', str(resp.data))\r\n self.assertNotIn('Delete Playlist', str(resp.data))\r\n self.assertNotIn('Add Songs', str(resp.data))\r\n self.assertNotIn('Remove From Playlist', str(resp.data))\r\n\r\n def test_nonexistent_playlist_page(self):\r\n \"\"\"\r\n Test that a 404 is returned if a user tries to view the page\r\n for a playlist that does not exist\r\n \"\"\"\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n resp = c.get('/playlists/12548')\r\n\r\n self.assertEqual(resp.status_code, 404)\r\n self.assertIn(\r\n 'The resource or page you are looking for', str(resp.data))\r\n\r\n def test_likes_page(self):\r\n \"\"\"Test that a user can view their liked songs page if logged in\"\"\"\r\n\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n resp = c.get('/playlists/0')\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn('Jammin', str(resp.data))\r\n self.assertIn('Rock and Roll', str(resp.data))\r\n self.assertNotIn('Angie', str(resp.data))\r\n\r\n def test_delete_own_playlist(self):\r\n \"\"\"Test that the user can delete their own playlist if logged in\"\"\"\r\n self.assertEqual(len(self.testuser.playlists), 3)\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n resp = c.post(\r\n f'/playlists/{self.testuserp1.id}/delete', follow_redirects=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.testuser = User.query.get(self.testuser_id)\r\n self.assertEqual(len(self.testuser.playlists), 2)\r\n self.assertIn('MY PLAYLISTS', str(resp.data))\r\n\r\n def test_delete_other_user_playlist(self):\r\n \"\"\"Test that the user cannot delete another user's playlist\"\"\"\r\n self.assertEqual(len(self.user2.playlists), 2)\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n resp = c.post(\r\n f'/playlists/{self.user2p1.id}/delete', follow_redirects=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.user2 = User.query.get(self.user2_id)\r\n self.assertEqual(len(self.user2.playlists), 2)\r\n self.assertIn('You may not delete', str(resp.data))\r\n self.assertIn('DASHBOARD', str(resp.data))\r\n\r\n def test_delete_nonexistent_playlist(self):\r\n \"\"\"\r\n Test that a 404 is returned if a user tries to delete\r\n a playlist that does not exist\r\n \"\"\"\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n resp = c.post('/playlists/12548/delete')\r\n\r\n self.assertEqual(resp.status_code, 404)\r\n self.assertIn(\r\n 'The resource or page you are looking for', str(resp.data))\r\n\r\n def test_add_song_to_own_playlist(self):\r\n \"\"\"Test if user can add a song to their own playlist\"\"\"\r\n\r\n self.assertEqual(len(self.testuserp2.songs), 0)\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n obj = {'songInfo': {\r\n 'id': self.song1.id,\r\n 'title': self.song1.title,\r\n 'artist': self.song1.artist,\r\n 'tab_url': self.song1.tab_url\r\n }, 'playlists': [{\"id\": self.testuserp2.id, \"user_id\": self.testuser.id, \"name\": self.testuserp2.name}]\r\n\r\n }\r\n\r\n resp = 
c.post('/playlists/add-song',\r\n json={\"json\": json.dumps(obj)}, headers={\r\n \"Referer\": '/playlists/2222'\r\n }, follow_redirects=True)\r\n self.testuserp2 = Playlist.query.get(2222)\r\n self.assertEqual(len(self.testuserp2.songs), 1)\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(self.testuserp2.name, str(resp.data))\r\n self.assertIn(self.song1.title, str(resp.data))\r\n\r\n def test_add_song_to_other_user_playlist(self):\r\n \"\"\"Test that user cannot add a song to another user's playlist\"\"\"\r\n\r\n self.assertEqual(len(self.user2p2.songs), 0)\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n obj = {'songInfo': {\r\n 'id': self.song1.id,\r\n 'title': self.song1.title,\r\n 'artist': self.song1.artist,\r\n 'tab_url': self.song1.tab_url\r\n }, 'playlists': [{\"id\": self.user2p2.id, \"user_id\": self.user2.id, \"name\": self.user2p2.name}]\r\n\r\n }\r\n\r\n resp = c.post('/playlists/add-song',\r\n json={\"json\": json.dumps(obj)}, follow_redirects=True)\r\n self.user2p2 = Playlist.query.get(4444)\r\n self.assertEqual(len(self.user2p2.songs), 0)\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn('DASHBOARD', str(resp.data))\r\n self.assertIn('You may not', str(resp.data))\r\n\r\n def test_remove_song_from_own_playlist(self):\r\n \"\"\"Test if user can remove a song from one of their own playlists\"\"\"\r\n\r\n self.assertEqual(len(self.user2p1.songs), 1)\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.user2.id\r\n\r\n obj = {\r\n 'id': self.song3.id,\r\n 'title': self.song3.title,\r\n 'artist': self.song3.artist,\r\n 'tab_url': self.song3.tab_url\r\n }\r\n\r\n resp = c.post(f'/playlists/{self.user2p1.id}/remove-song', json={\"json\": json.dumps(obj)}, headers={\r\n \"Referer\": f'/playlists/{self.user2p1.id}'}, follow_redirects=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.user2p1 = Playlist.query.get(3333)\r\n self.assertIn(\r\n self.user2p1.name, str(resp.data))\r\n self.assertNotIn(self.song3.title, str(resp.data))\r\n self.assertEqual(len(self.user2p1.songs), 0)\r\n\r\n def test_remove_song_from_other_user_playlist(self):\r\n \"\"\"Test that the user cannot remove a song from another user's playlist\"\"\"\r\n\r\n self.assertEqual(len(self.user3p1.songs), 1)\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n obj = {\r\n 'id': self.song1.id,\r\n 'title': self.song1.title,\r\n 'artist': self.song1.artist,\r\n 'tab_url': self.song1.tab_url\r\n }\r\n\r\n resp = c.post(f'/playlists/{self.user3p1.id}/remove-song',\r\n json={\"json\": json.dumps(obj)}, follow_redirects=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.user3p1 = Playlist.query.get(5555)\r\n self.assertIn('DASHBOARD', str(resp.data))\r\n self.assertIn('You may not remove', str(resp.data))\r\n self.assertEqual(len(self.user3p1.songs), 1)\r\n\r\n def test_remove_from_nonexistent_playlist(self):\r\n \"\"\"\r\n Test that a 404 is returned if a user tries to remove a song\r\n from a playlist that does not exist\r\n \"\"\"\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n obj = {\r\n 'id': self.song1.id,\r\n 'title': self.song1.title,\r\n 'artist': self.song1.artist,\r\n 'tab_url': self.song1.tab_url\r\n }\r\n\r\n resp = c.post('/playlists/12548/remove-song',\r\n json={\"json\": 
json.dumps(obj)})\r\n\r\n self.assertEqual(resp.status_code, 404)\r\n self.assertIn(\r\n 'The resource or page you are looking for', str(resp.data))\r\n\r\n def test_like_playlist(self):\r\n \"\"\"Test that a user can like another user's playlist\"\"\"\r\n\r\n self.assertEqual(len(self.user3.playlists), 1)\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.user3.id\r\n\r\n resp = c.post(f'/playlists/{self.user2p1.id}/like', headers={\r\n 'Referer': f'/users/{self.user3.id}/playlists'}, follow_redirects=True)\r\n\r\n self.user3 = User.query.get(self.user3.id)\r\n self.assertEqual(len(self.user3.playlists), 2)\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn('Playlist successfully added', str(resp.data))\r\n self.assertIn(self.user2p1.name, str(resp.data))\r\n\r\n def test_like_own_playlist(self):\r\n \"\"\"Test that a user cannot like their own playlist\"\"\"\r\n\r\n self.assertEqual(len(self.user2.playlists), 2)\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.user2.id\r\n\r\n resp = c.post(f'/playlists/{self.user2p1.id}/like', headers={\r\n 'Referer': f'/users/{self.user2.id}/playlists'}, follow_redirects=True)\r\n\r\n self.user2 = User.query.get(self.user2.id)\r\n self.assertEqual(len(self.user2.playlists), 2)\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn('You cannot like', str(resp.data))\r\n self.assertIn(self.user2p1.name, str(resp.data))\r\n self.assertIn(self.user2p2.name, str(resp.data))\r\n\r\n def test_like_already_liked_playlist(self):\r\n \"\"\"Test that a user cannot like a playlist that they have already liked\"\"\"\r\n\r\n self.assertEqual(len(self.nathan.playlists), 1)\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.nathan.id\r\n\r\n resp = c.post(f'/playlists/{self.user3p1.id}/like', headers={\r\n 'Referer': f'/users/{self.nathan.id}/playlists'}, follow_redirects=True)\r\n\r\n self.nathan = User.query.get(self.nathan.id)\r\n self.assertEqual(len(self.nathan.playlists), 1)\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn('You already like', str(resp.data))\r\n self.assertIn(self.user3p1.name, str(resp.data))\r\n\r\n def test_like_nonexistent_playlist(self):\r\n \"\"\"Test that a 404 is returned if a user tries to like a playlist that does not exist\"\"\"\r\n with self.client as c:\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n resp = c.post(f'playlists/837364/like')\r\n\r\n self.assertEqual(resp.status_code, 404)\r\n\r\n def test_unlike_playlist(self):\r\n \"\"\"Test that a user can unlike a liked playlist\"\"\"\r\n\r\n self.assertEqual(len(self.nathan.playlists), 1)\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.nathan.id\r\n\r\n resp = c.post(f'/playlists/{self.user3p1.id}/unlike', headers={\r\n 'Referer': f'/users/{self.nathan.id}/playlists'}, follow_redirects=True)\r\n\r\n self.nathan = User.query.get(self.nathan.id)\r\n self.assertEqual(len(self.nathan.playlists), 0)\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn('Playlist successfully removed', str(resp.data))\r\n self.assertNotIn(self.user3p1.name, str(resp.data))\r\n\r\n def test_unlike_playlist_not_liked(self):\r\n \"\"\"\r\n Test that a user cannot unlike a playlist that is not in \r\n their liked playlists\r\n \"\"\"\r\n with self.client as c:\r\n\r\n with c.session_transaction() as sess:\r\n 
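                # Note: writing CURR_USER_KEY directly into the test session
                # simulates an already-logged-in user (here user4) without
                # going through the login view.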
sess[CURR_USER_KEY] = self.user4.id\r\n\r\n resp = c.post(f'/playlists/{self.user3p1.id}/unlike', headers={\r\n 'Referer': f'/users/{self.user4.id}/playlists'}, follow_redirects=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn('This playlist is not', str(resp.data))\r\n self.assertIn('MY PLAYLISTS', str(resp.data))\r\n\r\n def test_unlike_nonexistent_playlist(self):\r\n \"\"\"Test that a 404 is returned if a user tries to unlike a playlist that does not exist\"\"\"\r\n with self.client as c:\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = self.testuser.id\r\n\r\n resp = c.post(f'playlists/837364/unlike')\r\n\r\n self.assertEqual(resp.status_code, 404)\r\n","repo_name":"15ogburnw/capstone-1-tabular","sub_path":"test_playlist_views.py","file_name":"test_playlist_views.py","file_ext":"py","file_size_in_byte":20117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40769493382","text":"import re\nfrom collections import defaultdict\n\n\ndef parse(filename):\n text = open(filename).read().split('\\n')\n regions = [list(map(int, re.findall(\"\\d+\", l))) for l in text]\n grid = defaultdict(int)\n for _, a, b, c, d in regions:\n for x in range(a, a + c):\n for y in range(b, b + d):\n grid[x + 1j * y] += 1\n return regions, grid\n\n\ndef part1(grid):\n print(len([1 for v in grid.values() if v > 1]))\n\n\ndef part2(regions, grid):\n for q, a, b, c, d in regions:\n good = True\n for x in range(a, a + c):\n for y in range(b, b + d):\n if grid[x + 1j * y] > 1:\n good = False\n if good:\n print(q)\n\n\nregions, grid = parse(\"input\")\npart1(grid)\npart2(regions, grid)\n","repo_name":"obfuscat3d/AdventOfCode","sub_path":"2018/d3/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8723897287","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 24 10:38:43 2018\r\n\r\n@author: shubham\r\n\"\"\"\r\n\r\nimport writeToCsv as wtv\r\nimport requests\r\nimport re\r\n\r\ndef save_url_Status_Time(url):\r\n print(\"in save_url_Status_Time\" )\r\n res=requests.get(url)\r\n if(re.match(\"^[2]\\d\\d$\", str(res.status_code))):\r\n status=\"UP\"\r\n else:\r\n status=\"Down\"\r\n responce_time=str(res.elapsed.total_seconds()*1000)+\"ms\"\r\n wtv.writeToFile(url,status,responce_time)\r\n print(\"out save_url_Status_Time\" )\r\n \r\n\r\n\r\ndef getStatus(url,time):\r\n print(\"in getStatus\" )\r\n res=requests.get(url)\r\n if(re.match(\"^[2]\\d\\d$\", str(res.status_code))):\r\n status=\"UP\"\r\n else:\r\n status=\"Down\"\r\n responce_time=str(time)+\"ms\"\r\n wtv.writeToFile(url,status,responce_time)\r\n print(\"out getStatus\" )\r\n \r\n \r\ndef get_URL_status(url):\r\n status='Down'\r\n try:\r\n res=requests.get(url)\r\n# print(res.status_code)\r\n if(re.match(\"^[2]\\d\\d$\", str(res.status_code))):\r\n status=\"UP\"\r\n else:\r\n status=\"Down\"\r\n return status\r\n except:\r\n return status\r\n\r\ndef saveFlow(url,status,time):\r\n print(\"in saveFlow\" )\r\n wtv.writeToFile(url,status,str(time/1000)+\"sec\")\r\n print(\"out saveFlow\" )\r\n 
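# Aside: a minimal usage sketch for get_URL_status above; the __main__ guard and
# the example URLs are illustrative assumptions, not part of the original script.
if __name__ == '__main__':
    for candidate in ('https://example.com', 'https://httpbin.org/status/503'):
        print(candidate, get_URL_status(candidate))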
\r\n#print(get_URL_status('https://www.ica.se/logga-in/?returnurl=https%3A%2F%2Fwww.ica.se%2F'))","repo_name":"shubham1809/MonitoringScript","sub_path":"checkStatusofURL.py","file_name":"checkStatusofURL.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"46188490371","text":"import numpy as np\n\nclass Node(object):\n def __init__(self, inbound_nodes=[]):\n #Nodes from which this node receives values\n self.inbound_nodes = inbound_nodes\n #Nodes to which thiss node passes values\n self.outbound_nodes = []\n # A calculated value\n self.value = None\n #partials of this node w.r.t. input\n self.gradients = {}\n # for each inbound node here add this node as an outbound node\n for n in self.inbound_nodes:\n n.outbound_nodes.append(self)\n def forward(self):\n #compute the output value based on 'inbound_nodes' and store the results in self.value\n pass\n def backward(self):\n pass\n\n\nclass Input(Node):\n def __init__(self):\n #input node has no inbound nodes\n Node.__init__(self)\n #it is the only node where the values can be passed as an argument to forward()\n def forward(self) :\n pass\n def backward(self):\n self.gradients = {self: 0}\n for n in self.outbound_nodes:\n grad_cost = n.gradients[self]\n self.gradients[self] += grad_cost*1\n\nclass Add(Node):\n #node that perform calculation: Addition\n #it takes two inbound nodes and addes the values of those nodes\n def __init__(self, *inputs):\n Node.__init__(self, inputs)\n def forward(self):\n self.value = 0\n for n in self.inbound_nodes:\n self.value += n.value\n #self.value = self.inbound_nodes[0].value + self.inbound_nodes[1].value\n\nclass Linear(Node):\n def __init__(self, X, W, b):\n Node.__init__(self, [X, W, b])\n \n def forward(self):\n X = self.inbound_nodes[0].value\n W = self.inbound_nodes[1].value\n b = self.inbound_nodes[2].value\n Z = np.dot(X, W)\n self.value = Z + b\n def backward(self):\n self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}\n for n in self.outbound_nodes:\n grad_cost = n.gradients[self]\n self.gradients[self.inbound_nodes[0]] += np.dot(grad_cost, self.inbound_nodes[1].value.T)\n self.gradients[self.inbound_nodes[1]] += np.dot(self.inbound_nodes[0].value.T, grad_cost)\n self.gradients[self.inbound_nodes[2]] += np.sum(grad_cost, axis=0, keepdims=False)\n\nclass Sigmoid(Node):\n def __init__(self, node):\n Node.__init__(self, [node])\n def _sigmoid(self, x):\n return 1./(1.+np.exp(-x))\n def forward(self):\n self.value = self._sigmoid(self.inbound_nodes[0].value)\n def backward(self):\n self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}\n for n in self.outbound_nodes:\n grad_cost = n.gradients[self]\n sigmoid = self.value\n self.gradients[self.inbound_nodes[0]] += sigmoid * (1 - sigmoid) * grad_cost\n\nclass MSE(Node):\n def __init__(self, y, a):\n Node.__init__(self, [y, a])\n def forward(self):\n y = self.inbound_nodes[0].value.reshape(-1, 1)\n a = self.inbound_nodes[1].value.reshape(-1, 1)\n self.m = self.inbound_nodes[0].value.shape[0]\n self.diff = y-a\n error = np.square(self.diff)\n self.value = np.mean(error)\n def backward(self):\n self.gradients[self.inbound_nodes[0]] = (2 / self.m) * self.diff\n self.gradients[self.inbound_nodes[1]] = (-2 / self.m) * self.diff\n\ndef topological_sort(feed_dict):\n \"\"\"\n Sort generic nodes in topological order using Kahn's Algorithm.\n\n `feed_dict`: A dictionary where the key is a `Input` node and the value is the 
respective value feed to that node.\n\n Returns a list of sorted nodes.\n \"\"\"\n\n input_nodes = [n for n in feed_dict.keys()]\n\n G = {}\n nodes = [n for n in input_nodes]\n while len(nodes) > 0:\n n = nodes.pop(0)\n if n not in G:\n G[n] = {'in': set(), 'out': set()}\n for m in n.outbound_nodes:\n if m not in G:\n G[m] = {'in': set(), 'out': set()}\n G[n]['out'].add(m)\n G[m]['in'].add(n)\n nodes.append(m)\n\n L = []\n S = set(input_nodes)\n while len(S) > 0:\n n = S.pop()\n\n if isinstance(n, Input):\n n.value = feed_dict[n]\n\n L.append(n)\n for m in n.outbound_nodes:\n G[n]['out'].remove(m)\n G[m]['in'].remove(n)\n # if no other incoming edges add to S\n if len(G[m]['in']) == 0:\n S.add(m)\n return L\n\n#def forward_pass(output_node, sorted_nodes):\n#def forward_pass(graph):\ndef forward_and_backward(graph):\n #performs forward pass on list of sorted nodes and returns output_node's value\n for n in graph:\n n.forward()\n for n in graph[::-1]:\n n.backward()\n #return output_node.value\n\ndef sgd_update(trainable, learning_rate=1e-2):\n for t in trainable:\n partial = t.gradients[t]\n t.value -= learning_rate * partial","repo_name":"MandyK94/MiniFlow","sub_path":"miniflow.py","file_name":"miniflow.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33472760783","text":"import numpy as np\nfrom .abstract_class import InnerProdSketcher, InnerProdSketch, compute_adaptive_threshold, hash_kwise\nfrom numba import njit\n\n#\n# Threshold Sampling Sketch\n#\nclass TSSketch(InnerProdSketch):\n def __init__(self, sk_indices: np.ndarray, sk_values: np.ndarray, threshold: float, vector_norm: float, norm: int) -> None:\n self.sk_indices: np.ndarray = sk_indices\n self.sk_values: np.ndarray = sk_values\n self.threshold: float = threshold\n self.vector_norm: float = vector_norm\n self.norm: int = norm\n \n @staticmethod\n @njit(parallel=False)\n def inner_product_numba(sk_indicesA, sk_valuesA, normA, thresholdA, vector_normA, sk_indicesB, sk_valuesB, normB, thresholdB, vector_normB):\n i = 0\n j = 0\n ip_est = 0\n cnt = 0\n while i < len(sk_indicesA) and j < len(sk_indicesB):\n ia, va = sk_indicesA[i], sk_valuesA[i]\n ib, vb = sk_indicesB[j], sk_valuesB[j]\n if ia == ib:\n if normA == 0:\n denominator = min(1, \n thresholdA * (1 / vector_normA), \n thresholdB * (1 / vector_normB))\n else:\n denominator = min(1, \n thresholdA * ((va / vector_normA) ** 2)**(normA/2), \n thresholdB * ((vb / vector_normB) ** 2)**(normB/2))\n ip_est += va * vb / denominator\n cnt += 1\n if ia <= ib:\n i += 1\n else:\n j += 1\n # return ip_est\n return (ip_est, cnt)\n \n def inner_product(self, other: 'TSSketch') -> float:\n return self.inner_product_numba(self.sk_indices, self.sk_values, self.norm, self.threshold, self.vector_norm, other.sk_indices, other.sk_values, other.norm, other.threshold, other.vector_norm)\n \n # ip_est = 0\n # cnt = 0\n # for iia, ia in enumerate(self.sk_indices):\n # if ia in other.sk_indices:\n # iib = np.where(other.sk_indices == ia)[0][0]\n # va = self.sk_values[iia]\n # vb = other.sk_values[iib]\n # if self.norm == 0:\n # denominator = min(1, \n # self.threshold * (1 / self.vector_norm), \n # other.threshold * (1 / other.vector_norm))\n # else:\n # denominator = min(1, \n # self.threshold * ((va / self.vector_norm) ** 2)**(self.norm/2), \n # other.threshold * ((vb / other.vector_norm) ** 2)**(other.norm/2))\n # ip_est += va * vb / denominator\n # cnt+=1\n # print(f\"cnt: {cnt}\")\n # 
return (ip_est, cnt)\n\n\nclass TS(InnerProdSketcher):\n def __init__(self, sketch_size: int, seed: int, norm: int) -> None:\n self.sketch_size: int = sketch_size\n self.seed: int = seed\n self.norm: int = norm\n\n def sketch(self, vector: np.ndarray) -> TSSketch:\n vector_norm = np.linalg.norm(vector, ord=self.norm)\n vector_nonzeroIndex = np.nonzero(vector)[0]\n T = compute_adaptive_threshold(abs(vector), self.sketch_size, l_norm=self.norm)\n hashes, values = hash_kwise(vector, self.seed)\n if self.norm == 0:\n index_under_threshold = hashes <= T * (1/vector_norm)\n else:\n index_under_threshold = hashes <= T * ((vector[vector_nonzeroIndex]/vector_norm)**2)**(self.norm/2)\n sk_indices = vector_nonzeroIndex[index_under_threshold]\n sk_values = values[index_under_threshold]\n\n k_min = np.argsort(sk_indices)[:sk_indices.size]\n sk_indices = sk_indices[k_min]\n sk_values = sk_values[k_min]\n return TSSketch(sk_indices, sk_values, T, vector_norm, self.norm)","repo_name":"VIDA-NYU/SamplingMethodsForInnerProductSketching","sub_path":"src/threshold_sampling.py","file_name":"threshold_sampling.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3681890470","text":"\"\"\"\nProvides function for loading .csv MALDI Data into dictionary of dataframes with keys according to the filename\n\"\"\"\nimport os\nfrom datetime import date\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom utils.peaks import plot_vert\nfrom utils.timer import timer\n\n\n@timer\ndef load_ms_csv(path: str):\n \"\"\"\n load ms data from .csv export into a dictionary with key according to file name and dataframes of m/z and signal.\n \"\"\"\n\n # create empty directory\n data = {}\n\n # for each file in the path directory:\n for file in os.listdir(path):\n\n # if the file is a txt file:\n if file.endswith(\".txt\"):\n\n # print message without starting new line\n print(f\"fetching data for {file}...\", end=\"\\r\")\n\n # load m/z and signal values and put them in DataFrame with filename as key\n mz = np.loadtxt(path + file, usecols=(0,))\n signal = np.loadtxt(path + file, usecols=(1,))\n data[file[: len(file) - 11].replace(\"_\", \"\")] = pd.DataFrame(\n {\"mz\": mz, \"signal\": signal}\n )\n # return the dataframe\n return data\n\n\n@timer\ndef plot_ms(\n plots: list[list[str]],\n df: dict,\n check: list[float] = None,\n xlim: tuple[float, float] = None,\n ylim: tuple[float, float] = None,\n):\n \"\"\"\n creates a plot of the ms data with each spectrum in subplot.\n\n input:\n plots: list containing lists of names of the spectra in df to plot in each subplot.\n check (optional: plot vertical lines at m/z provided for control\n\n output:\n a plot object.\n \"\"\"\n # create plot object\n f, axs = plt.subplots(len(plots), 1, figsize=(15, 8), sharex=True, sharey=True)\n\n # for each list in plots:\n for i, subplot in enumerate(plots):\n\n # if this isn't the first plot, create new subplot\n if len(plots) > 1:\n ax = f.add_subplot(axs[i])\n\n # for each str in subplot:\n for j, plot in enumerate(subplot):\n\n # store values for plotting\n mz = df[plot][\"mz\"]\n signal = df[plot][\"signal\"]\n\n # if vertical lines, plot them in the subplot\n\n # plot the data, add labels\n plt.plot(mz, signal + 10000 * j, \"-\", linewidth=1)\n plt.xlabel(\"m/z\")\n plt.ylabel(\"Signal\")\n\n # if user adds x limits, change them\n if xlim:\n plt.xlim(xlim)\n\n if ylim:\n plt.ylim(ylim)\n else:\n # y limits is 0 to 
105% of highest signal\n plt.ylim([0, max(signal) * 1.05])\n\n # add grid and put legend at best location.\n plt.grid()\n plt.legend(subplot, loc=\"best\")\n if check:\n for line in check:\n plot_vert(line)\n\n # title above the entire plot.\n plt.suptitle(\"Mass Spectrum - %s\" % date.today())\n","repo_name":"SimonBirgersson/Projects","sub_path":"utils/maldi_tof.py","file_name":"maldi_tof.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"27935949921","text":"import os\nimport conversion\nimport evaluation\nimport json\n\ndef evaluate_via(weightsPath, imagesPaths, viaGroundTruthAnnotationsPaths, outputPath, outputModelPath, limit):\n # Only converts data, does not store it\n assert len(imagesPaths) == len(viaGroundTruthAnnotationsPaths)\n\n if len(imagesPaths) == 0:\n return\n\n convertedGroundTruthPaths = []\n for index in range(len(imagesPaths)):\n viaGroundTruthPath = viaGroundTruthAnnotationsPaths[index]\n convertedViaData = conversion.via_data_to_coco_evaluation_format(imagesPaths[index], viaGroundTruthPath)\n convertedViaDataPath = os.path.splitext(viaGroundTruthPath)[0] + \"_converted.json\"\n with open(convertedViaDataPath, \"w\") as convertedJsonFile:\n json.dump(convertedViaData, convertedJsonFile)\n convertedGroundTruthPaths.append(convertedViaDataPath)\n\n evaluation.evaluate(weightsPath, imagesPaths, convertedGroundTruthPaths, outputPath, outputModelPath, limit)\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Use the COCO evaluation functions on the detection results by the \" +\n \"model with pre-computed weights.\")\n parser.add_argument(\"--weightsPath\",\n \"-w\", \n required=True, \n metavar=\"/path/to/network/weights/\",\n help=\"The path to the pre-trained network weights in hf5 format.\")\n parser.add_argument(\"--imagesPaths\",\n \"-i\",\n required=True,\n nargs='+',\n metavar=\"/paths/to/images/\",\n help=\"The path to the folders with the images that are to be evaluated.\")\n parser.add_argument(\"--groundTruthPaths\",\n \"-g\",\n required=True,\n nargs='+',\n metavar=\"/paths/to/viagrountruths/\",\n help=\"The paths to the ground truth files in VIA format.\")\n parser.add_argument(\"--outputPath\",\n \"-o\",\n required=True,\n metavar=\"/path/to/output/\",\n help=\"The path to the folder where the results are to be stored. Results \" +\n \"are the logs as well as the images with the drawn in bounding boxes.\")\n parser.add_argument(\"--outputModelPath\",\n \"-m\",\n required=False,\n metavar=\"/path/to/model/output\",\n help=\"The path to the folder where the model stores outputs like its internal logs \" +\n \"and the weights if it is being trained. If unsure set to --outputPath.\")\n parser.add_argument(\"--limit\",\n \"-l\",\n required=True,\n default=100,\n help=\"The number of images to be used for evaluation. 
If limit is 0, all images will be used.\")\n\n args = parser.parse_args()\n\n limit = args.limit if args.limit else 100\n limit = int(limit)\n if limit < 0:\n limit = 0\n\n if not args.outputModelPath:\n args.outputModelPath = args.outputPath\n\n evaluate_via(args.weightsPath, args.imagesPaths, args.groundTruthPaths, args.outputPath, args.outputModelPath, limit)","repo_name":"florianblume/forschungsprojekt","sub_path":"mrcnn_vufo_no_mask_branch_added_regularizer/evaluation_via.py","file_name":"evaluation_via.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29277038726","text":"from prog_models.models import BatteryCircuit\nfrom prog_algs import *\n\ndef run_example():\n ## Setup\n def future_loading(t, x = None):\n # Variable (piece-wise) future loading scheme \n if (t < 600):\n i = 2\n elif (t < 900):\n i = 1\n elif (t < 1800):\n i = 4\n elif (t < 3000):\n i = 2\n else:\n i = 3\n return {'i': i}\n batt = BatteryCircuit()\n ## State Estimation - perform a single ukf state estimate step\n # filt = state_estimators.UnscentedKalmanFilter(batt, batt.parameters['x0'])\n filt = state_estimators.ParticleFilter(batt, batt.parameters['x0'])\n\n import matplotlib.pyplot as plt # For plotting\n print(\"Prior State:\", filt.x.mean)\n print('\\tSOC: ', batt.event_state(filt.x.mean)['EOD'])\n fig = filt.x.plot_scatter(label='prior')\n example_measurements = {'t': 32.2, 'v': 3.915}\n t = 0.1\n filt.estimate(t, future_loading(t), example_measurements)\n print(\"Posterior State:\", filt.x.mean)\n print('\\tSOC: ', batt.event_state(filt.x.mean)['EOD'])\n filt.x.plot_scatter(fig= fig, label='posterior')\n\n ## Prediction - Predict EOD given current state\n # Setup prediction\n mc = predictors.MonteCarlo(batt)\n if isinstance(filt, state_estimators.UnscentedKalmanFilter):\n samples = filt.x.sample(20)\n else: # Particle Filter\n samples = filt.x.raw_samples()\n\n # Predict with a step size of 0.1\n (times, inputs, states, outputs, event_states, eol) = mc.predict(samples, future_loading, dt=0.1)\n\n # The results of prediction can be accessed by sample, e.g.,\n times_sample_1 = times[1]\n states_sample_1 = states[1]\n # now states_sample_1[n] corresponds to time_sample_1[n]\n # you can also plot the results (state_sample_1.plot())\n\n # You can also access a state at a specific time using the .snapshot function\n states_time_1 = states.snapshot(1)\n # now you have all the samples from the times[sample][1]\n \n ## Print Metrics\n print(\"\\nEOD Predictions (s):\")\n from prog_algs.metrics import samples as metrics \n print('\\tPercentage between 3005.2 and 3005.6: ', metrics.percentage_in_bounds(eol, [3005.2, 3005.6])*100.0, '%')\n print('\\tAssuming ground truth 3005.25: ', metrics.eol_metrics(eol, 3005.25))\n print('\\tP(Success) if mission ends at 3005.25: ', metrics.prob_success(eol, 3005.25))\n\n # Plot state transition \n fig = states.snapshot(0).plot_scatter(label = \"t={}\".format(int(times[0][0])))\n states.snapshot(10).plot_scatter(fig = fig, label = \"t={}\".format(int(times[0][10])))\n states.snapshot(50).plot_scatter(fig = fig, label = \"t={}\".format(int(times[0][50])))\n\n states.snapshot(-1).plot_scatter(fig = fig, label = \"t={}\".format(int(times[0][-1])))\n plt.show()\n\n# This allows the module to be executed directly \nif __name__ == '__main__':\n 
run_example()\n","repo_name":"standardgalactic/prog_algs","sub_path":"examples/basic_example.py","file_name":"basic_example.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"2926105448","text":"import csv\nfrom datetime import datetime\nimport difflib\nimport os\nimport pwd\nimport re\n\nfrom absl import app\nfrom absl import flags\nfrom google.cloud import storage\nfrom jinja2 import Environment\nfrom jinja2 import FileSystemLoader\nimport requests\n\nfrom nl_server import gcs\nfrom nl_server.embeddings import Embeddings\n\n_SV_THRESHOLD = 0.5\n_NUM_SVS = 10\n_SUB_COLOR = '#ffaaaa'\n_ADD_COLOR = '#aaffaa'\n_PROPERTY_URL = 'https://autopush.api.datacommons.org/v2/node'\n_GCS_BUCKET = 'datcom-embedding-diffs'\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'base', '', 'Base index. Can be a versioned embeddings file name on GCS '\n 'or a local file with absolute path')\nflags.DEFINE_string(\n 'test', '', 'Test index. Can be a versioned embeddings file name on GCS '\n 'or a local file with absolute path')\nflags.DEFINE_string('queryset', '', 'Full path to queryset CSV')\nflags.DEFINE_string('indextype', '',\n 'The base index type such as small or medium_ft')\n\n_TEMPLATE = 'tools/nl/svindex_differ/template.html'\n_REPORT = '/tmp/diff_report.html'\n_FILE_PATTERN_EMBEDDINGS = r'embeddings_.*_\\d{4}_\\d{2}_\\d{2}_\\d{2}_\\d{2}_\\d{2}\\.csv'\n_FILE_PATTERN_FINETUNED_EMBEDDINGS = r'embeddings_.*_\\d{4}_\\d{2}_\\d{2}_\\d{2}_\\d{2}_\\d{2}\\.ft.*\\.csv'\n\nAUTOPUSH_KEY = os.environ.get('AUTOPUSH_KEY')\nassert AUTOPUSH_KEY\n\n\ndef _get_sv_names(sv_dcids):\n \"\"\"Get stat var names by making a mixer call\"\"\"\n headers = {'Content-Type': 'application/json'}\n headers['x-api-key'] = AUTOPUSH_KEY\n resp = requests.post(_PROPERTY_URL,\n json={\n 'nodes': sv_dcids,\n 'property': '->name'\n },\n headers=headers).json()\n result = {}\n for node, node_arcs in resp.get('data', {}).items():\n for v in node_arcs.get('arcs', {}).get('name', {}).get('nodes', []):\n if 'value' in v:\n result[node] = (v['value'])\n break\n return result\n\n\ndef _prune(res):\n svs = []\n sv_info = {}\n for i in range(len(res['SV'])):\n score = res['CosineScore'][i]\n if i < _NUM_SVS and score >= _SV_THRESHOLD:\n sv = res['SV'][i]\n svs.append(sv)\n sv_info[sv] = {\n 'sv': sv,\n 'rank': i + 1,\n 'score': score,\n 'sentence_scores': res['SV_to_Sentences'].get(sv, [])\n }\n return svs, sv_info\n\n\ndef _get_file_name():\n \"\"\"Get the file name to use\"\"\"\n username = pwd.getpwuid(os.getuid()).pw_name\n date = datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\n return f'{username}_{FLAGS.indextype}_{date}.html'\n\n\ndef _maybe_copy_embeddings(file):\n if re.match(_FILE_PATTERN_EMBEDDINGS, file) or re.match(\n _FILE_PATTERN_FINETUNED_EMBEDDINGS, file):\n lpath = gcs.local_path(file)\n if os.path.exists(lpath):\n return lpath\n return gcs.download_embeddings(file)\n assert file.startswith('/'), \\\n f'File should either be {_FILE_PATTERN_EMBEDDINGS} or {_FILE_PATTERN_FINETUNED_EMBEDDINGS} or an absolute local path'\n return file\n\n\ndef _get_diff_table(diff_list, base_sv_info, test_sv_info):\n \"\"\"Given a list of diffs produced by the difflib.Differ, get the rows of\n sv information to show in the diff table.\n \"\"\"\n diff_table_rows = []\n last_added = -1\n for i, diff in enumerate(diff_list):\n if i <= last_added:\n continue\n last_added = i\n # difflib.Differ will add 2 characters in front of the original text for every line.\n 
diff_sv = diff[2:]\n next_diff = None\n if i < len(diff_list) - 1:\n next_diff = diff_list[i + 1]\n # If theres no + or -, that means this line is the same in both base and test.\n if not diff.startswith('+') and not diff.startswith('-'):\n info = base_sv_info.get(diff_sv, test_sv_info.get(diff_sv))\n diff_table_rows.append((info, info))\n # If the line starts with -, this means it was present in base but not in test.\n elif diff.startswith('-'):\n base_info = base_sv_info.get(diff_sv, {})\n base_info['color'] = _SUB_COLOR\n test_info = {}\n if next_diff and next_diff.startswith('+'):\n test_sv = next_diff[2:]\n test_info = test_sv_info.get(test_sv, {})\n test_info['color'] = _ADD_COLOR\n last_added = i + 1\n diff_table_rows.append((base_info, test_info))\n # Otherwise, the line started with +, which means it was present in test but not base.\n else:\n base_info = {}\n test_info = test_sv_info.get(diff_sv, {})\n test_info['color'] = _ADD_COLOR\n if next_diff and next_diff.startswith('-'):\n base_sv = next_diff[2:]\n base_info = base_sv_info.get(base_sv, {})\n base_info['color'] = _SUB_COLOR\n last_added = i + 1\n diff_table_rows.append((base_info, test_info))\n return diff_table_rows\n\n\ndef _extract_model_name(embeddings_name: str, embeddings_file_path: str) -> str:\n model_path = \"\"\n if \"ft\" in embeddings_file_path:\n # This means we are using embeddings built on finetuned model.\n # Download the model if needed.\n\n # Extract the model name.\n # test embeddings name is of the form:\n # <embeddings_size_*>.<ft_final_*>.<ft_intermediate_*>.<base_model>.csv\n # OR <embeddings_size_*>.<ft_final_*>.<base_model>.csv\n # The model name is comprised of all the parts between <embeddings_size_*>.\n # and \".csv\".\n parts = embeddings_name.split(\".\")\n model_name = \".\".join(parts[1:-1])\n print(f\"finetuned model_name: {model_name}\")\n model_path = gcs.download_model_folder(model_name)\n\n assert \"ft_final\" in model_path\n assert len(model_path.split(\".\")) >= 2\n\n return model_path\n\n\ndef run_diff(base_file, test_file, base_model_path, test_model_path, query_file,\n output_file):\n env = Environment(loader=FileSystemLoader(os.path.dirname(_TEMPLATE)))\n template = env.get_template(os.path.basename(_TEMPLATE))\n\n print(\"=================================\")\n print(\n f\"Setting up the Base Embeddings from: {base_file}; Base model from: {base_model_path}\"\n )\n base = Embeddings(base_file, base_model_path)\n print(\"=================================\")\n print(\n f\"Setting up the Test Embeddings from: {test_file}; Test model from: {test_model_path}\"\n )\n test = Embeddings(test_file, test_model_path)\n print(\"=================================\")\n\n # Get the list of diffs\n diffs = []\n all_svs = set()\n with open(query_file) as f:\n for row in csv.reader(f):\n if not row:\n continue\n query = row[0].strip()\n if not query or query.startswith('#') or query.startswith('//'):\n continue\n assert ';' not in query, 'Multiple query not yet supported'\n base_svs, base_sv_info = _prune(base.detect_svs(query))\n test_svs, test_sv_info = _prune(test.detect_svs(query))\n for sv in base_svs + test_svs:\n all_svs.add(sv)\n if base_svs != test_svs:\n diff_list = list(difflib.Differ().compare(base_svs, test_svs))\n diff_table_rows = _get_diff_table(diff_list, base_sv_info, test_sv_info)\n diffs.append((query, diff_table_rows))\n\n # Update the diffs with sv names\n sv_names = _get_sv_names(list(all_svs))\n for _, diff_table_rows in diffs:\n for row in diff_table_rows:\n for info in 
row:\n info['name'] = sv_names.get(info.get('sv'), '')\n\n # Render the html with the diffs\n with open(output_file, 'w') as f:\n f.write(\n template.render(base_file=FLAGS.base, test_file=FLAGS.test,\n diffs=diffs))\n print('')\n print(f'Saving locally to {output_file}')\n print('')\n sc = storage.Client()\n bucket = sc.bucket(_GCS_BUCKET)\n\n # Upload diff report html to GCS\n print(\"Attempting to write to GCS\")\n gcs_filename = _get_file_name()\n blob = bucket.blob(gcs_filename)\n # Since the files can be fairly large, use a 10min timeout to be safe.\n blob.upload_from_filename(output_file, timeout=600)\n print(\"Done uploading to GCS.\")\n print(f\"\\t Diff report Filename: {gcs_filename}\")\n\n\ndef main(_):\n assert FLAGS.base and FLAGS.test and FLAGS.queryset\n base_file = _maybe_copy_embeddings(FLAGS.base)\n test_file = _maybe_copy_embeddings(FLAGS.test)\n\n base_model_path = _extract_model_name(FLAGS.base, base_file)\n test_model_path = _extract_model_name(FLAGS.test, test_file)\n\n run_diff(base_file, test_file, base_model_path, test_model_path,\n FLAGS.queryset, _REPORT)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n","repo_name":"CaravanStudios/dc-website","sub_path":"tools/nl/svindex_differ/differ.py","file_name":"differ.py","file_ext":"py","file_size_in_byte":8340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"34216973617","text":"# -*- coding: utf-8 -*-\nimport vk_api\nimport logging\n\ntry:\n from _token import my_token\nexcept ImportError:\n my_token = None\n print('Failed to import my_token')\n exit()\n\nfrom vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType\nfrom vk_api.utils import get_random_id\n\n\ngroup_id = 'group_id'\nlog = logging.getLogger('bot') # logger name\n\n\ndef configure_logging():\n stream_handler = logging.StreamHandler()\n # stream_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')).\n stream_handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))\n stream_handler.setLevel(logging.INFO)\n log.addHandler(stream_handler)\n\n file_handler = logging.FileHandler('bot.log', encoding='utf8')\n file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s', datefmt='%d.%m.%Y %H:%M'))\n log.setLevel(logging.DEBUG)\n log.addHandler(file_handler)\n file_handler.setLevel(logging.DEBUG)\n\n\nclass Bot:\n def __init__(self, _group_id, token):\n self.group_id = _group_id\n self.my_token = token\n self.vk = vk_api.VkApi(token=token)\n self.long_poller = VkBotLongPoll(self.vk, self.group_id)\n self.api = self.vk.get_api()\n\n def run(self):\n for event in self.long_poller.listen():\n try:\n self.on_event(event)\n except Exception:\n log.exception('error while handling the event')\n # print('Error - ', ex) replaced with log.exception\n\n def on_event(self, event):\n if event.type == VkBotEventType.MESSAGE_NEW:\n log.debug('sending the message back') # so we can see how the different logging levels fire\n self.api.messages.send(message=event.message.text,\n random_id=get_random_id(),\n peer_id=event.message.peer_id)\n\n else:\n log.info('We cannot handle events of type %s', event.type)\n\n\nif __name__ == '__main__':\n configure_logging()\n bot = Bot(group_id, my_token)\n bot.run()\n","repo_name":"IgorGeraskin22/Vkontakte-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"31274441669","text":"from vector_cube import *\nfrom vector_graph_functions import *\n\n\npoints = [Point(3, [(i // 9) % 3, (i // 3) % 3, i % 3]) for i in range(27)]\n\nedges = []\n\nprint(points)\n\n\nfor i in range(len(points)):\n for j in range(i+1, len(points)):\n if points[i].distance(points[j]) == 1:\n edges.append(Edge(points[i], points[j]))\n\n\n\nvector_graph_to_nx_graph(edges)","repo_name":"gorkemyar/CubeColoring","sub_path":"vector/cube_test2.py","file_name":"cube_test2.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18798167933","text":"from pymongo import MongoClient\nimport os \nfrom alive_progress import alive_bar\nimport numpy as np\nfrom numpy import dot\nfrom numpy.linalg import norm\n\n\ndef fetch_all_data():\n data_list=[]\n mongoclinet= MongoClient(\"mongodb+srv://Kashyap:Kt1234@cmpe-297-project.so36aaq.mongodb.net/?retryWrites=true&w=majority\")\n\n\n ## Creating Database\n mydb = mongoclinet[\"Cmpe-297-database\"]\n\n ## Collection\n mycol = mydb[\"Data\"]\n cursor = mycol.find({})\n with alive_bar(13355) as bar:\n for document in cursor:\n data_list.append([document[\"document_data\"],document[\"document_key\"]])\n bar()\n mongoclinet.close()\n return data_list\n\n\nif __name__ == \"__main__\":\n data=fetch_all_data()\n #print(data[0])\n ","repo_name":"KashyapTamakuwala/Cmpe-297-LSH-Project","sub_path":"KeywordExtractor-Datasets-master/retervie_data.py","file_name":"retervie_data.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15196013084","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 21 00:11:01 2020\r\n\r\n@author: Punit\r\n\"\"\"\r\n\r\n\r\n# This is the code that is used to set up the problem and produce test data for your submission. \r\n# It is reproduced here for your information, or if you would like to run your submission outside of . \r\n# You should not copy/paste this code into the code box below. This code is run automatically 'behind the scenes' before\r\n# your submitted code. 
\r\n\r\nimport numpy as np\r\nimport time\r\nfrom io import BytesIO\r\nimport time\r\nten_sec = int(round(time.time()/20))\r\n#314, 10,11\r\nif ten_sec %3 == 0:\r\n prng = np.random.RandomState(314)\r\nelif ten_sec%3 == 1:\r\n prng = np.random.RandomState(11)\r\nelse:\r\n prng = np.random.RandomState(10)\r\n\r\ndata = np.loadtxt(BytesIO(data_files[\"data/wine/wine.data\"]), dtype=float, delimiter=',')\r\noverall_data = data[:,1:]\r\noverall_labels = data[:,0].astype(int)\r\n\r\nmax_array = np.amax(overall_data, axis = 0)\r\nmin_array = np.amin(overall_data, axis = 0)\r\ndenominator = max_array - min_array\r\nnum_data = len(overall_labels)\r\nfor i in range(num_data):\r\n overall_data[i] = (overall_data[i]-min_array)/denominator\r\n\r\nindices = np.ones(len(overall_labels),dtype= bool)\r\nindices[10:21] = False\r\nindices[60:71] = False\r\nindices[131:141] = False\r\n\r\ntraining_data = overall_data[indices]\r\ntraining_label = overall_labels[indices]\r\nreversed_indices = np.logical_not(indices)\r\ntesting_data = overall_data[reversed_indices]\r\ntesting_label = overall_labels[reversed_indices]\r\n\r\n#shuffle\r\nperm = prng.permutation(len(training_label))\r\ntraining_data = training_data[perm]\r\ntraining_label = training_label[perm]\r\n\r\ndef softmax(x):\r\n \"\"\"\r\n A numerically stable version of the softmax function\r\n \"\"\"\r\n exps = np.exp(x - np.max(x))\r\n return exps / np.sum(exps)\r\n\r\ndef to_one_hot(y, k):\r\n \"\"\"\r\n @brief Convert numeric class values to one hot vectors\r\n @param y An array of labels of length N\r\n @param k Number of classes\r\n @return A 2d array of shape (N x K), where K is the number of classes\r\n \"\"\"\r\n n = y.shape[0]\r\n one_hot = np.zeros((n, k))\r\n one_hot[np.arange(n), y - 1] = 1\r\n return one_hot\r\n\r\ndef accuracy(y, y_hat):\r\n \"\"\"\r\n @param y ground truth labels of shape (N x K)\r\n @param y_hat Estimated probability distributions of shape (N x K)\r\n @return the accuracy of the prediction as a scalar\r\n \"\"\"\r\n return (np.argmax(y, axis=1) == np.argmax(y_hat, axis=1)).mean()\r\n\r\ndef clear_grad(model):\r\n \"\"\"\r\n clear the gradient in the parameters and replace them with 0's\r\n \"\"\"\r\n for name, param, grad in model.named_parameters():\r\n name_split = name.split(\".\")\r\n child_name = name_split[0]\r\n param_name = name_split[1]\r\n model.children[child_name].grads[param_name] = np.zeros_like(model.children[child_name].grads[param_name])\r\n\r\nclass Module:\r\n def __init__(self):\r\n super().__init__()\r\n self.params = dict()\r\n self.grads = dict()\r\n self.children = dict()\r\n self.cache = dict()\r\n\r\n def _register_param(self, name: str, param: np.ndarray):\r\n \"\"\" the parameter can be accessed via self.params[name]\r\n the gradient can be accessed via self.grads[name]\r\n \"\"\"\r\n assert isinstance(param, np.ndarray)\r\n self.params[name] = param\r\n self.grads[name] = np.zeros_like(param)\r\n\r\n def _register_child(self, name: str, child: 'Module'):\r\n \"\"\" the module can be acccessed via self.children[name]\"\"\"\r\n assert isinstance(child, Module)\r\n self.children[name] = child\r\n\r\n def forward(self, *x):\r\n raise NotImplementedError\r\n\r\n def backward(self, *g):\r\n raise NotImplementedError\r\n\r\n def named_parameters(self, base: tuple = ()):\r\n \"\"\"recursively get all params in a generator\"\"\"\r\n assert self.params.keys() == self.grads.keys()\r\n for name in self.params:\r\n full_name = '.'.join(base + (name,))\r\n yield (full_name, self.params[name], 
self.grads[name])\r\n\r\n # recursively on others\r\n for child_name, child in self.children.items():\r\n yield from child.named_parameters(base=base + (child_name,))\r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef weight_init(fan_in, fan_out):\r\n \"\"\"\r\n @param fan_in The number of input units\r\n @param fan_out The number of output units\r\n @return The 2d weight matrix initialized using xavier uniform initializer\r\n \"\"\"\r\n # IMPLEMENT ME\r\n #print(fan_in, fan_out)\r\n limits=np.sqrt(6/(fan_in+fan_out))\r\n #print(\"fan in is=\",fan_in)\r\n #print(\"fan out is=\", fan_out)\r\n return np.random.uniform(-limits,limits,(fan_out,fan_in))\r\n\r\nclass ReLU(Module):\r\n def __init__(self):\r\n super().__init__()\r\n \r\n \r\n\r\n def forward(self, x):\r\n \"\"\"\r\n @brief Takes a batch of input and compute the ReLU output\r\n @param x A numpy array as input (N, in_features)\r\n @return The output at the ReLU layer (N, in_features)\r\n \"\"\"\r\n # IMPLEMENT ME\r\n #print(x)\r\n self.relu_forward=np.copy(x)\r\n return x* (x > 0)\r\n\r\n def backward(self, g):\r\n \"\"\"\r\n @brief Compute the gradients for parameters\r\n @param g The gradient of previous layers\r\n @return The gradients of the loss w.r.t the input of this layer\r\n \"\"\"\r\n #print(g)\r\n \r\n self.relu_forward[self.relu_forward<=0]=0\r\n self.relu_forward[self.relu_forward>0]=1\r\n return g*self.relu_forward\r\n \r\n\r\nclass Linear(Module):\r\n def __init__(self, weight, bias):\r\n super().__init__()\r\n self._register_param('weight', weight)\r\n self._register_param('bias', bias)\r\n \r\n\r\n def forward(self, x):\r\n \"\"\"\r\n @brief Takes a batch of input and compute the linear output\r\n @param x A numpy array as input (N, in_features)\r\n @return The output of the linear layer (N, out_features)\r\n \"\"\"\r\n # IMPLEMENT ME\r\n self.input_linear=np.copy(x)\r\n #print(\"shape of input x and weight in the linear\",x.shape,self.params['weight'].shape)\r\n #print(\"this is w in linear \\n\",self.params['weight'] )\r\n self.lin_forward=x.dot(self.params['weight'].T)+self.params['bias']\r\n #print(\"shape of lin forward\",self.lin_forward.shape)\r\n return self.lin_forward\r\n\r\n def backward(self, g):\r\n \"\"\"\r\n @brief Compute the gradients for parameters\r\n @param g The gradient of previous layers (N, out_features)\r\n @return The gradients of the loss w.r.t the input of this layer (N, in_features)\r\n \"\"\"\r\n #IMPLEMENT ME\r\n #relu_forward\r\n #self.lin_forward.dot(g.T)\r\n \r\n #print(\"g shape in linear\",g.shape)\r\n #print(\"input x shape in linear\", self.input_linear.shape)\r\n lin_grad=g.dot(self.params['weight'])#+ np.sum(g, axis=0)\r\n self.grads['weight']=self.grads['weight']+g.T.dot(self.input_linear)\r\n self.grads['bias']=self.grads['bias']+np.sum(g, axis=0)\r\n return lin_grad\r\n\r\nclass NeuralNetwork(Module):\r\n def __init__(self, d, h, k):\r\n \"\"\"\r\n @brief Initialize weight and bias\r\n @param d size of the input layer\r\n @param h size of the hidden layer\r\n @param k size of the output layer\r\n \"\"\"\r\n super().__init__()\r\n wb = weight_init(d + 1, h)\r\n w1 = wb[:, :d]\r\n #print(\"this is the shape of w1\",w1.shape)\r\n #print(\"this is w1 \\n\",w1)\r\n b1 = wb[:, d]\r\n #print(\"hidden, initial, final layer \",h,d,k)\r\n wb = weight_init(h + 1, k)\r\n w2 = wb[:, :h]\r\n #print(\"this is the shape of w2\",w2.shape)\r\n #print(\"this is w2 \\n\",w2)\r\n b2 = wb[:, h]\r\n self._register_child('Linear1', Linear(w1, b1))\r\n 
self._register_child('ReLU', ReLU())\r\n self._register_child('Linear2', Linear(w2, b2))\r\n\r\n def forward(self, x):\r\n #Linear(self.params['Linear1'],self.params['Linear1'])\r\n \r\n #ReLU()\r\n #Linear2()\r\n \"\"\"\r\n @brief Takes a batch of samples and compute the feedforward output\r\n @param x A numpy array of shape (N x D)\r\n @return The output at the last layer (N x K)\r\n \"\"\"\r\n #IMPLEMENT ME\r\n \r\n self.input_data_shape=x.shape\r\n L1=self.children[\"Linear1\"]\r\n out_linear1=L1.forward(x)\r\n #print(\"after linear 1 \\n\",out_linear1 )\r\n #print(\"shape output of 1 linear layer is \",out_linear1.shape)\r\n relu_1=self.children[\"ReLU\"]\r\n out_relu=relu_1.forward(out_linear1)\r\n #print(\"shape output of ReLU layer is \\n \",out_relu)\r\n L2=self.children[\"Linear2\"]\r\n out_linear2=L2.forward(out_relu)\r\n #print(\"after L2\",out_linear2)\r\n out_forward=np.zeros((out_linear2.shape))\r\n #print(\"self.children bias shape\\n\",self.children[\"Linear1\"].grads[\"weight\"])\r\n return out_linear2\r\n \r\n\r\n def backward(self, y, y_hat):\r\n \"\"\"\r\n @brief Compute the gradients for Ws and bs, you don't\r\n need to return anything\r\n @param y ground truth label of shape (N x K)\r\n @param y_hat predictions of shape (N x K)\r\n \"\"\"\r\n #IMPLEMENT ME\r\n \r\n #print(y.shape,y_hat.shape)\r\n soft_grad=y_hat-y\r\n #print(\"this is soft_grad \\n\",soft_grad)\r\n L2=self.children[\"Linear2\"]\r\n out_linear2=L2.backward(soft_grad)\r\n relu_2=self.children[\"ReLU\"]\r\n out_relu2=relu_2.backward(out_linear2)\r\n L1=self.children[\"Linear1\"]\r\n out_linear1=L1.backward(out_relu2)\r\n\r\ndef update_param(model, lr):\r\n \"\"\"\r\n update the parameters of the network\r\n \"\"\"\r\n #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n #print(lr)\r\n #print(\"printing all the weigts of the model \\n\",model.children['Linear1'].grads['weight'])\r\n #print(\"printing all the weigts of the model \\n\",model.params['weight'])\r\n #my_module.params['weight']\r\n #print(\"input data shape\", model.input_data_shape)\r\n #print(\"printing all the weigts of the model \\n\",model.children['Linear1'].params['weight'].shape)\r\n model.children['Linear1'].params['weight']-=(lr/model.input_data_shape[0])*model.children['Linear1'].grads['weight']\r\n model.children['Linear1'].params['bias']-=(lr/model.input_data_shape[0])*model.children['Linear1'].grads['bias']\r\n model.children['Linear2'].params['weight']-=(lr/model.input_data_shape[0])*model.children['Linear2'].grads['weight']\r\n model.children['Linear2'].params['bias']-=(lr/model.input_data_shape[0])*model.children['Linear2'].grads['bias']\r\n \r\n\r\ndef train_one_epoch(model, x, y, test_x, test_y, lr):\r\n \"\"\"\r\n @brief Takes in a model and train it for one epoch.\r\n @param model The neural network\r\n @param x The features of training data (N x D)\r\n @param y The labels of training data (N x K)\r\n @param test_x The features of testing data (M x D)\r\n @param test_y The labels of testing data (M x K)\r\n @param lr Learning rate\r\n @return (train_accuracy, test_accuracy), the training accuracy and\r\n testing accuracy of the current epoch\r\n \"\"\"\r\n # IMPLEMENT ME\r\n clear_grad(model)\r\n train_accuracy=0.0\r\n test_accuracy=0.0\r\n #for epochs in range(10):\r\n #for batch in range(x.shape[0] // batch_size):\r\n #batch_indices = np.random.choice(range(x.shape[0]), size=batch_size)\r\n # Create a mini-batch of training data and labels\r\n #X_batch 
= x[batch_indices]\r\n #y_batch = y[batch_indices]\r\n #print(\"shape of training data\",x.shape)\r\n out_linear2=model.forward(x)\r\n #print(\"this is model output shape \\n\",out_linear2.shape)\r\n y_hat=np.zeros((out_linear2.shape))\r\n for out in range(out_linear2.shape[0]):\r\n y_hat[out]=softmax(out_linear2[out])\r\n #print(\"softmax shape \\n\",y_hat.shape)\r\n #print(\"this is y shape\",y.shape)\r\n train_accuracy=accuracy(y, y_hat)\r\n #print(\"train accuracy is\",train_accuracy)\r\n model.backward(y, y_hat)\r\n #print(\"after coming out of back prop\",out_back.shape)\r\n #print(\"printing shape in trainign \\n\",model.children['Linear2'].grads['bias'].shape)\r\n update_param(model, lr)\r\n test=model.forward(test_x)\r\n y_hat_test=np.zeros((test.shape))\r\n for out in range(test.shape[0]):\r\n y_hat_test[out]=softmax(test[out])\r\n test_accuracy=accuracy(test_y, y_hat_test)\r\n #print(\"test accuracy is\",test_accuracy)\r\n return (train_accuracy, test_accuracy)\r\n\r\n# Implement step 6 here\r\ny = to_one_hot(training_label, 3)\r\ntest_y = to_one_hot(testing_label, 3)\r\nmy_model=NeuralNetwork(13, 50, 3 )\r\ntrain_accuracy=np.zeros((100))\r\ntest_accuracy=np.zeros((100))\r\nfor epoch in range(100):\r\n train_accuracy[epoch], test_accuracy[epoch]=train_one_epoch(my_model, training_data , y , testing_data , test_y , 0.3)\r\nplt.plot(train_accuracy , label='train accuracy ')\r\nplt.plot(test_accuracy , label='test accuracy')\r\nplt.xlabel('Epochs')\r\nplt.ylabel('Accuracy ')\r\nplt.legend()\r\nprint(\" For the different learning parameter values I observed the following \\n \\\r\n 1. The value of 0.03 is makes the gradeint steps too low and given the fixed number of epochs the gradient does not converge \\n \\\r\n 2. The value of 3.0 is too high and instead of converging to optimum values our model overshoots it and ocscillates around the minima \\n \\\r\n 3. 
The value of 0.3 is the optimum values and gives us the testing accuracy of almost 100% as the number of epochs increases \")\r\nnn_model = my_model\r\n \r\n \r\n \r\n \r\n \r\n \r\n","repo_name":"punitjha/ML-from-Scratch","sub_path":"Feed_Forward_Neural_Network/FFNN_scratch.py","file_name":"FFNN_scratch.py","file_ext":"py","file_size_in_byte":13999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5486503814","text":"import os \nimport numpy as np\nimport pandas as pd\nimport math\nimport time\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LinearRegression, Ridge \nimport statsmodels.api as sm\n\n\n\ndef SplitBasedSelectionForm(data, k, model, list_neigh, split_point1, split_point2, nb_classes, limit) :\n \n start_time = time.time()\n n = np.size(data,0) # number of instances\n p = np.size(data,1) # number of features\n\n Subgroups = set() # S is a set of the subgroups \n Subgroups.add(tuple(np.arange(n))) # first S is simply all the objects O i.e S = {0}\n \n W = dict ()\n data_neigh_O, target_neigh_O_proba = sampling_sb(data,np.arange(n),list_neigh,model)\n patterns = dict()\n # patterns = {attribute : a, value : v, operator : '>' or '<='}\n \n patterns[tuple(np.arange(n))] = (None,None,None)\n \n L_Patterns = []\n L_S = []\n\n improv = True\n splits = set ()\n newSubgroups = set()\n newSubgroups.add(tuple(np.arange(n))) # newSubgroups = {O}\n \n loss_subgroups = dict () # for the losses of the subgroups without spliting\n loss_subgroups [tuple(np.arange(n))] = calc_loss(data_neigh_O,target_neigh_O_proba, limit)\n #print('loss_all = ',loss_subgroups [tuple(np.arange(n))])\n #print(\"%s\" % (time.time() - start_time))\n\n iteration = 0 \n while len(Subgroups) < k and improv :\n print('trace ---', iteration)\n for s in newSubgroups : # s is tuple\n if len(s) > 1 and loss_subgroups[s] > 0 : \n list_loss_attributes = [] \n for a in range(0,p) : \n to_continue = False\n list_loss_values = []\n min_v = np.min(data[s,a]) # \n max_v = np.max(data[s,a]) # \n if (a < split_point1) or (a >= split_point2) : # numerical / boolean\n if min_v != max_v : \n steps = (pd.cut(data[s,a],2, retbins=True,include_lowest=True))[1][1:-1]\n to_continue = True\n \n else : \n if min_v == 0 and max_v > 0 :\n steps = np.array([0])\n to_continue = True\n if to_continue : \n \n len_steps = np.size(steps)\n j = 0 \n while j < len_steps :\n\n value = steps[j]\n\n # subgroup1 that satisfies the condition s [a > v]\n subgrp1 = tuple(np.asarray(s)[data[s,:][:,a] > value])\n\n # generating the new dataset of neighbors of the subgroup_1 elements \n data_neigh_sb1, target_neigh_sb1_proba = sampling_sb(data,subgrp1,list_neigh,model)\n\n # subgroup2 that satisfies the condition s [a <= v]\n subgrp2 = tuple(np.asarray(s)[data[s,:][:,a] <= value])\n\n # generating the new dataset of neighbors of the subgroup_1 elements \n data_neigh_sb2, target_neigh_sb2_proba = sampling_sb(data,subgrp2,list_neigh,model)\n\n #compute the loss and update the loss_subgroups dictionnary\n\n loss_subgroups[subgrp1] = calc_loss(data_neigh_sb1, target_neigh_sb1_proba, limit)\n loss_subgroups[subgrp2] = calc_loss(data_neigh_sb2, target_neigh_sb2_proba, limit)\n\n loss = loss_subgroups[subgrp1] + loss_subgroups[subgrp2]\n #print(\"loss des 2 sbgrps =\",loss)\n \n \n # store the losses \n list_loss_values.append((loss,value))\n\n #iterate over the j\n j += 1 \n\n # select the minimum loss and value that minimize the loss for each attribute a \n if 
list_loss_values :\n\n loss_opt_att = min(list_loss_values)\n\n # store the optimal loss for the attribute \n list_loss_attributes.append(loss_opt_att)\n \n else :\n list_loss_attributes.append((math.inf,None))\n\n # select the minimum loss and value that minimize the loss for the subgroup s\n loss_opt_s, value_opt = min(list_loss_attributes)\n attribute_opt = list_loss_attributes.index(min(list_loss_attributes))\n\n # add the best split for the subgroup (s) to the splits set \n splits.add((s,attribute_opt,value_opt,loss_opt_s))\n \n # Choose the subgroup split that leads to the minimum loss:\n best_split = splits.pop()\n tmp_split = best_split # to add it after \n \n s, a, v, loss_sb = best_split\n \n Subgroups.remove(s)\n best_loss_s = loss_set(Subgroups,loss_subgroups) + loss_sb\n Subgroups.add(s)\n \n for split in splits :\n s_, a_, v_, loss_sb_ = split\n Subgroups.remove(s_)\n if loss_set(Subgroups,loss_subgroups) + loss_sb_ < best_loss_s :\n best_loss_s = loss_set(Subgroups,loss_subgroups) + loss_sb_\n best_split = split\n Subgroups.add(s_)\n \n splits.add(tmp_split)\n \n s_best, a_best, v_best, loss_sb_min = best_split\n \n if loss_sb_min < loss_subgroups[s_best] :\n \n Subgroups.remove(s_best)\n \n sb1 = tuple(np.asarray(s_best)[data[s_best,:][:,a_best] > v_best]) \n sb2 = tuple(np.asarray(s_best)[data[s_best,:][:,a_best] <= v_best])\n\n Subgroups.add(sb1)\n Subgroups.add(sb2)\n \n if iteration == 0 :\n del patterns[s_best]\n patterns[sb1] = (a_best,'>',v_best)\n patterns[sb2] = (a_best,'<=',v_best)\n \n else : \n \n patterns[sb1] = patterns[s_best] + (a_best,'>',v_best)\n patterns[sb2] = patterns[s_best] + (a_best,'<=',v_best)\n del patterns[s_best]\n \n newSubgroups = {sb1, sb2}\n splits.remove(best_split)\n else :\n improv = False\n \n iteration = iteration + 1\n #print('{:.2e}'.format(loss_set(Subgroups,loss_subgroups)))\n #print(\"%s\" % (time.time() - start_time))\n\n S_copy = set ()\n S_copy = Subgroups.copy()\n \n patterns_copie = dict ()\n patterns_copie = patterns.copy()\n \n L_Patterns.append(patterns_copie)\n L_S.append(S_copy)\n \n return(L_S, L_Patterns)\n\n\ndef lin_models_for_sim(S,data_test,list_neigh,model,limit) :\n \n W_ = dict()\n for s in S :\n\n data_neigh_s, target_neigh_s_proba = sampling_sb(data_test,s,list_neigh,model)\n \n lr = Ridge(alpha = 1)\n model_lr = lr.fit(data_neigh_s[:,:limit],target_neigh_s_proba)\n W_[s] = model_lr\n del lr\n del model_lr \n return W_\n\ndef sampling_sb(dataset, subgroup, list_neigh, model) :\n \n n_neighs = list_neigh[0][0].shape[0]\n subgroup = np.asarray(subgroup)\n size = subgroup.size\n all_data = np.zeros((size*n_neighs, dataset.shape[1]))\n all_target = np.zeros((size*n_neighs, 19))\n \n for i in range(0,subgroup.size) :\n all_data[i*n_neighs : (i+1)*n_neighs,:] = list_neigh[subgroup[i]][0]\n all_target[i*n_neighs : (i+1)*n_neighs,:] = list_neigh[subgroup[i]][1]\n \n return (all_data, all_target)\n\n\ndef calc_loss (data,target_proba, limit) :\n \n lr = Ridge(alpha = 1)\n model_lr = lr.fit(data[:,:limit],target_proba)\n target_lr = model_lr.predict(data[:,:limit])\n return sum(sum(np.square(target_proba-target_lr)))\n\ndef loss_set(Subgroups, loss_subgroups):\n \n loss = 0\n if bool (Subgroups) == False : #empty\n return 0\n \n else : \n for s in Subgroups :\n loss = loss + loss_subgroups[s]\n\n return 
loss\n","repo_name":"RemilYoucef/split-sd4x","sub_path":"packages/subgroups_discovery.py","file_name":"subgroups_discovery.py","file_ext":"py","file_size_in_byte":8301,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"6676850329","text":"class Shope:\n\n def __init__(self,name):\n self.name = name\n self.cart = []\n \n\n def add_to_cart(self,item,price,quantity):\n self.cart.append({\n 'item': item,\n 'price': price,\n 'quantity': quantity\n })\n\n def checkOut(self,amount):\n price = 0\n for item in self.cart:\n print(item)\n\n price += item['price'] * item['quantity']\n \n \n print(f'Your net bill is : {price}')\n \n if amount < price:\n return f'Plese give me more monye : {price - amount}'\n elif amount > price:\n return f'You give me more monye than price : {amount - price}'\n else:\n return f'You give me adjact money : {price}'\n \n\n\n\n\n\n\nhandy = Shope(\"Sikder Stor\")\n\nhandy.add_to_cart(\"7 Up\",25,10)\nhandy.add_to_cart(\"Clemon\",15,150)\nhandy.add_to_cart(\"Mojo\",15,155)\nr = handy.checkOut(125 + 4700)\nprint(r)\n# print(handy.cart)\n\n","repo_name":"mrnayem2026/CSE-PH-2022","sub_path":"Python with VS Code/Week No 03/Revise Week no 03/module_08/shoping_2.py","file_name":"shoping_2.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27996840828","text":"#\n# @lc app=leetcode id=39 lang=python3\n#\n# [39] Combination Sum\n#\n\n# @lc code=start\nclass Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n \n res = []\n \n def backtrack(i, curList, curSum):\n if i == len(candidates) or curSum > target:\n return \n\n # found match sum \n if curSum == target:\n res.append(curList.copy())\n return \n\n curList.append(candidates[i])\n backtrack(i, curList, curSum + candidates[i])\n \n curList.pop()\n backtrack(i+1, curList, curSum)\n\n backtrack(0,[],0)\n \n return res\n \n# @lc code=end\n\n","repo_name":"wintai9899/Leetcode-Python","sub_path":"Backtrack/39.combination-sum.py","file_name":"39.combination-sum.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27345309373","text":"import heapq\nfrom collections import defaultdict\nfrom time import time\n\n\nclass Solution:\n def reorganizeString(self, s: str) -> str:\n chars = defaultdict(int)\n for char in s:\n chars[char] += 1\n h = []\n heapq.heapify(h)\n for k, v in chars.items():\n heapq.heappush(h, (-v, k))\n if -h[0][0] > (len(s) // 2 + len(s) % 2):\n return \"\"\n ans = ''\n while len(ans) < len(s):\n n1, char1 = heapq.heappop(h)\n ans += char1\n if h:\n n2, char2 = heapq.heappop(h)\n ans += char2\n if n2 != -1:\n heapq.heappush(h, (n2+1, char2))\n if n1 != -1:\n heapq.heappush(h, (n1+1, char1))\n return ans\n\n\nstart_time = time()\n\n# Example 1:\n# Input: s = \"aab\"\n_s = \"aab\"\n# Output: \"aba\"\n#\n# Example 2:\n# Input: s = \"aaab\"\n# Output: \"\"\n\nprint(Solution().reorganizeString(_s))\n\nprint(\"--- %s seconds ---\" % (time() - start_time))\n","repo_name":"Sadomtsevvs/Leetcode","sub_path":"767. Reorganize String.py","file_name":"767. 
Reorganize String.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33018352533","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\nfrom module.plugins.Hoster import Hoster\n\nclass VeehdCom(Hoster):\n __name__ = 'VeehdCom'\n __type__ = 'hoster'\n __pattern__ = r'http://veehd\\.com/video/\\d+_\\S+'\n __config__ = [\n ('filename_spaces', 'bool', \"Allow spaces in filename\", 'False'),\n ('replacement_char', 'str', \"Filename replacement character\", '_'),\n ]\n __version__ = '0.1'\n __description__ = \"\"\"Veehd.com Download Hoster\"\"\"\n __author_name__ = ('cat')\n __author_mail__ = ('cat@pyload')\n \n def _debug(self, msg):\n self.log.debug('[%s] %s' % (self.__name__, msg))\n \n def setup(self):\n self.html = None\n self.multiDL = True\n self.req.canContinue = True\n\n def process(self, pyfile):\n self.download_html()\n if not self.file_exists():\n self.offline()\n \n pyfile.name = self.get_file_name()\n self.download(self.get_file_url())\n \n def download_html(self):\n url = self.pyfile.url\n self._debug(\"Requesting page: %s\" % (repr(url),))\n self.html = self.load(url)\n \n def file_exists(self):\n if self.html is None:\n self.download_html()\n \n if '<title>Veehd' in self.html:\n return False\n return True\n \n def get_file_name(self):\n if self.html is None:\n self.download_html()\n \n match = re.search(r']*>([^<]+) on Veehd', self.html)\n if not match:\n self.fail(\"video title not found\")\n name = match.group(1)\n \n # replace unwanted characters in filename\n if self.getConf('filename_spaces'):\n pattern = '[^0-9A-Za-z\\.\\ ]+'\n else:\n pattern = '[^0-9A-Za-z\\.]+'\n \n name = re.sub('[^0-9A-Za-z\\.]+', self.getConf('replacement_char'),\n name)\n return name + '.avi'\n\n def get_file_url(self):\n \"\"\" returns the absolute downloadable filepath\n \"\"\"\n if self.html is None:\n self.download_html()\n\n match = re.search(r' 0:\n mass = extraFuel\n accumulatedFuelForMass = accumulatedFuelForMass + extraFuel\n else:\n break\n return accumulatedFuelForMass\n\nmasses = [int(m) for m in open(\"input.txt\").readlines()]\n\nfuelForMasses = [totalFuelForMass(m) for m in masses]\n\ntotalFuel = sum(fuelForMasses)\n\nprint(\"El fuel requerido es:\", totalFuel)\n","repo_name":"AlejandroFortt/AdventOfCode19","sub_path":"aoc01/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12824269800","text":"import threading\nfrom random import choice, randint\nimport queue\nimport requests\nfrom urllib.parse import urljoin\nfrom html.parser import HTMLParser\nfrom time import sleep\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass MyHTMLParser(HTMLParser):\n\n def __init__(self):\n super().__init__()\n self.links = []\n self.resources = []\n\n def handle_starttag(self, tag, attrs):\n dattrs = dict(attrs)\n if tag == 'a':\n self.links.append(dattrs.get('href', None))\n if tag in ('img', 'script'):\n self.resources.append(dattrs.get('src', None))\n if tag == 'link':\n self.links.append(dattrs.get('href', None))\n\n self.links = [link for link in self.links if link is not None]\n self.resources = [resource for resource in self.resources if resource is not None]\n\n def close(self):\n return (self.links, self.resources)\n\n\nclass SetQueue(queue.Queue):\n\n def _init(self, maxsize):\n self.queue = set()\n\n def _put(self, item):\n 
self.queue.add(item)\n\n def _get(self):\n return self.queue.pop()\n\n\nclass HTTP(threading.Thread):\n\n def __init__(self, config):\n threading.Thread.__init__(self)\n self.c = config\n self.q = SetQueue()\n self.start_page = choice(config['start-pages'].split())\n\n up, down = self.c['delay_range'].split('-')\n self.delay_range = (int(up), int(down))\n\n def _sanitize_url(self, page, url):\n if '//' in url:\n if url.startswith('//'):\n return 'http:' + url\n else:\n return url\n else:\n return urljoin(page, url)\n\n def _download_page(self, page):\n # Parse HTML\n html = requests.get(page).text\n p = MyHTMLParser()\n p.feed(html)\n links, resources = p.close()\n # Fill links to queue\n for link in links:\n if link == '#' or link.startswith('mailto:'):\n continue\n logger.debug('saving link to queue - {}'.format(link))\n self.q.put(self._sanitize_url(page, link))\n\n # And download all resources of the page\n for resource in resources:\n logger.debug('downloading resource - {}'.format(resource))\n _ = requests.get(self._sanitize_url(page, resource)).text\n\n def run(self):\n logger.info(\"Dtarting on page {}\".format(self.start_page))\n while True:\n if self.q.empty():\n page = self.start_page\n else:\n page = self.q.get()\n\n logger.info('Downloading data from {}'.format(page))\n self._download_page(page)\n\n delay = randint(*self.delay_range)\n logger.debug('Waiting for {} seconds'.format(delay))\n sleep(delay)\n","repo_name":"frenzymadness/HoneyNet","sub_path":"plugins/HTTP.py","file_name":"HTTP.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30070260607","text":"from django.shortcuts import render\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\n\nimport tweets\nfrom tweets.api.serializers import (\n TweetSerializerForCreate,\n TweetSerializer,\n TweetSerializerForDetail,\n)\nfrom tweets.models import Tweet\nfrom newsfeeds.services import NewsFeedService\nfrom tweets.services import TweetService\nfrom utils.decorators import required_params\nfrom utils.paginations import EndlessPagination\n\nclass TweetViewSet(viewsets.GenericViewSet,\n viewsets.mixins.CreateModelMixin,\n viewsets.mixins.ListModelMixin):\n \"\"\"\n API endpoint that allows users to create, list tweets\n \"\"\"\n queryset = Tweet.objects.all()\n serializer_class = TweetSerializerForCreate\n pagination_class = EndlessPagination\n\n def get_permissions(self):\n if self.action in ['list','retrieve']:\n return [AllowAny()]\n return [IsAuthenticated()]\n\n def retrieve(self, request, *args, **kwargs):\n serializer = TweetSerializerForDetail(\n self.get_object(),\n context={'request': request},\n )\n return Response(serializer.data)\n\n @required_params(params=['user_id'])\n def list(self, request, *args, **kwargs):\n user_id = request.query_params['user_id']\n cached_tweets = TweetService.get_cached_tweets(user_id)\n page = self.paginator.paginate_cached_list(cached_tweets, request)\n if page is None:\n # select * from twitter_tweets\n # where user_id = xxx\n # order by created_at desc\n # this SQL search will use the index of user and created_at\n # only user index is not enough\n queryset = Tweet.objects.filter(user_id=user_id).order_by('-created_at')\n page = self.paginate_queryset(queryset)\n serializer = TweetSerializer(\n page,\n context={'request': request},\n many=True,\n )\n return 
self.get_paginated_response(serializer.data)\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n overwrite create method,\n because we need to set default current login user as tweet.user\n \"\"\"\n serializer = TweetSerializerForCreate(\n data=request.data,\n context={'request': request},\n )\n if not serializer.is_valid():\n return Response({\n 'success': False,\n 'message': \"Please check input\",\n 'errors': serializer.errors,\n }, status=400)\n tweet = serializer.save()\n NewsFeedService.fanout_to_followers(tweet)\n serializer = TweetSerializer(tweet, context={'request': request})\n return Response(serializer.data, status=201)\n","repo_name":"ruoyuangao/django-twitter","sub_path":"tweets/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31456909753","text":"import json\nimport random\nimport sys\nimport time\n\nimport requests\nfrom django.contrib.auth.models import update_last_login\nfrom django.db.models import Q\nfrom django.shortcuts import render\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.hashers import make_password\n# email\nfrom django.core.mail import send_mail\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.encoding import force_bytes\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\n\n\n# Create your views here.\nfrom rest_framework import status, generics\nfrom rest_framework.generics import RetrieveAPIView\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom user.models import *\nfrom dataplan.models import *\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import *\n# Create your views here.\n\nclass AddDataPlan(APIView):\n permission_classes = (IsAuthenticated,)\n def post(self, request):\n plan_name = request.data.get('plan_name')\n plan_duration = request.data.get('plan_duration')\n plan_amt = request.data.get('plan_amt')\n is_plan_cancel_able = request.data.get('is_plan_cancel_able')\n\n try:\n create_data_plan = DataPlan.objects.create(\n plan_name = plan_name,\n duration_in_month = plan_duration,\n plan_amt = plan_amt,\n is_cancel_able = is_plan_cancel_able,\n )\n if create_data_plan:\n response = {\n 'success': 'True',\n 'status code': status.HTTP_200_OK,\n 'message': \"Data plan created\",\n }\n else:\n response = {\n 'success': 'True',\n 'status code': status.HTTP_200_OK,\n 'message': \"Data plan created\",\n }\n return Response(response)\n except Exception as e:\n response = 'on line {}'.format(\n sys.exc_info()[-1].tb_lineno), str(e)\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass AddPlan(APIView):\n permission_classes = (IsAuthenticated,)\n\n def post(self, request):\n bd_time = datetime.now()\n\n phone_number = request.data.get('phone_number')\n plan_id = request.data.get('plan_id')\n\n get_user_manager = UserCompanyManager.objects.filter(\n phn_cell=phone_number,\n is_primary_phone=True,\n ).values()\n \n if get_user_manager:\n for idx in get_user_manager:\n # if the plan is cancleable or not\n is_cancleable = DataPlan.objects.filter(\n id=plan_id).values_list('is_cancel_able').last()[0]\n print(\"line 54\", is_cancleable)\n # is there any active plan \n if Purches.objects.filter(company_id = idx['company_id']):\n 
is_there_any_active_plan = Purches.objects.filter(\n company_id = idx['company_id'],\n ).values_list('active_plan').last()[0]\n try:\n # no activat plan for this company\n if not is_there_any_active_plan:\n # not cancle able plans\n if not is_cancleable:\n get_plan_duration = DataPlan.objects.filter(\n id=plan_id).values_list('duration').first()[0]\n \n # define the end date time\n ending_datetime = bd_time + \\\n relativedelta(months=+get_plan_duration)\n\n # not cancle able data plan purches\n Purches.objects.create(\n user_id=idx['user_id'],\n company_id=idx['company_id'],\n data_plan_id=plan_id,\n data_plan_ended=ending_datetime,\n active_plan=True\n\n )\n else:\n # cancleable plans\n Purches.objects.create(\n user_id=idx['user_id'],\n company_id=idx['company_id'],\n data_plan_id=plan_id,\n active_plan=True\n\n )\n response = {\n 'success': 'True',\n 'status code': status.HTTP_200_OK,\n 'message': get_user_manager,\n }\n return Response(response)\n else:\n response = {\n 'success': 'False',\n 'status code': status.HTTP_403_FORBIDDEN,\n 'message': 'Already have an active plan',\n }\n return Response(response)\n except Exception as e:\n response = 'on line {}'.format(\n sys.exc_info()[-1].tb_lineno), str(e)\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n else:\n try:\n # not cancle able plans\n if not is_cancleable:\n get_plan_duration = DataPlan.objects.filter(\n id=plan_id).values_list('duration').first()[0]\n \n # define the end date time\n ending_datetime = bd_time + \\\n relativedelta(months=+get_plan_duration)\n # not cancle able data plan purches\n Purches.objects.create(\n user_id=idx['user_id'],\n company_id=idx['company_id'],\n data_plan_id=plan_id,\n data_plan_ended=ending_datetime,\n active_plan=True\n )\n else:\n # cancleable plans\n Purches.objects.create(\n user_id=idx['user_id'],\n company_id=idx['company_id'],\n data_plan_id=plan_id,\n active_plan=True\n )\n response = {\n 'success': 'True',\n 'status code': status.HTTP_200_OK,\n 'message': get_user_manager,\n }\n return Response(response)\n except Exception as e:\n response = 'on line {}'.format(\n sys.exc_info()[-1].tb_lineno), str(e)\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n else:\n if UserCompanyManager.objects.filter(phn_cell=phone_number):\n response = {\n 'success': 'False',\n 'status code': status.HTTP_400_BAD_REQUEST,\n 'message': 'Phone number is not a primary number, please change the number to primary',\n }\n else:\n response = {\n 'success': 'False',\n 'status code': status.HTTP_404_NOT_FOUND,\n 'message': 'Phone number not found',\n }\n\n return Response(response)\n\nclass CanclePlan(APIView):\n permission_classes = (IsAuthenticated,)\n def post(self, request):\n active_plan_id = request.data.get('active_plan_id')\n try:\n # if any active plan exists\n if Purches.objects.filter(id = active_plan_id,active_plan=True):\n\n # getting the cancleable active plan id\n get_active_cancle_able_plan_id = Purches.objects.filter(\n id = active_plan_id,\n active_plan=True,\n ).values_list(\n 'data_plan_id'\n ).first()[0]\n # checking the plan is cancleable or not\n check_if_the_data_plan_is_cancle_able = DataPlan.objects.filter(\n id =get_active_cancle_able_plan_id\n ).values_list('is_cancel_able').first()[0]\n\n if check_if_the_data_plan_is_cancle_able:\n # canceling the plan if its cancleable\n cancle_the_plan = Purches.objects.filter(\n id = active_plan_id\n ).update(\n active_plan=False\n )\n if cancle_the_plan:\n response = {\n 'success': 'True',\n 'status code': 
status.HTTP_200_OK,\n 'message': \"Plan Cancled\",\n }\n else:\n response = {\n 'success': 'False',\n 'status code': status.HTTP_400_BAD_REQUEST,\n 'message': \"Failed\",\n }\n\n else:\n response = {\n 'success': 'False',\n 'status code': status.HTTP_404_NOT_FOUND,\n 'message': \"Cancleable Plan Not Found\",\n }\n return Response(response)\n else:\n response = {\n 'success': 'False',\n 'status code': status.HTTP_404_NOT_FOUND,\n 'message': \"No active plan found\",\n }\n return Response(response)\n except Exception as e:\n response = 'on line {}'.format(\n sys.exc_info()[-1].tb_lineno), str(e)\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n\n","repo_name":"mustahidhasan/healthos","sub_path":"src/Backend/dataplan/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"75112582882","text":"# Healthy Programmer Exercise 7 CWH Python Tut\n\"\"\"\n9 am to 5 pm office time\nwater - water.mp3 ---every 40 mins ----log drinking time in txt file\neyes - eyes.mp3 ----every 30 mins ---- log eyes exercise time\nphysical activity - physical.mp3 ----- every 45 mins ---log exercise time\n\"\"\"\n# Rule\n# Pygame module to play audio\n\n\nfrom pygame import mixer\nfrom time import time\nfrom datetime import datetime\nfrom time import strftime\n\ndef musiconloop(*audio):\n mixer.init()\n mixer.music.load(audio[0])\n mixer.music.play()\n while True:\n a = input(f\"Press {audio[1]} if you are done: \")\n if a == audio[1]:\n mixer.music.stop()\n break\n\ndef log_now(msg):\n with open(\"log.txt\", \"a\") as f:\n f.write(f\"{msg} at {datetime.now()}\\n\")\n\nif __name__ == \"__main__\":\n\n init_water = time()\n init_eyes = time()\n init_ex = time()\n water_secs = 40 * 60 # 40 minutes\n eyes_secs = 30 * 60 # 30 minutes\n ex_secs = 45 * 60 # 45 minutes\n\n office = strftime('%H:%M:%S') > '09:00:00' and strftime('%H:%M:%S') < '17:00:01'\n while(office):\n\n if time() - init_water > water_secs:\n print(\"Time to drink water\")\n musiconloop(\"water.mp3\", \"1\")\n init_water = time()\n log_now(\"Drank water\")\n\n if time() - init_eyes > eyes_secs:\n print(\"Time for eyes exercise\")\n musiconloop(\"eyes.mp3\", \"2\")\n init_eyes = time()\n log_now(\"Eyes relaxed\")\n\n if time() - init_ex > ex_secs:\n print(\"Time for physical activity\")\n musiconloop(\"physical.mp3\", \"3\")\n init_ex = time()\n log_now(\"Physical activity\")\n\n print(\"Office time over!!! You can go home!!!!\")\n","repo_name":"siddharthja1n/Python-Code-Exercise","sub_path":"Healthy_Programmer.py","file_name":"Healthy_Programmer.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24057798570","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport gc\nimport logging\nimport re\nimport struct\nimport weakref\n\nfrom twisted.names import (\n client as TwistedDNS,\n dns as DNS,\n)\nfrom twisted.internet import (\n error as TwistedError,\n interfaces as TwistedInterface,\n protocol as TwistedProtocol,\n reactor,\n)\nfrom zope import interface as ZopeInterface\n\nfrom s54http.utils import (\n Cache,\n daemonize,\n init_logger,\n NullProxy,\n parse_args,\n SSLCtxFactory,\n)\n\n\nlogger = logging.getLogger(__name__)\nconfig = {\n 'daemon': False,\n 'host': '0.0.0.0',\n 'port': 8080,\n 'ca': 'keys/ca.crt',\n 'key': 'keys/server.key',\n 'cert': 'keys/server.crt',\n 'dhparam': 'keys/dhparam.pem',\n 'pidfile': 's5p.pid',\n 'logfile': 'server.log',\n 'loglevel': 'INFO',\n 'dns': None,\n}\n_IP = re.compile(r'[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}')\n\n\nclass RemoteProtocol(TwistedProtocol.Protocol):\n\n def connectionMade(self):\n self.proxy = self.factory.proxy\n try:\n self.proxy.connectOk(self.transport)\n except ReferenceError:\n self.transport.abortConnection()\n\n def dataReceived(self, data):\n try:\n self.proxy.recvRemote(data)\n except ReferenceError:\n self.transport.abortConnection()\n\n\nclass RemoteFactory(TwistedProtocol.ClientFactory):\n\n protocol = RemoteProtocol\n\n def __init__(self, proxy):\n self.proxy = proxy\n\n def clientConnectionFailed(self, connector, reason):\n message = reason.getErrorMessage()\n try:\n self.proxy.connectErr(message)\n except ReferenceError:\n pass\n\n def clientConnectionLost(self, connector, reason):\n try:\n self.proxy.connectionClosed()\n except ReferenceError:\n pass\n\n\nclass SockProxy:\n\n __slots__ = [\n 'sock_id',\n 'dispatcher',\n 'remote_host',\n 'remote_port',\n 'remote_addr',\n 'resolver',\n 'address_cache',\n 'buffer',\n 'has_connect',\n 'transport',\n '__weakref__',\n ]\n\n def __init__(self, sock_id, dispatcher, host, port):\n self.sock_id = sock_id\n self.dispatcher = dispatcher\n self.remote_host = host\n self.remote_port = port\n self.resolver = dispatcher.resolver\n self.address_cache = dispatcher.address_cache\n self.buffer = b''\n self.has_connect = False\n self.remote_addr = None\n self.transport = None\n self.resolveHost(host)\n\n @property\n def isConnected(self):\n transport = self.transport\n if transport is None:\n return False\n if isinstance(transport, NullProxy):\n return False\n return True\n\n @property\n def isClosed(self):\n if (isinstance(self.dispatcher, NullProxy) and\n isinstance(self.transport, NullProxy) and\n self.remote_host is None and self.remote_port is None):\n return True\n else:\n return False\n\n def close(self, *, abort=True):\n self.dispatcher = NullProxy()\n self.buffer = b''\n self.resolver = None\n self.remote_addr = None\n self.remote_host = None\n self.remote_port = None\n if self.transport:\n if abort:\n self.transport.abortConnection()\n else:\n self.transport.loseConnection()\n self.transport = NullProxy()\n\n def connectRemote(self):\n factory = RemoteFactory(weakref.proxy(self))\n reactor.connectTCP(\n self.remote_addr,\n self.remote_port,\n factory\n )\n self.has_connect = True\n\n def resolveOk(self, records):\n if self.isClosed:\n return\n answers = records[0]\n for answer in answers:\n if answer.type != DNS.A:\n continue\n addr = answer.payload.dottedQuad().strip()\n self.address_cache[self.remote_host] = addr\n self.remote_addr = addr\n self.connectRemote()\n break\n else:\n self.resolveErr('no ipv4 address found')\n\n def resolveErr(self, reason=''):\n if 
self.isClosed:\n return\n logger.error(\n 'sock_id[%u] resolve host[%s] failed[%s]',\n self.sock_id,\n self.remote_host,\n reason\n )\n self.dispatcher.handleConnect(self.sock_id, 1)\n\n def resolveHost(self, host):\n if _IP.match(host):\n self.remote_addr = host\n else:\n try:\n self.remote_addr = self.address_cache[host]\n except KeyError:\n # getHostByName can't be used here, it may return ipv6 address\n self.resolver.lookupAddress(\n host\n ).addCallbacks(\n self.resolveOk,\n self.resolveErr\n )\n return\n self.connectRemote()\n\n def connectOk(self, transport):\n self.transport = transport\n if self.buffer:\n self.transport.write(self.buffer)\n self.buffer = b''\n\n def connectErr(self, message):\n logger.error(\n 'sock_id[%u] connect %s:%u failed[%s]',\n self.sock_id,\n self.remote_host,\n self.remote_port,\n message\n )\n self.dispatcher.handleConnect(self.sock_id, 1)\n\n def sendRemote(self, data):\n if self.isConnected:\n self.transport.write(data)\n else:\n self.buffer += data\n\n def recvRemote(self, data):\n self.dispatcher.handleRemote(self.sock_id, data)\n\n def connectionClosed(self):\n logger.info(\n 'sock_id[%u] connection[%s:%u] closed',\n self.sock_id,\n self.remote_host,\n self.remote_port\n )\n self.dispatcher.handleClose(self.sock_id)\n\n def pauseProducing(self):\n if self.transport is None:\n return\n self.transport.pauseProducing()\n\n def resumeProducing(self):\n if self.transport is None:\n return\n self.transport.resumeProducing()\n\n\nclass SocksDispatcher:\n\n __slots__ = [\n 'socks',\n 'transport',\n 'resolver',\n 'address_cache',\n ]\n\n def __init__(self, p):\n self.socks = {}\n self.transport = p.transport\n self.resolver = p.factory.resolver\n self.address_cache = p.factory.address_cache\n\n def dispatchMessage(self, message):\n type, = struct.unpack('!B', message[4:5])\n if 1 == type:\n self.connectRemote(message)\n elif 3 == type:\n self.sendRemote(message)\n elif 5 == type:\n self.closeRemote(message)\n elif 7 == type:\n self.closeTunnel()\n else:\n raise RuntimeError(f'receive unknown message type={type}')\n\n def connectRemote(self, message):\n \"\"\"\n type 1:\n +-----+------+----+------+------+\n | LEN | TYPE | ID | HOST | PORT |\n +-----+------+----+------+------+\n | 4 | 1 | 4 | | 2 |\n +-----+------+----+------+------+\n \"\"\"\n sock_id, = struct.unpack('!I', message[5:9])\n host = message[9:-2].tobytes().decode('utf-8').strip()\n port, = struct.unpack('!H', message[-2:])\n logger.info(\n 'sock_id[%u] connect %s:%u',\n sock_id,\n host,\n port\n )\n try:\n self.socks[sock_id] = SockProxy(\n sock_id,\n self,\n host,\n port,\n )\n except Exception as e:\n logger.error(\n 'sock_id[%u] SockProxy exception[%s]',\n sock_id,\n e\n )\n self.handleConnect(sock_id, 1)\n\n def handleConnect(self, sock_id, code):\n \"\"\"\n type 2:\n +-----+------+----+------+\n | LEN | TYPE | ID | CODE |\n +-----+------+----+------+\n | 4 | 1 | 4 | 1 |\n +-----+------+----+------+\n \"\"\"\n if 0 == code:\n return\n self.closeSock(sock_id, abort=True)\n message = struct.pack(\n '!IBIB',\n 10,\n 2,\n sock_id,\n code\n )\n self.transport.write(message)\n\n def sendRemote(self, message):\n \"\"\"\n type 3:\n +-----+------+----+------+\n | LEN | TYPE | ID | DATA |\n +-----+------+----+------+\n | 4 | 1 | 4 | |\n +-----+------+----+------+\n \"\"\"\n sock_id, = struct.unpack('!I', message[5:9])\n data = message[9:]\n try:\n sock = self.socks[sock_id]\n except KeyError:\n logger.error('sock_id[%u] receive data after closed', sock_id)\n else:\n sock.sendRemote(bytes(data))\n\n 
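# A self-contained sketch of the frame layout these handlers rely on: struct
# format '!IBI' is a 4-byte big-endian total length, a 1-byte type and a
# 4-byte sock id, followed by the payload. The helper names are hypothetical;
# the format string matches the pack/unpack calls in this class.
import struct

def pack_frame(sock_id, payload, msg_type=4):
    # the length field counts the 9 header bytes plus the payload
    return struct.pack('!IBI', 9 + len(payload), msg_type, sock_id) + payload

def unpack_frame(frame):
    length, msg_type, sock_id = struct.unpack('!IBI', frame[:9])
    return msg_type, sock_id, frame[9:length]

assert unpack_frame(pack_frame(7, b'hello')) == (4, 7, b'hello')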
def handleRemote(self, sock_id, data):\n \"\"\"\n type 4:\n +-----+------+----+------+\n | LEN | TYPE | ID | DATA |\n +-----+------+----+------+\n | 4 | 1 | 4 | |\n +-----+------+----+------+\n \"\"\"\n total_length = 9 + len(data)\n header = struct.pack(\n '!IBI',\n total_length,\n 4,\n sock_id,\n )\n self.transport.writeSequence([header, data])\n\n def closeSock(self, sock_id, *, abort=False):\n try:\n sock = self.socks[sock_id]\n except KeyError:\n logger.error('sock_id[%u] closed again', sock_id)\n else:\n sock.close(abort=abort)\n del self.socks[sock_id]\n\n def closeRemote(self, message):\n \"\"\"\n type 5:\n +-----+------+----+\n | LEN | TYPE | ID |\n +-----+------+----+\n | 4 | 1 | 4 |\n +-----+------+----+\n \"\"\"\n sock_id, = struct.unpack('!I', message[5:9])\n logger.info('sock_id[%u] remote closed', sock_id)\n self.closeSock(sock_id, abort=True)\n\n def handleClose(self, sock_id):\n \"\"\"\n type 6:\n +-----+------+----+\n | LEN | TYPE | ID |\n +-----+------+----+\n | 4 | 1 | 4 |\n +-----+------+----+\n \"\"\"\n if sock_id not in self.socks:\n return\n logger.info('sock_id[%u] local closed', sock_id)\n self.closeSock(sock_id)\n message = struct.pack(\n '!IBI',\n 9,\n 6,\n sock_id\n )\n self.transport.write(message)\n\n def closeTunnel(self):\n \"\"\"\n type 7:\n +-----+------+\n | LEN | TYPE |\n +-----+------+\n | 4 | 1 |\n +-----+------+\n \"\"\"\n proxy = self.transport.getPeer()\n logger.info(\n 'proxy[%s:%u] closed tunnel',\n proxy.host,\n proxy.port\n )\n self.transport.loseConnection()\n\n def tunnelClosed(self):\n self.transport = NullProxy()\n for sock in self.socks.values():\n sock.close(abort=True)\n self.socks = {}\n gc.collect()\n\n\n@ZopeInterface.implementer(TwistedInterface.IPushProducer)\nclass Producer:\n\n __slots__ = ['dispatcher']\n\n def __init__(self, dispatcher):\n self.dispatcher = dispatcher\n\n def pauseProducing(self):\n logger.debug('remote socks pause receiving data')\n for sock in self.dispatcher.socks.values():\n sock.pauseProducing()\n\n def resumeProducing(self):\n logger.debug('remote socks resume receiving data')\n for sock in self.dispatcher.socks.values():\n sock.resumeProducing()\n\n def stopProducing(self):\n pass\n\n\nclass TunnelProtocol(TwistedProtocol.Protocol):\n\n @property\n def isVerified(self):\n if hasattr(self, 'dispatcher'):\n return True\n else:\n return False\n\n def connectionVerified(self):\n dispatcher = SocksDispatcher(self)\n producer = Producer(dispatcher)\n self.buffer = b''\n self.dispatcher = dispatcher\n self.transport.setTcpNoDelay(True)\n self.transport.setTcpKeepAlive(True)\n self.transport.registerProducer(producer, True)\n proxy = self.transport.getPeer()\n logger.info(\n 'proxy[%s:%u] connected',\n proxy.host,\n proxy.port\n )\n\n def connectionMade(self):\n connection = self.transport.getHandle()\n connection.protocol = weakref.proxy(self)\n\n def connectionLost(self, reason=None):\n proxy = self.transport.getPeer()\n if self.isVerified:\n self.transport.unregisterProducer()\n self.dispatcher.tunnelClosed()\n logger.info(\n 'proxy[%s:%u] lost',\n proxy.host,\n proxy.port\n )\n else:\n logger.error(\n 'proxy[%s:%u] closed[ssl error]',\n proxy.host,\n proxy.port,\n )\n\n def dataReceived(self, data):\n self.buffer += data\n while True:\n if len(self.buffer) < 4:\n return\n length, = struct.unpack('!I', self.buffer[:4])\n if len(self.buffer) < length:\n return\n message = memoryview(self.buffer)[:length]\n self.dispatcher.dispatchMessage(message)\n self.buffer = self.buffer[length:]\n\n\ndef 
_create_resolver(config):\n dns = config['dns']\n if dns is None:\n servers = None\n else:\n dns = dns.strip()\n if not dns:\n servers = None\n elif ':' in dns:\n address, port = dns.split(':')\n servers = [(address, int(port))]\n else:\n servers = [(dns, 53)]\n return TwistedDNS.createResolver(servers=servers)\n\n\ndef _create_tunnel_factory(config):\n factory = TwistedProtocol.ServerFactory()\n factory.protocol = TunnelProtocol\n factory.address_cache = Cache()\n factory.resolver = _create_resolver(config)\n return factory\n\n\ndef _create_ssl_context(config):\n from cryptography import x509 as X509\n from cryptography.hazmat.backends import default_backend\n\n with open(config['ca'], mode='rb') as fp:\n serial_number_ca = X509.load_pem_x509_certificate(\n fp.read(),\n default_backend()\n ).serial_number\n\n def verify(conn, x509, errno, errdepth, ok):\n if not ok:\n cn = x509.get_subject().commonName\n logger.error(\n 'proxy certificate verify error[errno=%d cn=%s]',\n errno,\n cn\n )\n elif x509.get_serial_number() == serial_number_ca:\n conn.protocol.connectionVerified()\n return ok\n\n return SSLCtxFactory(\n False,\n config['ca'],\n config['key'],\n config['cert'],\n dhparam=config['dhparam'],\n callback=verify\n )\n\n\ndef serve(config):\n ssl_ctx = _create_ssl_context(config)\n tunnel_factory = _create_tunnel_factory(config)\n address, port = config['host'], config['port']\n try:\n reactor.listenSSL(\n port,\n tunnel_factory,\n ssl_ctx,\n interface=address,\n )\n except TwistedError.CannotListenError:\n raise RuntimeError(\n f\"couldn't listen on :{port}, address already in use\"\n )\n logger.info('server running ...')\n reactor.run()\n\n\ndef main():\n parse_args(config)\n init_logger(config, logger)\n if config['daemon']:\n pidfile = config['pidfile']\n logfile = config['logfile']\n daemonize(\n pidfile,\n stdout=logfile,\n stderr=logfile\n )\n serve(config)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hack4code/s54http","sub_path":"s54http/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":15816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33072628789","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nfrom Precode import *\nimport numpy as np\ndata = np.load('AllSamples.npy')\n\n\n# In[2]:\n\n\nk1,i_point1,k2,i_point2 = initial_S1('7619') # please replace 0111 with your last four digit of your ID\n\n\n# In[3]:\n\n\nprint(k1)\nprint(i_point1)\nprint(k2)\nprint(i_point2)\n\n\n# In[4]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport copy\nplt.style.use('ggplot')\ncolors = [ 'greenyellow', 'slateblue', 'teal', 'orchid', 'indianred','red', 'green', 'yellow', 'blue', 'pink', 'gray', 'brown', 'orange', 'purple',]\nclass KMeansStrategy1(object):\n def __init__(self, k: int, points: list, data: list):\n self.k = k\n self.points = points\n self.data = data\n self.clusters = None\n self.loss = None\n \n #k: the number of clusters\n #points: 2d list of randomly generated mu values\n #data: 2d list of data points to be classified\n def calculateKMeans(self):\n changed = True\n while changed:\n self.clusters = {}\n for i in range(1, self.k + 1):\n self.clusters[i] = []\n for v in data:\n distance = float('inf')\n curr_cluster = 0\n for i in range(len(self.points)):\n dist = np.linalg.norm(self.points[i] - v)\n if dist < distance:\n distance = dist\n curr_cluster = i + 1\n self.clusters[curr_cluster].append(v.tolist())\n #Now shift 
mu of each cluster\n for i in range(len(self.points)):\n mu = np.mean(np.array(self.clusters[i+1]), axis=0)\n if np.array_equal(mu,self.points[i]):\n changed = False\n else:\n changed = True\n self.points[i] = mu\n \n \n \n def calculateObjFunction(self):\n summ = 0\n for i in range(self.k):\n for j in range(len(self.clusters[i+1])):\n summ += np.linalg.norm(self.clusters[i+1][j] - self.points[i]) ** 2\n \n self.loss = summ\n \n def showPlot(self):\n for k in KMS.clusters:\n for point in KMS.clusters[k]:\n plt.plot(point[0],point[1], 'o', color=colors[k-1], label=\"Cluster='{0}'\".format(k) )\n plt.plot(KMS.points[k-1][0],KMS.points[k-1][1], 'o', markersize=15, markeredgewidth=2.0, mec= 'k', color=colors[k-1], label=\"Cluster='{0}'\".format(k) )\n plt.show()\n \n def showInfo(self):\n print('After KMeans Algorithm\\n', pd.DataFrame(KMS.points, columns=[\"X1\", \"X2\"]), '\\n')\n print('loss: ', KMS.loss)\n \n \nclass Helper(object):\n def getRandomPoints(k: int, data: list):\n indices =np.random.choice(data.shape[0], k, replace=False)\n return data[indices]\n \n \n\n\n# In[5]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nplt.style.use('ggplot')\n#For the given points, k's\ncolors = [ 'greenyellow', 'slateblue', 'teal', 'orchid', 'indianred','red', 'green', 'yellow', 'blue', 'pink', 'gray', 'brown', 'orange', 'purple',]\nKMS = KMeansStrategy1(k1, copy.copy(i_point1), data)\nKMS.calculateKMeans()\nKMS.calculateObjFunction()\nKMS.showInfo()\nKMS.showPlot()\n\nKMS = KMeansStrategy1(k2, i_point2, data)\nKMS.calculateKMeans()\nKMS.calculateObjFunction()\nKMS.showInfo()\nKMS.showPlot()\n\n\n# In[6]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nplt.style.use('ggplot')\n#Lets try K clusters from 2 to 10\nkms_loss = []\ncolors = [ 'greenyellow', 'slateblue', 'teal', 'orchid', 'indianred','red', 'green', 'yellow', 'blue', 'pink', 'gray', 'brown', 'orange', 'purple',]\nfor k in range(2, 11):\n KMS = KMeansStrategy1(k, Helper.getRandomPoints(k, data), data)\n KMS.calculateKMeans()\n KMS.calculateObjFunction()\n kms_loss.append(KMS.loss)\n #KMS.showPlot()\n #KMS.showInfo()\n plt.plot(k, KMS.loss, 'o', markersize=15, markeredgewidth=2.0, mec= 'k', color='b', label=\"Cluster='{0}'\".format(k) )\n plt.xlabel(\"Number of Clusters\")\n plt.ylabel(\"Loss Value\")\n plt.title(\"Figure 2\")\n \n \n\n \n \n\n","repo_name":"codwithjimmy/StatisticalMachineLearning","sub_path":"Project2/Strategy1.py","file_name":"Strategy1.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16213718218","text":"#Listas\n\ninventario = [\"espada\", \"armadura\", \"escudo\", \"pocion de vida\"]\n\nif not inventario:\n\tprint(\"Tenes las manos vacias...\")\nelse:\n\tprint(\"Tus items: \")\n\n\tfor elemento in inventario:\n\t\tprint(\"*\",elemento)\n\t\t#print(\"*\",elemento.title()) ----> .title() solo para tuplas\n\nrespuesta = input(\"¿Querés agregar otro elemento?\\n(Presiona 0 para no agregar nada): \")\n\nwhile respuesta != \"0\":\n\tinventario.append(respuesta)\n\trespuesta = input(\"¿Querés agregar otro elemento?\\n(Presiona 0 para no agregar nada): \")\n\ninventario[0]=\"brújula\"\nprint(\"Tus items: \")\nfor elemento in inventario:\n\tprint(\"*\",elemento)\n\ninput(\"Enter para continuar...\")\n\n#Modulo en Python TimeIt y lo itera hasta poder darte un promedio de tiempo de 
ejecucion.","repo_name":"luzrubini/Pyhton","sub_path":"LuzRubini/clase_05/inventario_del_heroe_lista.py","file_name":"inventario_del_heroe_lista.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26922565113","text":"import sys\n\ndef main():\n N, M = map(int, sys.stdin.readline().split())\n distance = int(input())\n allNode = list(sys.stdin.readline().split())\n INF = float('inf')\n edges = [[INF for _ in range(N+1)] for i in range(N+1)] #initialize edges\n for i in range(M):\n x, y, z = sys.stdin.readline().split()\n x = allNode.index(x) + 1 #each node is assigned an index number\n y = allNode.index(y) + 1\n z = int(z)\n edges[x][y] = z\n edges[y][x] = z #bidirectional graph\n for i in range(N):\n edges[i+1][i+1] = 0 #cost 0 for going to itself\n\n for k in range(1, N+1): #Floyd-Warshall algorithm (shortest path search)\n for i in range(1, N+1):\n for j in range(1, N+1):\n if edges[i][j] > edges[i][k] + edges[k][j]:\n edges[i][j] = edges[i][k] + edges[k][j]\n count = 0\n result_n = \"\"\n result_c = 0\n for v in range(1, len(allNode)+1): #for each start node, count destinations within distance\n count = 0\n for u in range(1, N+1):\n if(distance >= edges[v][u]): #increase count when the cost is within distance\n count += 1\n if(result_c < count): #store the node with the maximum count\n result_n = allNode[v-1]\n result_c = count\n \n print(result_n, result_c)\n\n \nif __name__ == '__main__':\n main()","repo_name":"DongYounYim/2021_Algorithm_and_learn_python","sub_path":".vscode/Coding_Test/Coding_Test(11)/hokangs.py","file_name":"hokangs.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7972386671","text":"#!/usr/bin/python\n#*-------------------------------------------------------------------------*\n#* factorial.py *\n#* computes the factorial of a number *\n#* Dr.P.E.Colla (c) 2022 *\n#* Creative commons *\n#*-------------------------------------------------------------------------*\nimport sys\n#this program computes the factorials from the variable \"desde\" up to the number 60.\ndef factorial(desde): \n res=[]\n for i in range(desde, 61):\n fact = 1\n while(i > 1): \n fact *= i \n i -= 1\n res.append(fact)\n return res #returns a list with the factorial results\n \nif len(sys.argv) == 1:\n print(\"Debe informar un número!\")\n sys.exit()\nelse:\n num=int(sys.argv[1])\n#this loop prints the result corresponding to each number\nfor numero in (factorial(num)):\n print(f\"El factorial de {num}! 
es {numero}\") \n num += 1\n\n","repo_name":"cristianchivisky/UADER_IS2_CHIVISKY","sub_path":"src/factorial/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27432796310","text":"import sys\nfrom collections import deque \n# sys.stdin = open('input.txt','r')\ninput = sys.stdin.readline\nN,M = map(int,input().split())\nboard = [list(map(int,input().split())) for _ in range(N)]\n\ndef solution(N,M,board):\n answer = [0,0]\n visit = [[0]*M for _ in range(N)]\n\n for y in range(N):\n for x in range(M):\n if board[y][x] ==1 and visit[y][x] ==0 :\n size = bfs(y,x,board,visit)\n answer[0]+=1\n answer[1] = max(answer[1],size)\n \n \n\n return '\\n'.join(map(str,answer))\n\n# \n\ndef bfs(sy,sx,board,visit):\n dx = (0,0,-1,1)\n dy = (-1,1,0,0)\n queue = deque([(sy,sx)])\n size = 1\n visit[sy][sx] = size\n \n while queue : \n y,x = queue.popleft()\n for i in range(4):\n ny = y+ dy[i]\n nx = x+ dx[i]\n if 0<=ny str:\n \"\"\"Return the API URL root, configurable via tap settings.\"\"\"\n return self.config[\"api_url\"]\n\n @property\n def authenticator(self) -> APIKeyAuthenticator:\n \"\"\"Return a new authenticator object.\"\"\"\n return APIKeyAuthenticator.create_for_stream(\n self,\n key=\"apikey\",\n value=self.config.get(\"api_key\"),\n location=\"header\"\n )\n\n @property\n def http_headers(self) -> dict:\n \"\"\"Return the http headers needed.\"\"\"\n headers = {}\n headers[\"Authorization\"] = \"SSWS \" + self.config.get(\"api_key\")\n return headers\n\n def get_url(self, context: Optional[dict]) -> str:\n url = \"\".join([self.url_base, self.path or \"\"])\n vals = copy.copy(dict(self.config))\n vals.update(context or {})\n\n for k, v in vals.items():\n search_text = \"\".join([\"{\", k, \"}\"])\n if search_text in url:\n url = url.replace(search_text, self._url_encode(v))\n return url\n\n\n def get_next_page_token(\n self,\n response: requests.Response,\n previous_token: Optional[Any]\n ) -> Optional[Any]:\n \"\"\"Return a token for identifying next page or None if no more pages.\"\"\"\n\n response_links = requests.utils.parse_header_links(response.headers['Link'].rstrip('>').replace('>,<', ',<'))\n for link in response_links:\n if link['rel'] == 'next':\n next_page_token = link['url']\n else:\n next_page_token = None\n return next_page_token\n\n\n def get_url_params(\n self,\n context: Optional[dict],\n next_page_token: Optional[Any]\n ) -> Dict[str, Any]:\n \"\"\"Return a dictionary of values to be used in URL parameterization.\"\"\"\n params: dict = {}\n params[\"limit\"] = self.limit\n if next_page_token:\n params[\"page\"] = next_page_token\n if self.replication_key:\n params[\"sort\"] = \"asc\"\n params[\"order_by\"] = self.replication_key\n return params\n\n def request_records(self, context: Optional[dict]) -> Iterable[dict]:\n \"\"\"Request records from REST endpoint(s), returning response records.\n\n\t\tIf pagination is detected, pages will be recursed automatically.\n\n\t\tArgs:\n\t\t\tcontext: Stream partition or context dictionary.\n\n\t\tYields:\n\t\t\tAn item for every record in the response.\n\n\t\tRaises:\n\t\t\tRuntimeError: If a loop in pagination is detected. 
That is, when two\n\t\t\t\tconsecutive pagination tokens are identical.\n\t\t\"\"\"\n next_page_token: Any = None\n finished = False\n decorated_request = self.request_decorator(self._request)\n\n while not finished:\n prepared_request = self.prepare_request(\n context, next_page_token=next_page_token\n )\n resp = decorated_request(prepared_request, context)\n for row in self.parse_response(resp):\n yield row\n previous_token = copy.deepcopy(next_page_token)\n next_page_token = self.get_next_page_token(\n response=resp, previous_token=previous_token\n )\n if next_page_token and next_page_token == previous_token:\n raise RuntimeError(\n f\"Loop detected in pagination. \"\n f\"Pagination token {next_page_token} is identical to prior token.\"\n )\n # Cycle until get_next_page_token() no longer returns a value\n finished = not next_page_token","repo_name":"Timfrazer/tap-okta","sub_path":"tap_okta/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71677876642","text":"from rest_framework import serializers\nfrom .models import User\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ['email', 'username', 'first_name', \n 'last_name', 'phone_number', 'revenue',\n 'pending_payments',\n 'is_staff', \n 'admin'\n ]","repo_name":"bellomusodiq/greentrust-backend","sub_path":"accounts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40142728885","text":"import json\nimport re\n\n\nclass NineOneDataWorker(object):\n def __init__(self, mysql_client):\n self.mysql_ml = mysql_client\n\n def run(self, derivate_data):\n mobile_list = self.get_mobile_list(derivate_data.get('X_User_addressBook'))\n if not mobile_list:\n return {}\n # 查mysql\n address_list = self.get_address_list(mobile_list)\n city_list = [x.get('city') for x in address_list]\n province_list = [x.get('province') for x in address_list]\n\n derivatives = {\n 'X_User_addressBook_contactCity_cnt': len(set(city_list)),\n 'X_User_addressBook_contactProvince_cnt': len(set(province_list))\n }\n return derivatives\n\n def get_address_list(self, mobile_list, table='mobile_phone_area'):\n mobile_prefix_list = [mobile[:7] for mobile in mobile_list]\n mobile_prefixs = tuple(set(mobile_prefix_list))\n if len(mobile_prefixs) == 1:\n sql = \"\"\"SELECT prefix, province, city, service_provider FROM {} WHERE prefix='{}';\"\"\".format(table, mobile_prefixs[0])\n else:\n sql = \"\"\"SELECT prefix, province, city, service_provider FROM {} WHERE prefix in {};\"\"\".format(table, mobile_prefixs)\n address_list = self.mysql_ml.query(sql)\n return address_list\n\n def get_mobile_list(self, address_book):\n mobile_list = []\n if address_book and address_book != '[]':\n address_book = json.loads(address_book)\n if type(address_book) is list:\n address_book = address_book[0].get('contents')\n else:\n address_book = address_book.get('contents')\n for c in address_book:\n try:\n mobile = c['mobile']\n if mobile:\n if mobile.startswith(\"+86\"):\n mobile = mobile[3:]\n\n match_res = re.compile(r\"^1(\\d){10}$\").match(mobile) # 电话号码基本特征\n if match_res:\n mobile_list.append(mobile)\n except:\n continue\n return 
mobile_list\n","repo_name":"ztttttttt/work_file_1","sub_path":"jdxserver/jdx_worker/worker/nineone_data_worker.py","file_name":"nineone_data_worker.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1686152641","text":"import tensorflow as tf\nimport re\nimport numpy as np\nfrom config.load_config import config\nfrom augmentations import transform\n\n\ndef read_labeled_tfrecord(example):\n tfrec_format = {\n \"image\": tf.io.FixedLenFeature([], tf.string),\n \"image_name\": tf.io.FixedLenFeature([], tf.string),\n \"patient_id\": tf.io.FixedLenFeature([], tf.int64),\n \"sex\": tf.io.FixedLenFeature([], tf.int64),\n \"age_approx\": tf.io.FixedLenFeature([], tf.int64),\n \"anatom_site_general_challenge\": tf.io.FixedLenFeature([], tf.int64),\n \"diagnosis\": tf.io.FixedLenFeature([], tf.int64),\n \"target\": tf.io.FixedLenFeature([], tf.int64),\n }\n example = tf.io.parse_single_example(example, tfrec_format)\n return example[\"image\"], example[\"target\"]\n\n\ndef read_unlabeled_tfrecord(example, return_image_name):\n tfrec_format = {\n \"image\": tf.io.FixedLenFeature([], tf.string),\n \"image_name\": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example, tfrec_format)\n return example[\"image\"], example[\"image_name\"] if return_image_name else 0\n\n\ndef prepare_image(img, augment=True, config=None):\n img = tf.image.decode_jpeg(img, channels=3)\n # Cast and normalize the image to [0,1]\n img = tf.cast(img, tf.float32) / 255.0\n\n if augment:\n img = transform(img, config)\n img = tf.image.random_crop(\n img,\n [config[\"CROP_SIZE\"][config[\"IMG_SIZES\"]], config[\"CROP_SIZE\"][config[\"IMG_SIZES\"]], 3],\n )\n img = tf.image.random_flip_left_right(img)\n img = tf.image.random_hue(img, 0.01)\n img = tf.image.random_saturation(img, 0.7, 1.3)\n img = tf.image.random_contrast(img, 0.8, 1.2)\n img = tf.image.random_brightness(img, 0.1)\n # resize is needed as we random cropped\n img = tf.image.resize(img, [config[\"IMG_SIZES\"], config[\"IMG_SIZES\"]])\n # I am not sure why values will go outside of the stipulated range of 0,1\n img = tf.clip_by_value(img, clip_value_min=0.0, clip_value_max=1.0)\n # investigate why this will prompt error in shapes when I run enumerate(iter(ds))\n\n # else:\n # img = tf.image.central_crop(\n # img, config[\"CROP_SIZE\"][config[\"IMG_SIZES\"]] / config[\"IMG_SIZES\"]\n # )\n\n img = tf.reshape(img, [config[\"IMG_SIZES\"], config[\"IMG_SIZES\"], 3])\n\n return img\n\n\n# function to count how many photos we have in\n# the number of data items is written in the name of the .tfrec files, i.e. 
flowers00-230.tfrec = 230 data items\n# note that if you are using 1 single tfrec then this code will not work because this assumes filenames is a list of tfrec\ndef count_data_items(filenames):\n n = [int(re.compile(r\"-([0-9]*)\\.\").search(filename).group(1)) for filename in filenames]\n return np.sum(n)\n\n","repo_name":"weihao94/SIIM-Melanoma-Classification-2020","sub_path":"load_tfrecords.py","file_name":"load_tfrecords.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"13655016994","text":"# _*_coding=utf-8 _*\nimport os\nimport datetime\nimport time\n\npath = r\"./data/system/\"\n\nCPU = path + \"monitor_CPU\" + \".csv\"\nMemory = path + \"monitor_Memory\" + \".csv\"\nDisk = path + \"monitor_Disk\" + \".csv\"\nIO = path + \"monitor_IO\" + \".csv\"\ncpu_temp = path + \"cpu_temp\" + \".txt\"\nswap = path + \"swap\" + \".csv\"\n\n\ndef measurement(new_str):\n if 'g' in new_str:\n new_str = str(int(float(new_str[:-1]) * 1024)) + 'm'\n return new_str\n elif 't' in new_str:\n new_str = str(int(float(new_str[:-1]) * 1024 * 1024)) + 'm'\n return new_str\n elif 'm' in new_str:\n new_str = str(int(new_str[:-1])) + 'm'\n return new_str\n else:\n new_str = str(int(float(new_str[:-1]) / 1024)) + 'm'\n return new_str\n\n\n# 统计方法\ndef statistic():\n # 统计CPU使用情况\n print('-----------------------------cpu---------------------------------------')\n pop_cpu = os.popen(\"top -b -n 2 -d 1 |grep Cpu | sed -n \\'2p\\'\")\n cpu_info = pop_cpu.read().decode('unicode-escape')\n print(cpu_info)\n cpu_info_rate = cpu_info.split('\\n')[0].split()\n print(cpu_info_rate)\n if not os.path.exists(CPU):\n with open(CPU, 'w') as cpu_f:\n cpu_f.write('time' + ',' + 'USR%+SYS%')\n cpu_f.write('\\n' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ',' + str(\n float(cpu_info_rate[1]) + float(cpu_info_rate[3])))\n cpu_f.close()\n else:\n with open(CPU, 'a') as cpu_f:\n cpu_f.write('\\n' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ',' + str(\n float(cpu_info_rate[1]) + float(cpu_info_rate[3])))\n cpu_f.close()\n # 统计内存使用情况\n print('-----------------------------Memory---------------------------------------')\n pop_memory = os.popen(\"free -m\")\n mem_info = pop_memory.read()\n print(mem_info)\n Mem = mem_info.split('\\n')[1].split()\n print(Mem)\n if not os.path.exists(Memory):\n with open(Memory, 'w') as mem_f:\n mem_f.write(\n 'time' + ',' + 'total' + ',' + 'used' + ',' + 'free' + ',' + 'shared' + ',' + 'buff/cache'\n + ',' + 'available')\n mem_f.write('\\n' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ',' + str(Mem[1])\n + ',' + str(Mem[2]) + ',' + str(Mem[3]) + ',' + str(Mem[4]) + ',' + str(Mem[5]) + ',' + str(\n Mem[6]))\n mem_f.close()\n else:\n with open(Memory, 'a') as mem_f:\n mem_f.write('\\n' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ',' + str(Mem[1])\n + ',' + str(Mem[2]) + ',' + str(Mem[3]) + ',' + str(Mem[4]) + ',' + str(Mem[5]) + ',' + str(\n Mem[6]))\n mem_f.close()\n Mem_swap = mem_info.split('\\n')[2].split()\n if not os.path.exists(swap):\n with open(swap, 'w') as swap_f:\n swap_f.write('time' + ',' + 'total' + ',' + 'used' + ',' + 'free')\n swap_f.write(\n '\\n' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ',' + str(Mem_swap[1]) + ',' + str(\n Mem_swap[2]) + ',' + str(Mem_swap[3]))\n swap_f.close()\n else:\n with open(swap, 'a') as swap_f:\n swap_f.write(\n '\\n' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ',' + 
str(Mem_swap[1]) + ',' + str(\n Mem_swap[2]) + ',' + str(Mem_swap[3]))\n swap_f.close()\n # 统计硬盘使用情况\n print('-----------------------------disk---------------------------------------')\n pop_disk = os.popen(\"sh ./getStat.sh\")\n disk_info_cur = pop_disk.readlines()\n # len_mem = len(disk_info_cur)\n # print(len_mem)\n print(disk_info_cur)\n if not os.path.exists(Disk):\n with open(Disk, 'w') as disk_f:\n disk_f.write(\n 'Time' + ',' + 'Avail%' + ',' + 'Size' + ',' + 'Used')\n disk_f.write(\n '\\n' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ',' + disk_info_cur[0].strip() + ',' +\n disk_info_cur[1].strip() + ',' + disk_info_cur[2].strip())\n disk_f.close()\n else:\n with open(Disk, 'a') as disk_f:\n disk_f.write(\n '\\n' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ',' + disk_info_cur[0].strip() + ',' +\n disk_info_cur[1].strip() + ',' + disk_info_cur[2].strip())\n disk_f.close()\n # 统计IO使用情况\n print('-----------------------------IO---------------------------------------')\n pop_io = os.popen(\"iostat\")\n # 磁盘信息\n pop_disk_cur = os.popen(\"df -h\")\n IO_info_cur = pop_io.readlines()\n disk_info_cur = pop_disk_cur.readlines()\n # iostat后长度\n len_mem = len(IO_info_cur)\n # df -h后长度\n len_disk = len(disk_info_cur)\n opt_disk = ''\n for i in range(0, len_disk):\n if disk_info_cur[i].split()[-1] == '/opt':\n # 获取/opt下磁盘名称\n opt_disk = disk_info_cur[i].split()[0][-4:-1]\n if not os.path.exists(IO):\n with open(IO, 'w') as io_f:\n io_f.write(\n 'Time' + ',' + 'Device' + ',' + 'tps' + ',' + 'kB_read/s' + ',' + 'kB_wrtn/s' + ',' + 'kB_read' + ','\n + 'kB_wrtn')\n for i in range(6, len_mem - 1):\n if IO_info_cur[i].split()[0] == opt_disk:\n io_f.write(\n '\\n' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ',' + IO_info_cur[i].split()[0]\n + ',' + IO_info_cur[i].split()[1] + ',' + IO_info_cur[i].split()[2] + ',' +\n IO_info_cur[i].split()[3] + ',' + IO_info_cur[i].split()[4] + ',' + IO_info_cur[i].split()[5])\n io_f.close()\n else:\n with open(IO, 'a') as io_f:\n for i in range(6, len_mem - 1):\n if IO_info_cur[i].split()[0] == opt_disk:\n io_f.write(\n '\\n' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ',' + IO_info_cur[i].split()[0]\n + ',' + IO_info_cur[i].split()[1] + ',' + IO_info_cur[i].split()[2] + ',' +\n IO_info_cur[i].split()[3] + ',' + IO_info_cur[i].split()[4] + ',' + IO_info_cur[i].split()[5])\n io_f.close()\n\n\ndef statistic_SetTimeinterval(time_interval):\n while True:\n statistic()\n time.sleep(time_interval)\n\n\ndef main():\n statistic_SetTimeinterval(30)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zhangkunliang/My_highEfficient","sub_path":"utils/System_log_collection.py","file_name":"System_log_collection.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40917949232","text":"# prerequisites\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport imageio\nfrom torchvision.utils import make_grid, save_image\n\nfrom tqdm import tqdm\nimport numpy as np\n\nmatplotlib.style.use('ggplot')\nif torch.backends.mps.is_available():\n device = \"mps\"\n \nelse:\n device = \"cpu\"\n\n#device = \"cpu\"\nprint(device)\n\nlr = 0.0002\nnum_epochs = 50\n\nbatch_size = 128\n\ntransform = transforms.Compose([\n transforms.ToTensor(),\n 
transforms.Normalize((0.5,),(0.5,)),\n])\n\ntrain_data = datasets.MNIST(\n root = './data',\n train = True, \n transform = transform, \n download = True, \n)\n# test_data = datasets.MNIST(\n# root = './data', \n# train = False, \n# transform = transforms.ToTensor()\n# )\n \n\n# train_dataset = datasets.ImageFolder(\n# root='afhq/train',\n# transform=train_transform)\n\n# test_dataset = datasets.ImageFolder(\n# root='afhq/val',\n# transform=valid_transform)\n# Data Loader (Input Pipeline)\n\ntrain_data = datasets.MNIST(\n root='../input/data',\n train=True,\n download=True,\n transform=transform\n)\ntrain_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)\n\nto_pil_image = transforms.ToPILImage()\n# train_loader = DataLoader(\n# train_data, batch_size=batch_size, shuffle=True)\n\n# train_loader = DataLoader(\n# train_dataset, batch_size=batch_size, shuffle=True\n# )\n\n\n\n#train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n#test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)\n\n\nz = 128\nk = 1 #number of steps to apply to the discriminator\ndiscriminator = nn.Sequential(\n nn.Linear(784, 1024),\n nn.LeakyReLU(0.2),\n nn.Dropout(0.3),\n nn.Linear(1024, 512),\n nn.LeakyReLU(0.2),\n nn.Dropout(0.3),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2),\n nn.Dropout(0.3),\n nn.Linear(256, 1),\n nn.Sigmoid()).to(device)\n\ngenerator = nn.Sequential(\n nn.Linear(z, 256),\n nn.LeakyReLU(0.2),\n nn.Linear(256, 512),\n nn.LeakyReLU(0.2),\n nn.Linear(512, 1024),\n nn.LeakyReLU(0.2),\n nn.Linear(1024, 784),\n nn.Unflatten(1, (1,28,28)),\n nn.Tanh()).to(device)\n #nn.Unflatten(1, (3, 128, 128)),\n #nn.Sigmoid()).to(device)\n\n\n\n\ndef generate_images():\n with torch.no_grad():\n z = torch.randn(64, 100).to(device)\n output = generator(z)\n generated_images = output.reshape(64, 3, 128, 128)\n return generated_images\n\n\n#shows neural net works with each other\n#rand = train_dataset[0][0].to(device)\n#print(discriminator(rand[None, :, :]))\n\n\n#gen_images = generate_images()\n#print(len(gen_images))\n#print(discriminator(gen_images).shape)\n\n\n\n#time to train them\ndef train(generator, discriminator, batch_size, epochs=200, lf=nn.BCELoss(), lr = 3e-4, device = device, z = z):\n beta1 = 0.5\n gen_opt = optim.Adam(generator.parameters(), lr=lr, betas=(beta1, 0.999))\n disc_opt = optim.Adam(discriminator.parameters(), lr=lr, betas=(beta1, 0.999))\n \n #real_targets = torch.ones(batch_size).to(device)\n #fake_targets = torch.zeros(batch_size).to(device)\n\n \n img_list = []\n G_losses = []\n D_losses = []\n iters = 0\n s_imgs = []\n print(\"Training Starting...........\")\n \n for epoch in range(epochs):\n for i, (images, label) in enumerate(train_loader):\n ############### (1) Training The Discriminator #####################\n #maximize log(D(x)) + log(1 - D(G(z)))\n batch_size = len(label)\n real_targets = torch.ones(batch_size).to(device)\n fake_targets = torch.zeros(batch_size).to(device)\n #clear the gradient\n discriminator.zero_grad()\n\n #put image to gpu\n output = discriminator(images.to(device).view(-1, 784)).view(-1)\n \n #compute loss on real images\n realImg_loss = lf(output, real_targets)\n \n #computes gradients\n realImg_loss.backward(retain_graph = True)\n \n D_x = output.mean().item()\n \n #generate noise for generator input vector\n noise = torch.randn(batch_size, z, device=device)\n \n #fake generated images from our generator\n fake_imgs = generator(noise)\n \n #pass 
into discriminator\n output = discriminator(fake_imgs.view(-1, 784)).view(-1)\n \n #calculate loss\n fakeImg_loss = lf(output, fake_targets)\n \n #compute gradients\n fakeImg_loss.backward(retain_graph = True)\n \n D_G_z1 = output.mean().item()\n \n errD = realImg_loss + fakeImg_loss\n\n disc_opt.step()\n ############################ That was one training batch for the Discriminator #########################\n \n ############################ Train Generator: maximize log(D(G(z)))##########################################\n \n #clear gradient\n generator.zero_grad()\n \n \n output = discriminator(fake_imgs.view(-1, 784)).view(-1)\n \n \n \n gen_loss = lf(output, real_targets) #basically, how far were off from fooling the discriminator,\n #since we are generating fake images that we want the disc to predict 1 on\n \n #compute gradients\n gen_loss.backward()\n \n D_G_z2 = output.mean().item()\n \n #update generators weights\n gen_opt.step()\n \n \n # Output training stats\n if i % 50 == 0:\n print('[%d/%d][%d/%d]\\tDiscriminator Loss: %.4f\\tGenerator Loss: %.4f\\tD(x): %.4f\\tD(G(z)): %.4f / %.4f'\n % (epoch, num_epochs, i, len(train_loader),\n errD.item(), gen_loss.item(), D_x, D_G_z1, D_G_z2))\n \n \n # Save Losses for plotting later\n G_losses.append(gen_loss.item())\n D_losses.append(errD.item())\n\n fake_imgs = fake_imgs.cpu().detach()\n generated_img = make_grid(fake_imgs)\n save_image(generated_img, f\"./fake_img{epoch}.png\")\n s_imgs.append(generated_img)\n\n imgs = [np.array(to_pil_image(img)) for img in s_imgs]\n imageio.mimsave('./generator_images.gif', imgs)\n\n \n\n \n\ntrain(generator, discriminator, batch_size)\n\n\ntorch.save(generator.state_dict(), \"./generator.pt\")\ntorch.save(discriminator.state_dict(), \"./discriminator.pt\")","repo_name":"Nasser-Mohammed/DeepFake-GAN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18877880638","text":"from django.core.paginator import PageNotAnInteger, EmptyPage\nfrom django.shortcuts import render, redirect, HttpResponse\n\nfrom download.common.customPaginator import KingPaginator\nfrom userinfo.common.tools import *\nfrom Logs.log import get_log\nfrom userinfo.common.userinit import *\nfrom .models import User\nimport re\nimport threading\nimport time\n\nlog = get_log(\"login\")\n\n\n# 登录页面\ndef login(request):\n if request.session.get(\"username\"):\n return redirect(\"index\")\n if request.method == \"POST\":\n username = request.POST.get(\"username\")\n pwd = request.POST.get(\"pwd\")\n\n try:\n count = User.objects.filter(username=username)\n except Exception as e:\n log.info(\"登录用户查询异常:\", e)\n count = None\n if not count:\n return render(request, 'user/login.html', {\"massage\": \"账号密码输入错误!\"})\n\n try:\n res = User.objects.filter(username=username, userState=False)\n except Exception as e:\n log.info(\"登录用户查询异常:\", e)\n res = None\n if res:\n return render(request, 'user/login.html', {\"massage\": \"账号已被禁用,请联系管理员!\"})\n\n res = User.objects.filter(username=username).values()\n if pwd == decrypt(res[0][\"password\"]):\n responses = redirect(\"index\")\n request.session[\"username\"] = username\n responses.set_cookie(\"user\", username)\n responses.set_cookie(\"userRoles\", res[0][\"username_role\"])\n request.session.setdefault('username', username)\n return responses\n else:\n return render(request, 'user/login.html', {\"massage\": \"账号密码输入错误!\"})\n return render(request, 
'user/login.html')\n\n\n# 注册\ndef register(request):\n if request.method == \"POST\":\n u_name = request.POST.get(\"username\")\n u_pwd = request.POST.get(\"pwd\")\n u_email = request.POST.get(\"email\")\n if len(u_name) == 0 or len(u_email) == 0 or len(u_pwd) == 0:\n return HttpResponse(\"用户名、密码、邮箱不能为空11!\")\n\n if not len(re.findall(r'^\\w{5,20}$', u_name)):\n return HttpResponse(\"用户名长度为5-24位,由大小写字母、数字组成!\")\n\n if not len(re.findall(r'^\\w{6,18}$', u_pwd)):\n return HttpResponse(\"密码长度为6-18位,由字母、数字、下划线组成!\")\n\n if not len(re.findall(r'^\\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*$', u_email)):\n return HttpResponse(\"邮箱格式不正确!\")\n\n try:\n count = User.objects.filter(username=u_name)\n except Exception as e:\n log.info(\"注册用户名查询异常:\", e)\n count = None\n if count:\n return HttpResponse('用户名已存在,请重新输入!')\n\n threading.Thread(target=initUserSoftId, args=(u_name,)).start()\n\n User.objects.create(username=u_name, password=encrypt(u_pwd), email=u_email, username_role='Averaged',\n regTime=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),\n userState=True)\n return HttpResponse(\"注册成功,跳转到登录页面!\")\n return render(request, 'user/register.html')\n\n\n# 退出\ndef logout(request):\n request.session.flush()\n return redirect(\"login\")\n\n\n# 检查用户名是否重复\ndef check(request):\n if request.method == \"POST\":\n u_name = request.POST.get(\"username\")\n\n if len(u_name) == 0:\n return HttpResponse('用户名不能为空!')\n if len(u_name) < 6 or len(u_name) > 24:\n return HttpResponse('用户名长度为6-24位字符之间!')\n try:\n count = User.objects.filter(username=u_name)\n except Exception as e:\n log.info(\"注册用户名查询异常:\", e)\n count = None\n if count:\n return HttpResponse('用户名已存在,请重新输入!')\n else:\n return HttpResponse('用户名可以使用!')\n\n\n'''\n角色:\n1、管理员 Administrator\n2、测试人员 testers\n3、开发人员 developers\n4、产品人员 product\n5、普通用户 averaged\n'''\n\n\n# 用户管理\ndef user_list(request):\n if not request.session.get(\"username\"):\n return redirect(\"login\")\n role = request.COOKIES.get(\"userRoles\")\n if role != \"Administrator\":\n return render(request, \"permiss.html\", {\"role\": role})\n userList = User.objects.all().order_by(\"id\")\n paginator = KingPaginator(userList, 10)\n page = int(request.GET.get(\"page\", 1))\n try:\n pages = paginator.page(page)\n except PageNotAnInteger:\n pages = paginator.page(1)\n except EmptyPage:\n pages = paginator.page(paginator.num_pages)\n return render(request, \"user/userlist.html\", {\"pages\": pages, \"role\": role})\n\n\n# 用户操作 增、删、改、禁用、重置\ndef user_operate(request):\n if request.method == \"POST\":\n u_name = request.POST.get(\"username\")\n if request.POST.get(\"way\") == \"disable\":\n value = request.POST.get(\"value\")\n if value == \"启用\":\n state = True\n else:\n state = False\n User.objects.filter(username=u_name).update(userState=state)\n return HttpResponse(\"禁用成功!\")\n elif request.POST.get(\"way\") == \"delete\":\n User.objects.filter(username=u_name).delete()\n return HttpResponse(\"删除成功!\")\n elif request.POST.get(\"way\") == \"reset\":\n threading.Thread(target=initUserSoftId, args=(u_name,)).start()\n return HttpResponse(\"重置成功!\")\n elif request.POST.get(\"way\") == \"new\":\n u_pwd = request.POST.get(\"pwd\")\n u_email = request.POST.get(\"email\")\n u_role = request.POST.get(\"role\")\n User.objects.create(username=u_name, password=encrypt(u_pwd), email=u_email, username_role=u_role,\n regTime=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),\n userState=True)\n return HttpResponse(\"新增成功!\")\n elif request.POST.get(\"way\") == \"update\":\n 
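# A minimal sketch of Django's built-in one-way password hashers as an
# alternative to the reversible encrypt()/decrypt() helpers used in these
# views; the function name store_and_verify is hypothetical:
from django.contrib.auth.hashers import make_password, check_password

def store_and_verify(raw_password):
    hashed = make_password(raw_password)          # salted, one-way hash
    return check_password(raw_password, hashed)   # True for a correct match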
u_id = request.POST.get(\"id\")\n u_pwd = request.POST.get(\"pwd\")\n u_email = request.POST.get(\"email\")\n u_role = request.POST.get(\"role\")\n if len(u_pwd) == 0:\n User.objects.filter(id=u_id).update(username=u_name, email=u_email, username_role=u_role)\n else:\n User.objects.filter(id=u_id).update(username=u_name, password=encrypt(u_pwd), email=u_email,\n username_role=u_role)\n return HttpResponse(\"修改成功!\")\n else:\n return HttpResponse(\"传参错误!\")\n","repo_name":"king152/interfaceAutoTest","sub_path":"userinfo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6933,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"74385040481","text":"import time\r\nimport emoji\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.service import Service\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom joblib import Parallel,delayed \r\nfrom nltk.corpus import stopwords \r\nimport joblib\r\nimport snscrape.modules.twitter as sntwitter\r\nimport nltk\r\n#nltk.download('vader_lexicon')\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\ndef is_neutral(sentence):\r\n sid = SentimentIntensityAnalyzer()\r\n sentiment_scores = sid.polarity_scores(sentence)\r\n return sentiment_scores['compound']==0\r\n\r\n\r\ndef total_posts():\r\n return df.shape[0]\r\ndef ret_pos():\r\n return \"{:.2f}\".format(pos)+\"%\"\r\n\r\n\r\ndef ret_neu():\r\n return \"{:.2f}\".format(neu)+\"%\"\r\n\r\n\r\ndef ret_neg():\r\n return \"{:.2f}\".format(neg)+\"%\"\r\n\r\n\r\ndef create_bar(pos, neg, neu):\r\n plt.figure()\r\n plt.bar(['POSITIVE'], [pos / 10], label='POSITIVE', color='g', width=0.5)\r\n plt.bar(['NEGATIVE'], [neg / 10], label='NEGATIVE', color='r', width=0.5)\r\n plt.bar(['NEUTRAL'], [neu / 10], label='NEUTRAL', color='c', width=0.5)\r\n plt.text('POSITIVE', pos / 10, \"{:.1f}\".format(pos)+\"%\", ha='center', va='bottom')\r\n plt.text('NEGATIVE', neg / 10, \"{:.1f}\".format(neg)+\"%\", ha='center', va='bottom')\r\n plt.text('NEUTRAL', neu / 10, \"{:.1f}\".format(neu)+\"%\", ha='center', va='bottom')\r\n plt.title(\"Bar Graph of Sentiment Analysis \"+s1)\r\n plt.xlabel(\"X-Axis\")\r\n plt.ylabel(\"Y-Axis\")\r\n plt.legend()\r\n plt.savefig(r\"D:\\News Sentiment\\UserInterface\\static\\results\\bar.jpg\")\r\n\r\n\r\ndef create_pie(pos, neg, neu):\r\n sizes = [pos, neg, neu]\r\n mylabels = ['POSITIVE', 'NEGATIVE', 'NEUTRAL']\r\n cols = ['g', 'r', 'c']\r\n plt.figure()\r\n plt.pie(sizes, labels=mylabels, colors=cols, startangle=90, autopct=\"%0.1f%%\", shadow=False, explode=(0.05, 0.1, 0.1),\r\n labeldistance=1.1,\r\n textprops={'fontsize': 12})\r\n plt.pie([1], colors=\"w\", radius=0.5)\r\n plt.title('Pie Plot of Sentiment Analysis ' + s1)\r\n plt.axis(\"equal\")\r\n plt.legend(mylabels, loc='lower center', bbox_to_anchor=(0.5, -0.15), ncol=3, fontsize=10, frameon=True, shadow=True, framealpha=0.8)\r\n plt.savefig(r\"D:\\News Sentiment\\UserInterface\\static\\results\\pie.jpg\")\r\n create_bar(pos, neg, neu) \r\n\r\n\r\n\r\n\r\n#finding out positivity,negativty and neutrality amount \r\n# of the posts from datafrme df\r\ndef create_chart():\r\n #defining variables\r\n global pos\r\n global neg\r\n global neu\r\n #using boolean masking to create sub dataframes where\r\n # the inner condition matches to True\r\n positive = df[df[\"Sentiment\"] == \"Positive\"]\r\n #finding positivity amount\r\n pos = positive.shape[0] / df.shape[0] * 100\r\n print(\"Positivity: 
\",\"{:.2f}\".format(pos))\r\n negative = df[df[\"Sentiment\"] == \"Negative\"]\r\n #finding negativity amount\r\n neg = negative.shape[0] / df.shape[0] * 100\r\n print(\"Negativity: \",\"{:.2f}\".format(neg))\r\n neutral = df[df[\"Sentiment\"] == \"Neutral\"]\r\n #finding neutrality amount\r\n neu = neutral.shape[0] / df.shape[0] * 100\r\n print(\"Neutrality: \",\"{:.2f}\".format(neu))\r\n create_pie(pos, neg, neu)\r\n\r\n\r\n#saving the dataframe obtained as a csv file in the specified path\r\ndef save_analysis():\r\n \r\n df.to_csv(path_or_buf=r\"D:\\News Sentiment\\UserInterface\\static\\results\\save.csv\")\r\n #print(\"Analysis report saved as save.csv\")\r\n\r\n\r\ndef preProcessing(s, listOfStopWords):\r\n # Removing hyperlinks\r\n flag = 0\r\n for i in range(0, len(s)):\r\n if s[i] == 'h':\r\n s1 = s[i:i + 4]\r\n if s1 == \"http\":\r\n for j in range(i + 4, len(s)):\r\n if s[j] == ' ':\r\n flag = 1\r\n break\r\n if flag == 1:\r\n break\r\n if flag == 1:\r\n s = s[0:i] + s[j + 1:len(s)]\r\n \r\n \r\n # Removing Punctuation marks\r\n s1 = \"\"\r\n for i in range(0, len(s)):\r\n if s[i].isalpha() or s[i].isnumeric() or s[i] == ' ':\r\n s1 = s1 + s[i]\r\n s = s1\r\n # Removing the Stop words\r\n s1 = s.split(\" \")\r\n s2 = \"\"\r\n for i in s1:\r\n i = i.lower()\r\n if i not in listOfStopWords:\r\n s2 = s2 + i + \" \"\r\n # removing emojis if any\r\n s2 = emoji.demojize(s2)\r\n s2 = s2.replace(\":\", \"\").replace(\"_\", \"\")\r\n return s2\r\n\r\n\r\ndef get_sentiment(post_len):\r\n #creating an empty Pandas Series to store sentiment_val\r\n sentiment_val = pd.Series([])\r\n #the dumped vocubulary object which includes the vocabulary dictionary is loaded\r\n vocabulary = joblib.load(r\"D:\\News Sentiment\\UserInterface\\trained_dataset\\vocabulary.pkl\")\r\n #the dumped trained logistic regression model is loaded\r\n analysingObject = joblib.load(r\"D:\\News Sentiment\\UserInterface\\trained_dataset\\sentiment.pkl\")\r\n #imports the stopwords from the Natural Language Toolkit (nltk) library for English language\r\n listOfStopWords = stopwords.words(\"english\")\r\n for i in range(post_len):\r\n #getting each_post in ith row of Posts column\r\n each_post = df._get_value(i, \"Posts\")\r\n #the post is then preprocessed by calling preprocessing function befor analysing each post\r\n each_post = preProcessing(each_post, listOfStopWords)\r\n #the preprocessed post is stored in datafrane\r\n df.at[i, \"PreprocessedPosts\"] = each_post\r\n #calling is_neutral function to check for amount of neutrality in post\r\n if is_neutral(each_post) == True:\r\n sentiment_val[i] = \"Neutral\"\r\n else:\r\n #using the trained logistic regression model to predict the sentiment of a given post\r\n #represented as a list of single element\r\n answer = analysingObject.predict(vocabulary.transform([each_post, ]))\r\n #[4] indicates positive\r\n #[0] indicates negative\r\n if answer == 4:\r\n sentiment_val[i] = \"Positive\"\r\n else:\r\n sentiment_val[i] = \"Negative\"\r\n # print((i+1),\". 
\",each_post,\"->\",sentiment_val [i] ,\"\\n\\n\")\r\n return sentiment_val\r\n\r\n\r\n#function to scrape tweets from Twitter\r\ndef twitter_config():\r\n #search query for twitter \r\n query = s1 \r\n i = df.shape[0] - 1\r\n #number of posts to fetch from twitter\r\n limit = 2500\r\n c = 0\r\n #using the TwitterSearchScraper class of sntwitter library to scrape tweets\r\n #based on the specified query\r\n for tweet in sntwitter.TwitterSearchScraper(query).get_items():\r\n i += 1\r\n c += 1\r\n if c < limit:\r\n #Posts column of ith row is updated with\r\n #the raw content of a tweet extracted from TwitterSearchScraper\r\n #print(tweet.rawContent)\r\n df.at[i, \"Posts\"] = tweet.rawContent \r\n #the corresponding source in the dataframe updated to Twitter\r\n df.at[i, \"Source\"] = \"Twitter\"\r\n else:\r\n break\r\n \r\n \r\n \r\n#function to store posts and apply analysis on the stored result in dataframe\r\ndef store_posts(posts):\r\n global df\r\n #crreating an empty dataframe\r\n df = pd.DataFrame()\r\n i = 0\r\n #creating an empty Pandas Series to store post_content\r\n post_content = pd.Series([]) \r\n #creating an empty Pandas Series to store post_source\r\n post_source = pd.Series([])\r\n for post in posts:\r\n #getting post_content \r\n post_content[i] = post.get_text()\r\n #storing post_source as reddit\r\n post_source[i] = \"Reddit\"\r\n i += 1\r\n \r\n \r\n #appending post_content series to dataframe\r\n df.insert(0, \"Posts\", post_content) \r\n df.insert(1, \"PreprocessedPosts\", [None] * df.shape[0])\r\n #appending post_csource series to dataframe\r\n df.insert(2, \"Source\", post_source)\r\n #calling twitter_config to scrape twitter\r\n twitter_config()\r\n post_len = df.shape[0]\r\n #calling analysis function\r\n sentiment_val = get_sentiment(post_len)\r\n #appending returned sentiment_val series to the dataframe\r\n df.insert(3, \"Sentiment\", sentiment_val)\r\n #calling save_analysis\r\n save_analysis()\r\n \r\n\r\n #calling create_chart for visual representation\r\n create_chart()\r\n\r\n\r\ndef parse_html(soup, driver):\r\n #finding all the HTML tags that are \"h3\" and have the given class attribute \r\n posts = soup.find_all(\"h3\", class_=\"_eYtD2XCVieq6emjKBH3m\") \r\n driver.close() # closing the automated Chrome Web driver\r\n store_posts(posts) \r\n\r\n\r\n\r\n\r\n\r\n #search parameter passed to this function from main.py\r\ndef connect_service(search):\r\n global s1\r\n # removing whitespaces if any invloved\r\n search = search.replace(\" \", \"\") \r\n s1 = search\r\n #Using the Service class of selenium library to automate Chrome Web browser using chromedriver.exe\r\n s = Service(r\"C:\\Users\\RAJDEEP\\Desktop\\RAJ\\News Sentiment\\UserInterface\\chromedriver.exe\")\r\n #the following method initializes a new Chrome browser window\r\n driver = webdriver.Chrome(service=s)\r\n #url to the required reddit search\r\n url = \"https://www.reddit.com/search/?q=\" + s1 + \"&t=all\"\r\n driver.get(url)#navigating to specified url\r\n delay = 0\r\n #providing auto-scroll for reddit page\r\n while delay < 250:\r\n time.sleep(0.5)\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n delay += 3\r\n #creating a soup object to extract specific elements and data from the parsed HTML code\r\n soup = BeautifulSoup(driver.page_source, \"html.parser\")\r\n parse_html(soup, 
driver)\r\n","repo_name":"Rajtheboss23/news-sentiment","sub_path":"scrape_analysis.py","file_name":"scrape_analysis.py","file_ext":"py","file_size_in_byte":9545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"15140042247","text":"from django.http import Http404\n# from django.http import HttpResponse\n# from django.template import loader\nfrom django.shortcuts import render, get_object_or_404\n# from django.views.generic import TemplateView\n# from .models import Album\n\nfrom datetime import datetime, date, timezone, timedelta\n# import calendar\n\n# Create your views here.\ndef index(request):\n time_UTC = datetime.now(timezone.utc)\n time_AMSC = datetime.now(timezone(timedelta(hours=-5)))\n # all_albums = Album.objects.all()\n context = {'all_albums': "Test",\n 'time_utc': time_UTC.strftime('%A %b (%m) %d, %Y Time: %H:%M:%S'),\n 'time_amsc': time_AMSC.strftime('%A %b (%m) %d, %Y Time: %H:%M:%S'),}\n return render(request, 'radio/index.html', context)\n","repo_name":"semaciel/django-ulti","sub_path":"radio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"24122449968","text":"start = """\\\n..###\n##...\n#...#\n#.#.#\n.#.#."""\n\ntest = """\\\n....#\n#..#.\n#..##\n..#..\n#...."""\n\ncurrent = tuple([tuple([c for c in line]) for line in start.split("\\n")])\nstates = {current}\n\nwhile True:\n new_state = []\n for i, row in enumerate(current):\n new_row = []\n for j, val in enumerate(row):\n bug_count = 0\n for a, b in ((i+1, j), (i-1, j), (i, j+1), (i, j-1)):\n try:\n if a >= 0 and b >= 0 and current[a][b] == "#": bug_count += 1\n except IndexError: pass\n if val == "#" and bug_count != 1:\n new_row.append(".")\n elif val == "." and (bug_count == 1 or bug_count == 2):\n new_row.append("#")\n else:\n new_row.append(val)\n new_state.append(tuple(new_row))\n current = tuple(new_state)\n if current in states: break\n else: states.add(current)\n\nbiodiv = 0\nfor i, val in enumerate([v for l in current for v in l]):\n if val == "#":\n biodiv += 2**i\nprint("Biodiversity:", biodiv)","repo_name":"dejohansson/advent-of-code-2019","sub_path":"day24/day24.py","file_name":"day24.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}{"seq_id":"73343127843","text":"import board\nimport neopixel\n\nclass SuperNeoPixel(neopixel.NeoPixel):\n def __init__(self, pin, n, *, bpp=3, brightness=1.0, auto_write=True, pixel_order=None):\n super().__init__(pin, n, brightness=brightness, pixel_order=pixel_order, auto_write=auto_write)\n\n def set_array(self, colors, brightness=100):\n ofs = 0\n for color in colors:\n if ofs < self.n:\n self[ofs] = color.as_list(brightness)\n ofs += 1\n","repo_name":"croftj/christmas_pi","sub_path":"SuperNeoPixel.py","file_name":"SuperNeoPixel.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"5013792801","text":""""\r\nScrape Douban movie data.\r\nLearn the basic approach for scraping AJAX content.\r\nNew entries keep appearing as you scroll down the Douban movie chart, so the page is judged to be loaded via AJAX.\r\n\r\n"""\r\nfrom urllib import request\r\nimport json\r\n#url is the address of one selected segment of the chart data\r\nurl = 'https://movie.douban.com/j/chart/top_list?type=11&interval_id=100%3A90&action=&start=40&limit=20'\r\n\r\nrsp = request.urlopen(url)\r\ndata = rsp.read().decode()\r\n\r\n#pages fetched via AJAX are usually in JSON format, so decoding is needed\r\n#json.loads is used to decode JSON data\r\ndata = json.loads(data)\r\n\r\nprint(data)\r\n
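#--- annotation (not part of the original file): the same endpoint can be paged by\r\n#varying the start query parameter, the usual pattern for AJAX list endpoints:\r\n#base = 'https://movie.douban.com/j/chart/top_list?type=11&interval_id=100%3A90&action=&limit=20'\r\n#for start in range(0, 100, 20):\r\n#    page = json.loads(request.urlopen(base + '&start=' + str(start)).read().decode())\r\n#    print(len(page))\r\n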
","repo_name":"GoYMS/localhost_python","sub_path":"爬虫/案例/v16.py","file_name":"v16.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}{"seq_id":"23028679761","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom typing import Callable\nfrom torch.utils.data import DataLoader\nfrom torch.optim.optimizer import Optimizer\n\n\n# ====== YOUR CODE: ======\n# TODO: Play with params\ndef create_layer(m, conv, in_c, out_c, relu=True, batchnorm=True,\n not_leaky=False, relu_param=0.3, kernel_size=4,\n padding=1, stride=2, bias=False):\n m.append(conv(\n in_c,\n out_c,\n kernel_size=kernel_size,\n padding=padding,\n stride=stride,\n bias=bias\n ))\n\n if batchnorm:\n m.append(nn.BatchNorm2d(out_c))\n\n if relu:\n if not_leaky:\n m.append(nn.ReLU())\n else:\n m.append(nn.LeakyReLU(relu_param))\n\n# ========================\n\n\nclass Discriminator(nn.Module):\n def __init__(self, in_size):\n """\n :param in_size: The size of an input image (without batch dimension).\n """\n super().__init__()\n self.in_size = in_size\n # To extract image features you can use the EncoderCNN from the VAE\n # section or implement something new.\n # You can then use either an affine layer or another conv layer to\n # flatten the features.\n # ====== YOUR CODE: ======\n modules = []\n create_layer(modules, nn.Conv2d, in_size[0], 128)\n create_layer(modules, nn.Conv2d, 128, 256)\n create_layer(modules, nn.Conv2d, 256, 512)\n create_layer(modules, nn.Conv2d, 512, 1024)\n self.lin = nn.Linear(in_size[1] * in_size[2] * 4, 1)\n self.seq = nn.Sequential(*modules)\n\n # ========================\n\n def forward(self, x):\n """\n :param x: Input of shape (N,C,H,W) matching the given in_size.\n :return: Discriminator class score (not probability) of\n shape (N,).\n """\n # No need to apply sigmoid to obtain probability - we'll combine it\n # with the loss due to improved numerical stability.\n # ====== YOUR CODE: ======\n\n t = self.seq(x)\n y = self.lin(t.view(x.shape[0], -1))\n # ========================\n return y\n\n\n
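# --- annotation (not part of the original file): why in_size[1] * in_size[2] * 4 above -\n# the four stride-2 convolutions shrink each spatial dim by 2**4 = 16 while the last one\n# emits 1024 channels, so the flattened size is 1024 * (H/16) * (W/16) = H * W * 4, e.g.:\n# H, W = 64, 64\n# assert 1024 * (H // 16) * (W // 16) == H * W * 4\n\n\n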
class Generator(nn.Module):\n def __init__(self, z_dim, featuremap_size=4, out_channels=3):\n """\n :param z_dim: Dimension of latent space.\n :param featuremap_size: Spatial size of first feature map to create\n (determines output size). For example set to 4 for a 4x4 feature map.\n :param out_channels: Number of channels in the generated image.\n """\n super().__init__()\n self.z_dim = z_dim\n\n # To combine image features you can use the DecoderCNN from the VAE\n # section or implement something new.\n # You can assume a fixed image size.\n # ====== YOUR CODE: ======\n modules = []\n create_layer(modules, nn.ConvTranspose2d, z_dim, 1024, not_leaky=True, kernel_size=featuremap_size, padding=0)\n create_layer(modules, nn.ConvTranspose2d, 1024, 512, not_leaky=True)\n create_layer(modules, nn.ConvTranspose2d, 512, 256, not_leaky=True)\n create_layer(modules, nn.ConvTranspose2d, 256, 128, not_leaky=True)\n create_layer(modules, nn.ConvTranspose2d, 128, out_channels, relu=False, batchnorm=False)\n self.generated_images = nn.Sequential(*modules)\n # ========================\n\n def sample(self, n, with_grad=False):\n """\n Samples from the Generator.\n :param n: Number of instance-space samples to generate.\n :param with_grad: Whether the returned samples should be part of the\n generator's computation graph or standalone tensors (i.e. should it\n be possible to backprop into them and compute their gradients).\n :return: A batch of samples, shape (N,C,H,W).\n """\n device = next(self.parameters()).device\n # Generate n latent space samples and return their reconstructions.\n # Don't use a loop.\n # ====== YOUR CODE: ======\n with torch.set_grad_enabled(with_grad):\n samples = self.forward(torch.randn((n, self.z_dim), device=device))\n # ========================\n return samples\n\n def forward(self, z):\n """\n :param z: A batch of latent space samples of shape (N, latent_dim).\n :return: A batch of generated images of shape (N,C,H,W) which should be\n the shape which the Discriminator accepts.\n """\n # Don't forget to make sure the output instances have the same\n # dynamic range as the original (real) images.\n # ====== YOUR CODE: ======\n x = self.generated_images(z.view(z.shape[0], -1, 1, 1))\n # ========================\n return x\n\n\n
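# --- annotation (not part of the original file): a quick shape check of the sampler,\n# assuming the default featuremap_size=4 stack above (1x1 -> 4 -> 8 -> 16 -> 32 -> 64):\n# gen = Generator(z_dim=128)\n# assert gen.sample(8).shape == (8, 3, 64, 64)\n\n\n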
def discriminator_loss_fn(y_data, y_generated, data_label=0, label_noise=0.0):\n """\n Computes the combined loss of the discriminator given real and generated\n data using a binary cross-entropy metric.\n This is the loss used to update the Discriminator parameters.\n :param y_data: Discriminator class-scores of instances of data sampled\n from the dataset, shape (N,).\n :param y_generated: Discriminator class-scores of instances of data\n generated by the generator, shape (N,).\n :param data_label: 0 or 1, label of instances coming from the real dataset.\n :param label_noise: The range of the noise to add. For example, if\n data_label=0 and label_noise=0.2 then the labels of the real data will be\n uniformly sampled from the range [-0.1,+0.1].\n :return: The combined loss of both.\n """\n assert data_label == 1 or data_label == 0\n # Apply noise to both the real data and the\n # generated labels.\n # See pytorch's BCEWithLogitsLoss for a numerically stable implementation.\n # ====== YOUR CODE: ======\n low = data_label - label_noise / 2\n high = data_label + label_noise / 2\n data_targets = torch.ones(y_data.shape).to(y_data.device).uniform_(low, high)\n gen_targets = torch.ones(y_generated.shape).to(y_generated.device).uniform_(1 - high, 1 - low)\n loss_data = nn.BCEWithLogitsLoss()(y_data, data_targets)\n loss_generated = nn.BCEWithLogitsLoss()(y_generated, gen_targets)\n # ========================\n return loss_data + loss_generated\n\n\ndef generator_loss_fn(y_generated, data_label=0):\n """\n Computes the loss of the generator given generated data using a\n binary cross-entropy metric.\n This is the loss used to update the Generator parameters.\n :param y_generated: Discriminator class-scores of instances of data\n generated by the generator, shape (N,).\n :param data_label: 0 or 1, label of instances coming from the real dataset.\n :return: The generator loss.\n """\n assert data_label == 1 or data_label == 0\n # Implement the Generator loss.\n # Think about what you need to compare the input to, in order to\n # formulate the loss in terms of Binary Cross Entropy.\n # ====== YOUR CODE: ======\n loss = nn.BCEWithLogitsLoss()(y_generated, torch.ones_like(y_generated) * data_label)\n # ========================\n return loss\n\n\ndef train_batch(\n dsc_model: Discriminator,\n gen_model: Generator,\n dsc_loss_fn: Callable,\n gen_loss_fn: Callable,\n dsc_optimizer: Optimizer,\n gen_optimizer: Optimizer,\n x_data: Tensor,\n):\n """\n Trains a GAN over one batch, updating both the discriminator and\n generator.\n :return: The discriminator and generator losses.\n """\n\n # 1. Show the discriminator real and generated data\n # 2. Calculate discriminator loss\n # 3. Update discriminator parameters\n # ====== YOUR CODE: ======\n # Init\n gen_optimizer.zero_grad()\n dsc_optimizer.zero_grad()\n\n gen = gen_model.sample(x_data.shape[0], with_grad=True)\n dsc_loss = dsc_loss_fn(dsc_model(x_data), dsc_model(gen.detach()))\n dsc_loss.backward()\n dsc_optimizer.step()\n # ========================\n\n # 1. Show the discriminator generated data\n # 2. Calculate generator loss\n # 3. Update generator parameters\n # ====== YOUR CODE: ======\n gen_loss = gen_loss_fn(dsc_model(gen_model.sample(x_data.shape[0], with_grad=True)))\n gen_loss.backward()\n gen_optimizer.step()\n # ========================\n\n return dsc_loss.item(), gen_loss.item()\n\n\n
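# --- annotation (not part of the original file): a minimal sketch of how these pieces\n# compose over a DataLoader, with hypothetical dl / dsc / gen / optimizer objects:\n# for x, _ in dl:\n#     dsc_loss, gen_loss = train_batch(dsc, gen, discriminator_loss_fn,\n#                                      generator_loss_fn, dsc_opt, gen_opt, x)\n\n\n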
def save_checkpoint(gen_model, dsc_losses, gen_losses, checkpoint_file):\n """\n Saves a checkpoint of the generator, if necessary.\n :param gen_model: The Generator model to save.\n :param dsc_losses: Avg. discriminator loss per epoch.\n :param gen_losses: Avg. generator loss per epoch.\n :param checkpoint_file: Path without extension to save generator to.\n """\n\n saved = False\n checkpoint_file = f"{checkpoint_file}.pt"\n\n # Save a checkpoint of the generator model. You can use torch.save().\n # You should decide what logic to use for deciding when to save.\n # If you save, set saved to True.\n # ====== YOUR CODE: ======\n improved = len(gen_losses) < 2 or gen_losses[-1] <= gen_losses[-2]\n\n if improved:\n torch.save(gen_model, checkpoint_file)\n saved = True\n # ========================\n\n return saved\n","repo_name":"TrellixVulnTeam/DL_HW4_D0EN","sub_path":"hw4/gan.py","file_name":"gan.py","file_ext":"py","file_size_in_byte":9178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"29613127328","text":"# Date: August 2017\n# Author: Kutay B. Sezginel\n"""\nRead, write Lammps trajectory in xyz format.\n"""\nimport os\n\n\ndef read_trajectory(traj_path):\n """ Read xyz trajectory and return coordinates as a list\n\n Args:\n - traj_path (str): xyz trajectory path to read\n\n Returns:\n - dict: Trajectory dictionary with atoms, coordinates, timestep and xyz keys\n """\n with open(traj_path, 'r') as t:\n traj = t.readlines()\n\n n_atoms = int(traj[0].strip()) # Get number of atoms from first line\n n_frames = int(len(traj) / (n_atoms + 2)) # Calculate number of frames (assuming n_atoms is constant)\n\n trajectory = {'atoms': [], 'coordinates': [], 'xyz': [], 'timestep': []}\n for frame in range(n_frames):\n start = frame * (n_atoms + 2) # Frame start\n end = (frame + 1) * (n_atoms + 2) # Frame end\n trajectory['xyz'].append(traj[start:end])\n trajectory['timestep'].append(traj[start + 1].strip().split()[2])\n trajectory['atoms'].append([line.split()[0] for line in traj[start + 2:end]])\n trajectory['coordinates'].append([[float(i) for i in line.split()[1:4]] for line in traj[start + 2:end]])\n\n return trajectory\n\n\ndef write_trajectory(trajectory_xyz, traj_path, frames=None):\n """ Write xyz trajectory to a file\n\n Args:\n - trajectory_xyz (list): List of lines for each frame of the xyz trajectory\n - traj_path (str): xyz trajectory path to write\n\n Returns:\n - None: Write xyz trajectory file\n """\n if frames is None:\n frames = list(range(len(trajectory_xyz)))\n with open(traj_path, 'w') as traj:\n for frame in frames:\n xyz = trajectory_xyz[frame]\n for line in xyz:\n traj.write(line)\n\n\ndef generate_xyz(coordinates, atoms, header='therMOF'):\n """\n Generate xyz lines from given coordinates and atom names.\n\n Args:\n - coordinates (list): List of atomic coordinates for each frame\n - atoms (list): List of atom names for each frame\n - header (str): Header line written into each frame\n\n Returns:\n - list: List of xyz lines for each frame\n """\n if len(coordinates) != len(atoms):\n raise FramesMismatchError('Number of frames do not match for coordinates (%i) and atoms (%i)'\n % (len(coordinates), len(atoms)))\n xyz_lines = []\n for frame in range(len(coordinates)):\n xyz_frame = ["%i\\n" % len(atoms[frame]), '%s - %i\\n' % (header, frame)]\n for atom, coor in zip(atoms[frame], coordinates[frame]):\n xyz_frame.append('%-2s %-6.4f %-6.4f %-6.4f\\n' % (atom, coor[0], coor[1], coor[2]))\n xyz_lines.append(xyz_frame)\n return xyz_lines\n\n\nclass FramesMismatchError(Exception):\n pass\n
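\n\n# --- annotation (not part of the original module): a round-trip sketch with\n# hypothetical file names, tying the three helpers together:\n# traj = read_trajectory('md.xyz')\n# xyz = generate_xyz(traj['coordinates'], traj['atoms'])\n# write_trajectory(xyz, 'md_copy.xyz')\n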
","repo_name":"kbsezginel/thermof","sub_path":"thermof/trajectory/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"}{"seq_id":"29523114482","text":"# TO FIND CRITICAL POINTS OF A GIVEN FUNCTION - SYMBOLIC METHOD\n\n# LIBRARIES\nimport numpy as np\nimport sympy as sym\nimport matplotlib.pyplot as plt\nfrom scipy.signal import find_peaks\n\nx = sym.symbols('x')\nexpr_str = input("f(x) = ")\nfx = sym.sympify(expr_str)\ndfx = sym.diff(fx)\n\ncrit_pnts = sym.solve(dfx)\n\nxx = np.linspace(-4,4,1001)\nfxx = sym.lambdify(x,fx)(xx)\ndfxx = sym.lambdify(x,dfx)(xx)\n\n\nprint('critical points are ' + str(crit_pnts))\n\n# EXERCISE : PLOT THE CRITICAL POINTS ON THE GIVEN FUNCTION CURVE. \n\n\nplt.plot(xx,fxx,label='f(x)')\nplt.plot(xx,dfxx,label='$f\\'(x)$')\n\n\nplt.legend()\nplt.show()\n\n\n","repo_name":"SuhasPK/calculus-using-python","sub_path":"calculus_Py/critical2.py","file_name":"critical2.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}{"seq_id":"73815923682","text":"from django.shortcuts import render, redirect\nfrom .models import Quiz\nfrom .forms import QuizForm\n\n\ndef show_main(request):\n quiz = Quiz.objects.order_by('id')\n return render(request, "main/index.html", {'quiz': quiz})\n\ndef show_create(request):\n if request.method == 'POST':\n form = QuizForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect("/")\n\n form = QuizForm()\n return render(request, "main/CreateFB.html", {\n "form": form,\n })","repo_name":"zut3/MySiteInDjango","sub_path":"taskmanager/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"74229404962","text":"import cv2\nimport mediapipe as mp\nimport numpy as np\nmp_drawing = mp.solutions.drawing_utils\nmp_selfie_segmentation = mp.solutions.selfie_segmentation\n# You can process multiple images in one go by just listing their names here.\nIMAGE_FILES = ['location_to_image/image.jpg']\nBG_COLOR = (192, 192, 192) # gray, you can set it to any color\nwith mp_selfie_segmentation.SelfieSegmentation(model_selection=0) as selfie_segmentation:\n for idx, file in enumerate(IMAGE_FILES):\n image = cv2.imread(file)\n image_height, image_width, _ = image.shape\n results = selfie_segmentation.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n condition = np.stack((results.segmentation_mask,) * 3, axis=-1) > 0.1\n bg_image = np.zeros(image.shape, dtype=np.uint8)\n bg_image[:] = BG_COLOR\n output_image = np.where(condition, image, bg_image)\n cv2.imwrite('location_to_store_result/result' + str(idx) + '.png', output_image)\n","repo_name":"python-geeks/Automation-scripts","sub_path":"image_background_subtractor/image_background_subtractor.py","file_name":"image_background_subtractor.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":719,"dataset":"github-code","pt":"54"}{"seq_id":"41856697356","text":"import streamlit as st\r\nimport pandas as pd\r\nimport base64\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport numpy as np\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nst.title('World Athletics Records Explorer')\r\n\r\nst.markdown("""\r\nThis app performs simple webscraping of World Athletics records data!\r\n* **Python libraries:** base64, pandas, streamlit, requests, beautifulsoup4\r\n* **Data source:** [Worldathletics.org](https://worldathletics.org/records/all-time-toplists/sprints/100-metres/outdoor/men/senior).\r\n""")\r\n\r\nst.sidebar.header('User Input Features')\r\nselected_event = st.sidebar.selectbox('Event', ['100 metres', '200 metres', '400 metres'])\r\nselected_gender = 
st.sidebar.selectbox('Gender', ['Men', 'Women'])\r\n\r\n# Web scraping of World Athletics records\r\n@st.cache\r\ndef load_data(event, gender):\r\n url = f'https://worldathletics.org/records/all-time-toplists/sprints/{event.lower().replace(" ","-")}/outdoor/{gender.lower()}/senior'\r\n html = requests.get(url).content\r\n soup = BeautifulSoup(html, 'html.parser')\r\n table = soup.find('table')\r\n headers = [header.text.strip() for header in table.find_all('th')]\r\n data = []\r\n for row in table.find_all('tr')[1:]:\r\n data.append([cell.text.strip() for cell in row.find_all('td')])\r\n df = pd.DataFrame(data, columns=headers)\r\n return df\r\nrecord_stats = load_data(selected_event, selected_gender)\r\n\r\n# Sidebar - Country selection\r\nsorted_unique_country = sorted(record_stats.Country.unique())\r\nselected_country = st.sidebar.multiselect('Country', sorted_unique_country, sorted_unique_country)\r\n\r\n# Sidebar - Discipline selection\r\nunique_discipline = ['100m', '200m', '400m']\r\nselected_discipline = st.sidebar.multiselect('Discipline', unique_discipline, unique_discipline)\r\n\r\n# Filtering data\r\ndf_selected_country = record_stats[(record_stats.Country.isin(selected_country)) & (record_stats.Discipline.isin(selected_discipline))]\r\n\r\nst.header('Display Record Stats of Selected Country(s) and Discipline(s)')\r\nst.write('Data Dimension: ' + str(df_selected_country.shape[0]) + ' rows and ' + str(df_selected_country.shape[1]) + ' columns.')\r\nst.dataframe(df_selected_country)\r\n\r\n# Download World Athletics records data\r\ndef filedownload(df):\r\n csv = df.to_csv(index=False)\r\n b64 = base64.b64encode(csv.encode()).decode() # strings <-> bytes conversions\r\n href = f'<a href="data:file/csv;base64,{b64}" download="records.csv">Download CSV File</a>'\r\n return href\r\n\r\nst.markdown(filedownload(df_selected_country), unsafe_allow_html=True)","repo_name":"kevinleger579/Javaportfoliocode","sub_path":"Track and Field Python Project.py","file_name":"Track and Field Python Project.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"34536559066","text":"# Part 1: 32\n# Part 2: 116\n\ndef find_sum(l, n):\n\tfor i in range(len(l)):\n\t\tfor j in range(i + 1, len(l)):\n\t\t\tif l[i] + l[j] == n:\n\t\t\t\treturn True\n\treturn False\n\ndata = open("Day9.txt", "r").read()\ndata = data.split("\\n")\ndata = list(map(int, data))\n\ntotal = 0\n\ni = 25\nwhile i < len(data):\n\tprev = data[i - 25:i]\n\tif not find_sum(prev, data[i]):\n\t\tprint(data[i])\n\ti += 1\n\nn = 20874512\ni = 0\nwhile i < len(data):\n\tj = i\n\tcur_total = 0\n\twhile cur_total < n:\n\t\tcur_total += data[j]\n\t\tj += 1\n\tprint(j)\n\tif cur_total == n:\n\t\tprint(min(data[i:j + 1]) + max(data[i:j + 1]))\n\t\tbreak\n\ti += 1\nprint(i)","repo_name":"kevinmchung/AdventOfCode","sub_path":"2020/Day9/Day9.py","file_name":"Day9.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}{"seq_id":"73155756321","text":"# dataloader.py\n# prepare the data for the network\n\nfrom torch.utils.data import Dataset, DataLoader\nimport pandas as pd\nimport numpy as np\n\nclass ABCNN_Data(Dataset):\n # dataset for AB CNN\n \n def __init__(self, datapath, stage, block_num=None, dim=1):\n self.stage = stage\n df = pd.read_csv(datapath)\n df = df.fillna(value=0)\n df2 = pd.DataFrame()\n #2401*168*112 => 2401*168*256 => 2401*16*16*168\n #fill the non-listed regions with 0s\n
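 # --- annotation (not part of the original file): the two fill loops below are\n # equivalent to a single reindex, which also avoids the hard-coded row count:\n # df2 = df.reindex(columns=[str(i) for i in range(256)], fill_value=0)\n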
 for i in range(256):\n df2[str(i)] = np.zeros(4416,dtype=int)\n for i in range(256):\n if str(i) in df:\n df2[str(i)] = df[str(i)]\n df = df2\n if block_num:\n # make block_num the target value\n label = df[str(block_num)]\n else:\n label = df.values\n signals = df.values\n self.max = np.max(signals)\n if dim == 1:\n signals = signals\n elif dim == 2:\n temp = np.zeros((len(signals), 16, 16))\n for i in range(len(signals)):\n temp[i] = signals[i].reshape(16, 16)\n #signals = signals.transpose()\n signals = temp\n \n #print(signals.shape)\n label = np.asarray(label)\n\n # generate data and label for training and testing\n # total = 4416 cases\n train_start = 0\n train_end = 3850\n input_len = 7*24\n predict_start = train_end + input_len\n predict_end = predict_start + 14*24\n predict_hours = 1*24\n\n train_x = []\n train_y = []\n pointer = train_start\n label_pointer = pointer + input_len - 1\n while label_pointer <= train_end:\n train_x_piece = signals[pointer:pointer+input_len]\n train_x.append(train_x_piece)\n\n train_y_piece = label[label_pointer+predict_hours] #here delayed 24 hours\n train_y.append(train_y_piece)\n\n pointer += 1\n label_pointer += 1\n\n self.train_x = np.array(train_x)\n self.train_y = np.array(train_y)\n\n test_x = []\n test_y = []\n # pointer\n label_pointer = predict_start\n pointer = label_pointer - input_len + 1\n while label_pointer <= predict_end:\n test_x_piece = signals[pointer:pointer + input_len]\n test_x.append(test_x_piece)\n\n test_y_piece = label[label_pointer+predict_hours]\n test_y.append(test_y_piece)\n\n label_pointer += 1\n pointer += 1\n self.test_x = np.array(test_x)\n self.test_y = np.array(test_y)\n # print(self.test_x.shape, self.train_x.shape)\n # print(self.test_y.shape, self.train_y.shape)\n self.average = np.mean(train_y)\n \n def __len__(self):\n if self.stage == 'train':\n return len(self.train_x)\n elif self.stage == 'test':\n return len(self.test_x)\n return 0\n \n def __getitem__(self, idx):\n if self.stage == 'train':\n return self.train_x[idx], self.train_y[idx]\n elif self.stage == 'test':\n return self.test_x[idx], self.test_y[idx]\n return 0\n \n\nif __name__ == '__main__':\n print('dataloader')\n csvfilename = '/scratch/xzhou/ep/LA_crime/data/Block_Columns.csv'\n data = ABCNN_Data(datapath=csvfilename, stage='train', block_num=196, dim=1)\n dataloader = DataLoader(data, batch_size=1, shuffle=True)\n for x, y in dataloader:\n print(x.shape, y)\n break","repo_name":"IceFireCloud/Event-Prediction","sub_path":"models/abcnn_torch/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}{"seq_id":"30980526953","text":"import requests\nfrom fake_useragent import UserAgent\nfrom bs4 import BeautifulSoup\nfrom langchain.chat_models import ChatAnthropic, ChatOpenAI\nfrom langchain import GoogleSearchAPIWrapper\n\n\ndef get_random_user_agent():\n return UserAgent().random\n\n\nclass WebPagePassageExtractor:\n def __init__(\n self,\n claims: list[str],\n max_input_chars: int = 50000,\n max_tokens_to_sample: int = 1000,\n ):\n self.claims = claims\n self.max_input_chars = max_input_chars\n self.max_tokens_to_sample = max_tokens_to_sample\n\n def run(self, url: str):\n if url.endswith(".pdf"):\n return "Sorry, I can't extract passages from PDFs yet."\n response = requests.get(url, headers={"User-Agent": get_random_user_agent()})\n response.raise_for_status()\n soup = BeautifulSoup(response.text, "html.parser")\n page = 
soup.get_text()[: self.max_input_chars]\n claims_section = \"\\n\".join(self.claims)\n prompt = f\"\"\"\nI am attempting to find sources that either support or contradict the following claims: {claims_section}\nHere is a web page that I found:\n\n{page}\n\n\nPlease extract up to 5 direct quotes from the page that either support or contradict the claim. If there are no such quotes, please respond \"None\".\n\"\"\"\n model = ChatAnthropic(max_tokens_to_sample=self.max_tokens_to_sample)\n return model.call_as_llm(prompt)\n\n\nclass GoogleSearchAPIWrapperWithLinks(GoogleSearchAPIWrapper):\n \"\"\"Wrapper for Google Search API that returns\n snippets and their links, nicely formatted.\"\"\"\n\n def run(self, query: str) -> str:\n \"\"\"Run query through GoogleSearch and\n parse result with snippets and their links.\"\"\"\n formatted_results = []\n results = self._google_search_results(query, num=self.k)\n if len(results) == 0:\n return \"No good Google Search Result was found\"\n\n for result in results:\n formatted_result = f\"{result.get('title', 'No title')}\\n\"\n if \"snippet\" in result:\n formatted_result += f\"{result['snippet']}\\n\"\n if \"link\" in result:\n formatted_result += f\"Link: {result['link']}\\n\"\n formatted_results.append(formatted_result)\n\n return \"\\n\".join(formatted_results)\n","repo_name":"chaosGuppy/bullshit-bot-bot-bot","sub_path":"bullshit_bot_bot_bot/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13459943632","text":"from collections import defaultdict\n\nt = int(input())\nfor case in range(1, t + 1):\n s = list(input())\n cnt = defaultdict(int)\n for x in s:\n cnt[x] += 1\n key = tuple(cnt.keys())\n if len(key) == 2 and cnt[key[0]] == 2 and cnt[key[1]] == 2:\n print(f'#{case} Yes')\n else:\n print(f'#{case} No')","repo_name":"yootal/CodingTest","sub_path":"SWEA/D3/11856. 반반/반반.py","file_name":"반반.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"45372051229","text":"from typing import Any\nfrom db.data.models.format_psycopg import FormatPsycopg\nfrom db.data.models.enums import StatusEnum\n\nmodel_data: dict[str, Any] = {\n \"name\": \"test_name\",\n \"value\": 123,\n \"status\": StatusEnum.bad,\n \"aggts\": None,\n}\n\n\ndef test_for_query() -> None:\n query = \"(%s,%s,%s,%s)\"\n assert FormatPsycopg(model_data).query == query\n\n\ndef test_for_params() -> None:\n params = [\n \"test_name\",\n 123,\n StatusEnum.bad,\n None,\n ]\n assert FormatPsycopg(model_data).params == params\n","repo_name":"Konstantin-Dudersky/smarthome","sub_path":"db/tests/data/models/format_parameters_test.py","file_name":"format_parameters_test.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37749532726","text":"\"\"\"\nNginx Support\n\n\"\"\"\nimport logging\nimport re\n\nfrom newrelic_plugin_agent.plugins import base\n\nLOGGER = logging.getLogger(__name__)\n\nPATTERN = re.compile(r'^Active connections: (?P\\d+)\\s+[\\w ]+\\n'\n r'\\s+(?P\\d+)'\n r'\\s+(?P\\d+)'\n r'\\s+(?P\\d+)'\n r'(\\s+(?P